
/* trsm_kernel_LN_power10.c */

/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include "common.h"
#include <altivec.h>

static FLOAT dm1 = -1.;

#ifdef CONJ
#define GEMM_KERNEL GEMM_KERNEL_L
#else
#define GEMM_KERNEL GEMM_KERNEL_N
#endif
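
/* Map the configured GEMM unroll factors to their shift amounts (log2) so
   panel counts can be formed with >> below; note that an unroll of 6 maps
   to a shift of 2. */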
#if GEMM_DEFAULT_UNROLL_M == 1
#define GEMM_UNROLL_M_SHIFT 0
#endif
#if GEMM_DEFAULT_UNROLL_M == 2
#define GEMM_UNROLL_M_SHIFT 1
#endif
#if GEMM_DEFAULT_UNROLL_M == 4
#define GEMM_UNROLL_M_SHIFT 2
#endif
#if GEMM_DEFAULT_UNROLL_M == 6
#define GEMM_UNROLL_M_SHIFT 2
#endif
#if GEMM_DEFAULT_UNROLL_M == 8
#define GEMM_UNROLL_M_SHIFT 3
#endif
#if GEMM_DEFAULT_UNROLL_M == 16
#define GEMM_UNROLL_M_SHIFT 4
#endif

#if GEMM_DEFAULT_UNROLL_N == 1
#define GEMM_UNROLL_N_SHIFT 0
#endif
#if GEMM_DEFAULT_UNROLL_N == 2
#define GEMM_UNROLL_N_SHIFT 1
#endif
#if GEMM_DEFAULT_UNROLL_N == 4
#define GEMM_UNROLL_N_SHIFT 2
#endif
#if GEMM_DEFAULT_UNROLL_N == 8
#define GEMM_UNROLL_N_SHIFT 3
#endif
#if GEMM_DEFAULT_UNROLL_N == 16
#define GEMM_UNROLL_N_SHIFT 4
#endif
#ifndef COMPLEX
#ifdef DOUBLE
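
/*
 * solve8x8: triangular solve of one 8x8 packed diagonal block of A against
 * eight columns of C (double precision), rows 7 down to 0.  The diagonal
 * entries of the packed block are used as multipliers, i.e. they are
 * assumed to be pre-inverted by the packing stage.  Results are written
 * back to C and into the packed b panel; vec_nmsub(x, y, z) computes
 * z - x * y on 2-wide vectors of doubles.
 */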
static inline __attribute__ ((always_inline)) void solve8x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
  FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7;
  c0 = &c[0*ldc];
  c1 = &c[1*ldc];
  c2 = &c[2*ldc];
  c3 = &c[3*ldc];
  c4 = &c[4*ldc];
  c5 = &c[5*ldc];
  c6 = &c[6*ldc];
  c7 = &c[7*ldc];
  vector FLOAT *Va = (vector FLOAT *) a;
  vector FLOAT *Vb = (vector FLOAT *) b;
  vector FLOAT *Vc0 = (vector FLOAT *) c0;
  vector FLOAT *Vc1 = (vector FLOAT *) c1;
  vector FLOAT *Vc2 = (vector FLOAT *) c2;
  vector FLOAT *Vc3 = (vector FLOAT *) c3;
  vector FLOAT *Vc4 = (vector FLOAT *) c4;
  vector FLOAT *Vc5 = (vector FLOAT *) c5;
  vector FLOAT *Vc6 = (vector FLOAT *) c6;
  vector FLOAT *Vc7 = (vector FLOAT *) c7;
  vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7;
  b[56] = (c0[7] *= a[63]);
  b[57] = (c1[7] *= a[63]);
  b[58] = (c2[7] *= a[63]);
  b[59] = (c3[7] *= a[63]);
  b[60] = (c4[7] *= a[63]);
  b[61] = (c5[7] *= a[63]);
  b[62] = (c6[7] *= a[63]);
  b[63] = (c7[7] *= a[63]);
  VbS0 = vec_splat(Vb[28], 0);
  VbS1 = vec_splat(Vb[28], 1);
  VbS2 = vec_splat(Vb[29], 0);
  VbS3 = vec_splat(Vb[29], 1);
  VbS4 = vec_splat(Vb[30], 0);
  VbS5 = vec_splat(Vb[30], 1);
  VbS6 = vec_splat(Vb[31], 0);
  VbS7 = vec_splat(Vb[31], 1);
  Vc0[0] = vec_nmsub(VbS0, Va[28], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[29], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[30], Vc0[2]);
  Vc1[0] = vec_nmsub(VbS1, Va[28], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[29], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[30], Vc1[2]);
  Vc2[0] = vec_nmsub(VbS2, Va[28], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[29], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[30], Vc2[2]);
  Vc3[0] = vec_nmsub(VbS3, Va[28], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[29], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[30], Vc3[2]);
  Vc4[0] = vec_nmsub(VbS4, Va[28], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[29], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[30], Vc4[2]);
  Vc5[0] = vec_nmsub(VbS5, Va[28], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[29], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[30], Vc5[2]);
  Vc6[0] = vec_nmsub(VbS6, Va[28], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[29], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[30], Vc6[2]);
  Vc7[0] = vec_nmsub(VbS7, Va[28], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[29], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[30], Vc7[2]);
  c0[6] -= c0[7] * a[62];
  c1[6] -= c1[7] * a[62];
  c2[6] -= c2[7] * a[62];
  c3[6] -= c3[7] * a[62];
  c4[6] -= c4[7] * a[62];
  c5[6] -= c5[7] * a[62];
  c6[6] -= c6[7] * a[62];
  c7[6] -= c7[7] * a[62];
  b[48] = (c0[6] *= a[54]);
  b[49] = (c1[6] *= a[54]);
  b[50] = (c2[6] *= a[54]);
  b[51] = (c3[6] *= a[54]);
  b[52] = (c4[6] *= a[54]);
  b[53] = (c5[6] *= a[54]);
  b[54] = (c6[6] *= a[54]);
  b[55] = (c7[6] *= a[54]);
  VbS0 = vec_splat(Vb[24], 0);
  VbS1 = vec_splat(Vb[24], 1);
  VbS2 = vec_splat(Vb[25], 0);
  VbS3 = vec_splat(Vb[25], 1);
  VbS4 = vec_splat(Vb[26], 0);
  VbS5 = vec_splat(Vb[26], 1);
  VbS6 = vec_splat(Vb[27], 0);
  VbS7 = vec_splat(Vb[27], 1);
  Vc0[0] = vec_nmsub(VbS0, Va[24], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[25], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[26], Vc0[2]);
  Vc1[0] = vec_nmsub(VbS1, Va[24], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[25], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[26], Vc1[2]);
  Vc2[0] = vec_nmsub(VbS2, Va[24], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[25], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[26], Vc2[2]);
  Vc3[0] = vec_nmsub(VbS3, Va[24], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[25], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[26], Vc3[2]);
  Vc4[0] = vec_nmsub(VbS4, Va[24], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[25], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[26], Vc4[2]);
  Vc5[0] = vec_nmsub(VbS5, Va[24], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[25], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[26], Vc5[2]);
  Vc6[0] = vec_nmsub(VbS6, Va[24], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[25], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[26], Vc6[2]);
  Vc7[0] = vec_nmsub(VbS7, Va[24], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[25], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[26], Vc7[2]);
  b[40] = (c0[5] *= a[45]);
  b[41] = (c1[5] *= a[45]);
  b[42] = (c2[5] *= a[45]);
  b[43] = (c3[5] *= a[45]);
  b[44] = (c4[5] *= a[45]);
  b[45] = (c5[5] *= a[45]);
  b[46] = (c6[5] *= a[45]);
  b[47] = (c7[5] *= a[45]);
  VbS0 = vec_splat(Vb[20], 0);
  VbS1 = vec_splat(Vb[20], 1);
  VbS2 = vec_splat(Vb[21], 0);
  VbS3 = vec_splat(Vb[21], 1);
  VbS4 = vec_splat(Vb[22], 0);
  VbS5 = vec_splat(Vb[22], 1);
  VbS6 = vec_splat(Vb[23], 0);
  VbS7 = vec_splat(Vb[23], 1);
  Vc0[0] = vec_nmsub(VbS0, Va[20], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[21], Vc0[1]);
  Vc1[0] = vec_nmsub(VbS1, Va[20], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[21], Vc1[1]);
  Vc2[0] = vec_nmsub(VbS2, Va[20], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[21], Vc2[1]);
  Vc3[0] = vec_nmsub(VbS3, Va[20], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[21], Vc3[1]);
  Vc4[0] = vec_nmsub(VbS4, Va[20], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[21], Vc4[1]);
  Vc5[0] = vec_nmsub(VbS5, Va[20], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[21], Vc5[1]);
  Vc6[0] = vec_nmsub(VbS6, Va[20], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[21], Vc6[1]);
  Vc7[0] = vec_nmsub(VbS7, Va[20], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[21], Vc7[1]);
  c0[4] -= c0[5] * a[44];
  c1[4] -= c1[5] * a[44];
  c2[4] -= c2[5] * a[44];
  c3[4] -= c3[5] * a[44];
  c4[4] -= c4[5] * a[44];
  c5[4] -= c5[5] * a[44];
  c6[4] -= c6[5] * a[44];
  c7[4] -= c7[5] * a[44];
  b[32] = (c0[4] *= a[36]);
  b[33] = (c1[4] *= a[36]);
  b[34] = (c2[4] *= a[36]);
  b[35] = (c3[4] *= a[36]);
  b[36] = (c4[4] *= a[36]);
  b[37] = (c5[4] *= a[36]);
  b[38] = (c6[4] *= a[36]);
  b[39] = (c7[4] *= a[36]);
  VbS0 = vec_splat(Vb[16], 0);
  VbS1 = vec_splat(Vb[16], 1);
  VbS2 = vec_splat(Vb[17], 0);
  VbS3 = vec_splat(Vb[17], 1);
  VbS4 = vec_splat(Vb[18], 0);
  VbS5 = vec_splat(Vb[18], 1);
  VbS6 = vec_splat(Vb[19], 0);
  VbS7 = vec_splat(Vb[19], 1);
  Vc0[0] = vec_nmsub(VbS0, Va[16], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[17], Vc0[1]);
  Vc1[0] = vec_nmsub(VbS1, Va[16], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[17], Vc1[1]);
  Vc2[0] = vec_nmsub(VbS2, Va[16], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[17], Vc2[1]);
  Vc3[0] = vec_nmsub(VbS3, Va[16], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[17], Vc3[1]);
  Vc4[0] = vec_nmsub(VbS4, Va[16], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[17], Vc4[1]);
  Vc5[0] = vec_nmsub(VbS5, Va[16], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[17], Vc5[1]);
  Vc6[0] = vec_nmsub(VbS6, Va[16], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[17], Vc6[1]);
  Vc7[0] = vec_nmsub(VbS7, Va[16], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[17], Vc7[1]);
  b[24] = (c0[3] *= a[27]);
  b[25] = (c1[3] *= a[27]);
  b[26] = (c2[3] *= a[27]);
  b[27] = (c3[3] *= a[27]);
  b[28] = (c4[3] *= a[27]);
  b[29] = (c5[3] *= a[27]);
  b[30] = (c6[3] *= a[27]);
  b[31] = (c7[3] *= a[27]);
  VbS0 = vec_splat(Vb[12], 0);
  VbS1 = vec_splat(Vb[12], 1);
  VbS2 = vec_splat(Vb[13], 0);
  VbS3 = vec_splat(Vb[13], 1);
  VbS4 = vec_splat(Vb[14], 0);
  VbS5 = vec_splat(Vb[14], 1);
  VbS6 = vec_splat(Vb[15], 0);
  VbS7 = vec_splat(Vb[15], 1);
  Vc0[0] = vec_nmsub(VbS0, Va[12], Vc0[0]);
  Vc1[0] = vec_nmsub(VbS1, Va[12], Vc1[0]);
  Vc2[0] = vec_nmsub(VbS2, Va[12], Vc2[0]);
  Vc3[0] = vec_nmsub(VbS3, Va[12], Vc3[0]);
  Vc4[0] = vec_nmsub(VbS4, Va[12], Vc4[0]);
  Vc5[0] = vec_nmsub(VbS5, Va[12], Vc5[0]);
  Vc6[0] = vec_nmsub(VbS6, Va[12], Vc6[0]);
  Vc7[0] = vec_nmsub(VbS7, Va[12], Vc7[0]);
  c0[2] -= c0[3] * a[26];
  c1[2] -= c1[3] * a[26];
  c2[2] -= c2[3] * a[26];
  c3[2] -= c3[3] * a[26];
  c4[2] -= c4[3] * a[26];
  c5[2] -= c5[3] * a[26];
  c6[2] -= c6[3] * a[26];
  c7[2] -= c7[3] * a[26];
  b[16] = (c0[2] *= a[18]);
  b[17] = (c1[2] *= a[18]);
  b[18] = (c2[2] *= a[18]);
  b[19] = (c3[2] *= a[18]);
  b[20] = (c4[2] *= a[18]);
  b[21] = (c5[2] *= a[18]);
  b[22] = (c6[2] *= a[18]);
  b[23] = (c7[2] *= a[18]);
  VbS0 = vec_splat(Vb[ 8], 0);
  VbS1 = vec_splat(Vb[ 8], 1);
  VbS2 = vec_splat(Vb[ 9], 0);
  VbS3 = vec_splat(Vb[ 9], 1);
  VbS4 = vec_splat(Vb[10], 0);
  VbS5 = vec_splat(Vb[10], 1);
  VbS6 = vec_splat(Vb[11], 0);
  VbS7 = vec_splat(Vb[11], 1);
  Vc0[0] = vec_nmsub(VbS0, Va[8], Vc0[0]);
  Vc1[0] = vec_nmsub(VbS1, Va[8], Vc1[0]);
  Vc2[0] = vec_nmsub(VbS2, Va[8], Vc2[0]);
  Vc3[0] = vec_nmsub(VbS3, Va[8], Vc3[0]);
  Vc4[0] = vec_nmsub(VbS4, Va[8], Vc4[0]);
  Vc5[0] = vec_nmsub(VbS5, Va[8], Vc5[0]);
  Vc6[0] = vec_nmsub(VbS6, Va[8], Vc6[0]);
  Vc7[0] = vec_nmsub(VbS7, Va[8], Vc7[0]);
  b[ 8] = (c0[1] *= a[9]);
  b[ 9] = (c1[1] *= a[9]);
  b[10] = (c2[1] *= a[9]);
  b[11] = (c3[1] *= a[9]);
  b[12] = (c4[1] *= a[9]);
  b[13] = (c5[1] *= a[9]);
  b[14] = (c6[1] *= a[9]);
  b[15] = (c7[1] *= a[9]);
  c0[0] -= c0[1] * a[8];
  c1[0] -= c1[1] * a[8];
  c2[0] -= c2[1] * a[8];
  c3[0] -= c3[1] * a[8];
  c4[0] -= c4[1] * a[8];
  c5[0] -= c5[1] * a[8];
  c6[0] -= c6[1] * a[8];
  c7[0] -= c7[1] * a[8];
  b[0] = (c0[0] *= a[0]);
  b[1] = (c1[0] *= a[0]);
  b[2] = (c2[0] *= a[0]);
  b[3] = (c3[0] *= a[0]);
  b[4] = (c4[0] *= a[0]);
  b[5] = (c5[0] *= a[0]);
  b[6] = (c6[0] *= a[0]);
  b[7] = (c7[0] *= a[0]);
}
#else
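
/*
 * solve16x8: single-precision counterpart of solve8x8, covering a 16x16
 * packed triangular diagonal block of A against eight columns of C with
 * 4-wide VSX vectors; same back-substitution pattern, rows 15 down to 0.
 */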
static inline __attribute__ ((always_inline)) void solve16x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
  FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7;
  c0 = &c[0*ldc];
  c1 = &c[1*ldc];
  c2 = &c[2*ldc];
  c3 = &c[3*ldc];
  c4 = &c[4*ldc];
  c5 = &c[5*ldc];
  c6 = &c[6*ldc];
  c7 = &c[7*ldc];
  vector FLOAT *Va = (vector FLOAT *) a;
  vector FLOAT *Vb = (vector FLOAT *) b;
  vector FLOAT *Vc0 = (vector FLOAT *) c0;
  vector FLOAT *Vc1 = (vector FLOAT *) c1;
  vector FLOAT *Vc2 = (vector FLOAT *) c2;
  vector FLOAT *Vc3 = (vector FLOAT *) c3;
  vector FLOAT *Vc4 = (vector FLOAT *) c4;
  vector FLOAT *Vc5 = (vector FLOAT *) c5;
  vector FLOAT *Vc6 = (vector FLOAT *) c6;
  vector FLOAT *Vc7 = (vector FLOAT *) c7;
  vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7;
  b[120] = (c0[15] *= a[255]);
  b[121] = (c1[15] *= a[255]);
  b[122] = (c2[15] *= a[255]);
  b[123] = (c3[15] *= a[255]);
  b[124] = (c4[15] *= a[255]);
  b[125] = (c5[15] *= a[255]);
  b[126] = (c6[15] *= a[255]);
  b[127] = (c7[15] *= a[255]);
  VbS0 = vec_splat(Vb[30], 0);
  VbS1 = vec_splat(Vb[30], 1);
  VbS2 = vec_splat(Vb[30], 2);
  VbS3 = vec_splat(Vb[30], 3);
  VbS4 = vec_splat(Vb[31], 0);
  VbS5 = vec_splat(Vb[31], 1);
  VbS6 = vec_splat(Vb[31], 2);
  VbS7 = vec_splat(Vb[31], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[60], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[61], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[62], Vc0[2]);
  Vc1[0] = vec_nmsub(VbS1, Va[60], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[61], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[62], Vc1[2]);
  Vc2[0] = vec_nmsub(VbS2, Va[60], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[61], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[62], Vc2[2]);
  Vc3[0] = vec_nmsub(VbS3, Va[60], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[61], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[62], Vc3[2]);
  Vc4[0] = vec_nmsub(VbS4, Va[60], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[61], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[62], Vc4[2]);
  Vc5[0] = vec_nmsub(VbS5, Va[60], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[61], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[62], Vc5[2]);
  Vc6[0] = vec_nmsub(VbS6, Va[60], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[61], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[62], Vc6[2]);
  Vc7[0] = vec_nmsub(VbS7, Va[60], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[61], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[62], Vc7[2]);
  c0[12] -= b[120] * a[252];
  c0[13] -= b[120] * a[253];
  c0[14] -= b[120] * a[254];
  c1[12] -= b[121] * a[252];
  c1[13] -= b[121] * a[253];
  c1[14] -= b[121] * a[254];
  c2[12] -= b[122] * a[252];
  c2[13] -= b[122] * a[253];
  c2[14] -= b[122] * a[254];
  c3[12] -= b[123] * a[252];
  c3[13] -= b[123] * a[253];
  c3[14] -= b[123] * a[254];
  c4[12] -= b[124] * a[252];
  c4[13] -= b[124] * a[253];
  c4[14] -= b[124] * a[254];
  c5[12] -= b[125] * a[252];
  c5[13] -= b[125] * a[253];
  c5[14] -= b[125] * a[254];
  c6[12] -= b[126] * a[252];
  c6[13] -= b[126] * a[253];
  c6[14] -= b[126] * a[254];
  c7[12] -= b[127] * a[252];
  c7[13] -= b[127] * a[253];
  c7[14] -= b[127] * a[254];
  b[112] = (c0[14] *= a[238]);
  b[113] = (c1[14] *= a[238]);
  b[114] = (c2[14] *= a[238]);
  b[115] = (c3[14] *= a[238]);
  b[116] = (c4[14] *= a[238]);
  b[117] = (c5[14] *= a[238]);
  b[118] = (c6[14] *= a[238]);
  b[119] = (c7[14] *= a[238]);
  VbS0 = vec_splat(Vb[28], 0);
  VbS1 = vec_splat(Vb[28], 1);
  VbS2 = vec_splat(Vb[28], 2);
  VbS3 = vec_splat(Vb[28], 3);
  VbS4 = vec_splat(Vb[29], 0);
  VbS5 = vec_splat(Vb[29], 1);
  VbS6 = vec_splat(Vb[29], 2);
  VbS7 = vec_splat(Vb[29], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[56], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[57], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[58], Vc0[2]);
  Vc1[0] = vec_nmsub(VbS1, Va[56], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[57], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[58], Vc1[2]);
  Vc2[0] = vec_nmsub(VbS2, Va[56], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[57], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[58], Vc2[2]);
  Vc3[0] = vec_nmsub(VbS3, Va[56], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[57], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[58], Vc3[2]);
  Vc4[0] = vec_nmsub(VbS4, Va[56], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[57], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[58], Vc4[2]);
  Vc5[0] = vec_nmsub(VbS5, Va[56], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[57], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[58], Vc5[2]);
  Vc6[0] = vec_nmsub(VbS6, Va[56], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[57], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[58], Vc6[2]);
  Vc7[0] = vec_nmsub(VbS7, Va[56], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[57], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[58], Vc7[2]);
  c0[12] -= b[112] * a[236];
  c0[13] -= b[112] * a[237];
  c1[12] -= b[113] * a[236];
  c1[13] -= b[113] * a[237];
  c2[12] -= b[114] * a[236];
  c2[13] -= b[114] * a[237];
  c3[12] -= b[115] * a[236];
  c3[13] -= b[115] * a[237];
  c4[12] -= b[116] * a[236];
  c4[13] -= b[116] * a[237];
  c5[12] -= b[117] * a[236];
  c5[13] -= b[117] * a[237];
  c6[12] -= b[118] * a[236];
  c6[13] -= b[118] * a[237];
  c7[12] -= b[119] * a[236];
  c7[13] -= b[119] * a[237];
  b[104] = (c0[13] *= a[221]);
  b[105] = (c1[13] *= a[221]);
  b[106] = (c2[13] *= a[221]);
  b[107] = (c3[13] *= a[221]);
  b[108] = (c4[13] *= a[221]);
  b[109] = (c5[13] *= a[221]);
  b[110] = (c6[13] *= a[221]);
  b[111] = (c7[13] *= a[221]);
  VbS0 = vec_splat(Vb[26], 0);
  VbS1 = vec_splat(Vb[26], 1);
  VbS2 = vec_splat(Vb[26], 2);
  VbS3 = vec_splat(Vb[26], 3);
  VbS4 = vec_splat(Vb[27], 0);
  VbS5 = vec_splat(Vb[27], 1);
  VbS6 = vec_splat(Vb[27], 2);
  VbS7 = vec_splat(Vb[27], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[52], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[53], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[54], Vc0[2]);
  Vc1[0] = vec_nmsub(VbS1, Va[52], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[53], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[54], Vc1[2]);
  Vc2[0] = vec_nmsub(VbS2, Va[52], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[53], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[54], Vc2[2]);
  Vc3[0] = vec_nmsub(VbS3, Va[52], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[53], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[54], Vc3[2]);
  Vc4[0] = vec_nmsub(VbS4, Va[52], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[53], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[54], Vc4[2]);
  Vc5[0] = vec_nmsub(VbS5, Va[52], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[53], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[54], Vc5[2]);
  Vc6[0] = vec_nmsub(VbS6, Va[52], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[53], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[54], Vc6[2]);
  Vc7[0] = vec_nmsub(VbS7, Va[52], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[53], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[54], Vc7[2]);
  c0[12] -= b[104] * a[220];
  c1[12] -= b[105] * a[220];
  c2[12] -= b[106] * a[220];
  c3[12] -= b[107] * a[220];
  c4[12] -= b[108] * a[220];
  c5[12] -= b[109] * a[220];
  c6[12] -= b[110] * a[220];
  c7[12] -= b[111] * a[220];
  b[ 96] = (c0[12] *= a[204]);
  b[ 97] = (c1[12] *= a[204]);
  b[ 98] = (c2[12] *= a[204]);
  b[ 99] = (c3[12] *= a[204]);
  b[100] = (c4[12] *= a[204]);
  b[101] = (c5[12] *= a[204]);
  b[102] = (c6[12] *= a[204]);
  b[103] = (c7[12] *= a[204]);
  VbS0 = vec_splat(Vb[24], 0);
  VbS1 = vec_splat(Vb[24], 1);
  VbS2 = vec_splat(Vb[24], 2);
  VbS3 = vec_splat(Vb[24], 3);
  VbS4 = vec_splat(Vb[25], 0);
  VbS5 = vec_splat(Vb[25], 1);
  VbS6 = vec_splat(Vb[25], 2);
  VbS7 = vec_splat(Vb[25], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[48], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[49], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[50], Vc0[2]);
  Vc1[0] = vec_nmsub(VbS1, Va[48], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[49], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[50], Vc1[2]);
  Vc2[0] = vec_nmsub(VbS2, Va[48], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[49], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[50], Vc2[2]);
  Vc3[0] = vec_nmsub(VbS3, Va[48], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[49], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[50], Vc3[2]);
  Vc4[0] = vec_nmsub(VbS4, Va[48], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[49], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[50], Vc4[2]);
  Vc5[0] = vec_nmsub(VbS5, Va[48], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[49], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[50], Vc5[2]);
  Vc6[0] = vec_nmsub(VbS6, Va[48], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[49], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[50], Vc6[2]);
  Vc7[0] = vec_nmsub(VbS7, Va[48], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[49], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[50], Vc7[2]);
  b[88] = (c0[11] *= a[187]);
  b[89] = (c1[11] *= a[187]);
  b[90] = (c2[11] *= a[187]);
  b[91] = (c3[11] *= a[187]);
  b[92] = (c4[11] *= a[187]);
  b[93] = (c5[11] *= a[187]);
  b[94] = (c6[11] *= a[187]);
  b[95] = (c7[11] *= a[187]);
  VbS0 = vec_splat(Vb[22], 0);
  VbS1 = vec_splat(Vb[22], 1);
  VbS2 = vec_splat(Vb[22], 2);
  VbS3 = vec_splat(Vb[22], 3);
  VbS4 = vec_splat(Vb[23], 0);
  VbS5 = vec_splat(Vb[23], 1);
  VbS6 = vec_splat(Vb[23], 2);
  VbS7 = vec_splat(Vb[23], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[44], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[45], Vc0[1]);
  Vc1[0] = vec_nmsub(VbS1, Va[44], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[45], Vc1[1]);
  Vc2[0] = vec_nmsub(VbS2, Va[44], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[45], Vc2[1]);
  Vc3[0] = vec_nmsub(VbS3, Va[44], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[45], Vc3[1]);
  Vc4[0] = vec_nmsub(VbS4, Va[44], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[45], Vc4[1]);
  Vc5[0] = vec_nmsub(VbS5, Va[44], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[45], Vc5[1]);
  Vc6[0] = vec_nmsub(VbS6, Va[44], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[45], Vc6[1]);
  Vc7[0] = vec_nmsub(VbS7, Va[44], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[45], Vc7[1]);
  c0[ 8] -= b[88] * a[184];
  c0[ 9] -= b[88] * a[185];
  c0[10] -= b[88] * a[186];
  c1[ 8] -= b[89] * a[184];
  c1[ 9] -= b[89] * a[185];
  c1[10] -= b[89] * a[186];
  c2[ 8] -= b[90] * a[184];
  c2[ 9] -= b[90] * a[185];
  c2[10] -= b[90] * a[186];
  c3[ 8] -= b[91] * a[184];
  c3[ 9] -= b[91] * a[185];
  c3[10] -= b[91] * a[186];
  c4[ 8] -= b[92] * a[184];
  c4[ 9] -= b[92] * a[185];
  c4[10] -= b[92] * a[186];
  c5[ 8] -= b[93] * a[184];
  c5[ 9] -= b[93] * a[185];
  c5[10] -= b[93] * a[186];
  c6[ 8] -= b[94] * a[184];
  c6[ 9] -= b[94] * a[185];
  c6[10] -= b[94] * a[186];
  c7[ 8] -= b[95] * a[184];
  c7[ 9] -= b[95] * a[185];
  c7[10] -= b[95] * a[186];
  b[80] = (c0[10] *= a[170]);
  b[81] = (c1[10] *= a[170]);
  b[82] = (c2[10] *= a[170]);
  b[83] = (c3[10] *= a[170]);
  b[84] = (c4[10] *= a[170]);
  b[85] = (c5[10] *= a[170]);
  b[86] = (c6[10] *= a[170]);
  b[87] = (c7[10] *= a[170]);
  VbS0 = vec_splat(Vb[20], 0);
  VbS1 = vec_splat(Vb[20], 1);
  VbS2 = vec_splat(Vb[20], 2);
  VbS3 = vec_splat(Vb[20], 3);
  VbS4 = vec_splat(Vb[21], 0);
  VbS5 = vec_splat(Vb[21], 1);
  VbS6 = vec_splat(Vb[21], 2);
  VbS7 = vec_splat(Vb[21], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[40], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[41], Vc0[1]);
  Vc1[0] = vec_nmsub(VbS1, Va[40], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[41], Vc1[1]);
  Vc2[0] = vec_nmsub(VbS2, Va[40], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[41], Vc2[1]);
  Vc3[0] = vec_nmsub(VbS3, Va[40], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[41], Vc3[1]);
  Vc4[0] = vec_nmsub(VbS4, Va[40], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[41], Vc4[1]);
  Vc5[0] = vec_nmsub(VbS5, Va[40], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[41], Vc5[1]);
  Vc6[0] = vec_nmsub(VbS6, Va[40], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[41], Vc6[1]);
  Vc7[0] = vec_nmsub(VbS7, Va[40], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[41], Vc7[1]);
  c0[8] -= b[80] * a[168];
  c0[9] -= b[80] * a[169];
  c1[8] -= b[81] * a[168];
  c1[9] -= b[81] * a[169];
  c2[8] -= b[82] * a[168];
  c2[9] -= b[82] * a[169];
  c3[8] -= b[83] * a[168];
  c3[9] -= b[83] * a[169];
  c4[8] -= b[84] * a[168];
  c4[9] -= b[84] * a[169];
  c5[8] -= b[85] * a[168];
  c5[9] -= b[85] * a[169];
  c6[8] -= b[86] * a[168];
  c6[9] -= b[86] * a[169];
  c7[8] -= b[87] * a[168];
  c7[9] -= b[87] * a[169];
  b[72] = (c0[9] *= a[153]);
  b[73] = (c1[9] *= a[153]);
  b[74] = (c2[9] *= a[153]);
  b[75] = (c3[9] *= a[153]);
  b[76] = (c4[9] *= a[153]);
  b[77] = (c5[9] *= a[153]);
  b[78] = (c6[9] *= a[153]);
  b[79] = (c7[9] *= a[153]);
  VbS0 = vec_splat(Vb[18], 0);
  VbS1 = vec_splat(Vb[18], 1);
  VbS2 = vec_splat(Vb[18], 2);
  VbS3 = vec_splat(Vb[18], 3);
  VbS4 = vec_splat(Vb[19], 0);
  VbS5 = vec_splat(Vb[19], 1);
  VbS6 = vec_splat(Vb[19], 2);
  VbS7 = vec_splat(Vb[19], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[36], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[37], Vc0[1]);
  Vc1[0] = vec_nmsub(VbS1, Va[36], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[37], Vc1[1]);
  Vc2[0] = vec_nmsub(VbS2, Va[36], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[37], Vc2[1]);
  Vc3[0] = vec_nmsub(VbS3, Va[36], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[37], Vc3[1]);
  Vc4[0] = vec_nmsub(VbS4, Va[36], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[37], Vc4[1]);
  Vc5[0] = vec_nmsub(VbS5, Va[36], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[37], Vc5[1]);
  Vc6[0] = vec_nmsub(VbS6, Va[36], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[37], Vc6[1]);
  Vc7[0] = vec_nmsub(VbS7, Va[36], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[37], Vc7[1]);
  c0[8] -= b[72] * a[152];
  c1[8] -= b[73] * a[152];
  c2[8] -= b[74] * a[152];
  c3[8] -= b[75] * a[152];
  c4[8] -= b[76] * a[152];
  c5[8] -= b[77] * a[152];
  c6[8] -= b[78] * a[152];
  c7[8] -= b[79] * a[152];
  b[64] = (c0[8] *= a[136]);
  b[65] = (c1[8] *= a[136]);
  b[66] = (c2[8] *= a[136]);
  b[67] = (c3[8] *= a[136]);
  b[68] = (c4[8] *= a[136]);
  b[69] = (c5[8] *= a[136]);
  b[70] = (c6[8] *= a[136]);
  b[71] = (c7[8] *= a[136]);
  VbS0 = vec_splat(Vb[16], 0);
  VbS1 = vec_splat(Vb[16], 1);
  VbS2 = vec_splat(Vb[16], 2);
  VbS3 = vec_splat(Vb[16], 3);
  VbS4 = vec_splat(Vb[17], 0);
  VbS5 = vec_splat(Vb[17], 1);
  VbS6 = vec_splat(Vb[17], 2);
  VbS7 = vec_splat(Vb[17], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[32], Vc0[0]);
  Vc0[1] = vec_nmsub(VbS0, Va[33], Vc0[1]);
  Vc1[0] = vec_nmsub(VbS1, Va[32], Vc1[0]);
  Vc1[1] = vec_nmsub(VbS1, Va[33], Vc1[1]);
  Vc2[0] = vec_nmsub(VbS2, Va[32], Vc2[0]);
  Vc2[1] = vec_nmsub(VbS2, Va[33], Vc2[1]);
  Vc3[0] = vec_nmsub(VbS3, Va[32], Vc3[0]);
  Vc3[1] = vec_nmsub(VbS3, Va[33], Vc3[1]);
  Vc4[0] = vec_nmsub(VbS4, Va[32], Vc4[0]);
  Vc4[1] = vec_nmsub(VbS4, Va[33], Vc4[1]);
  Vc5[0] = vec_nmsub(VbS5, Va[32], Vc5[0]);
  Vc5[1] = vec_nmsub(VbS5, Va[33], Vc5[1]);
  Vc6[0] = vec_nmsub(VbS6, Va[32], Vc6[0]);
  Vc6[1] = vec_nmsub(VbS6, Va[33], Vc6[1]);
  Vc7[0] = vec_nmsub(VbS7, Va[32], Vc7[0]);
  Vc7[1] = vec_nmsub(VbS7, Va[33], Vc7[1]);
  b[56] = (c0[7] *= a[119]);
  b[57] = (c1[7] *= a[119]);
  b[58] = (c2[7] *= a[119]);
  b[59] = (c3[7] *= a[119]);
  b[60] = (c4[7] *= a[119]);
  b[61] = (c5[7] *= a[119]);
  b[62] = (c6[7] *= a[119]);
  b[63] = (c7[7] *= a[119]);
  VbS0 = vec_splat(Vb[14], 0);
  VbS1 = vec_splat(Vb[14], 1);
  VbS2 = vec_splat(Vb[14], 2);
  VbS3 = vec_splat(Vb[14], 3);
  VbS4 = vec_splat(Vb[15], 0);
  VbS5 = vec_splat(Vb[15], 1);
  VbS6 = vec_splat(Vb[15], 2);
  VbS7 = vec_splat(Vb[15], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[28], Vc0[0]);
  Vc1[0] = vec_nmsub(VbS1, Va[28], Vc1[0]);
  Vc2[0] = vec_nmsub(VbS2, Va[28], Vc2[0]);
  Vc3[0] = vec_nmsub(VbS3, Va[28], Vc3[0]);
  Vc4[0] = vec_nmsub(VbS4, Va[28], Vc4[0]);
  Vc5[0] = vec_nmsub(VbS5, Va[28], Vc5[0]);
  Vc6[0] = vec_nmsub(VbS6, Va[28], Vc6[0]);
  Vc7[0] = vec_nmsub(VbS7, Va[28], Vc7[0]);
  c0[4] -= b[56] * a[116];
  c0[5] -= b[56] * a[117];
  c0[6] -= b[56] * a[118];
  c1[4] -= b[57] * a[116];
  c1[5] -= b[57] * a[117];
  c1[6] -= b[57] * a[118];
  c2[4] -= b[58] * a[116];
  c2[5] -= b[58] * a[117];
  c2[6] -= b[58] * a[118];
  c3[4] -= b[59] * a[116];
  c3[5] -= b[59] * a[117];
  c3[6] -= b[59] * a[118];
  c4[4] -= b[60] * a[116];
  c4[5] -= b[60] * a[117];
  c4[6] -= b[60] * a[118];
  c5[4] -= b[61] * a[116];
  c5[5] -= b[61] * a[117];
  c5[6] -= b[61] * a[118];
  c6[4] -= b[62] * a[116];
  c6[5] -= b[62] * a[117];
  c6[6] -= b[62] * a[118];
  c7[4] -= b[63] * a[116];
  c7[5] -= b[63] * a[117];
  c7[6] -= b[63] * a[118];
  b[48] = (c0[6] *= a[102]);
  b[49] = (c1[6] *= a[102]);
  b[50] = (c2[6] *= a[102]);
  b[51] = (c3[6] *= a[102]);
  b[52] = (c4[6] *= a[102]);
  b[53] = (c5[6] *= a[102]);
  b[54] = (c6[6] *= a[102]);
  b[55] = (c7[6] *= a[102]);
  VbS0 = vec_splat(Vb[12], 0);
  VbS1 = vec_splat(Vb[12], 1);
  VbS2 = vec_splat(Vb[12], 2);
  VbS3 = vec_splat(Vb[12], 3);
  VbS4 = vec_splat(Vb[13], 0);
  VbS5 = vec_splat(Vb[13], 1);
  VbS6 = vec_splat(Vb[13], 2);
  VbS7 = vec_splat(Vb[13], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[24], Vc0[0]);
  Vc1[0] = vec_nmsub(VbS1, Va[24], Vc1[0]);
  Vc2[0] = vec_nmsub(VbS2, Va[24], Vc2[0]);
  Vc3[0] = vec_nmsub(VbS3, Va[24], Vc3[0]);
  Vc4[0] = vec_nmsub(VbS4, Va[24], Vc4[0]);
  Vc5[0] = vec_nmsub(VbS5, Va[24], Vc5[0]);
  Vc6[0] = vec_nmsub(VbS6, Va[24], Vc6[0]);
  Vc7[0] = vec_nmsub(VbS7, Va[24], Vc7[0]);
  c0[4] -= b[48] * a[100];
  c0[5] -= b[48] * a[101];
  c1[4] -= b[49] * a[100];
  c1[5] -= b[49] * a[101];
  c2[4] -= b[50] * a[100];
  c2[5] -= b[50] * a[101];
  c3[4] -= b[51] * a[100];
  c3[5] -= b[51] * a[101];
  c4[4] -= b[52] * a[100];
  c4[5] -= b[52] * a[101];
  c5[4] -= b[53] * a[100];
  c5[5] -= b[53] * a[101];
  c6[4] -= b[54] * a[100];
  c6[5] -= b[54] * a[101];
  c7[4] -= b[55] * a[100];
  c7[5] -= b[55] * a[101];
  b[40] = (c0[5] *= a[85]);
  b[41] = (c1[5] *= a[85]);
  b[42] = (c2[5] *= a[85]);
  b[43] = (c3[5] *= a[85]);
  b[44] = (c4[5] *= a[85]);
  b[45] = (c5[5] *= a[85]);
  b[46] = (c6[5] *= a[85]);
  b[47] = (c7[5] *= a[85]);
  VbS0 = vec_splat(Vb[10], 0);
  VbS1 = vec_splat(Vb[10], 1);
  VbS2 = vec_splat(Vb[10], 2);
  VbS3 = vec_splat(Vb[10], 3);
  VbS4 = vec_splat(Vb[11], 0);
  VbS5 = vec_splat(Vb[11], 1);
  VbS6 = vec_splat(Vb[11], 2);
  VbS7 = vec_splat(Vb[11], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[20], Vc0[0]);
  Vc1[0] = vec_nmsub(VbS1, Va[20], Vc1[0]);
  Vc2[0] = vec_nmsub(VbS2, Va[20], Vc2[0]);
  Vc3[0] = vec_nmsub(VbS3, Va[20], Vc3[0]);
  Vc4[0] = vec_nmsub(VbS4, Va[20], Vc4[0]);
  Vc5[0] = vec_nmsub(VbS5, Va[20], Vc5[0]);
  Vc6[0] = vec_nmsub(VbS6, Va[20], Vc6[0]);
  Vc7[0] = vec_nmsub(VbS7, Va[20], Vc7[0]);
  c0[4] -= b[40] * a[84];
  c1[4] -= b[41] * a[84];
  c2[4] -= b[42] * a[84];
  c3[4] -= b[43] * a[84];
  c4[4] -= b[44] * a[84];
  c5[4] -= b[45] * a[84];
  c6[4] -= b[46] * a[84];
  c7[4] -= b[47] * a[84];
  b[32] = (c0[4] *= a[68]);
  b[33] = (c1[4] *= a[68]);
  b[34] = (c2[4] *= a[68]);
  b[35] = (c3[4] *= a[68]);
  b[36] = (c4[4] *= a[68]);
  b[37] = (c5[4] *= a[68]);
  b[38] = (c6[4] *= a[68]);
  b[39] = (c7[4] *= a[68]);
  VbS0 = vec_splat(Vb[8], 0);
  VbS1 = vec_splat(Vb[8], 1);
  VbS2 = vec_splat(Vb[8], 2);
  VbS3 = vec_splat(Vb[8], 3);
  VbS4 = vec_splat(Vb[9], 0);
  VbS5 = vec_splat(Vb[9], 1);
  VbS6 = vec_splat(Vb[9], 2);
  VbS7 = vec_splat(Vb[9], 3);
  Vc0[0] = vec_nmsub(VbS0, Va[16], Vc0[0]);
  Vc1[0] = vec_nmsub(VbS1, Va[16], Vc1[0]);
  Vc2[0] = vec_nmsub(VbS2, Va[16], Vc2[0]);
  Vc3[0] = vec_nmsub(VbS3, Va[16], Vc3[0]);
  Vc4[0] = vec_nmsub(VbS4, Va[16], Vc4[0]);
  Vc5[0] = vec_nmsub(VbS5, Va[16], Vc5[0]);
  Vc6[0] = vec_nmsub(VbS6, Va[16], Vc6[0]);
  Vc7[0] = vec_nmsub(VbS7, Va[16], Vc7[0]);
  b[24] = (c0[3] *= a[51]);
  b[25] = (c1[3] *= a[51]);
  b[26] = (c2[3] *= a[51]);
  b[27] = (c3[3] *= a[51]);
  b[28] = (c4[3] *= a[51]);
  b[29] = (c5[3] *= a[51]);
  b[30] = (c6[3] *= a[51]);
  b[31] = (c7[3] *= a[51]);
  c0[0] -= b[24] * a[48];
  c0[1] -= b[24] * a[49];
  c0[2] -= b[24] * a[50];
  c1[0] -= b[25] * a[48];
  c1[1] -= b[25] * a[49];
  c1[2] -= b[25] * a[50];
  c2[0] -= b[26] * a[48];
  c2[1] -= b[26] * a[49];
  c2[2] -= b[26] * a[50];
  c3[0] -= b[27] * a[48];
  c3[1] -= b[27] * a[49];
  c3[2] -= b[27] * a[50];
  c4[0] -= b[28] * a[48];
  c4[1] -= b[28] * a[49];
  c4[2] -= b[28] * a[50];
  c5[0] -= b[29] * a[48];
  c5[1] -= b[29] * a[49];
  c5[2] -= b[29] * a[50];
  c6[0] -= b[30] * a[48];
  c6[1] -= b[30] * a[49];
  c6[2] -= b[30] * a[50];
  c7[0] -= b[31] * a[48];
  c7[1] -= b[31] * a[49];
  c7[2] -= b[31] * a[50];
  b[16] = (c0[2] *= a[34]);
  b[17] = (c1[2] *= a[34]);
  b[18] = (c2[2] *= a[34]);
  b[19] = (c3[2] *= a[34]);
  b[20] = (c4[2] *= a[34]);
  b[21] = (c5[2] *= a[34]);
  b[22] = (c6[2] *= a[34]);
  b[23] = (c7[2] *= a[34]);
  c0[0] -= b[16] * a[32];
  c0[1] -= b[16] * a[33];
  c1[0] -= b[17] * a[32];
  c1[1] -= b[17] * a[33];
  c2[0] -= b[18] * a[32];
  c2[1] -= b[18] * a[33];
  c3[0] -= b[19] * a[32];
  c3[1] -= b[19] * a[33];
  c4[0] -= b[20] * a[32];
  c4[1] -= b[20] * a[33];
  c5[0] -= b[21] * a[32];
  c5[1] -= b[21] * a[33];
  c6[0] -= b[22] * a[32];
  c6[1] -= b[22] * a[33];
  c7[0] -= b[23] * a[32];
  c7[1] -= b[23] * a[33];
  b[ 8] = (c0[1] *= a[17]);
  b[ 9] = (c1[1] *= a[17]);
  b[10] = (c2[1] *= a[17]);
  b[11] = (c3[1] *= a[17]);
  b[12] = (c4[1] *= a[17]);
  b[13] = (c5[1] *= a[17]);
  b[14] = (c6[1] *= a[17]);
  b[15] = (c7[1] *= a[17]);
  c0[0] -= b[ 8] * a[16];
  c1[0] -= b[ 9] * a[16];
  c2[0] -= b[10] * a[16];
  c3[0] -= b[11] * a[16];
  c4[0] -= b[12] * a[16];
  c5[0] -= b[13] * a[16];
  c6[0] -= b[14] * a[16];
  c7[0] -= b[15] * a[16];
  b[0] = (c0[0] *= a[0]);
  b[1] = (c1[0] *= a[0]);
  b[2] = (c2[0] *= a[0]);
  b[3] = (c3[0] *= a[0]);
  b[4] = (c4[0] *= a[0]);
  b[5] = (c5[0] *= a[0]);
  b[6] = (c6[0] *= a[0]);
  b[7] = (c7[0] *= a[0]);
}
#endif
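
/*
 * Generic scalar solve (real case): back-substitution of an m x m packed
 * triangular block against n columns of C, processing rows from m-1 down
 * to 0.  Solved values are written back to C and into the packed b panel.
 */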
static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
  FLOAT aa, bb;
  int i, j, k;

  a += (m - 1) * m;
  b += (m - 1) * n;

  for (i = m - 1; i >= 0; i--) {
    aa = *(a + i);
    for (j = 0; j < n; j ++) {
      bb = *(c + i + j * ldc);
      bb *= aa;
      *b = bb;
      *(c + i + j * ldc) = bb;
      b ++;
      for (k = 0; k < i; k ++){
        *(c + k + j * ldc) -= bb * *(a + k);
      }
    }
    a -= m;
    b -= 2 * n;
  }
}
#else
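
/*
 * Generic scalar solve (complex case): same back-substitution, but each
 * element is an (re, im) pair; under CONJ the imaginary parts of A are
 * conjugated in both the diagonal multiply and the updates below.
 */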
static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
  FLOAT aa1, aa2;
  FLOAT bb1, bb2;
  FLOAT cc1, cc2;
  int i, j, k;

  ldc *= 2;

  a += (m - 1) * m * 2;
  b += (m - 1) * n * 2;

  for (i = m - 1; i >= 0; i--) {
    aa1 = *(a + i * 2 + 0);
    aa2 = *(a + i * 2 + 1);
    for (j = 0; j < n; j ++) {
      bb1 = *(c + i * 2 + 0 + j * ldc);
      bb2 = *(c + i * 2 + 1 + j * ldc);
#ifndef CONJ
      cc1 = aa1 * bb1 - aa2 * bb2;
      cc2 = aa1 * bb2 + aa2 * bb1;
#else
      cc1 = aa1 * bb1 + aa2 * bb2;
      cc2 = aa1 * bb2 - aa2 * bb1;
#endif
      *(b + 0) = cc1;
      *(b + 1) = cc2;
      *(c + i * 2 + 0 + j * ldc) = cc1;
      *(c + i * 2 + 1 + j * ldc) = cc2;
      b += 2;
      for (k = 0; k < i; k ++){
#ifndef CONJ
        *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1);
        *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0);
#else
        *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1);
        *(c + k * 2 + 1 + j * ldc) -= - cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0);
#endif
      }
    }
    a -= m * 2;
    b -= 4 * n;
  }
}
#endif
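
/*
 * Kernel entry point for the LN TRSM case.  The matrix is processed in
 * GEMM_UNROLL_N-wide column panels; within each panel the row blocks are
 * visited from the bottom of the matrix upwards (note the decreasing aa/cc
 * pointers).  For each block the accumulated GEMM_KERNEL update is applied
 * first, then the diagonal block is solved with the VSX routines above when
 * well_aligned holds, or with the scalar solve otherwise.
 */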
int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1,
#ifdef COMPLEX
          FLOAT dummy2,
#endif
          FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){

  BLASLONG i, j;
  FLOAT *aa, *cc;
  BLASLONG kk;

#if 0
  fprintf(stderr, "TRSM KERNEL LN : m = %3ld n = %3ld k = %3ld offset = %3ld\n",
          m, n, k, offset);
#endif
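
  /* The hand-vectorized block solvers are used only when the build's unroll
     sizes match them and the packed A panel is 8-byte aligned. */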
#ifdef DOUBLE
  int well_aligned = (GEMM_UNROLL_M==8) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0);
#else
  int well_aligned = (GEMM_UNROLL_M==16) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0);
#endif
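
  /* Main loop over full GEMM_UNROLL_N-wide column panels of C. */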
  j = (n >> GEMM_UNROLL_N_SHIFT);

  while (j > 0) {

    kk = m + offset;

    if (m & (GEMM_UNROLL_M - 1)) {
      for (i = 1; i < GEMM_UNROLL_M; i *= 2){
        if (m & i) {
          aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE;
          cc = c + ((m & ~(i - 1)) - i) * COMPSIZE;
          if (k - kk > 0) {
            GEMM_KERNEL(i, GEMM_UNROLL_N, k - kk, dm1,
#ifdef COMPLEX
                        ZERO,
#endif
                        aa + i * kk * COMPSIZE,
                        b + GEMM_UNROLL_N * kk * COMPSIZE,
                        cc,
                        ldc);
          }
          solve(i, GEMM_UNROLL_N,
                aa + (kk - i) * i * COMPSIZE,
                b + (kk - i) * GEMM_UNROLL_N * COMPSIZE,
                cc, ldc);
          kk -= i;
        }
      }
    }

    i = (m >> GEMM_UNROLL_M_SHIFT);
    if (i > 0) {
      aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE;
      cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE;
      do {
        if (k - kk > 0) {
          GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, k - kk, dm1,
#ifdef COMPLEX
                      ZERO,
#endif
                      aa + GEMM_UNROLL_M * kk * COMPSIZE,
                      b + GEMM_UNROLL_N * kk * COMPSIZE,
                      cc,
                      ldc);
        }
        if (well_aligned) {
#ifdef DOUBLE
          solve8x8(aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE,
                   b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE, cc, ldc);
#else
          solve16x8(aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE,
                    b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE, cc, ldc);
#endif
        }
        else {
          solve(GEMM_UNROLL_M, GEMM_UNROLL_N,
                aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE,
                b + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_N * COMPSIZE,
                cc, ldc);
        }
        aa -= GEMM_UNROLL_M * k * COMPSIZE;
        cc -= GEMM_UNROLL_M * COMPSIZE;
        kk -= GEMM_UNROLL_M;
        i --;
      } while (i > 0);
    }

    b += GEMM_UNROLL_N * k * COMPSIZE;
    c += GEMM_UNROLL_N * ldc * COMPSIZE;

    j --;
  }
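
  /* Tail: handle the remaining n mod GEMM_UNROLL_N columns with progressively
     narrower panel widths. */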
  if (n & (GEMM_UNROLL_N - 1)) {

    j = (GEMM_UNROLL_N >> 1);
    while (j > 0) {
      if (n & j) {

        kk = m + offset;

        if (m & (GEMM_UNROLL_M - 1)) {
          for (i = 1; i < GEMM_UNROLL_M; i *= 2){
            if (m & i) {
              aa = a + ((m & ~(i - 1)) - i) * k * COMPSIZE;
              cc = c + ((m & ~(i - 1)) - i) * COMPSIZE;
              if (k - kk > 0) {
                GEMM_KERNEL(i, j, k - kk, dm1,
#ifdef COMPLEX
                            ZERO,
#endif
                            aa + i * kk * COMPSIZE,
                            b + j * kk * COMPSIZE,
                            cc, ldc);
              }
              solve(i, j,
                    aa + (kk - i) * i * COMPSIZE,
                    b + (kk - i) * j * COMPSIZE,
                    cc, ldc);
              kk -= i;
            }
          }
        }

        i = (m >> GEMM_UNROLL_M_SHIFT);
        if (i > 0) {
          aa = a + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * k * COMPSIZE;
          cc = c + ((m & ~(GEMM_UNROLL_M - 1)) - GEMM_UNROLL_M) * COMPSIZE;
          do {
            if (k - kk > 0) {
              GEMM_KERNEL(GEMM_UNROLL_M, j, k - kk, dm1,
#ifdef COMPLEX
                          ZERO,
#endif
                          aa + GEMM_UNROLL_M * kk * COMPSIZE,
                          b + j * kk * COMPSIZE,
                          cc,
                          ldc);
            }
            solve(GEMM_UNROLL_M, j,
                  aa + (kk - GEMM_UNROLL_M) * GEMM_UNROLL_M * COMPSIZE,
                  b + (kk - GEMM_UNROLL_M) * j * COMPSIZE,
                  cc, ldc);
            aa -= GEMM_UNROLL_M * k * COMPSIZE;
            cc -= GEMM_UNROLL_M * COMPSIZE;
            kk -= GEMM_UNROLL_M;
            i --;
          } while (i > 0);
        }

        b += j * k * COMPSIZE;
        c += j * ldc * COMPSIZE;
      }
      j >>= 1;
    }
  }

  return 0;
}