
trsm_kernel_LT_power10.c

/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include "common.h"
#include <altivec.h>
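
/* dm1 is passed to GEMM_KERNEL as the scale of the update step, so each
 * call below effectively computes C -= A * B; with CONJ defined the
 * conjugate kernel variant is selected. (This reading of the kernel's
 * signature is an inference, not stated in this file.) */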
static FLOAT dm1 = -1.;

#ifdef CONJ
#define GEMM_KERNEL GEMM_KERNEL_L
#else
#define GEMM_KERNEL GEMM_KERNEL_N
#endif
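
/* The *_SHIFT macros below hold log2 of the default unroll factors so
 * that block counts can be computed with a shift (e.g. m >> GEMM_UNROLL_M_SHIFT).
 * Note that an unroll factor of 6 is mapped to shift 2. */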
#if GEMM_DEFAULT_UNROLL_M == 1
#define GEMM_UNROLL_M_SHIFT 0
#endif
#if GEMM_DEFAULT_UNROLL_M == 2
#define GEMM_UNROLL_M_SHIFT 1
#endif
#if GEMM_DEFAULT_UNROLL_M == 4
#define GEMM_UNROLL_M_SHIFT 2
#endif
#if GEMM_DEFAULT_UNROLL_M == 6
#define GEMM_UNROLL_M_SHIFT 2
#endif
#if GEMM_DEFAULT_UNROLL_M == 8
#define GEMM_UNROLL_M_SHIFT 3
#endif
#if GEMM_DEFAULT_UNROLL_M == 16
#define GEMM_UNROLL_M_SHIFT 4
#endif

#if GEMM_DEFAULT_UNROLL_N == 1
#define GEMM_UNROLL_N_SHIFT 0
#endif
#if GEMM_DEFAULT_UNROLL_N == 2
#define GEMM_UNROLL_N_SHIFT 1
#endif
#if GEMM_DEFAULT_UNROLL_N == 4
#define GEMM_UNROLL_N_SHIFT 2
#endif
#if GEMM_DEFAULT_UNROLL_N == 8
#define GEMM_UNROLL_N_SHIFT 3
#endif
#if GEMM_DEFAULT_UNROLL_N == 16
#define GEMM_UNROLL_N_SHIFT 4
#endif
#ifndef COMPLEX
#ifdef DOUBLE
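
/* Forward substitution for one 8x8 diagonal block in double precision
 * (POWER VSX: a vector double holds two elements, so each column of C
 * spans Vc[0..3]). a points to the packed 8x8 triangular block; the
 * multiplications by a[0], a[9], ..., a[63] suggest the packing stores
 * the diagonal entries pre-inverted, so no division is needed here.
 * c0..c7 are the eight columns of C, solved in place, and every solved
 * element is also written to b for reuse by later GEMM updates. */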
static inline __attribute__ ((always_inline)) void solve8x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
  FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7;
  c0 = &c[0*ldc];
  c1 = &c[1*ldc];
  c2 = &c[2*ldc];
  c3 = &c[3*ldc];
  c4 = &c[4*ldc];
  c5 = &c[5*ldc];
  c6 = &c[6*ldc];
  c7 = &c[7*ldc];
  vector FLOAT *Va = (vector FLOAT *) a;
  vector FLOAT *Vb = (vector FLOAT *) b;
  vector FLOAT *Vc0 = (vector FLOAT *) c0;
  vector FLOAT *Vc1 = (vector FLOAT *) c1;
  vector FLOAT *Vc2 = (vector FLOAT *) c2;
  vector FLOAT *Vc3 = (vector FLOAT *) c3;
  vector FLOAT *Vc4 = (vector FLOAT *) c4;
  vector FLOAT *Vc5 = (vector FLOAT *) c5;
  vector FLOAT *Vc6 = (vector FLOAT *) c6;
  vector FLOAT *Vc7 = (vector FLOAT *) c7;
  vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7;
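
  /* Row 0: scale by the (inverted) diagonal a[0] and store to b, then
   * eliminate row 0 from the rows below. Elements 2..7 of each column
   * are updated with vec_nmsub (c - b*a); element 1 shares a vector
   * register with element 0 and is handled in scalar code. Rows 1..7
   * repeat the same pattern with the diagonal at a[9*i]. */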
  b[0] = (c0[0] *= a[0]);
  b[1] = (c1[0] *= a[0]);
  b[2] = (c2[0] *= a[0]);
  b[3] = (c3[0] *= a[0]);
  b[4] = (c4[0] *= a[0]);
  b[5] = (c5[0] *= a[0]);
  b[6] = (c6[0] *= a[0]);
  b[7] = (c7[0] *= a[0]);

  VbS0 = vec_splat(Vb[0], 0);
  VbS1 = vec_splat(Vb[0], 1);
  VbS2 = vec_splat(Vb[1], 0);
  VbS3 = vec_splat(Vb[1], 1);
  VbS4 = vec_splat(Vb[2], 0);
  VbS5 = vec_splat(Vb[2], 1);
  VbS6 = vec_splat(Vb[3], 0);
  VbS7 = vec_splat(Vb[3], 1);

  Vc0[1] = vec_nmsub(VbS0, Va[1], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[2], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[3], Vc0[3]);
  Vc1[1] = vec_nmsub(VbS1, Va[1], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[2], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[3], Vc1[3]);
  Vc2[1] = vec_nmsub(VbS2, Va[1], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[2], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[3], Vc2[3]);
  Vc3[1] = vec_nmsub(VbS3, Va[1], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[2], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[3], Vc3[3]);
  Vc4[1] = vec_nmsub(VbS4, Va[1], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[2], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[3], Vc4[3]);
  Vc5[1] = vec_nmsub(VbS5, Va[1], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[2], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[3], Vc5[3]);
  Vc6[1] = vec_nmsub(VbS6, Va[1], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[2], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[3], Vc6[3]);
  Vc7[1] = vec_nmsub(VbS7, Va[1], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[2], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[3], Vc7[3]);

  c0[1] -= c0[0] * a[1];
  c1[1] -= c1[0] * a[1];
  c2[1] -= c2[0] * a[1];
  c3[1] -= c3[0] * a[1];
  c4[1] -= c4[0] * a[1];
  c5[1] -= c5[0] * a[1];
  c6[1] -= c6[0] * a[1];
  c7[1] -= c7[0] * a[1];

  b[ 8] = (c0[1] *= a[9]);
  b[ 9] = (c1[1] *= a[9]);
  b[10] = (c2[1] *= a[9]);
  b[11] = (c3[1] *= a[9]);
  b[12] = (c4[1] *= a[9]);
  b[13] = (c5[1] *= a[9]);
  b[14] = (c6[1] *= a[9]);
  b[15] = (c7[1] *= a[9]);

  VbS0 = vec_splat(Vb[4], 0);
  VbS1 = vec_splat(Vb[4], 1);
  VbS2 = vec_splat(Vb[5], 0);
  VbS3 = vec_splat(Vb[5], 1);
  VbS4 = vec_splat(Vb[6], 0);
  VbS5 = vec_splat(Vb[6], 1);
  VbS6 = vec_splat(Vb[7], 0);
  VbS7 = vec_splat(Vb[7], 1);

  Vc0[1] = vec_nmsub(VbS0, Va[5], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[6], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[7], Vc0[3]);
  Vc1[1] = vec_nmsub(VbS1, Va[5], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[6], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[7], Vc1[3]);
  Vc2[1] = vec_nmsub(VbS2, Va[5], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[6], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[7], Vc2[3]);
  Vc3[1] = vec_nmsub(VbS3, Va[5], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[6], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[7], Vc3[3]);
  Vc4[1] = vec_nmsub(VbS4, Va[5], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[6], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[7], Vc4[3]);
  Vc5[1] = vec_nmsub(VbS5, Va[5], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[6], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[7], Vc5[3]);
  Vc6[1] = vec_nmsub(VbS6, Va[5], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[6], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[7], Vc6[3]);
  Vc7[1] = vec_nmsub(VbS7, Va[5], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[6], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[7], Vc7[3]);

  b[16] = (c0[2] *= a[18]);
  b[17] = (c1[2] *= a[18]);
  b[18] = (c2[2] *= a[18]);
  b[19] = (c3[2] *= a[18]);
  b[20] = (c4[2] *= a[18]);
  b[21] = (c5[2] *= a[18]);
  b[22] = (c6[2] *= a[18]);
  b[23] = (c7[2] *= a[18]);

  VbS0 = vec_splat(Vb[ 8], 0);
  VbS1 = vec_splat(Vb[ 8], 1);
  VbS2 = vec_splat(Vb[ 9], 0);
  VbS3 = vec_splat(Vb[ 9], 1);
  VbS4 = vec_splat(Vb[10], 0);
  VbS5 = vec_splat(Vb[10], 1);
  VbS6 = vec_splat(Vb[11], 0);
  VbS7 = vec_splat(Vb[11], 1);

  Vc0[2] = vec_nmsub(VbS0, Va[10], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[11], Vc0[3]);
  Vc1[2] = vec_nmsub(VbS1, Va[10], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[11], Vc1[3]);
  Vc2[2] = vec_nmsub(VbS2, Va[10], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[11], Vc2[3]);
  Vc3[2] = vec_nmsub(VbS3, Va[10], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[11], Vc3[3]);
  Vc4[2] = vec_nmsub(VbS4, Va[10], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[11], Vc4[3]);
  Vc5[2] = vec_nmsub(VbS5, Va[10], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[11], Vc5[3]);
  Vc6[2] = vec_nmsub(VbS6, Va[10], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[11], Vc6[3]);
  Vc7[2] = vec_nmsub(VbS7, Va[10], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[11], Vc7[3]);

  c0[3] -= c0[2] * a[19];
  c1[3] -= c1[2] * a[19];
  c2[3] -= c2[2] * a[19];
  c3[3] -= c3[2] * a[19];
  c4[3] -= c4[2] * a[19];
  c5[3] -= c5[2] * a[19];
  c6[3] -= c6[2] * a[19];
  c7[3] -= c7[2] * a[19];

  b[24] = (c0[3] *= a[27]);
  b[25] = (c1[3] *= a[27]);
  b[26] = (c2[3] *= a[27]);
  b[27] = (c3[3] *= a[27]);
  b[28] = (c4[3] *= a[27]);
  b[29] = (c5[3] *= a[27]);
  b[30] = (c6[3] *= a[27]);
  b[31] = (c7[3] *= a[27]);

  VbS0 = vec_splat(Vb[12], 0);
  VbS1 = vec_splat(Vb[12], 1);
  VbS2 = vec_splat(Vb[13], 0);
  VbS3 = vec_splat(Vb[13], 1);
  VbS4 = vec_splat(Vb[14], 0);
  VbS5 = vec_splat(Vb[14], 1);
  VbS6 = vec_splat(Vb[15], 0);
  VbS7 = vec_splat(Vb[15], 1);

  Vc0[2] = vec_nmsub(VbS0, Va[14], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[15], Vc0[3]);
  Vc1[2] = vec_nmsub(VbS1, Va[14], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[15], Vc1[3]);
  Vc2[2] = vec_nmsub(VbS2, Va[14], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[15], Vc2[3]);
  Vc3[2] = vec_nmsub(VbS3, Va[14], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[15], Vc3[3]);
  Vc4[2] = vec_nmsub(VbS4, Va[14], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[15], Vc4[3]);
  Vc5[2] = vec_nmsub(VbS5, Va[14], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[15], Vc5[3]);
  Vc6[2] = vec_nmsub(VbS6, Va[14], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[15], Vc6[3]);
  Vc7[2] = vec_nmsub(VbS7, Va[14], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[15], Vc7[3]);

  b[32] = (c0[4] *= a[36]);
  b[33] = (c1[4] *= a[36]);
  b[34] = (c2[4] *= a[36]);
  b[35] = (c3[4] *= a[36]);
  b[36] = (c4[4] *= a[36]);
  b[37] = (c5[4] *= a[36]);
  b[38] = (c6[4] *= a[36]);
  b[39] = (c7[4] *= a[36]);

  VbS0 = vec_splat(Vb[16], 0);
  VbS1 = vec_splat(Vb[16], 1);
  VbS2 = vec_splat(Vb[17], 0);
  VbS3 = vec_splat(Vb[17], 1);
  VbS4 = vec_splat(Vb[18], 0);
  VbS5 = vec_splat(Vb[18], 1);
  VbS6 = vec_splat(Vb[19], 0);
  VbS7 = vec_splat(Vb[19], 1);

  Vc0[3] = vec_nmsub(VbS0, Va[19], Vc0[3]);
  Vc1[3] = vec_nmsub(VbS1, Va[19], Vc1[3]);
  Vc2[3] = vec_nmsub(VbS2, Va[19], Vc2[3]);
  Vc3[3] = vec_nmsub(VbS3, Va[19], Vc3[3]);
  Vc4[3] = vec_nmsub(VbS4, Va[19], Vc4[3]);
  Vc5[3] = vec_nmsub(VbS5, Va[19], Vc5[3]);
  Vc6[3] = vec_nmsub(VbS6, Va[19], Vc6[3]);
  Vc7[3] = vec_nmsub(VbS7, Va[19], Vc7[3]);

  c0[5] -= c0[4] * a[37];
  c1[5] -= c1[4] * a[37];
  c2[5] -= c2[4] * a[37];
  c3[5] -= c3[4] * a[37];
  c4[5] -= c4[4] * a[37];
  c5[5] -= c5[4] * a[37];
  c6[5] -= c6[4] * a[37];
  c7[5] -= c7[4] * a[37];

  b[40] = (c0[5] *= a[45]);
  b[41] = (c1[5] *= a[45]);
  b[42] = (c2[5] *= a[45]);
  b[43] = (c3[5] *= a[45]);
  b[44] = (c4[5] *= a[45]);
  b[45] = (c5[5] *= a[45]);
  b[46] = (c6[5] *= a[45]);
  b[47] = (c7[5] *= a[45]);

  VbS0 = vec_splat(Vb[20], 0);
  VbS1 = vec_splat(Vb[20], 1);
  VbS2 = vec_splat(Vb[21], 0);
  VbS3 = vec_splat(Vb[21], 1);
  VbS4 = vec_splat(Vb[22], 0);
  VbS5 = vec_splat(Vb[22], 1);
  VbS6 = vec_splat(Vb[23], 0);
  VbS7 = vec_splat(Vb[23], 1);

  Vc0[3] = vec_nmsub(VbS0, Va[23], Vc0[3]);
  Vc1[3] = vec_nmsub(VbS1, Va[23], Vc1[3]);
  Vc2[3] = vec_nmsub(VbS2, Va[23], Vc2[3]);
  Vc3[3] = vec_nmsub(VbS3, Va[23], Vc3[3]);
  Vc4[3] = vec_nmsub(VbS4, Va[23], Vc4[3]);
  Vc5[3] = vec_nmsub(VbS5, Va[23], Vc5[3]);
  Vc6[3] = vec_nmsub(VbS6, Va[23], Vc6[3]);
  Vc7[3] = vec_nmsub(VbS7, Va[23], Vc7[3]);

  b[48] = (c0[6] *= a[54]);
  b[49] = (c1[6] *= a[54]);
  b[50] = (c2[6] *= a[54]);
  b[51] = (c3[6] *= a[54]);
  b[52] = (c4[6] *= a[54]);
  b[53] = (c5[6] *= a[54]);
  b[54] = (c6[6] *= a[54]);
  b[55] = (c7[6] *= a[54]);

  c0[7] -= c0[6] * a[55];
  c1[7] -= c1[6] * a[55];
  c2[7] -= c2[6] * a[55];
  c3[7] -= c3[6] * a[55];
  c4[7] -= c4[6] * a[55];
  c5[7] -= c5[6] * a[55];
  c6[7] -= c6[6] * a[55];
  c7[7] -= c7[6] * a[55];

  b[56] = (c0[7] *= a[63]);
  b[57] = (c1[7] *= a[63]);
  b[58] = (c2[7] *= a[63]);
  b[59] = (c3[7] *= a[63]);
  b[60] = (c4[7] *= a[63]);
  b[61] = (c5[7] *= a[63]);
  b[62] = (c6[7] *= a[63]);
  b[63] = (c7[7] *= a[63]);
}
#else
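
/* Single-precision variant: forward substitution for one 16x8 block
 * (a vector float holds four elements, so each column of C again spans
 * Vc[0..3]). Mirrors solve8x8 above; the packed 16x16 triangle keeps
 * its (pre-inverted) diagonal at a[17*i]. */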
static inline __attribute__ ((always_inline)) void solve16x8(FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
  FLOAT *c0, *c1, *c2, *c3, *c4, *c5, *c6, *c7;
  c0 = &c[0*ldc];
  c1 = &c[1*ldc];
  c2 = &c[2*ldc];
  c3 = &c[3*ldc];
  c4 = &c[4*ldc];
  c5 = &c[5*ldc];
  c6 = &c[6*ldc];
  c7 = &c[7*ldc];
  vector FLOAT *Va = (vector FLOAT *) a;
  vector FLOAT *Vb = (vector FLOAT *) b;
  vector FLOAT *Vc0 = (vector FLOAT *) c0;
  vector FLOAT *Vc1 = (vector FLOAT *) c1;
  vector FLOAT *Vc2 = (vector FLOAT *) c2;
  vector FLOAT *Vc3 = (vector FLOAT *) c3;
  vector FLOAT *Vc4 = (vector FLOAT *) c4;
  vector FLOAT *Vc5 = (vector FLOAT *) c5;
  vector FLOAT *Vc6 = (vector FLOAT *) c6;
  vector FLOAT *Vc7 = (vector FLOAT *) c7;
  vector FLOAT VbS0, VbS1, VbS2, VbS3, VbS4, VbS5, VbS6, VbS7;
  int j;
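
  /* Same row-by-row scheme as solve8x8: scale one row of all eight
   * columns by the inverted diagonal, store the results to b, then
   * subtract multiples of that row from the rows below, using
   * vec_nmsub for elements on full vector boundaries and scalar code
   * for the rest. */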
  b[0] = (c0[0] *= a[0]);
  b[1] = (c1[0] *= a[0]);
  b[2] = (c2[0] *= a[0]);
  b[3] = (c3[0] *= a[0]);
  b[4] = (c4[0] *= a[0]);
  b[5] = (c5[0] *= a[0]);
  b[6] = (c6[0] *= a[0]);
  b[7] = (c7[0] *= a[0]);

  VbS0 = vec_splat(Vb[0], 0);
  VbS1 = vec_splat(Vb[0], 1);
  VbS2 = vec_splat(Vb[0], 2);
  VbS3 = vec_splat(Vb[0], 3);
  VbS4 = vec_splat(Vb[1], 0);
  VbS5 = vec_splat(Vb[1], 1);
  VbS6 = vec_splat(Vb[1], 2);
  VbS7 = vec_splat(Vb[1], 3);

  Vc0[1] = vec_nmsub(VbS0, Va[1], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[2], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[3], Vc0[3]);
  Vc1[1] = vec_nmsub(VbS1, Va[1], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[2], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[3], Vc1[3]);
  Vc2[1] = vec_nmsub(VbS2, Va[1], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[2], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[3], Vc2[3]);
  Vc3[1] = vec_nmsub(VbS3, Va[1], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[2], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[3], Vc3[3]);
  Vc4[1] = vec_nmsub(VbS4, Va[1], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[2], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[3], Vc4[3]);
  Vc5[1] = vec_nmsub(VbS5, Va[1], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[2], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[3], Vc5[3]);
  Vc6[1] = vec_nmsub(VbS6, Va[1], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[2], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[3], Vc6[3]);
  Vc7[1] = vec_nmsub(VbS7, Va[1], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[2], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[3], Vc7[3]);

  c0[1] -= b[0] * a[ 1];
  c0[2] -= b[0] * a[ 2];
  c0[3] -= b[0] * a[ 3];
  c1[1] -= b[1] * a[ 1];
  c1[2] -= b[1] * a[ 2];
  c1[3] -= b[1] * a[ 3];
  c2[1] -= b[2] * a[ 1];
  c2[2] -= b[2] * a[ 2];
  c2[3] -= b[2] * a[ 3];
  c3[1] -= b[3] * a[ 1];
  c3[2] -= b[3] * a[ 2];
  c3[3] -= b[3] * a[ 3];
  c4[1] -= b[4] * a[ 1];
  c4[2] -= b[4] * a[ 2];
  c4[3] -= b[4] * a[ 3];
  c5[1] -= b[5] * a[ 1];
  c5[2] -= b[5] * a[ 2];
  c5[3] -= b[5] * a[ 3];
  c6[1] -= b[6] * a[ 1];
  c6[2] -= b[6] * a[ 2];
  c6[3] -= b[6] * a[ 3];
  c7[1] -= b[7] * a[ 1];
  c7[2] -= b[7] * a[ 2];
  c7[3] -= b[7] * a[ 3];

  b[ 8] = (c0[1] *= a[17]);
  b[ 9] = (c1[1] *= a[17]);
  b[10] = (c2[1] *= a[17]);
  b[11] = (c3[1] *= a[17]);
  b[12] = (c4[1] *= a[17]);
  b[13] = (c5[1] *= a[17]);
  b[14] = (c6[1] *= a[17]);
  b[15] = (c7[1] *= a[17]);

  VbS0 = vec_splat(Vb[2], 0);
  VbS1 = vec_splat(Vb[2], 1);
  VbS2 = vec_splat(Vb[2], 2);
  VbS3 = vec_splat(Vb[2], 3);
  VbS4 = vec_splat(Vb[3], 0);
  VbS5 = vec_splat(Vb[3], 1);
  VbS6 = vec_splat(Vb[3], 2);
  VbS7 = vec_splat(Vb[3], 3);

  Vc0[1] = vec_nmsub(VbS0, Va[5], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[6], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[7], Vc0[3]);
  Vc1[1] = vec_nmsub(VbS1, Va[5], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[6], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[7], Vc1[3]);
  Vc2[1] = vec_nmsub(VbS2, Va[5], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[6], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[7], Vc2[3]);
  Vc3[1] = vec_nmsub(VbS3, Va[5], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[6], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[7], Vc3[3]);
  Vc4[1] = vec_nmsub(VbS4, Va[5], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[6], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[7], Vc4[3]);
  Vc5[1] = vec_nmsub(VbS5, Va[5], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[6], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[7], Vc5[3]);
  Vc6[1] = vec_nmsub(VbS6, Va[5], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[6], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[7], Vc6[3]);
  Vc7[1] = vec_nmsub(VbS7, Va[5], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[6], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[7], Vc7[3]);

  c0[2] -= b[ 8] * a[18];
  c0[3] -= b[ 8] * a[19];
  c1[2] -= b[ 9] * a[18];
  c1[3] -= b[ 9] * a[19];
  c2[2] -= b[10] * a[18];
  c2[3] -= b[10] * a[19];
  c3[2] -= b[11] * a[18];
  c3[3] -= b[11] * a[19];
  c4[2] -= b[12] * a[18];
  c4[3] -= b[12] * a[19];
  c5[2] -= b[13] * a[18];
  c5[3] -= b[13] * a[19];
  c6[2] -= b[14] * a[18];
  c6[3] -= b[14] * a[19];
  c7[2] -= b[15] * a[18];
  c7[3] -= b[15] * a[19];

  b[16] = (c0[2] *= a[34]);
  b[17] = (c1[2] *= a[34]);
  b[18] = (c2[2] *= a[34]);
  b[19] = (c3[2] *= a[34]);
  b[20] = (c4[2] *= a[34]);
  b[21] = (c5[2] *= a[34]);
  b[22] = (c6[2] *= a[34]);
  b[23] = (c7[2] *= a[34]);

  VbS0 = vec_splat(Vb[4], 0);
  VbS1 = vec_splat(Vb[4], 1);
  VbS2 = vec_splat(Vb[4], 2);
  VbS3 = vec_splat(Vb[4], 3);
  VbS4 = vec_splat(Vb[5], 0);
  VbS5 = vec_splat(Vb[5], 1);
  VbS6 = vec_splat(Vb[5], 2);
  VbS7 = vec_splat(Vb[5], 3);

  Vc0[1] = vec_nmsub(VbS0, Va[ 9], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[10], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[11], Vc0[3]);
  Vc1[1] = vec_nmsub(VbS1, Va[ 9], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[10], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[11], Vc1[3]);
  Vc2[1] = vec_nmsub(VbS2, Va[ 9], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[10], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[11], Vc2[3]);
  Vc3[1] = vec_nmsub(VbS3, Va[ 9], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[10], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[11], Vc3[3]);
  Vc4[1] = vec_nmsub(VbS4, Va[ 9], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[10], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[11], Vc4[3]);
  Vc5[1] = vec_nmsub(VbS5, Va[ 9], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[10], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[11], Vc5[3]);
  Vc6[1] = vec_nmsub(VbS6, Va[ 9], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[10], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[11], Vc6[3]);
  Vc7[1] = vec_nmsub(VbS7, Va[ 9], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[10], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[11], Vc7[3]);

  c0[3] -= b[16] * a[35];
  c1[3] -= b[17] * a[35];
  c2[3] -= b[18] * a[35];
  c3[3] -= b[19] * a[35];
  c4[3] -= b[20] * a[35];
  c5[3] -= b[21] * a[35];
  c6[3] -= b[22] * a[35];
  c7[3] -= b[23] * a[35];

  b[24] = (c0[3] *= a[51]);
  b[25] = (c1[3] *= a[51]);
  b[26] = (c2[3] *= a[51]);
  b[27] = (c3[3] *= a[51]);
  b[28] = (c4[3] *= a[51]);
  b[29] = (c5[3] *= a[51]);
  b[30] = (c6[3] *= a[51]);
  b[31] = (c7[3] *= a[51]);

  VbS0 = vec_splat(Vb[6], 0);
  VbS1 = vec_splat(Vb[6], 1);
  VbS2 = vec_splat(Vb[6], 2);
  VbS3 = vec_splat(Vb[6], 3);
  VbS4 = vec_splat(Vb[7], 0);
  VbS5 = vec_splat(Vb[7], 1);
  VbS6 = vec_splat(Vb[7], 2);
  VbS7 = vec_splat(Vb[7], 3);

  Vc0[1] = vec_nmsub(VbS0, Va[13], Vc0[1]);
  Vc0[2] = vec_nmsub(VbS0, Va[14], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[15], Vc0[3]);
  Vc1[1] = vec_nmsub(VbS1, Va[13], Vc1[1]);
  Vc1[2] = vec_nmsub(VbS1, Va[14], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[15], Vc1[3]);
  Vc2[1] = vec_nmsub(VbS2, Va[13], Vc2[1]);
  Vc2[2] = vec_nmsub(VbS2, Va[14], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[15], Vc2[3]);
  Vc3[1] = vec_nmsub(VbS3, Va[13], Vc3[1]);
  Vc3[2] = vec_nmsub(VbS3, Va[14], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[15], Vc3[3]);
  Vc4[1] = vec_nmsub(VbS4, Va[13], Vc4[1]);
  Vc4[2] = vec_nmsub(VbS4, Va[14], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[15], Vc4[3]);
  Vc5[1] = vec_nmsub(VbS5, Va[13], Vc5[1]);
  Vc5[2] = vec_nmsub(VbS5, Va[14], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[15], Vc5[3]);
  Vc6[1] = vec_nmsub(VbS6, Va[13], Vc6[1]);
  Vc6[2] = vec_nmsub(VbS6, Va[14], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[15], Vc6[3]);
  Vc7[1] = vec_nmsub(VbS7, Va[13], Vc7[1]);
  Vc7[2] = vec_nmsub(VbS7, Va[14], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[15], Vc7[3]);

  b[32] = (c0[4] *= a[68]);
  b[33] = (c1[4] *= a[68]);
  b[34] = (c2[4] *= a[68]);
  b[35] = (c3[4] *= a[68]);
  b[36] = (c4[4] *= a[68]);
  b[37] = (c5[4] *= a[68]);
  b[38] = (c6[4] *= a[68]);
  b[39] = (c7[4] *= a[68]);

  VbS0 = vec_splat(Vb[8], 0);
  VbS1 = vec_splat(Vb[8], 1);
  VbS2 = vec_splat(Vb[8], 2);
  VbS3 = vec_splat(Vb[8], 3);
  VbS4 = vec_splat(Vb[9], 0);
  VbS5 = vec_splat(Vb[9], 1);
  VbS6 = vec_splat(Vb[9], 2);
  VbS7 = vec_splat(Vb[9], 3);

  Vc0[2] = vec_nmsub(VbS0, Va[18], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[19], Vc0[3]);
  Vc1[2] = vec_nmsub(VbS1, Va[18], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[19], Vc1[3]);
  Vc2[2] = vec_nmsub(VbS2, Va[18], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[19], Vc2[3]);
  Vc3[2] = vec_nmsub(VbS3, Va[18], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[19], Vc3[3]);
  Vc4[2] = vec_nmsub(VbS4, Va[18], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[19], Vc4[3]);
  Vc5[2] = vec_nmsub(VbS5, Va[18], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[19], Vc5[3]);
  Vc6[2] = vec_nmsub(VbS6, Va[18], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[19], Vc6[3]);
  Vc7[2] = vec_nmsub(VbS7, Va[18], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[19], Vc7[3]);

  c0[5] -= b[32] * a[69];
  c0[6] -= b[32] * a[70];
  c0[7] -= b[32] * a[71];
  c1[5] -= b[33] * a[69];
  c1[6] -= b[33] * a[70];
  c1[7] -= b[33] * a[71];
  c2[5] -= b[34] * a[69];
  c2[6] -= b[34] * a[70];
  c2[7] -= b[34] * a[71];
  c3[5] -= b[35] * a[69];
  c3[6] -= b[35] * a[70];
  c3[7] -= b[35] * a[71];
  c4[5] -= b[36] * a[69];
  c4[6] -= b[36] * a[70];
  c4[7] -= b[36] * a[71];
  c5[5] -= b[37] * a[69];
  c5[6] -= b[37] * a[70];
  c5[7] -= b[37] * a[71];
  c6[5] -= b[38] * a[69];
  c6[6] -= b[38] * a[70];
  c6[7] -= b[38] * a[71];
  c7[5] -= b[39] * a[69];
  c7[6] -= b[39] * a[70];
  c7[7] -= b[39] * a[71];

  b[40] = (c0[5] *= a[85]);
  b[41] = (c1[5] *= a[85]);
  b[42] = (c2[5] *= a[85]);
  b[43] = (c3[5] *= a[85]);
  b[44] = (c4[5] *= a[85]);
  b[45] = (c5[5] *= a[85]);
  b[46] = (c6[5] *= a[85]);
  b[47] = (c7[5] *= a[85]);

  VbS0 = vec_splat(Vb[10], 0);
  VbS1 = vec_splat(Vb[10], 1);
  VbS2 = vec_splat(Vb[10], 2);
  VbS3 = vec_splat(Vb[10], 3);
  VbS4 = vec_splat(Vb[11], 0);
  VbS5 = vec_splat(Vb[11], 1);
  VbS6 = vec_splat(Vb[11], 2);
  VbS7 = vec_splat(Vb[11], 3);

  Vc0[2] = vec_nmsub(VbS0, Va[22], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[23], Vc0[3]);
  Vc1[2] = vec_nmsub(VbS1, Va[22], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[23], Vc1[3]);
  Vc2[2] = vec_nmsub(VbS2, Va[22], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[23], Vc2[3]);
  Vc3[2] = vec_nmsub(VbS3, Va[22], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[23], Vc3[3]);
  Vc4[2] = vec_nmsub(VbS4, Va[22], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[23], Vc4[3]);
  Vc5[2] = vec_nmsub(VbS5, Va[22], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[23], Vc5[3]);
  Vc6[2] = vec_nmsub(VbS6, Va[22], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[23], Vc6[3]);
  Vc7[2] = vec_nmsub(VbS7, Va[22], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[23], Vc7[3]);

  c0[6] -= b[40] * a[86];
  c0[7] -= b[40] * a[87];
  c1[6] -= b[41] * a[86];
  c1[7] -= b[41] * a[87];
  c2[6] -= b[42] * a[86];
  c2[7] -= b[42] * a[87];
  c3[6] -= b[43] * a[86];
  c3[7] -= b[43] * a[87];
  c4[6] -= b[44] * a[86];
  c4[7] -= b[44] * a[87];
  c5[6] -= b[45] * a[86];
  c5[7] -= b[45] * a[87];
  c6[6] -= b[46] * a[86];
  c6[7] -= b[46] * a[87];
  c7[6] -= b[47] * a[86];
  c7[7] -= b[47] * a[87];

  b[48] = (c0[6] *= a[102]);
  b[49] = (c1[6] *= a[102]);
  b[50] = (c2[6] *= a[102]);
  b[51] = (c3[6] *= a[102]);
  b[52] = (c4[6] *= a[102]);
  b[53] = (c5[6] *= a[102]);
  b[54] = (c6[6] *= a[102]);
  b[55] = (c7[6] *= a[102]);

  VbS0 = vec_splat(Vb[12], 0);
  VbS1 = vec_splat(Vb[12], 1);
  VbS2 = vec_splat(Vb[12], 2);
  VbS3 = vec_splat(Vb[12], 3);
  VbS4 = vec_splat(Vb[13], 0);
  VbS5 = vec_splat(Vb[13], 1);
  VbS6 = vec_splat(Vb[13], 2);
  VbS7 = vec_splat(Vb[13], 3);

  Vc0[2] = vec_nmsub(VbS0, Va[26], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[27], Vc0[3]);
  Vc1[2] = vec_nmsub(VbS1, Va[26], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[27], Vc1[3]);
  Vc2[2] = vec_nmsub(VbS2, Va[26], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[27], Vc2[3]);
  Vc3[2] = vec_nmsub(VbS3, Va[26], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[27], Vc3[3]);
  Vc4[2] = vec_nmsub(VbS4, Va[26], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[27], Vc4[3]);
  Vc5[2] = vec_nmsub(VbS5, Va[26], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[27], Vc5[3]);
  Vc6[2] = vec_nmsub(VbS6, Va[26], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[27], Vc6[3]);
  Vc7[2] = vec_nmsub(VbS7, Va[26], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[27], Vc7[3]);

  c0[7] -= b[48] * a[103];
  c1[7] -= b[49] * a[103];
  c2[7] -= b[50] * a[103];
  c3[7] -= b[51] * a[103];
  c4[7] -= b[52] * a[103];
  c5[7] -= b[53] * a[103];
  c6[7] -= b[54] * a[103];
  c7[7] -= b[55] * a[103];

  b[56] = (c0[7] *= a[119]);
  b[57] = (c1[7] *= a[119]);
  b[58] = (c2[7] *= a[119]);
  b[59] = (c3[7] *= a[119]);
  b[60] = (c4[7] *= a[119]);
  b[61] = (c5[7] *= a[119]);
  b[62] = (c6[7] *= a[119]);
  b[63] = (c7[7] *= a[119]);

  VbS0 = vec_splat(Vb[14], 0);
  VbS1 = vec_splat(Vb[14], 1);
  VbS2 = vec_splat(Vb[14], 2);
  VbS3 = vec_splat(Vb[14], 3);
  VbS4 = vec_splat(Vb[15], 0);
  VbS5 = vec_splat(Vb[15], 1);
  VbS6 = vec_splat(Vb[15], 2);
  VbS7 = vec_splat(Vb[15], 3);

  Vc0[2] = vec_nmsub(VbS0, Va[30], Vc0[2]);
  Vc0[3] = vec_nmsub(VbS0, Va[31], Vc0[3]);
  Vc1[2] = vec_nmsub(VbS1, Va[30], Vc1[2]);
  Vc1[3] = vec_nmsub(VbS1, Va[31], Vc1[3]);
  Vc2[2] = vec_nmsub(VbS2, Va[30], Vc2[2]);
  Vc2[3] = vec_nmsub(VbS2, Va[31], Vc2[3]);
  Vc3[2] = vec_nmsub(VbS3, Va[30], Vc3[2]);
  Vc3[3] = vec_nmsub(VbS3, Va[31], Vc3[3]);
  Vc4[2] = vec_nmsub(VbS4, Va[30], Vc4[2]);
  Vc4[3] = vec_nmsub(VbS4, Va[31], Vc4[3]);
  Vc5[2] = vec_nmsub(VbS5, Va[30], Vc5[2]);
  Vc5[3] = vec_nmsub(VbS5, Va[31], Vc5[3]);
  Vc6[2] = vec_nmsub(VbS6, Va[30], Vc6[2]);
  Vc6[3] = vec_nmsub(VbS6, Va[31], Vc6[3]);
  Vc7[2] = vec_nmsub(VbS7, Va[30], Vc7[2]);
  Vc7[3] = vec_nmsub(VbS7, Va[31], Vc7[3]);

  b[64] = (c0[8] *= a[136]);
  b[65] = (c1[8] *= a[136]);
  b[66] = (c2[8] *= a[136]);
  b[67] = (c3[8] *= a[136]);
  b[68] = (c4[8] *= a[136]);
  b[69] = (c5[8] *= a[136]);
  b[70] = (c6[8] *= a[136]);
  b[71] = (c7[8] *= a[136]);

  VbS0 = vec_splat(Vb[16], 0);
  VbS1 = vec_splat(Vb[16], 1);
  VbS2 = vec_splat(Vb[16], 2);
  VbS3 = vec_splat(Vb[16], 3);
  VbS4 = vec_splat(Vb[17], 0);
  VbS5 = vec_splat(Vb[17], 1);
  VbS6 = vec_splat(Vb[17], 2);
  VbS7 = vec_splat(Vb[17], 3);

  Vc0[3] = vec_nmsub(VbS0, Va[35], Vc0[3]);
  Vc1[3] = vec_nmsub(VbS1, Va[35], Vc1[3]);
  Vc2[3] = vec_nmsub(VbS2, Va[35], Vc2[3]);
  Vc3[3] = vec_nmsub(VbS3, Va[35], Vc3[3]);
  Vc4[3] = vec_nmsub(VbS4, Va[35], Vc4[3]);
  Vc5[3] = vec_nmsub(VbS5, Va[35], Vc5[3]);
  Vc6[3] = vec_nmsub(VbS6, Va[35], Vc6[3]);
  Vc7[3] = vec_nmsub(VbS7, Va[35], Vc7[3]);

  c0[ 9] -= b[64] * a[137];
  c0[10] -= b[64] * a[138];
  c0[11] -= b[64] * a[139];
  c1[ 9] -= b[65] * a[137];
  c1[10] -= b[65] * a[138];
  c1[11] -= b[65] * a[139];
  c2[ 9] -= b[66] * a[137];
  c2[10] -= b[66] * a[138];
  c2[11] -= b[66] * a[139];
  c3[ 9] -= b[67] * a[137];
  c3[10] -= b[67] * a[138];
  c3[11] -= b[67] * a[139];
  c4[ 9] -= b[68] * a[137];
  c4[10] -= b[68] * a[138];
  c4[11] -= b[68] * a[139];
  c5[ 9] -= b[69] * a[137];
  c5[10] -= b[69] * a[138];
  c5[11] -= b[69] * a[139];
  c6[ 9] -= b[70] * a[137];
  c6[10] -= b[70] * a[138];
  c6[11] -= b[70] * a[139];
  c7[ 9] -= b[71] * a[137];
  c7[10] -= b[71] * a[138];
  c7[11] -= b[71] * a[139];

  b[72] = (c0[9] *= a[153]);
  b[73] = (c1[9] *= a[153]);
  b[74] = (c2[9] *= a[153]);
  b[75] = (c3[9] *= a[153]);
  b[76] = (c4[9] *= a[153]);
  b[77] = (c5[9] *= a[153]);
  b[78] = (c6[9] *= a[153]);
  b[79] = (c7[9] *= a[153]);

  VbS0 = vec_splat(Vb[18], 0);
  VbS1 = vec_splat(Vb[18], 1);
  VbS2 = vec_splat(Vb[18], 2);
  VbS3 = vec_splat(Vb[18], 3);
  VbS4 = vec_splat(Vb[19], 0);
  VbS5 = vec_splat(Vb[19], 1);
  VbS6 = vec_splat(Vb[19], 2);
  VbS7 = vec_splat(Vb[19], 3);

  Vc0[3] = vec_nmsub(VbS0, Va[39], Vc0[3]);
  Vc1[3] = vec_nmsub(VbS1, Va[39], Vc1[3]);
  Vc2[3] = vec_nmsub(VbS2, Va[39], Vc2[3]);
  Vc3[3] = vec_nmsub(VbS3, Va[39], Vc3[3]);
  Vc4[3] = vec_nmsub(VbS4, Va[39], Vc4[3]);
  Vc5[3] = vec_nmsub(VbS5, Va[39], Vc5[3]);
  Vc6[3] = vec_nmsub(VbS6, Va[39], Vc6[3]);
  Vc7[3] = vec_nmsub(VbS7, Va[39], Vc7[3]);

  c0[10] -= b[72] * a[154];
  c0[11] -= b[72] * a[155];
  c1[10] -= b[73] * a[154];
  c1[11] -= b[73] * a[155];
  c2[10] -= b[74] * a[154];
  c2[11] -= b[74] * a[155];
  c3[10] -= b[75] * a[154];
  c3[11] -= b[75] * a[155];
  c4[10] -= b[76] * a[154];
  c4[11] -= b[76] * a[155];
  c5[10] -= b[77] * a[154];
  c5[11] -= b[77] * a[155];
  c6[10] -= b[78] * a[154];
  c6[11] -= b[78] * a[155];
  c7[10] -= b[79] * a[154];
  c7[11] -= b[79] * a[155];

  b[80] = (c0[10] *= a[170]);
  b[81] = (c1[10] *= a[170]);
  b[82] = (c2[10] *= a[170]);
  b[83] = (c3[10] *= a[170]);
  b[84] = (c4[10] *= a[170]);
  b[85] = (c5[10] *= a[170]);
  b[86] = (c6[10] *= a[170]);
  b[87] = (c7[10] *= a[170]);

  VbS0 = vec_splat(Vb[20], 0);
  VbS1 = vec_splat(Vb[20], 1);
  VbS2 = vec_splat(Vb[20], 2);
  VbS3 = vec_splat(Vb[20], 3);
  VbS4 = vec_splat(Vb[21], 0);
  VbS5 = vec_splat(Vb[21], 1);
  VbS6 = vec_splat(Vb[21], 2);
  VbS7 = vec_splat(Vb[21], 3);

  Vc0[3] = vec_nmsub(VbS0, Va[43], Vc0[3]);
  Vc1[3] = vec_nmsub(VbS1, Va[43], Vc1[3]);
  Vc2[3] = vec_nmsub(VbS2, Va[43], Vc2[3]);
  Vc3[3] = vec_nmsub(VbS3, Va[43], Vc3[3]);
  Vc4[3] = vec_nmsub(VbS4, Va[43], Vc4[3]);
  Vc5[3] = vec_nmsub(VbS5, Va[43], Vc5[3]);
  Vc6[3] = vec_nmsub(VbS6, Va[43], Vc6[3]);
  Vc7[3] = vec_nmsub(VbS7, Va[43], Vc7[3]);

  c0[11] -= b[80] * a[171];
  c1[11] -= b[81] * a[171];
  c2[11] -= b[82] * a[171];
  c3[11] -= b[83] * a[171];
  c4[11] -= b[84] * a[171];
  c5[11] -= b[85] * a[171];
  c6[11] -= b[86] * a[171];
  c7[11] -= b[87] * a[171];

  b[88] = (c0[11] *= a[187]);
  b[89] = (c1[11] *= a[187]);
  b[90] = (c2[11] *= a[187]);
  b[91] = (c3[11] *= a[187]);
  b[92] = (c4[11] *= a[187]);
  b[93] = (c5[11] *= a[187]);
  b[94] = (c6[11] *= a[187]);
  b[95] = (c7[11] *= a[187]);

  VbS0 = vec_splat(Vb[22], 0);
  VbS1 = vec_splat(Vb[22], 1);
  VbS2 = vec_splat(Vb[22], 2);
  VbS3 = vec_splat(Vb[22], 3);
  VbS4 = vec_splat(Vb[23], 0);
  VbS5 = vec_splat(Vb[23], 1);
  VbS6 = vec_splat(Vb[23], 2);
  VbS7 = vec_splat(Vb[23], 3);

  Vc0[3] = vec_nmsub(VbS0, Va[47], Vc0[3]);
  Vc1[3] = vec_nmsub(VbS1, Va[47], Vc1[3]);
  Vc2[3] = vec_nmsub(VbS2, Va[47], Vc2[3]);
  Vc3[3] = vec_nmsub(VbS3, Va[47], Vc3[3]);
  Vc4[3] = vec_nmsub(VbS4, Va[47], Vc4[3]);
  Vc5[3] = vec_nmsub(VbS5, Va[47], Vc5[3]);
  Vc6[3] = vec_nmsub(VbS6, Va[47], Vc6[3]);
  Vc7[3] = vec_nmsub(VbS7, Va[47], Vc7[3]);

  b[ 96] = (c0[12] *= a[204]);
  b[ 97] = (c1[12] *= a[204]);
  b[ 98] = (c2[12] *= a[204]);
  b[ 99] = (c3[12] *= a[204]);
  b[100] = (c4[12] *= a[204]);
  b[101] = (c5[12] *= a[204]);
  b[102] = (c6[12] *= a[204]);
  b[103] = (c7[12] *= a[204]);

  c0[13] -= b[ 96] * a[205];
  c0[14] -= b[ 96] * a[206];
  c0[15] -= b[ 96] * a[207];
  c1[13] -= b[ 97] * a[205];
  c1[14] -= b[ 97] * a[206];
  c1[15] -= b[ 97] * a[207];
  c2[13] -= b[ 98] * a[205];
  c2[14] -= b[ 98] * a[206];
  c2[15] -= b[ 98] * a[207];
  c3[13] -= b[ 99] * a[205];
  c3[14] -= b[ 99] * a[206];
  c3[15] -= b[ 99] * a[207];
  c4[13] -= b[100] * a[205];
  c4[14] -= b[100] * a[206];
  c4[15] -= b[100] * a[207];
  c5[13] -= b[101] * a[205];
  c5[14] -= b[101] * a[206];
  c5[15] -= b[101] * a[207];
  c6[13] -= b[102] * a[205];
  c6[14] -= b[102] * a[206];
  c6[15] -= b[102] * a[207];
  c7[13] -= b[103] * a[205];
  c7[14] -= b[103] * a[206];
  c7[15] -= b[103] * a[207];

  b[104] = (c0[13] *= a[221]);
  b[105] = (c1[13] *= a[221]);
  b[106] = (c2[13] *= a[221]);
  b[107] = (c3[13] *= a[221]);
  b[108] = (c4[13] *= a[221]);
  b[109] = (c5[13] *= a[221]);
  b[110] = (c6[13] *= a[221]);
  b[111] = (c7[13] *= a[221]);

  c0[14] -= b[104] * a[222];
  c0[15] -= b[104] * a[223];
  c1[14] -= b[105] * a[222];
  c1[15] -= b[105] * a[223];
  c2[14] -= b[106] * a[222];
  c2[15] -= b[106] * a[223];
  c3[14] -= b[107] * a[222];
  c3[15] -= b[107] * a[223];
  c4[14] -= b[108] * a[222];
  c4[15] -= b[108] * a[223];
  c5[14] -= b[109] * a[222];
  c5[15] -= b[109] * a[223];
  c6[14] -= b[110] * a[222];
  c6[15] -= b[110] * a[223];
  c7[14] -= b[111] * a[222];
  c7[15] -= b[111] * a[223];

  b[112] = (c0[14] *= a[238]);
  b[113] = (c1[14] *= a[238]);
  b[114] = (c2[14] *= a[238]);
  b[115] = (c3[14] *= a[238]);
  b[116] = (c4[14] *= a[238]);
  b[117] = (c5[14] *= a[238]);
  b[118] = (c6[14] *= a[238]);
  b[119] = (c7[14] *= a[238]);

  c0[15] -= b[112] * a[239];
  c1[15] -= b[113] * a[239];
  c2[15] -= b[114] * a[239];
  c3[15] -= b[115] * a[239];
  c4[15] -= b[116] * a[239];
  c5[15] -= b[117] * a[239];
  c6[15] -= b[118] * a[239];
  c7[15] -= b[119] * a[239];

  b[120] = (c0[15] *= a[255]);
  b[121] = (c1[15] *= a[255]);
  b[122] = (c2[15] *= a[255]);
  b[123] = (c3[15] *= a[255]);
  b[124] = (c4[15] *= a[255]);
  b[125] = (c5[15] *= a[255]);
  b[126] = (c6[15] *= a[255]);
  b[127] = (c7[15] *= a[255]);
}
#endif
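
/* Generic scalar fallback for an m x n block, used when the fast paths
 * above do not apply. a walks the packed triangle one row at a time;
 * a[i] appears to be the pre-inverted diagonal entry (it is multiplied,
 * not divided by) and a[i+1..m-1] the entries below it. Solved values
 * are streamed out through b. */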
static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
  FLOAT aa, bb;
  int i, j, k;

  for (i = 0; i < m; i++) {
    aa = *(a + i);
    for (j = 0; j < n; j ++) {
      bb = *(c + i + j * ldc);
      bb *= aa;
      *b = bb;
      *(c + i + j * ldc) = bb;
      b ++;
      for (k = i + 1; k < m; k ++){
        *(c + k + j * ldc) -= bb * *(a + k);
      }
    }
    a += m;
  }
}
#else
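
/* Complex variant of the scalar fallback. Real and imaginary parts are
 * interleaved (hence the factor of 2 in all strides), and CONJ selects
 * the conjugated forms of both the diagonal multiply and the
 * elimination update. */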
static inline __attribute__ ((always_inline)) void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
  FLOAT aa1, aa2;
  FLOAT bb1, bb2;
  FLOAT cc1, cc2;
  int i, j, k;

  ldc *= 2;

  for (i = 0; i < m; i++) {
    aa1 = *(a + i * 2 + 0);
    aa2 = *(a + i * 2 + 1);
    for (j = 0; j < n; j ++) {
      bb1 = *(c + i * 2 + 0 + j * ldc);
      bb2 = *(c + i * 2 + 1 + j * ldc);
#ifndef CONJ
      cc1 = aa1 * bb1 - aa2 * bb2;
      cc2 = aa1 * bb2 + aa2 * bb1;
#else
      cc1 = aa1 * bb1 + aa2 * bb2;
      cc2 = aa1 * bb2 - aa2 * bb1;
#endif
      *(b + 0) = cc1;
      *(b + 1) = cc2;
      *(c + i * 2 + 0 + j * ldc) = cc1;
      *(c + i * 2 + 1 + j * ldc) = cc2;
      b += 2;
      for (k = i + 1; k < m; k ++){
#ifndef CONJ
        *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) - cc2 * *(a + k * 2 + 1);
        *(c + k * 2 + 1 + j * ldc) -= cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0);
#else
        *(c + k * 2 + 0 + j * ldc) -= cc1 * *(a + k * 2 + 0) + cc2 * *(a + k * 2 + 1);
        *(c + k * 2 + 1 + j * ldc) -= -cc1 * *(a + k * 2 + 1) + cc2 * *(a + k * 2 + 0);
#endif
      }
    }
    a += m * 2;
  }
}
#endif
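
/* Driver for the LT TRSM kernel. For each block of GEMM_UNROLL_N
 * columns it walks down the rows in GEMM_UNROLL_M blocks; kk counts the
 * rows already solved, whose contribution is first removed with
 * GEMM_KERNEL (scaled by dm1, i.e. C -= A * B) before the diagonal
 * block itself is solved. well_aligned routes full-size, 8-byte-aligned
 * blocks to the hand-vectorized solve8x8/solve16x8; everything else
 * goes through the generic solve. */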
int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1,
#ifdef COMPLEX
          FLOAT dummy2,
#endif
          FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){

  FLOAT *aa, *cc;
  BLASLONG kk;
  BLASLONG i, j, jj;

#if 0
  fprintf(stderr, "TRSM KERNEL LT : m = %3ld n = %3ld k = %3ld offset = %3ld\n",
          m, n, k, offset);
#endif

  jj = 0;
  j = (n >> GEMM_UNROLL_N_SHIFT);

#ifdef DOUBLE
  int well_aligned = (GEMM_UNROLL_M==8) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0);
#else
  int well_aligned = (GEMM_UNROLL_M==16) && (GEMM_UNROLL_N==8) && ((((unsigned long) a) & 0x7) == 0);
#endif

  while (j > 0) {
    kk = offset;
    aa = a;
    cc = c;
    i = (m >> GEMM_UNROLL_M_SHIFT);
    while (i > 0) {
      if (kk > 0) {
        GEMM_KERNEL(GEMM_UNROLL_M, GEMM_UNROLL_N, kk, dm1,
#ifdef COMPLEX
                    ZERO,
#endif
                    aa, b, cc, ldc);
      }
      if (well_aligned) {
#ifdef DOUBLE
        solve8x8(aa + kk * GEMM_UNROLL_M * COMPSIZE,
                 b + kk * GEMM_UNROLL_N * COMPSIZE, cc, ldc);
#else
        solve16x8(aa + kk * GEMM_UNROLL_M * COMPSIZE,
                  b + kk * GEMM_UNROLL_N * COMPSIZE, cc, ldc);
#endif
      }
      else {
        solve(GEMM_UNROLL_M, GEMM_UNROLL_N,
              aa + kk * GEMM_UNROLL_M * COMPSIZE,
              b + kk * GEMM_UNROLL_N * COMPSIZE,
              cc, ldc);
      }
      aa += GEMM_UNROLL_M * k * COMPSIZE;
      cc += GEMM_UNROLL_M * COMPSIZE;
      kk += GEMM_UNROLL_M;
      i --;
    }
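
    /* Leftover rows: m is not a multiple of GEMM_UNROLL_M, so finish
       with successively halved block heights via the generic solve. */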
    if (m & (GEMM_UNROLL_M - 1)) {
      i = (GEMM_UNROLL_M >> 1);
      while (i > 0) {
        if (m & i) {
          if (kk > 0) {
            GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1,
#ifdef COMPLEX
                        ZERO,
#endif
                        aa, b, cc, ldc);
          }
          solve(i, GEMM_UNROLL_N,
                aa + kk * i * COMPSIZE,
                b + kk * GEMM_UNROLL_N * COMPSIZE,
                cc, ldc);
          aa += i * k * COMPSIZE;
          cc += i * COMPSIZE;
          kk += i;
        }
        i >>= 1;
      }
    }

    b += GEMM_UNROLL_N * k * COMPSIZE;
    c += GEMM_UNROLL_N * ldc * COMPSIZE;
    j --;
    jj += GEMM_UNROLL_M;
  }
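
  /* Leftover columns: n is not a multiple of GEMM_UNROLL_N; repeat the
     whole row sweep with successively halved block widths. */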
  if (n & (GEMM_UNROLL_N - 1)) {
    j = (GEMM_UNROLL_N >> 1);
    while (j > 0) {
      if (n & j) {
        kk = offset;
        aa = a;
        cc = c;
        i = (m >> GEMM_UNROLL_M_SHIFT);
        while (i > 0) {
          if (kk > 0) {
            GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1,
#ifdef COMPLEX
                        ZERO,
#endif
                        aa, b, cc, ldc);
          }
          solve(GEMM_UNROLL_M, j,
                aa + kk * GEMM_UNROLL_M * COMPSIZE,
                b + kk * j * COMPSIZE, cc, ldc);
          aa += GEMM_UNROLL_M * k * COMPSIZE;
          cc += GEMM_UNROLL_M * COMPSIZE;
          kk += GEMM_UNROLL_M;
          i --;
        }
        if (m & (GEMM_UNROLL_M - 1)) {
          i = (GEMM_UNROLL_M >> 1);
          while (i > 0) {
            if (m & i) {
              if (kk > 0) {
                GEMM_KERNEL(i, j, kk, dm1,
#ifdef COMPLEX
                            ZERO,
#endif
                            aa, b, cc, ldc);
              }
              solve(i, j,
                    aa + kk * i * COMPSIZE,
                    b + kk * j * COMPSIZE, cc, ldc);
              aa += i * k * COMPSIZE;
              cc += i * COMPSIZE;
              kk += i;
            }
            i >>= 1;
          }
        }
        b += j * k * COMPSIZE;
        c += j * ldc * COMPSIZE;
      }
      j >>= 1;
    }
  }

  return 0;
}