
dgemm_kernel_power10.c

/*********************************************************************************
Copyright (c) 2020, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************************/
#include "common.h"
#include <altivec.h>

typedef __vector unsigned char vec_t;
typedef FLOAT v4sf_t __attribute__ ((vector_size (16)));

#if !__has_builtin(__builtin_vsx_assemble_pair)
#define __builtin_vsx_assemble_pair __builtin_mma_assemble_pair
#endif
#if !__has_builtin(__builtin_vsx_disassemble_pair)
#define __builtin_vsx_disassemble_pair __builtin_mma_disassemble_pair
#endif
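
/* Store one MMA accumulator to C.  __builtin_mma_disassemble_acc splits the
   accumulator into four v4sf_t rows of two doubles each; result[i] lands in
   column i of the current output panel at row offset J.  SAVE_ACC covers
   columns 0-3, SAVE_ACC1 columns 4-7, SAVE2x4_ACC the two-column case.  The
   product is scaled by alpha and either stored (TRMMKERNEL) or added to C.  */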
#ifdef TRMMKERNEL
#define SAVE_ACC(ACC, J) \
  __builtin_mma_disassemble_acc ((void *)result, ACC); \
  rowC = (v4sf_t *) &CO[0 * ldc + J]; \
  rowC[0] = result[0] * alpha; \
  rowC = (v4sf_t *) &CO[1 * ldc + J]; \
  rowC[0] = result[1] * alpha; \
  rowC = (v4sf_t *) &CO[2 * ldc + J]; \
  rowC[0] = result[2] * alpha; \
  rowC = (v4sf_t *) &CO[3 * ldc + J]; \
  rowC[0] = result[3] * alpha;
#define SAVE_ACC1(ACC, J) \
  __builtin_mma_disassemble_acc ((void *)result, ACC); \
  rowC = (v4sf_t *) &CO[4 * ldc + J]; \
  rowC[0] = result[0] * alpha; \
  rowC = (v4sf_t *) &CO[5 * ldc + J]; \
  rowC[0] = result[1] * alpha; \
  rowC = (v4sf_t *) &CO[6 * ldc + J]; \
  rowC[0] = result[2] * alpha; \
  rowC = (v4sf_t *) &CO[7 * ldc + J]; \
  rowC[0] = result[3] * alpha;
#define SAVE2x4_ACC(ACC, J) \
  __builtin_mma_disassemble_acc ((void *)result, ACC); \
  rowC = (v4sf_t *) &CO[0 * ldc + J]; \
  rowC[0] = result[0] * alpha; \
  rowC = (v4sf_t *) &CO[1 * ldc + J]; \
  rowC[0] = result[1] * alpha;
#else
#define SAVE_ACC(ACC, J) \
  __builtin_mma_disassemble_acc ((void *)result, ACC); \
  rowC = (v4sf_t *) &CO[0 * ldc + J]; \
  rowC[0] += result[0] * alpha; \
  rowC = (v4sf_t *) &CO[1 * ldc + J]; \
  rowC[0] += result[1] * alpha; \
  rowC = (v4sf_t *) &CO[2 * ldc + J]; \
  rowC[0] += result[2] * alpha; \
  rowC = (v4sf_t *) &CO[3 * ldc + J]; \
  rowC[0] += result[3] * alpha;
#define SAVE_ACC1(ACC, J) \
  __builtin_mma_disassemble_acc ((void *)result, ACC); \
  rowC = (v4sf_t *) &CO[4 * ldc + J]; \
  rowC[0] += result[0] * alpha; \
  rowC = (v4sf_t *) &CO[5 * ldc + J]; \
  rowC[0] += result[1] * alpha; \
  rowC = (v4sf_t *) &CO[6 * ldc + J]; \
  rowC[0] += result[2] * alpha; \
  rowC = (v4sf_t *) &CO[7 * ldc + J]; \
  rowC[0] += result[3] * alpha;
#define SAVE2x4_ACC(ACC, J) \
  __builtin_mma_disassemble_acc ((void *)result, ACC); \
  rowC = (v4sf_t *) &CO[0 * ldc + J]; \
  rowC[0] += result[0] * alpha; \
  rowC = (v4sf_t *) &CO[1 * ldc + J]; \
  rowC[0] += result[1] * alpha;
#endif

#define PREFETCH1(x, y) asm volatile ("dcbt %0, %1" : : "r" (x), "b" (y) : "memory");
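
/* TRMM support: REFRESH_TEMP_BK computes the effective inner-product length
   for the current tile from the triangular offset, REFRESH_POINTERS positions
   AO/BO at the start of that tile, and REFRESH_AFTER_SAVE advances the
   pointers past the remaining part of the tile and updates the offset.  */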
#if (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
#define REFRESH_TEMP_BK(x, y) \
  temp = k - off;
#elif defined(LEFT)
#define REFRESH_TEMP_BK(x, y) \
  temp = off + x;
#else
#define REFRESH_TEMP_BK(x, y) \
  temp = off + y;
#endif
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
#define REFRESH_POINTERS(x, y) \
  BO = B; \
  REFRESH_TEMP_BK(x, y)
#else
#define REFRESH_POINTERS(x, y) \
  AO += off * x; \
  BO = B + off * y; \
  REFRESH_TEMP_BK(x, y)
#endif
#ifdef LEFT
#define REFRESH_OFF(x) \
  off += x;
#else
#define REFRESH_OFF(x)
#endif
#ifdef LEFT
#define UPDATE_TEMP(x, y) \
  temp -= x;
#else
#define UPDATE_TEMP(x, y) \
  temp -= y;
#endif
#if (defined(LEFT) && defined(TRANSA)) || (!defined(LEFT) && !defined(TRANSA))
#define REFRESH_TMP_AFTER_SAVE(x, y) \
  temp = k - off; \
  UPDATE_TEMP(x, y) \
  AO += temp * x; \
  BO += temp * y;
#else
#define REFRESH_TMP_AFTER_SAVE(x, y)
#endif
#define REFRESH_AFTER_SAVE(x,y) \
  REFRESH_TMP_AFTER_SAVE(x, y) \
  REFRESH_OFF(x)
/*************************************************************************************
* GEMM Kernel
*************************************************************************************/
int
CNAME (BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, FLOAT * A, FLOAT * B,
       FLOAT * C, BLASLONG ldc
#ifdef TRMMKERNEL
       , BLASLONG offset
#endif
  )
{
  BLASLONG i1;
#if defined(TRMMKERNEL)
  BLASLONG off;
#endif
#if defined(TRMMKERNEL) && !defined(LEFT)
  off = -offset;
#endif
  v4sf_t valpha = { alpha, alpha };
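  /* Main loop: compute C in panels of eight columns.  */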
  for (i1 = 0; i1 < (n >> 3); i1++)
    {
      BLASLONG j, temp;
      FLOAT *CO;
      FLOAT *AO;
#if defined(TRMMKERNEL) && defined(LEFT)
      off = offset;
#endif
      CO = C;
      C += ldc << 3;
      AO = A;
      PREFETCH1 (A, 128);
      PREFETCH1 (A, 256);
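      /* 8x8 tile: eight MMA accumulators, each holding a 4x2 block of
         doubles.  rowB/rowB1 are __vector_pairs carrying eight B values
         (eight columns); rowA[0..3] carry eight A values (eight rows).  */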
      for (j = 0; j < (m >> 3); j++)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (8, 8);
#else
          BO = B;
          temp = k;
#endif
          v4sf_t *rowC;
          v4sf_t result[4];
          __vector_quad acc0, acc1, acc2, acc3, acc4, acc5, acc6, acc7;
          BLASLONG l = 0;
          vec_t *rowA = (vec_t *) & AO[0];
          __vector_pair rowB, rowB1;
          rowB = *((__vector_pair *)((void *)&BO[0]));
          rowB1 = *((__vector_pair *)((void *)&BO[4]));
          __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
          __builtin_mma_xvf64ger (&acc1, rowB1, rowA[0]);
          __builtin_mma_xvf64ger (&acc2, rowB, rowA[1]);
          __builtin_mma_xvf64ger (&acc3, rowB1, rowA[1]);
          __builtin_mma_xvf64ger (&acc4, rowB, rowA[2]);
          __builtin_mma_xvf64ger (&acc5, rowB1, rowA[2]);
          __builtin_mma_xvf64ger (&acc6, rowB, rowA[3]);
          __builtin_mma_xvf64ger (&acc7, rowB1, rowA[3]);
          for (l = 1; l < temp; l++)
            {
              rowA = (vec_t *) & AO[l << 3];
              rowB = *((__vector_pair *)((void *)&BO[l << 3]));
              rowB1 = *((__vector_pair *)((void *)&BO[(l << 3) + 4]));
              __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
              __builtin_mma_xvf64gerpp (&acc1, rowB1, rowA[0]);
              __builtin_mma_xvf64gerpp (&acc2, rowB, rowA[1]);
              __builtin_mma_xvf64gerpp (&acc3, rowB1, rowA[1]);
              __builtin_mma_xvf64gerpp (&acc4, rowB, rowA[2]);
              __builtin_mma_xvf64gerpp (&acc5, rowB1, rowA[2]);
              __builtin_mma_xvf64gerpp (&acc6, rowB, rowA[3]);
              __builtin_mma_xvf64gerpp (&acc7, rowB1, rowA[3]);
            }
          SAVE_ACC (&acc0, 0);
          SAVE_ACC1 (&acc1, 0);
          SAVE_ACC (&acc2, 2);
          SAVE_ACC1 (&acc3, 2);
          SAVE_ACC (&acc4, 4);
          SAVE_ACC1 (&acc5, 4);
          SAVE_ACC (&acc6, 6);
          SAVE_ACC1 (&acc7, 6);
          CO += 8;
          AO += temp << 3;
          BO += temp << 3;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (8, 8)
#endif
        }
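      /* m & 4: 4x8 tile, four accumulators.  */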
      if (m & 4)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (4, 8);
#else
          BO = B;
          temp = k;
#endif
          v4sf_t *rowC;
          v4sf_t result[4];
          __vector_quad acc0, acc1, acc2, acc3;
          BLASLONG l = 0;
          vec_t *rowA = (vec_t *) & AO[0];
          __vector_pair rowB, rowB1;
          rowB = *((__vector_pair *)((void *)&BO[0]));
          rowB1 = *((__vector_pair *)((void *)&BO[4]));
          __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
          __builtin_mma_xvf64ger (&acc1, rowB1, rowA[0]);
          __builtin_mma_xvf64ger (&acc2, rowB, rowA[1]);
          __builtin_mma_xvf64ger (&acc3, rowB1, rowA[1]);
          for (l = 1; l < temp; l++)
            {
              rowA = (vec_t *) & AO[l << 2];
              rowB = *((__vector_pair *)((void *)&BO[l << 3]));
              rowB1 = *((__vector_pair *)((void *)&BO[(l << 3) + 4]));
              __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
              __builtin_mma_xvf64gerpp (&acc1, rowB1, rowA[0]);
              __builtin_mma_xvf64gerpp (&acc2, rowB, rowA[1]);
              __builtin_mma_xvf64gerpp (&acc3, rowB1, rowA[1]);
            }
          SAVE_ACC (&acc0, 0);
          SAVE_ACC1 (&acc1, 0);
          SAVE_ACC (&acc2, 2);
          SAVE_ACC1 (&acc3, 2);
          CO += 4;
          AO += temp << 2;
          BO += temp << 3;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (4, 8)
#endif
        }
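      /* m & 2: 2x8 tile, two accumulators.  */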
      if (m & 2)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (2, 8);
#else
          BO = B;
          temp = k;
#endif
          v4sf_t *rowC;
          v4sf_t result[4];
          __vector_quad acc0, acc1;
          BLASLONG l = 0;
          vec_t *rowA = (vec_t *) & AO[0];
          __vector_pair rowB, rowB1;
          rowB = *((__vector_pair *)((void *)&BO[0]));
          rowB1 = *((__vector_pair *)((void *)&BO[4]));
          __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
          __builtin_mma_xvf64ger (&acc1, rowB1, rowA[0]);
          for (l = 1; l < temp; l++)
            {
              rowA = (vec_t *) & AO[l << 1];
              rowB = *((__vector_pair *)((void *)&BO[l << 3]));
              rowB1 = *((__vector_pair *)((void *)&BO[(l << 3) + 4]));
              __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
              __builtin_mma_xvf64gerpp (&acc1, rowB1, rowA[0]);
            }
          SAVE_ACC (&acc0, 0);
          SAVE_ACC1 (&acc1, 0);
          CO += 2;
          AO += temp << 1;
          BO += temp << 3;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (2, 8)
#endif
        }
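      /* m & 1: last row of the panel, computed with plain vector arithmetic.  */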
      if (m & 1)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (1, 8);
#else
          BO = B;
          temp = k;
#endif
          BLASLONG l = 0;
          v4sf_t t = { 0, 0 };
          v4sf_t t1 = { 0, 0 };
          v4sf_t t2 = { 0, 0 };
          v4sf_t t3 = { 0, 0 };
          for (l = 0; l < temp; l++)
            {
              v4sf_t rowA = { AO[l], AO[l] };
              v4sf_t rowB = { BO[l << 3], BO[(l << 3) + 1] };
              v4sf_t rowB1 = { BO[(l << 3) + 2], BO[(l << 3) + 3] };
              v4sf_t rowB2 = { BO[(l << 3) + 4], BO[(l << 3) + 5] };
              v4sf_t rowB3 = { BO[(l << 3) + 6], BO[(l << 3) + 7] };
              t += rowA * rowB;
              t1 += rowA * rowB1;
              t2 += rowA * rowB2;
              t3 += rowA * rowB3;
            }
          t = t * valpha;
          t1 = t1 * valpha;
          t2 = t2 * valpha;
          t3 = t3 * valpha;
#if defined(TRMMKERNEL)
          CO[0 * ldc] = t[0];
          CO[1 * ldc] = t[1];
          CO[2 * ldc] = t1[0];
          CO[3 * ldc] = t1[1];
          CO[4 * ldc] = t2[0];
          CO[5 * ldc] = t2[1];
          CO[6 * ldc] = t3[0];
          CO[7 * ldc] = t3[1];
#else
          CO[0 * ldc] += t[0];
          CO[1 * ldc] += t[1];
          CO[2 * ldc] += t1[0];
          CO[3 * ldc] += t1[1];
          CO[4 * ldc] += t2[0];
          CO[5 * ldc] += t2[1];
          CO[6 * ldc] += t3[0];
          CO[7 * ldc] += t3[1];
#endif
          CO += 1;
          AO += temp;
          BO += temp << 3;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (1, 8)
#endif
        }
#if defined(TRMMKERNEL) && !defined(LEFT)
      off += 8;  // number of values in A
#endif
      B += k << 3;
    }
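  /* Panels of four columns: one __vector_pair of B per iteration, tiles of
     up to 8x4 handled with MMA accumulators.  */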
  if (n & 4)
    {
      BLASLONG j, temp;
      FLOAT *CO;
      FLOAT *AO;
#if defined(TRMMKERNEL) && defined(LEFT)
      off = offset;
#endif
      CO = C;
      C += ldc << 2;
      AO = A;
      PREFETCH1 (A, 128);
      PREFETCH1 (A, 256);
      for (j = 0; j < (m >> 3); j++)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (8, 4);
#else
          BO = B;
          temp = k;
#endif
          v4sf_t *rowC;
          v4sf_t result[4];
          __vector_quad acc0, acc1, acc2, acc3;
          BLASLONG l = 0;
          vec_t *rowA = (vec_t *) & AO[0];
          __vector_pair rowB;
          rowB = *((__vector_pair *)((void *)&BO[0]));
          __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
          __builtin_mma_xvf64ger (&acc1, rowB, rowA[1]);
          __builtin_mma_xvf64ger (&acc2, rowB, rowA[2]);
          __builtin_mma_xvf64ger (&acc3, rowB, rowA[3]);
          for (l = 1; l < temp; l++)
            {
              rowA = (vec_t *) & AO[l << 3];
              rowB = *((__vector_pair *)((void *)&BO[l << 2]));
              __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
              __builtin_mma_xvf64gerpp (&acc1, rowB, rowA[1]);
              __builtin_mma_xvf64gerpp (&acc2, rowB, rowA[2]);
              __builtin_mma_xvf64gerpp (&acc3, rowB, rowA[3]);
            }
          SAVE_ACC (&acc0, 0);
          SAVE_ACC (&acc2, 4);
          SAVE_ACC (&acc1, 2);
          SAVE_ACC (&acc3, 6);
          CO += 8;
          AO += temp << 3;
          BO += temp << 2;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (8, 4)
#endif
        }
      if (m & 4)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (4, 4);
#else
          BO = B;
          temp = k;
#endif
          v4sf_t *rowC;
          v4sf_t result[4];
          __vector_quad acc0, acc1;
          BLASLONG l = 0;
          vec_t *rowA = (vec_t *) & AO[0];
          __vector_pair rowB;
          rowB = *((__vector_pair *)((void *)&BO[0]));
          __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
          __builtin_mma_xvf64ger (&acc1, rowB, rowA[1]);
          for (l = 1; l < temp; l++)
            {
              rowA = (vec_t *) & AO[l << 2];
              rowB = *((__vector_pair *)((void *)&BO[l << 2]));
              __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
              __builtin_mma_xvf64gerpp (&acc1, rowB, rowA[1]);
            }
          SAVE_ACC (&acc0, 0);
          SAVE_ACC (&acc1, 2);
          CO += 4;
          AO += temp << 2;
          BO += temp << 2;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (4, 4)
#endif
        }
      if (m & 2)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (2, 4);
#else
          BO = B;
          temp = k;
#endif
          v4sf_t *rowC;
          v4sf_t result[4];
          __vector_quad acc0;
          BLASLONG l = 0;
          vec_t *rowA = (vec_t *) & AO[0];
          __vector_pair rowB;
          rowB = *((__vector_pair *)((void *)&BO[0]));
          __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
          for (l = 1; l < temp; l++)
            {
              rowA = (vec_t *) & AO[l << 1];
              rowB = *((__vector_pair *)((void *)&BO[l << 2]));
              __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
            }
          SAVE_ACC (&acc0, 0);
          CO += 2;
          AO += temp << 1;
          BO += temp << 2;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (2, 4)
#endif
        }
      if (m & 1)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (1, 4);
#else
          BO = B;
          temp = k;
#endif
          BLASLONG l = 0;
          v4sf_t t = { 0, 0 };
          v4sf_t t1 = { 0, 0 };
          for (l = 0; l < temp; l++)
            {
              v4sf_t rowA = { AO[l], AO[l] };
              v4sf_t rowB = { BO[l << 2], BO[(l << 2) + 1] };
              v4sf_t rowB1 = { BO[(l << 2) + 2], BO[(l << 2) + 3] };
              t += rowA * rowB;
              t1 += rowA * rowB1;
            }
          t = t * valpha;
          t1 = t1 * valpha;
#if defined(TRMMKERNEL)
          CO[0 * ldc] = t[0];
          CO[1 * ldc] = t[1];
          CO[2 * ldc] = t1[0];
          CO[3 * ldc] = t1[1];
#else
          CO[0 * ldc] += t[0];
          CO[1 * ldc] += t[1];
          CO[2 * ldc] += t1[0];
          CO[3 * ldc] += t1[1];
#endif
          CO += 1;
          AO += temp;
          BO += temp << 2;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (1, 4)
#endif
        }
#if defined(TRMMKERNEL) && !defined(LEFT)
      off += 4;  // number of values in A
#endif
      B += k << 2;
    }
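  /* Panels of two columns: the two B values are duplicated into both halves
     of the __vector_pair, and SAVE2x4_ACC stores only the first two vectors
     of each accumulator.  */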
  if (n & 2)
    {
      BLASLONG j, temp;
#if defined(TRMMKERNEL) && defined(LEFT)
      off = offset;
#endif
      FLOAT *CO;
      FLOAT *AO;
      CO = C;
      C += ldc << 1;
      AO = A;
      for (j = 0; j < (m >> 3); j++)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (8, 2);
#else
          BO = B;
          temp = k;
#endif
          v4sf_t *rowC;
          v4sf_t result[4];
          __vector_quad acc0, acc1, acc2, acc3;
          BLASLONG l = 0;
          __vector_pair rowB;
          vec_t *rb = (vec_t *) & BO[0];
          __builtin_vsx_assemble_pair (&rowB, rb[0], rb[0]);
          vec_t *rowA = (vec_t *) & AO[0];
          __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
          __builtin_mma_xvf64ger (&acc1, rowB, rowA[1]);
          __builtin_mma_xvf64ger (&acc2, rowB, rowA[2]);
          __builtin_mma_xvf64ger (&acc3, rowB, rowA[3]);
          for (l = 1; l < temp; l++)
            {
              rb = (vec_t *) & BO[l << 1];
              __builtin_vsx_assemble_pair (&rowB, rb[0], rb[0]);
              rowA = (vec_t *) & AO[l << 3];
              __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
              __builtin_mma_xvf64gerpp (&acc1, rowB, rowA[1]);
              __builtin_mma_xvf64gerpp (&acc2, rowB, rowA[2]);
              __builtin_mma_xvf64gerpp (&acc3, rowB, rowA[3]);
            }
          SAVE2x4_ACC (&acc0, 0);
          SAVE2x4_ACC (&acc1, 2);
          SAVE2x4_ACC (&acc2, 4);
          SAVE2x4_ACC (&acc3, 6);
          CO += 8;
          AO += temp << 3;
          BO += temp << 1;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (8, 2)
#endif
        }
      if (m & 4)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (4, 2);
#else
          BO = B;
          temp = k;
#endif
          v4sf_t *rowC;
          v4sf_t result[4];
          __vector_quad acc0, acc1;
          BLASLONG l = 0;
          __vector_pair rowB;
          vec_t *rb = (vec_t *) & BO[0];
          __builtin_vsx_assemble_pair (&rowB, rb[0], rb[0]);
          vec_t *rowA = (vec_t *) & AO[0];
          __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
          __builtin_mma_xvf64ger (&acc1, rowB, rowA[1]);
          for (l = 1; l < temp; l++)
            {
              rb = (vec_t *) & BO[l << 1];
              __builtin_vsx_assemble_pair (&rowB, rb[0], rb[0]);
              rowA = (vec_t *) & AO[l << 2];
              __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
              __builtin_mma_xvf64gerpp (&acc1, rowB, rowA[1]);
            }
          SAVE2x4_ACC (&acc0, 0);
          SAVE2x4_ACC (&acc1, 2);
          CO += 4;
          AO += temp << 2;
          BO += temp << 1;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (4, 2)
#endif
        }
      if (m & 2)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (2, 2);
#else
          BO = B;
          temp = k;
#endif
          v4sf_t *rowC;
          v4sf_t result[4];
          __vector_quad acc0;
          BLASLONG l = 0;
          __vector_pair rowB;
          vec_t *rb = (vec_t *) & BO[0];
          __builtin_vsx_assemble_pair (&rowB, rb[0], rb[0]);
          vec_t *rowA = (vec_t *) & AO[0];
          __builtin_mma_xvf64ger (&acc0, rowB, rowA[0]);
          for (l = 1; l < temp; l++)
            {
              rb = (vec_t *) & BO[l << 1];
              __builtin_vsx_assemble_pair (&rowB, rb[0], rb[0]);
              rowA = (vec_t *) & AO[l << 1];
              __builtin_mma_xvf64gerpp (&acc0, rowB, rowA[0]);
            }
          SAVE2x4_ACC (&acc0, 0);
          CO += 2;
          AO += temp << 1;
          BO += temp << 1;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (2, 2)
#endif
        }
      if (m & 1)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (1, 2);
#else
          BO = B;
          temp = k;
#endif
          BLASLONG l = 0;
          v4sf_t t = { 0, 0 };
          for (l = 0; l < temp; l++)
            {
              v4sf_t rowA = { AO[l], AO[l] };
              v4sf_t rowB = { BO[l << 1], BO[(l << 1) + 1] };
              t += rowA * rowB;
            }
          t = t * valpha;
#if defined(TRMMKERNEL)
          CO[0 * ldc] = t[0];
          CO[1 * ldc] = t[1];
#else
          CO[0 * ldc] += t[0];
          CO[1 * ldc] += t[1];
#endif
          CO += 1;
          AO += temp;
          BO += temp << 1;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (1, 2)
#endif
        }
#if defined(TRMMKERNEL) && !defined(LEFT)
      off += 2;  // number of values in A
#endif
      B += k << 1;
    }
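  /* Last single column: straightforward vector/scalar accumulation, no MMA.  */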
  if (n & 1)
    {
      BLASLONG i, temp;
#if defined(TRMMKERNEL) && defined(LEFT)
      off = offset;
#endif
      FLOAT *CO;
      FLOAT *AO;
      CO = C;
      C += ldc;
      AO = A;
      for (i = 0; i < (m >> 3); i++)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (8, 1)
#else
          BO = B;
          temp = k;
#endif
          BLASLONG l = 0;
          v4sf_t t = { 0, 0 };
          v4sf_t t1 = { 0, 0 };
          v4sf_t t2 = { 0, 0 };
          v4sf_t t3 = { 0, 0 };
          for (l = 0; l < temp; l++)
            {
              v4sf_t rowB = { BO[l], BO[l] };
              v4sf_t rowA = { AO[l << 3], AO[(l << 3) + 1] };
              v4sf_t rowA1 = { AO[(l << 3) + 2], AO[(l << 3) + 3] };
              v4sf_t rowA2 = { AO[(l << 3) + 4], AO[(l << 3) + 5] };
              v4sf_t rowA3 = { AO[(l << 3) + 6], AO[(l << 3) + 7] };
              t += rowA * rowB;
              t1 += rowA1 * rowB;
              t2 += rowA2 * rowB;
              t3 += rowA3 * rowB;
            }
          t = t * valpha;
          t1 = t1 * valpha;
          t2 = t2 * valpha;
          t3 = t3 * valpha;
#if defined(TRMMKERNEL)
          CO[0] = t[0];
          CO[1] = t[1];
          CO[2] = t1[0];
          CO[3] = t1[1];
          CO[4] = t2[0];
          CO[5] = t2[1];
          CO[6] = t3[0];
          CO[7] = t3[1];
#else
          CO[0] += t[0];
          CO[1] += t[1];
          CO[2] += t1[0];
          CO[3] += t1[1];
          CO[4] += t2[0];
          CO[5] += t2[1];
          CO[6] += t3[0];
          CO[7] += t3[1];
#endif
          AO += temp << 3;
          BO += temp;
          CO += 8;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (8, 1)
#endif
        }
      if (m & 4)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (4, 1)
#else
          BO = B;
          temp = k;
#endif
          BLASLONG l = 0;
          v4sf_t t = { 0, 0 };
          v4sf_t t1 = { 0, 0 };
          for (l = 0; l < temp; l++)
            {
              v4sf_t rowB = { BO[l], BO[l] };
              v4sf_t rowA = { AO[l << 2], AO[(l << 2) + 1] };
              v4sf_t rowA1 = { AO[(l << 2) + 2], AO[(l << 2) + 3] };
              t += rowA * rowB;
              t1 += rowA1 * rowB;
            }
          t = t * valpha;
          t1 = t1 * valpha;
#if defined(TRMMKERNEL)
          CO[0] = t[0];
          CO[1] = t[1];
          CO[2] = t1[0];
          CO[3] = t1[1];
#else
          CO[0] += t[0];
          CO[1] += t[1];
          CO[2] += t1[0];
          CO[3] += t1[1];
#endif
          AO += temp << 2;
          BO += temp;
          CO += 4;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (4, 1)
#endif
        }
      if (m & 2)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (2, 1)
#else
          BO = B;
          temp = k;
#endif
          BLASLONG l = 0;
          v4sf_t t = { 0, 0 };
          for (l = 0; l < temp; l++)
            {
              v4sf_t rowB = { BO[l], BO[l] };
              v4sf_t rowA = { AO[l << 1], AO[(l << 1) + 1] };
              t += rowA * rowB;
            }
          t = t * valpha;
#if defined(TRMMKERNEL)
          CO[0] = t[0];
          CO[1] = t[1];
#else
          CO[0] += t[0];
          CO[1] += t[1];
#endif
          AO += temp << 1;
          BO += temp;
          CO += 2;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (2, 1)
#endif
        }
      if (m & 1)
        {
          FLOAT *BO;
#if defined(TRMMKERNEL)
          REFRESH_POINTERS (1, 1)
#else
          BO = B;
          temp = k;
#endif
          BLASLONG l = 0;
          FLOAT t = 0;
          for (l = 0; l < temp; l++)
            {
              t += AO[l] * BO[l];
            }
          AO += temp;
          BO += temp;
#if defined(TRMMKERNEL)
          CO[0] = t * alpha;
#else
          CO[0] += t * alpha;
#endif
          CO += 1;
#if defined(TRMMKERNEL)
          REFRESH_AFTER_SAVE (1, 1)
#endif
        }
#if defined(TRMMKERNEL) && !defined(LEFT)
      off += 1;  // number of values in A
#endif
      B += k;
    }
  return 0;
}