You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

dtrsm_kernel_RN_haswell.c 20 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673
  1. /*********************************************************************/
  2. /* Copyright 2009, 2010 The University of Texas at Austin. */
  3. /* All rights reserved. */
  4. /* */
  5. /* Redistribution and use in source and binary forms, with or */
  6. /* without modification, are permitted provided that the following */
  7. /* conditions are met: */
  8. /* */
  9. /* 1. Redistributions of source code must retain the above */
  10. /* copyright notice, this list of conditions and the following */
  11. /* disclaimer. */
  12. /* */
  13. /* 2. Redistributions in binary form must reproduce the above */
  14. /* copyright notice, this list of conditions and the following */
  15. /* disclaimer in the documentation and/or other materials */
  16. /* provided with the distribution. */
  17. /* */
  18. /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
  19. /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
  20. /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
  21. /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
  22. /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
  23. /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
  24. /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
  25. /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
  26. /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
  27. /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
  28. /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
  29. /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
  30. /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
  31. /* POSSIBILITY OF SUCH DAMAGE. */
  32. /* */
  33. /* The views and conclusions contained in the software and */
  34. /* documentation are those of the authors and should not be */
  35. /* interpreted as representing official policies, either expressed */
  36. /* or implied, of The University of Texas at Austin. */
  37. /*********************************************************************/
#include "common.h"

/* Alpha value (-1.0) passed to GEMM_KERNEL for edge tiles, so the
 * pre-solve update is computed as C += (-1) * A * B. */
static FLOAT dm1 = -1.;

/* Select the conjugated GEMM kernel when CONJ is defined, the plain
 * non-transposed kernel otherwise. */
#ifdef CONJ
#define GEMM_KERNEL GEMM_KERNEL_R
#else
#define GEMM_KERNEL GEMM_KERNEL_N
#endif

/* Map the configured GEMM unroll factors to shift amounts used for the
 * panel-count divisions (m >> GEMM_UNROLL_M_SHIFT, n >> GEMM_UNROLL_N_SHIFT).
 * NOTE(review): UNROLL_M == 6 maps to shift 2, i.e. it is treated as 4 —
 * presumably intentional since 6 is not a power of two; confirm against the
 * other trsm drivers. */
#if GEMM_DEFAULT_UNROLL_M == 1
#define GEMM_UNROLL_M_SHIFT 0
#endif

#if GEMM_DEFAULT_UNROLL_M == 2
#define GEMM_UNROLL_M_SHIFT 1
#endif

#if GEMM_DEFAULT_UNROLL_M == 4
#define GEMM_UNROLL_M_SHIFT 2
#endif

#if GEMM_DEFAULT_UNROLL_M == 6
#define GEMM_UNROLL_M_SHIFT 2
#endif

#if GEMM_DEFAULT_UNROLL_M == 8
#define GEMM_UNROLL_M_SHIFT 3
#endif

#if GEMM_DEFAULT_UNROLL_M == 16
#define GEMM_UNROLL_M_SHIFT 4
#endif

#if GEMM_DEFAULT_UNROLL_N == 1
#define GEMM_UNROLL_N_SHIFT 0
#endif

#if GEMM_DEFAULT_UNROLL_N == 2
#define GEMM_UNROLL_N_SHIFT 1
#endif

#if GEMM_DEFAULT_UNROLL_N == 4
#define GEMM_UNROLL_N_SHIFT 2
#endif

#if GEMM_DEFAULT_UNROLL_N == 8
#define GEMM_UNROLL_N_SHIFT 3
#endif

#if GEMM_DEFAULT_UNROLL_N == 16
#define GEMM_UNROLL_N_SHIFT 4
#endif
/*
 * dtrsm_RN_solve_opt - AVX2/FMA3 kernel for one 4x8 tile of the
 * right-side, non-transposed triangular solve (TRSM RN).
 *
 * Fuses two phases into one asm block:
 *   1. a GEMM-style update: accumulate the k-loop products of the packed
 *      A panel (4 doubles/iteration) with the packed B panel
 *      (8 doubles/iteration) into ymm8..ymm15, then subtract the result
 *      from the 8 columns of C;
 *   2. the forward substitution over the 8 columns, using entries of the
 *      packed triangular factor at bs.
 *
 * Parameters:
 *   n    - number of k iterations already solved (kk in the caller);
 *          n == 0 skips phase 1 entirely (the "je 4f" branch)
 *   a    - packed A panel (read-only, operand %8)
 *   b    - packed B panel (read-only, operand %9)
 *   c    - first of 8 C columns, ldc apart (operand %4)
 *   ldc  - leading dimension of C in elements; scaled to bytes below
 *   as   - output: solved tile written back into the packed A buffer (%2)
 *   bs   - packed triangular factor rows, advanced 8 doubles per solve
 *          step (%3).  NOTE(review): the solve multiplies by the diagonal
 *          entry (vmulpd, no divide) — presumably OpenBLAS packs the
 *          diagonal pre-inverted; confirm against the packing routines.
 *
 * noinline keeps the register-allocation assumptions of the asm intact.
 */
static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs) __attribute__ ((noinline));

static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, FLOAT *as, FLOAT *bs)
{
	/* Column base pointers: c covers columns 0-2 (via ldc scaling),
	 * c3 = c + 3*ldc covers columns 3-5, c6 = c + 6*ldc covers 6-7. */
	FLOAT *c3 = c + ldc + ldc*2 ;
	FLOAT *c6 = c + ldc*4 + ldc*2 ;
	/* Convert ldc from elements to bytes (sizeof(double) == 8) so the
	 * asm can use it directly in addressing modes. */
	ldc = ldc *8;
	/* Loop index i steps by 8; a is addressed with scale 4 (4 doubles =
	 * 32 bytes per step) and b with scale 8 (8 doubles = 64 bytes). */
	BLASLONG n1 = n * 8;
	BLASLONG i=0;

	__asm__ __volatile__
	(
	/* ---- Phase 1: accumulate a*b into ymm8..ymm15 --------------------
	 * The loop is software-pipelined two iterations deep (ymm0-3 vs
	 * ymm4-6 working sets); vpermilpd/vpermpd rotate the 4 A values so
	 * each accumulator collects a distinct row/column pairing, which is
	 * de-permuted by the blend sequence at label 3. */
	"	vzeroupper \n\t"
	"	vxorpd %%ymm8 , %%ymm8 , %%ymm8 \n\t"
	"	vxorpd %%ymm9 , %%ymm9 , %%ymm9 \n\t"
	"	vxorpd %%ymm10, %%ymm10, %%ymm10 \n\t"
	"	vxorpd %%ymm11, %%ymm11, %%ymm11 \n\t"
	"	vxorpd %%ymm12, %%ymm12, %%ymm12 \n\t"
	"	vxorpd %%ymm13, %%ymm13, %%ymm13 \n\t"
	"	vxorpd %%ymm14, %%ymm14, %%ymm14 \n\t"
	"	vxorpd %%ymm15, %%ymm15, %%ymm15 \n\t"

	/* n == 0: nothing accumulated, go straight to the solve. */
	"	cmpq $0, %0 \n\t"
	"	je 4f \n\t"

	/* Prologue: load the first iteration's operands. */
	"	vmovups (%8,%1,4), %%ymm0 \n\t" // read a
	"	vmovups (%9,%1,8), %%ymm1 \n\t" // read b0
	"	vmovups 32(%9,%1,8), %%ymm2 \n\t" // read b1

	"	addq $8, %1 \n\t"
	"	cmpq %1, %0 \n\t"
	"	je 21f \n\t"

	"	.p2align 4 \n\t"
	/* Main unrolled-by-2 k loop. */
	"1: \n\t"
	"	vmovups (%8,%1,4), %%ymm4 \n\t" // read a
	"	vpermilpd $0x05 , %%ymm0 , %%ymm3 \n\t" // was vpermpd 0xb1
	"	vfmadd231pd %%ymm0 , %%ymm1 , %%ymm8 \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm2 , %%ymm12 \n\t"
	"	vmovups (%9,%1,8), %%ymm5 \n\t" // read b0
	"	vfmadd231pd %%ymm3 , %%ymm1 , %%ymm9 \n\t"
	"	vfmadd231pd %%ymm3 , %%ymm2 , %%ymm13 \n\t"
	"	vpermpd $0x1b , %%ymm3 , %%ymm0 \n\t"
	"	vmovups 32(%9,%1,8), %%ymm6 \n\t" // read b1
	"	vpermilpd $0x05 , %%ymm0 , %%ymm3 \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm1 , %%ymm10 \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm2 , %%ymm14 \n\t"
	"	addq $8, %1 \n\t"
	"	vfmadd231pd %%ymm3 , %%ymm1 , %%ymm11 \n\t"
	"	vfmadd231pd %%ymm3 , %%ymm2 , %%ymm15 \n\t"
	"	cmpq %1, %0 \n\t"
	"	jz 22f \n\t"

	"	vmovups (%8,%1,4), %%ymm0 \n\t" // read a
	"	vfmadd231pd %%ymm4 , %%ymm5 , %%ymm8 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm6 , %%ymm12 \n\t"
	"	vpermilpd $0x05 , %%ymm4 , %%ymm4 \n\t"
	"	vmovups (%9,%1,8), %%ymm1 \n\t" // read b0
	"	vfmadd231pd %%ymm4 , %%ymm5 , %%ymm9 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm6 , %%ymm13 \n\t"
	"	vpermpd $0x1b , %%ymm4 , %%ymm4 \n\t"
	"	vmovups 32(%9,%1,8), %%ymm2 \n\t" // read b1
	"	vfmadd231pd %%ymm4 , %%ymm5 , %%ymm10 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm6 , %%ymm14 \n\t"
	"	vpermilpd $0x05 , %%ymm4 , %%ymm4 \n\t"
	"	addq $8, %1 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm5 , %%ymm11 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm6 , %%ymm15 \n\t"
	"	cmpq %1, %0 \n\t"
	"	jnz 1b \n\t"

	/* Epilogue A: last iteration came through the ymm0-3 working set. */
	"21: \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm1 , %%ymm8 \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm2 , %%ymm12 \n\t"
	"	vpermilpd $0x05 , %%ymm0 , %%ymm0 \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm1 , %%ymm9 \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm2 , %%ymm13 \n\t"
	"	vpermpd $0x1b , %%ymm0 , %%ymm0 \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm1 , %%ymm10 \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm2 , %%ymm14 \n\t"
	"	vpermilpd $0x05 , %%ymm0 , %%ymm0 \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm1 , %%ymm11 \n\t"
	"	vfmadd231pd %%ymm0 , %%ymm2 , %%ymm15 \n\t"
	"	jmp 3f \n\t"

	/* Epilogue B: last iteration came through the ymm4-6 working set. */
	"22: \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm5 , %%ymm8 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm6 , %%ymm12 \n\t"
	"	vpermilpd $0x05 , %%ymm4 , %%ymm4 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm5 , %%ymm9 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm6 , %%ymm13 \n\t"
	"	vpermpd $0x1b , %%ymm4 , %%ymm4 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm5 , %%ymm10 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm6 , %%ymm14 \n\t"
	"	vpermilpd $0x05 , %%ymm4 , %%ymm4 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm5 , %%ymm11 \n\t"
	"	vfmadd231pd %%ymm4 , %%ymm6 , %%ymm15 \n\t"

	/* Undo the rotation scheme: blend/permute ymm8..ymm15 back into
	 * natural row order (two independent 4x4 transposes). */
	"3: \n\t"
	"	vpermilpd $0x05 , %%ymm9 , %%ymm9 \n\t"
	"	vpermilpd $0x05 , %%ymm11, %%ymm11 \n\t"
	"	vblendpd $0x0a , %%ymm9 , %%ymm8 , %%ymm0 \n\t"
	"	vblendpd $0x05 , %%ymm9 , %%ymm8 , %%ymm1 \n\t"
	"	vblendpd $0x0a , %%ymm11, %%ymm10, %%ymm2 \n\t"
	"	vblendpd $0x05 , %%ymm11, %%ymm10, %%ymm3 \n\t"
	"	vperm2f128 $0x01 , %%ymm2 , %%ymm2 , %%ymm2 \n\t"
	"	vperm2f128 $0x01 , %%ymm3 , %%ymm3 , %%ymm3 \n\t"
	"	vblendpd $0x03 , %%ymm0 , %%ymm2 , %%ymm8 \n\t"
	"	vblendpd $0x03 , %%ymm1 , %%ymm3 , %%ymm9 \n\t"
	"	vblendpd $0x03 , %%ymm2 , %%ymm0 , %%ymm10 \n\t"
	"	vblendpd $0x03 , %%ymm3 , %%ymm1 , %%ymm11 \n\t"

	"	vpermilpd $0x05 , %%ymm13, %%ymm13 \n\t"
	"	vpermilpd $0x05 , %%ymm15, %%ymm15 \n\t"
	"	vblendpd $0x0a , %%ymm13, %%ymm12, %%ymm0 \n\t"
	"	vblendpd $0x05 , %%ymm13, %%ymm12, %%ymm1 \n\t"
	"	vblendpd $0x0a , %%ymm15, %%ymm14, %%ymm2 \n\t"
	"	vblendpd $0x05 , %%ymm15, %%ymm14, %%ymm3 \n\t"
	"	vperm2f128 $0x01 , %%ymm2 , %%ymm2 , %%ymm2 \n\t"
	"	vperm2f128 $0x01 , %%ymm3 , %%ymm3 , %%ymm3 \n\t"
	"	vblendpd $0x03 , %%ymm0 , %%ymm2 , %%ymm12 \n\t"
	"	vblendpd $0x03 , %%ymm1 , %%ymm3 , %%ymm13 \n\t"
	"	vblendpd $0x03 , %%ymm2 , %%ymm0 , %%ymm14 \n\t"
	"	vblendpd $0x03 , %%ymm3 , %%ymm1 , %%ymm15 \n\t"

	/* ---- Phase 2: load 8 C columns and subtract the accumulators ----
	 * ymm8..ymm15 become C[col] - acc[col] for columns 0..7; the first
	 * 8 doubles of bs are broadcast into ymm0..ymm7 for the solve. */
	"4: \n\t"
	"	vmovups (%4) , %%ymm0 \n\t" // read c0
	"	vmovups (%4,%7,1) , %%ymm1 \n\t" // read c1
	"	vmovups (%4,%7,2) , %%ymm2 \n\t" // read c2
	"	vmovups (%5) , %%ymm3 \n\t" // read c3
	"	vmovups (%5,%7,1) , %%ymm4 \n\t" // read c4
	"	vmovups (%5,%7,2) , %%ymm5 \n\t" // read c5
	"	vmovups (%6) , %%ymm6 \n\t" // read c6
	"	vmovups (%6,%7,1) , %%ymm7 \n\t" // read c7

	"	vsubpd %%ymm8 , %%ymm0 , %%ymm8 \n\t"
	"	vmovups (%3), %%ymm0 \n\t"
	"	vsubpd %%ymm9 , %%ymm1 , %%ymm9 \n\t"
	"	vpermpd $0x55 , %%ymm0 , %%ymm1 \n\t"
	"	vsubpd %%ymm10, %%ymm2 , %%ymm10 \n\t"
	"	vpermpd $0xaa , %%ymm0 , %%ymm2 \n\t"
	"	vsubpd %%ymm11, %%ymm3 , %%ymm11 \n\t"
	"	vpermpd $0xff , %%ymm0 , %%ymm3 \n\t"
	"	vpermpd $0x00 , %%ymm0 , %%ymm0 \n\t"
	"	vsubpd %%ymm12, %%ymm4 , %%ymm12 \n\t"
	"	vmovups 32(%3), %%ymm4 \n\t"
	"	vsubpd %%ymm13, %%ymm5 , %%ymm13 \n\t"
	"	vpermpd $0x55 , %%ymm4 , %%ymm5 \n\t"
	"	vsubpd %%ymm14, %%ymm6 , %%ymm14 \n\t"
	"	vpermpd $0xaa , %%ymm4 , %%ymm6 \n\t"
	"	vsubpd %%ymm15, %%ymm7 , %%ymm15 \n\t"
	"	vpermpd $0xff , %%ymm4 , %%ymm7 \n\t"
	"	vpermpd $0x00 , %%ymm4 , %%ymm4 \n\t"

	/* ---- Phase 3: forward substitution over the 8 columns -----------
	 * Step j: scale column j by its (presumably pre-inverted) diagonal
	 * entry, store the result to the packed A buffer and to C, then
	 * vfnmadd it out of columns j+1..7 using row j of bs.  bs advances
	 * 64 bytes (one packed row) and as 32 bytes (one tile row) per step. */
	"5: \n\t" // i = 0
	"	addq $64, %3 \n\t" // b=b+8
	"	vmulpd %%ymm8 , %%ymm0, %%ymm8 \n\t" // a *bb
	"	vmovups (%3), %%ymm0 \n\t"
	"	vmovups %%ymm8 , (%2) \n\t" // write a
	"	vmovups %%ymm8 , (%4) \n\t" // write c
	"	vfnmadd231pd %%ymm8 , %%ymm1 , %%ymm9 \n\t"
	"	vmovups 32(%3), %%ymm1 \n\t"
	"	vfnmadd231pd %%ymm8 , %%ymm2 , %%ymm10 \n\t"
	"	vpermpd $0xaa , %%ymm0 , %%ymm2 \n\t"
	"	vfnmadd231pd %%ymm8 , %%ymm3 , %%ymm11 \n\t"
	"	vpermpd $0xff , %%ymm0 , %%ymm3 \n\t"
	"	vfnmadd231pd %%ymm8 , %%ymm4 , %%ymm12 \n\t"
	"	vpermpd $0x55 , %%ymm0 , %%ymm0 \n\t"
	"	vfnmadd231pd %%ymm8 , %%ymm5 , %%ymm13 \n\t"
	"	vpermpd $0x55 , %%ymm1 , %%ymm5 \n\t"
	"	vfnmadd231pd %%ymm8 , %%ymm6 , %%ymm14 \n\t"
	"	vpermpd $0xaa , %%ymm1 , %%ymm6 \n\t"
	"	vfnmadd231pd %%ymm8 , %%ymm7 , %%ymm15 \n\t"
	"	vpermpd $0xff , %%ymm1 , %%ymm7 \n\t"
	"	vpermpd $0x00 , %%ymm1 , %%ymm4 \n\t"

	"	addq $64, %3 \n\t" // b=b+8
	"	addq $32, %2 \n\t" // a=a+8
	"	vmulpd %%ymm9 , %%ymm0, %%ymm9 \n\t" // a *bb
	"	vmovups (%3), %%ymm0 \n\t"
	"	vmovups 32(%3), %%ymm1 \n\t"
	"	vmovups %%ymm9 , (%2) \n\t" // write a
	"	vmovups %%ymm9 , (%4,%7,1) \n\t" // write c
	"	vfnmadd231pd %%ymm9 , %%ymm2 , %%ymm10 \n\t"
	"	vfnmadd231pd %%ymm9 , %%ymm3 , %%ymm11 \n\t"
	"	vpermpd $0xff , %%ymm0 , %%ymm3 \n\t"
	"	vfnmadd231pd %%ymm9 , %%ymm4 , %%ymm12 \n\t"
	"	vpermpd $0xaa , %%ymm0 , %%ymm0 \n\t"
	"	vfnmadd231pd %%ymm9 , %%ymm5 , %%ymm13 \n\t"
	"	vpermpd $0x55 , %%ymm1 , %%ymm5 \n\t"
	"	vfnmadd231pd %%ymm9 , %%ymm6 , %%ymm14 \n\t"
	"	vpermpd $0xaa , %%ymm1 , %%ymm6 \n\t"
	"	vfnmadd231pd %%ymm9 , %%ymm7 , %%ymm15 \n\t"
	"	vpermpd $0xff , %%ymm1 , %%ymm7 \n\t"
	"	vpermpd $0x00 , %%ymm1 , %%ymm4 \n\t"

	"	addq $64, %3 \n\t" // b=b+8
	"	addq $32, %2 \n\t" // a=a+8
	"	vmulpd %%ymm10, %%ymm0, %%ymm10 \n\t" // a *bb
	"	vmovups (%3), %%ymm0 \n\t"
	"	vmovups 32(%3), %%ymm1 \n\t"
	"	vmovups %%ymm10, (%2) \n\t" // write a
	"	vmovups %%ymm10, (%4,%7,2) \n\t" // write c
	"	vfnmadd231pd %%ymm10, %%ymm3 , %%ymm11 \n\t"
	"	vpermpd $0xff , %%ymm0 , %%ymm0 \n\t"
	"	vfnmadd231pd %%ymm10, %%ymm4 , %%ymm12 \n\t"
	"	vfnmadd231pd %%ymm10, %%ymm5 , %%ymm13 \n\t"
	"	vpermpd $0x55 , %%ymm1 , %%ymm5 \n\t"
	"	vfnmadd231pd %%ymm10, %%ymm6 , %%ymm14 \n\t"
	"	vpermpd $0xaa , %%ymm1 , %%ymm6 \n\t"
	"	vfnmadd231pd %%ymm10, %%ymm7 , %%ymm15 \n\t"
	"	vpermpd $0xff , %%ymm1 , %%ymm7 \n\t"
	"	vpermpd $0x00 , %%ymm1 , %%ymm4 \n\t"

	"	addq $64, %3 \n\t" // b=b+8
	"	addq $32, %2 \n\t" // a=a+8
	"	vmulpd %%ymm11, %%ymm0, %%ymm11 \n\t" // a *bb
	"	vmovups 32(%3), %%ymm1 \n\t"
	"	vmovups %%ymm11, (%2) \n\t" // write a
	"	vmovups %%ymm11, (%5) \n\t" // write c
	"	vfnmadd231pd %%ymm11, %%ymm4 , %%ymm12 \n\t"
	"	vfnmadd231pd %%ymm11, %%ymm5 , %%ymm13 \n\t"
	"	vpermpd $0x55 , %%ymm1 , %%ymm5 \n\t"
	"	vfnmadd231pd %%ymm11, %%ymm6 , %%ymm14 \n\t"
	"	vpermpd $0xaa , %%ymm1 , %%ymm6 \n\t"
	"	vfnmadd231pd %%ymm11, %%ymm7 , %%ymm15 \n\t"
	"	vpermpd $0xff , %%ymm1 , %%ymm7 \n\t"
	"	vpermpd $0x00 , %%ymm1 , %%ymm0 \n\t"

	"	addq $64, %3 \n\t" // b=b+8
	"	addq $32, %2 \n\t" // a=a+8
	"	vmulpd %%ymm12, %%ymm0, %%ymm12 \n\t" // a *bb
	"	vmovups 32(%3), %%ymm1 \n\t"
	"	vmovups %%ymm12, (%2) \n\t" // write a
	"	vmovups %%ymm12, (%5,%7,1) \n\t" // write c
	"	vfnmadd231pd %%ymm12, %%ymm5 , %%ymm13 \n\t"
	"	vfnmadd231pd %%ymm12, %%ymm6 , %%ymm14 \n\t"
	"	vpermpd $0xaa , %%ymm1 , %%ymm6 \n\t"
	"	vfnmadd231pd %%ymm12, %%ymm7 , %%ymm15 \n\t"
	"	vpermpd $0xff , %%ymm1 , %%ymm7 \n\t"
	"	vpermpd $0x55 , %%ymm1 , %%ymm0 \n\t"

	"	addq $64, %3 \n\t" // b=b+8
	"	addq $32, %2 \n\t" // a=a+8
	"	vmulpd %%ymm13, %%ymm0, %%ymm13 \n\t" // a *bb
	"	vmovups 32(%3), %%ymm1 \n\t"
	"	vmovups %%ymm13, (%2) \n\t" // write a
	"	vmovups %%ymm13, (%5,%7,2) \n\t" // write c
	"	vfnmadd231pd %%ymm13, %%ymm6 , %%ymm14 \n\t"
	"	vfnmadd231pd %%ymm13, %%ymm7 , %%ymm15 \n\t"
	"	vpermpd $0xff , %%ymm1 , %%ymm7 \n\t"
	"	vpermpd $0xaa , %%ymm1 , %%ymm0 \n\t"

	"	addq $64, %3 \n\t" // b=b+8
	"	addq $32, %2 \n\t" // a=a+8
	"	vmulpd %%ymm14, %%ymm0, %%ymm14 \n\t" // a *bb
	"	vmovups 32(%3), %%ymm1 \n\t"
	"	vmovups %%ymm14, (%2) \n\t" // write a
	"	vmovups %%ymm14, (%6) \n\t" // write c
	"	vfnmadd231pd %%ymm14, %%ymm7 , %%ymm15 \n\t"
	"	vpermpd $0xff , %%ymm1 , %%ymm0 \n\t"

	"	addq $32, %2 \n\t" // a=a+8
	"	vmulpd %%ymm15, %%ymm0, %%ymm15 \n\t" // a *bb
	"	vmovups %%ymm15, (%2) \n\t" // write a
	"	vmovups %%ymm15, (%6,%7,1) \n\t" // write c

	"	vzeroupper \n\t"

	:
	/* Read-write operands: loop bound, index, and output cursors. */
	"+r" (n1), // 0
	"+a" (i), // 1
	"+r" (as), // 2
	"+r" (bs) // 3
	:
	/* Read-only operands: C column bases, byte stride, packed panels. */
	"r" (c), // 4
	"r" (c3), // 5
	"r" (c6), // 6
	"r" (ldc), // 7
	"r" (a), // 8
	"r" (b) // 9
	: "cc",
	"%xmm0", "%xmm1", "%xmm2", "%xmm3",
	"%xmm4", "%xmm5", "%xmm6", "%xmm7",
	"%xmm8", "%xmm9", "%xmm10", "%xmm11",
	"%xmm12", "%xmm13", "%xmm14", "%xmm15",
	"memory"
	);
}
  344. #ifndef COMPLEX
  345. static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
  346. FLOAT aa, bb;
  347. int i, j, k;
  348. for (i = 0; i < n; i++) {
  349. bb = *(b + i);
  350. for (j = 0; j < m; j ++) {
  351. aa = *(c + j + i * ldc);
  352. aa *= bb;
  353. *a = aa;
  354. *(c + j + i * ldc) = aa;
  355. a ++;
  356. for (k = i + 1; k < n; k ++){
  357. *(c + j + k * ldc) -= aa * *(b + k);
  358. }
  359. }
  360. b += n;
  361. }
  362. }
  363. #else
  364. static inline void solve(BLASLONG m, BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc) {
  365. FLOAT aa1, aa2;
  366. FLOAT bb1, bb2;
  367. FLOAT cc1, cc2;
  368. int i, j, k;
  369. ldc *= 2;
  370. for (i = 0; i < n; i++) {
  371. bb1 = *(b + i * 2 + 0);
  372. bb2 = *(b + i * 2 + 1);
  373. for (j = 0; j < m; j ++) {
  374. aa1 = *(c + j * 2 + 0 + i * ldc);
  375. aa2 = *(c + j * 2 + 1 + i * ldc);
  376. #ifndef CONJ
  377. cc1 = aa1 * bb1 - aa2 * bb2;
  378. cc2 = aa1 * bb2 + aa2 * bb1;
  379. #else
  380. cc1 = aa1 * bb1 + aa2 * bb2;
  381. cc2 = -aa1 * bb2 + aa2 * bb1;
  382. #endif
  383. *(a + 0) = cc1;
  384. *(a + 1) = cc2;
  385. *(c + j * 2 + 0 + i * ldc) = cc1;
  386. *(c + j * 2 + 1 + i * ldc) = cc2;
  387. a += 2;
  388. for (k = i + 1; k < n; k ++){
  389. #ifndef CONJ
  390. *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) - cc2 * *(b + k * 2 + 1);
  391. *(c + j * 2 + 1 + k * ldc) -= cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0);
  392. #else
  393. *(c + j * 2 + 0 + k * ldc) -= cc1 * *(b + k * 2 + 0) + cc2 * *(b + k * 2 + 1);
  394. *(c + j * 2 + 1 + k * ldc) -= - cc1 * *(b + k * 2 + 1) + cc2 * *(b + k * 2 + 0);
  395. #endif
  396. }
  397. }
  398. b += n * 2;
  399. }
  400. }
  401. #endif
/*
 * CNAME - driver for the right-side, non-transposed TRSM kernel.
 *
 * Tiles the problem: the n dimension is walked in GEMM_UNROLL_N panels
 * and, inside each, the m dimension in GEMM_UNROLL_M panels.  Full tiles
 * go through the fused assembly kernel dtrsm_RN_solve_opt (which performs
 * both the GEMM update and the solve); edge tiles use GEMM_KERNEL with
 * alpha = dm1 (-1) followed by the scalar solve().
 *
 * Parameters:
 *   m, n, k - tile dimensions of the packed panels
 *   dummy1 (and dummy2 for COMPLEX) - unused alpha argument(s)
 *   a, b    - packed A and B panels
 *   c, ldc  - output matrix and its leading dimension
 *   offset  - starting diagonal offset; kk = -offset counts the columns
 *             already solved (the GEMM update depth)
 */
int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT dummy1,
#ifdef COMPLEX
	  FLOAT dummy2,
#endif
	  FLOAT *a, FLOAT *b, FLOAT *c, BLASLONG ldc, BLASLONG offset){

	FLOAT *aa, *cc;
	BLASLONG kk;
	BLASLONG i, j, jj;

	/* Debug trace, disabled. */
#if 0
	fprintf(stderr, "TRSM RN KERNEL m = %3ld n = %3ld k = %3ld offset = %3ld\n",
		m, n, k, offset);
#endif

	jj = 0;
	j = (n >> GEMM_UNROLL_N_SHIFT);
	kk = -offset;

	/* Full GEMM_UNROLL_N-wide column panels. */
	while (j > 0) {

		aa = a;
		cc = c;

		i = (m >> GEMM_UNROLL_M_SHIFT);

		if (i > 0) {
			do {
				/* Fused update + solve for a full tile; replaces the
				 * commented-out scalar solve() call below. */
				dtrsm_RN_solve_opt(kk, aa, b, cc, ldc, aa + kk * GEMM_UNROLL_M * COMPSIZE, b + kk * GEMM_UNROLL_N * COMPSIZE);
/*
				solve(GEMM_UNROLL_M, GEMM_UNROLL_N,
				      aa + kk * GEMM_UNROLL_M * COMPSIZE,
				      b  + kk * GEMM_UNROLL_N * COMPSIZE,
				      cc, ldc);
*/
				aa += GEMM_UNROLL_M * k * COMPSIZE;
				cc += GEMM_UNROLL_M * COMPSIZE;
				i --;
			} while (i > 0);
		}

		/* Remaining rows (m not a multiple of GEMM_UNROLL_M): peel off
		 * power-of-two sub-tiles via GEMM_KERNEL + scalar solve. */
		if (m & (GEMM_UNROLL_M - 1)) {
			i = (GEMM_UNROLL_M >> 1);
			while (i > 0) {
				if (m & i) {
					if (kk > 0) {
						GEMM_KERNEL(i, GEMM_UNROLL_N, kk, dm1,
#ifdef COMPLEX
							    ZERO,
#endif
							    aa, b, cc, ldc);
					}
					solve(i, GEMM_UNROLL_N,
					      aa + kk * i * COMPSIZE,
					      b  + kk * GEMM_UNROLL_N * COMPSIZE,
					      cc, ldc);
					aa += i * k * COMPSIZE;
					cc += i * COMPSIZE;
				}
				i >>= 1;
			}
		}

		/* Advance to the next column panel; kk grows by the panel
		 * width (more columns are now solved). */
		kk += GEMM_UNROLL_N;
		b += GEMM_UNROLL_N * k * COMPSIZE;
		c += GEMM_UNROLL_N * ldc * COMPSIZE;
		j --;
		jj += GEMM_UNROLL_M;  /* NOTE(review): jj is never read after this */
	}

	/* Remaining columns (n not a multiple of GEMM_UNROLL_N). */
	if (n & (GEMM_UNROLL_N - 1)) {

		j = (GEMM_UNROLL_N >> 1);
		while (j > 0) {
			if (n & j) {

				aa = a;
				cc = c;

				i = (m >> GEMM_UNROLL_M_SHIFT);

				while (i > 0) {
					if (kk > 0) {
						GEMM_KERNEL(GEMM_UNROLL_M, j, kk, dm1,
#ifdef COMPLEX
							    ZERO,
#endif
							    aa,
							    b,
							    cc,
							    ldc);
					}

					solve(GEMM_UNROLL_M, j,
					      aa + kk * GEMM_UNROLL_M * COMPSIZE,
					      b  + kk * j * COMPSIZE, cc, ldc);

					aa += GEMM_UNROLL_M * k * COMPSIZE;
					cc += GEMM_UNROLL_M * COMPSIZE;
					i --;
				}

				if (m & (GEMM_UNROLL_M - 1)) {
					i = (GEMM_UNROLL_M >> 1);
					while (i > 0) {
						if (m & i) {
							if (kk > 0) {
								GEMM_KERNEL(i, j, kk, dm1,
#ifdef COMPLEX
									    ZERO,
#endif
									    aa,
									    b,
									    cc,
									    ldc);
							}

							solve(i, j,
							      aa + kk * i * COMPSIZE,
							      b  + kk * j * COMPSIZE, cc, ldc);

							aa += i * k * COMPSIZE;
							cc += i * COMPSIZE;
						}
						i >>= 1;
					}
				}

				b += j * k * COMPSIZE;
				c += j * ldc * COMPSIZE;
				kk += j;
			}
			j >>= 1;
		}
	}

	return 0;
}