You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

level3.c 13 kB

5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423
  1. /*********************************************************************/
  2. /* Copyright 2009, 2010 The University of Texas at Austin. */
  3. /* All rights reserved. */
  4. /* */
  5. /* Redistribution and use in source and binary forms, with or */
  6. /* without modification, are permitted provided that the following */
  7. /* conditions are met: */
  8. /* */
  9. /* 1. Redistributions of source code must retain the above */
  10. /* copyright notice, this list of conditions and the following */
  11. /* disclaimer. */
  12. /* */
  13. /* 2. Redistributions in binary form must reproduce the above */
  14. /* copyright notice, this list of conditions and the following */
  15. /* disclaimer in the documentation and/or other materials */
  16. /* provided with the distribution. */
  17. /* */
  18. /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
  19. /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
  20. /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
  21. /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
  22. /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
  23. /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
  24. /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
  25. /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
  26. /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
  27. /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
  28. /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
  29. /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
  30. /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
  31. /* POSSIBILITY OF SUCH DAMAGE. */
  32. /* */
  33. /* The views and conclusions contained in the software and */
  34. /* documentation are those of the authors and should not be */
  35. /* interpreted as representing official policies, either expressed */
  36. /* or implied, of The University of Texas at Austin. */
  37. /*********************************************************************/
  38. /* This file is a template for level 3 operation */
  39. #ifndef BETA_OPERATION
  40. #if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
  41. #ifndef COMPLEX
  42. #define BETA_OPERATION(M_FROM, M_TO, N_FROM, N_TO, BETA, C, LDC) \
  43. GEMM_BETA((M_TO) - (M_FROM), (N_TO - N_FROM), 0, \
  44. BETA[0], NULL, 0, NULL, 0, \
  45. (FLOAT *)(C) + ((M_FROM) + (N_FROM) * (LDC)) * COMPSIZE, LDC)
  46. #else
  47. #define BETA_OPERATION(M_FROM, M_TO, N_FROM, N_TO, BETA, C, LDC) \
  48. GEMM_BETA((M_TO) - (M_FROM), (N_TO - N_FROM), 0, \
  49. BETA[0], BETA[1], NULL, 0, NULL, 0, \
  50. (FLOAT *)(C) + ((M_FROM) + (N_FROM) * (LDC)) * COMPSIZE, LDC)
  51. #endif
  52. #else
  53. #define BETA_OPERATION(M_FROM, M_TO, N_FROM, N_TO, BETA, C, LDC) \
  54. GEMM_BETA((M_TO) - (M_FROM), (N_TO - N_FROM), 0, \
  55. BETA, NULL, 0, NULL, 0, \
  56. (FLOAT *)(C) + ((M_FROM) + (N_FROM) * (LDC)) * COMPSIZE, LDC)
  57. #endif
  58. #endif
/* Pack a min_l x min_i panel of A into the contiguous buffer sa.
 * The first letter of the operation code (N/T/C/R) describes how the A
 * operand is accessed; the matching copy routine (transposing or not)
 * and the corresponding (X, Y) offset order are chosen so the packed
 * panel is laid out the way the microkernel expects. */
#ifndef ICOPY_OPERATION
#if defined(NN) || defined(NT) || defined(NC) || defined(NR) || \
    defined(RN) || defined(RT) || defined(RC) || defined(RR)
#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (IFLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER);
#else
#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (IFLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER);
#endif
#endif
/* Pack a min_l x min_jj panel of B into the contiguous buffer sb.
 * The second letter of the operation code (N/T/C/R) describes how the B
 * operand is accessed; the copy routine and the (X, Y) offset order are
 * selected accordingly (mirror image of ICOPY_OPERATION). */
#ifndef OCOPY_OPERATION
#if defined(NN) || defined(TN) || defined(CN) || defined(RN) || \
    defined(NR) || defined(TR) || defined(CR) || defined(RR)
#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (IFLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER);
#else
#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (IFLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER);
#endif
#endif
/* Select the GEMM microkernel variant from the transpose/conjugate op
 * code.  The _N/_L/_R/_B suffixes appear to correspond to conjugating
 * neither, the left, the right, or both operands — consistent with which
 * side of the op code carries a C or R letter (TODO confirm against the
 * kernel definitions). */
#ifndef KERNEL_FUNC
#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define KERNEL_FUNC GEMM_KERNEL_N
#endif
#if defined(CN) || defined(CT) || defined(RN) || defined(RT)
#define KERNEL_FUNC GEMM_KERNEL_L
#endif
#if defined(NC) || defined(TC) || defined(NR) || defined(TR)
#define KERNEL_FUNC GEMM_KERNEL_R
#endif
#if defined(CC) || defined(CR) || defined(RC) || defined(RR)
#define KERNEL_FUNC GEMM_KERNEL_B
#endif
#endif
  89. #ifndef KERNEL_OPERATION
  90. #if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
  91. #ifndef COMPLEX
  92. #define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  93. KERNEL_FUNC(M, N, K, ALPHA[0], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC)
  94. #else
  95. #define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  96. KERNEL_FUNC(M, N, K, ALPHA[0], ALPHA[1], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC)
  97. #endif
  98. #else
  99. #define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  100. KERNEL_FUNC(M, N, K, ALPHA, SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC)
  101. #endif
  102. #endif
  103. #ifndef FUSED_KERNEL_OPERATION
  104. #if defined(NN) || defined(TN) || defined(CN) || defined(RN) || \
  105. defined(NR) || defined(TR) || defined(CR) || defined(RR)
  106. #ifndef COMPLEX
  107. #define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  108. FUSED_GEMM_KERNEL_N(M, N, K, ALPHA[0], SA, SB, \
  109. (FLOAT *)(B) + ((L) + (J) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
  110. #else
  111. #define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  112. FUSED_GEMM_KERNEL_N(M, N, K, ALPHA[0], ALPHA[1], SA, SB, \
  113. (FLOAT *)(B) + ((L) + (J) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
  114. #endif
  115. #else
  116. #ifndef COMPLEX
  117. #define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  118. FUSED_GEMM_KERNEL_T(M, N, K, ALPHA[0], SA, SB, \
  119. (FLOAT *)(B) + ((J) + (L) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
  120. #else
  121. #define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  122. FUSED_GEMM_KERNEL_T(M, N, K, ALPHA[0], ALPHA[1], SA, SB, \
  123. (FLOAT *)(B) + ((J) + (L) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
  124. #endif
  125. #endif
  126. #endif
/* Default operand accessors: unless the including driver overrides them,
 * the operands, leading dimensions and problem sizes are pulled straight
 * from the blas_arg_t structure passed to CNAME. */
#ifndef A
#define A	args -> a
#endif
#ifndef LDA
#define LDA	args -> lda
#endif
#ifndef B
#define B	args -> b
#endif
#ifndef LDB
#define LDB	args -> ldb
#endif
#ifndef C
#define C	args -> c
#endif
#ifndef LDC
#define LDC	args -> ldc
#endif
#ifndef M
#define M	args -> m
#endif
#ifndef N
#define N	args -> n
#endif
#ifndef K
#define K	args -> k
#endif
/* Cycle-counter instrumentation: when TIMING is enabled, START_RPCC
 * snapshots the cycle counter and STOP_RPCC accumulates the elapsed
 * cycles into the named counter; otherwise both compile to nothing. */
#ifdef TIMING
#define START_RPCC()		rpcc_counter = rpcc()
#define STOP_RPCC(COUNTER)	COUNTER += rpcc() - rpcc_counter
#else
#define START_RPCC()
#define STOP_RPCC(COUNTER)
#endif
/* Single-threaded driver template for a Level 3 (GEMM-like) operation.
 *
 * Computes the C panel restricted to rows [m_from, m_to) and columns
 * [n_from, n_to) (taken from range_m/range_n when supplied, otherwise
 * the whole matrix): first scales C by beta once, then accumulates
 * alpha * op(A) * op(B) using the classic three-level cache blocking:
 *   js over N in steps of GEMM_R,
 *   ls over K in steps of (at most) GEMM_Q,
 *   is over M in steps of (at most) GEMM_P,
 * packing A panels into sa and B panels into sb ahead of each kernel
 * call.  sa/sb are caller-provided packing buffers; dummy is unused.
 * Always returns 0. */
int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
	  XFLOAT *sa, XFLOAT *sb, BLASLONG dummy){

  BLASLONG k, lda, ldb, ldc;
  FLOAT *alpha, *beta;
  IFLOAT *a, *b;
  FLOAT *c;
  BLASLONG m_from, m_to, n_from, n_to;
  BLASLONG ls, is, js;          /* K-, M- and N-direction block starts */
  BLASLONG min_l, min_i, min_j; /* current block sizes in K, M and N */
#if !defined(FUSED_GEMM) || defined(TIMING)
  BLASLONG jjs, min_jj;         /* inner N subdivision while packing B */
#endif
  BLASLONG l1stride, gemm_p, l2size;
#if defined(XDOUBLE) && defined(QUAD_PRECISION)
  xidouble xalpha;              /* alpha converted to extended format */
#endif

#ifdef TIMING
  unsigned long long rpcc_counter;
  unsigned long long innercost  = 0;  /* cycles packing A */
  unsigned long long outercost  = 0;  /* cycles packing B */
  unsigned long long kernelcost = 0;  /* cycles in the kernel */
  double total;
#endif

  /* Pull operands and sizes (the macros default to the args structure). */
  k = K;

  a = (IFLOAT *)A;
  b = (IFLOAT *)B;
  c = (FLOAT *)C;

  lda = LDA;
  ldb = LDB;
  ldc = LDC;

  alpha = (FLOAT *)args -> alpha;
  beta  = (FLOAT *)args -> beta;

  /* Restrict to the caller-supplied row/column ranges, if any. */
  m_from = 0;
  m_to   = M;

  if (range_m) {
    m_from = *(((BLASLONG *)range_m) + 0);
    m_to   = *(((BLASLONG *)range_m) + 1);
  }

  n_from = 0;
  n_to   = N;

  if (range_n) {
    n_from = *(((BLASLONG *)range_n) + 0);
    n_to   = *(((BLASLONG *)range_n) + 1);
  }

  /* Apply C *= beta once, up front, unless beta == 1 (nothing to do). */
  if (beta) {
#if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
#ifndef COMPLEX
    if (beta[0] != ONE
#else
    if ((beta[0] != ONE) || (beta[1] != ZERO)
#endif
#else
    /* Quad precision: compare raw bit patterns against 1.0.
       NOTE(review): in the complex case the '&&' skips the scaling
       whenever the real part is exactly 1.0 even if the imaginary part
       is nonzero; an '||' would be expected — confirm against the quad
       precision build. */
    if (((beta[0].x[1] != 0x3fff000000000000UL) || beta[0].x[0] != 0)
#ifdef COMPLEX
	&&(((beta[1].x[0] | beta[1].x[1]) << 1) != 0)
#endif
#endif
	) {
#if defined(XDOUBLE) && defined(QUAD_PRECISION)
      xidouble xbeta;
      qtox(&xbeta, beta);
#endif
      BETA_OPERATION(m_from, m_to, n_from, n_to, beta, c, ldc);
    }
  }

  /* With k == 0 or no alpha the accumulation term vanishes; done. */
  if ((k == 0) || (alpha == NULL)) return 0;

  /* alpha == 0 likewise leaves the (already beta-scaled) C unchanged. */
#if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
  if ( alpha[0] == ZERO
#ifdef COMPLEX
      && alpha[1] == ZERO
#endif
      ) return 0;
#else
  /* Quad precision zero test on raw bits; '<< 1' discards the sign bit. */
  if (((alpha[0].x[0] | alpha[0].x[1]
#ifdef COMPLEX
	| alpha[1].x[0] | alpha[1].x[1]
#endif
	) << 1) == 0) return 0;
#endif

#if defined(XDOUBLE) && defined(QUAD_PRECISION)
  qtox(&xalpha, alpha);
#endif

  /* Elements of a packed A panel that fit the per-level cache budget. */
  l2size = GEMM_P * GEMM_Q;

#if 0
  fprintf(stderr, "GEMM(Single): M_from : %ld M_to : %ld N_from : %ld N_to : %ld k : %ld\n", m_from, m_to, n_from, n_to, k);
  fprintf(stderr, "GEMM(Single):: P = %4ld Q = %4ld R = %4ld\n", (BLASLONG)GEMM_P, (BLASLONG)GEMM_Q, (BLASLONG)GEMM_R);
  // fprintf(stderr, "GEMM: SA .. %p SB .. %p\n", sa, sb);
  // fprintf(stderr, "A = %p B = %p C = %p\n\tlda = %ld ldb = %ld ldc = %ld\n", a, b, c, lda, ldb, ldc);
#endif

#ifdef TIMING
  innercost = 0;
  outercost = 0;
  kernelcost = 0;
#endif

  /* N-direction blocking: one GEMM_R-wide stripe of C per iteration. */
  for(js = n_from; js < n_to; js += GEMM_R){
    min_j = n_to - js;
    if (min_j > GEMM_R) min_j = GEMM_R;

    /* K-direction blocking. */
    for(ls = 0; ls < k; ls += min_l){
      min_l = k - ls;

      if (min_l >= GEMM_Q * 2) {
	/* Plenty of K remaining: take a full GEMM_Q slice. */
	// gemm_p = GEMM_P;
	min_l  = GEMM_Q;
      } else {
	/* Last one or two K slices: split the remainder roughly in half,
	   rounded up to the M unroll, and size the M block so the packed
	   A panel still fits the cache budget.
	   NOTE(review): gemm_p computed here is never read below (min_i
	   is clamped with GEMM_P directly) — looks vestigial; confirm. */
	if (min_l > GEMM_Q) {
	  min_l = ((min_l / 2 + GEMM_UNROLL_M - 1)/GEMM_UNROLL_M) * GEMM_UNROLL_M;
	}
	gemm_p = ((l2size / min_l + GEMM_UNROLL_M - 1)/GEMM_UNROLL_M) * GEMM_UNROLL_M;
	while (gemm_p * min_l > l2size) gemm_p -= GEMM_UNROLL_M;
      }

      /* For the HALF (bfloat16) build the packed K length is padded up
	 to the kernel's K alignment. */
      BLASLONG pad_min_l = min_l;

#if defined(HALF)
#if defined(DYNAMIC_ARCH)
      pad_min_l = (min_l + gotoblas->sbgemm_align_k - 1) & ~(gotoblas->sbgemm_align_k-1);
#else
      pad_min_l = (min_l + SBGEMM_ALIGN_K - 1) & ~(SBGEMM_ALIGN_K - 1);; /* NOTE(review): stray second ';' — harmless */
#endif
#endif

      /* First, we have to move data A to L2 cache */
      min_i = m_to - m_from;
      l1stride = 1;

      if (min_i >= GEMM_P * 2) {
	min_i = GEMM_P;
      } else {
	if (min_i > GEMM_P) {
	  min_i = ((min_i / 2 + GEMM_UNROLL_M - 1)/GEMM_UNROLL_M) * GEMM_UNROLL_M;
	} else {
	  /* The whole M range fits one block: packed B strips need no
	     per-strip spacing. */
	  l1stride = 0;
	}
      }

      /* Pack the first A panel. */
      START_RPCC();
      ICOPY_OPERATION(min_l, min_i, a, lda, ls, m_from, sa);
      STOP_RPCC(innercost);

#if defined(FUSED_GEMM) && !defined(TIMING)
      /* Fused path: pack B and run the kernel in one call. */
      FUSED_KERNEL_OPERATION(min_i, min_j, min_l, alpha,
			     sa, sb, b, ldb, c, ldc, m_from, js, ls);
#else
      /* Pack B in unroll-sized strips, running the kernel on each strip
	 right after it is packed (keeps the strip hot in cache). */
      for(jjs = js; jjs < js + min_j; jjs += min_jj){
	min_jj = min_j + js - jjs;

#if defined(SKYLAKEX) || defined(COOPERLAKE) || defined(SAPPHIRERAPIDS)
	/* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve best performance */
	if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N;
#else
	if (min_jj >= 3*GEMM_UNROLL_N) min_jj = 3*GEMM_UNROLL_N;
	else
/*
	if (min_jj >= 2*GEMM_UNROLL_N) min_jj = 2*GEMM_UNROLL_N;
	else
*/
	  if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N;
#endif

	START_RPCC();
	OCOPY_OPERATION(min_l, min_jj, b, ldb, ls, jjs,
			sb + pad_min_l * (jjs - js) * COMPSIZE * l1stride);
	STOP_RPCC(outercost);

	START_RPCC();
#if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
	KERNEL_OPERATION(min_i, min_jj, min_l, alpha,
			 sa, sb + pad_min_l * (jjs - js) * COMPSIZE * l1stride, c, ldc, m_from, jjs);
#else
	KERNEL_OPERATION(min_i, min_jj, min_l, (void *)&xalpha,
			 sa, sb + pad_min_l * (jjs - js) * COMPSIZE * l1stride, c, ldc, m_from, jjs);
#endif
	STOP_RPCC(kernelcost);
      }
#endif

      /* Remaining M blocks: reuse the packed B (sb), repack only A. */
      for(is = m_from + min_i; is < m_to; is += min_i){
	min_i = m_to - is;
	if (min_i >= GEMM_P * 2) {
	  min_i = GEMM_P;
	} else
	  if (min_i > GEMM_P) {
	    min_i = ((min_i / 2 + GEMM_UNROLL_M - 1)/GEMM_UNROLL_M) * GEMM_UNROLL_M;
	  }

	START_RPCC();
	ICOPY_OPERATION(min_l, min_i, a, lda, ls, is, sa);
	STOP_RPCC(innercost);

	START_RPCC();
#if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
	KERNEL_OPERATION(min_i, min_j, min_l, alpha, sa, sb, c, ldc, is, js);
#else
	KERNEL_OPERATION(min_i, min_j, min_l, (void *)&xalpha, sa, sb, c, ldc, is, js);
#endif
	STOP_RPCC(kernelcost);

      } /* end of is */
    } /* end of ls */
  } /* end of js */

#ifdef TIMING
  total = (double)outercost + (double)innercost + (double)kernelcost;

  printf( "Copy A : %5.2f Copy B: %5.2f Kernel : %5.2f kernel Effi. : %5.2f Total Effi. : %5.2f\n",
	  innercost / total * 100., outercost / total * 100.,
	  kernelcost / total * 100.,
	  (double)(m_to - m_from) * (double)(n_to - n_from) * (double)k / (double)kernelcost * 100. * (double)COMPSIZE / 2.,
	  (double)(m_to - m_from) * (double)(n_to - n_from) * (double)k / total * 100. * (double)COMPSIZE / 2.);
#endif

  return 0;
}