/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin.           */
/* Copyright 2025 The OpenBLAS Project.                              */
/* All rights reserved.                                              */
/*                                                                   */
/* Redistribution and use in source and binary forms, with or        */
/* without modification, are permitted provided that the following   */
/* conditions are met:                                               */
/*                                                                   */
/*   1. Redistributions of source code must retain the above         */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer.                                                  */
/*                                                                   */
/*   2. Redistributions in binary form must reproduce the above      */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer in the documentation and/or other materials       */
/*      provided with the distribution.                              */
/*                                                                   */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT           */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,           */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF          */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE          */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT          */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,        */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES          */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE         */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR              */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF        */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT         */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT        */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE               */
/* POSSIBILITY OF SUCH DAMAGE.                                       */
/*                                                                   */
/* The views and conclusions contained in the software and           */
/* documentation are those of the authors and should not be          */
/* interpreted as representing official policies, either expressed   */
/* or implied, of The University of Texas at Austin.                 */
/*********************************************************************/

/* This file is a template for level 3 operation */
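
/* BETA_OPERATION scales the C(m_from:m_to, n_from:n_to) block by beta via
   GEMM_BETA before any products are accumulated.  The three variants below
   pass beta as a single scalar (real), as a real/imaginary pair (complex),
   or as an opaque pointer (XDOUBLE + QUAD_PRECISION). */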
#ifndef BETA_OPERATION
#if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
#ifndef COMPLEX
#define BETA_OPERATION(M_FROM, M_TO, N_FROM, N_TO, BETA, C, LDC) \
  GEMM_BETA((M_TO) - (M_FROM), (N_TO - N_FROM), 0, \
            BETA[0], NULL, 0, NULL, 0, \
            (FLOAT *)(C) + ((M_FROM) + (N_FROM) * (LDC)) * COMPSIZE, LDC)
#else
#define BETA_OPERATION(M_FROM, M_TO, N_FROM, N_TO, BETA, C, LDC) \
  GEMM_BETA((M_TO) - (M_FROM), (N_TO - N_FROM), 0, \
            BETA[0], BETA[1], NULL, 0, NULL, 0, \
            (FLOAT *)(C) + ((M_FROM) + (N_FROM) * (LDC)) * COMPSIZE, LDC)
#endif
#else
#define BETA_OPERATION(M_FROM, M_TO, N_FROM, N_TO, BETA, C, LDC) \
  GEMM_BETA((M_TO) - (M_FROM), (N_TO - N_FROM), 0, \
            BETA, NULL, 0, NULL, 0, \
            (FLOAT *)(C) + ((M_FROM) + (N_FROM) * (LDC)) * COMPSIZE, LDC)
#endif
#endif
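
/* ICOPY_OPERATION packs a panel of A into the sa buffer and OCOPY_OPERATION
   packs a panel of B into sb.  The two-letter operation name (first letter
   describing A, second describing B) selects between the INCOPY/ITCOPY and
   ONCOPY/OTCOPY packing routines so that both packed panels end up in the
   layout the compute kernel expects. */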
#ifndef ICOPY_OPERATION
#if defined(NN) || defined(NT) || defined(NC) || defined(NR) || \
    defined(RN) || defined(RT) || defined(RC) || defined(RR)
#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (IFLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER);
#else
#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (IFLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER);
#endif
#endif

#ifndef OCOPY_OPERATION
#if defined(NN) || defined(TN) || defined(CN) || defined(RN) || \
    defined(NR) || defined(TR) || defined(CR) || defined(RR)
#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (IFLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER);
#else
#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (IFLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER);
#endif
#endif
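
/* KERNEL_FUNC selects the GEMM micro-kernel by conjugation, as encoded in
   the two-letter operation name: _N (neither operand conjugated),
   _L (left/A conjugated), _R (right/B conjugated), _B (both). */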
#ifndef KERNEL_FUNC
#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define KERNEL_FUNC GEMM_KERNEL_N
#endif
#if defined(CN) || defined(CT) || defined(RN) || defined(RT)
#define KERNEL_FUNC GEMM_KERNEL_L
#endif
#if defined(NC) || defined(TC) || defined(NR) || defined(TR)
#define KERNEL_FUNC GEMM_KERNEL_R
#endif
#if defined(CC) || defined(CR) || defined(RC) || defined(RR)
#define KERNEL_FUNC GEMM_KERNEL_B
#endif
#endif
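
/* KERNEL_OPERATION invokes the selected micro-kernel on the packed panels
   sa and sb, updating the block of C that starts at row X, column Y.
   Mirroring BETA_OPERATION, alpha is passed as a scalar, a real/imaginary
   pair, or an opaque pointer depending on the precision. */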
#ifndef KERNEL_OPERATION
#if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
#ifndef COMPLEX
#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  KERNEL_FUNC(M, N, K, ALPHA[0], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC)
#else
#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  KERNEL_FUNC(M, N, K, ALPHA[0], ALPHA[1], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC)
#endif
#else
#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  KERNEL_FUNC(M, N, K, ALPHA, SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC)
#endif
#endif

#ifndef FUSED_KERNEL_OPERATION
#if defined(NN) || defined(TN) || defined(CN) || defined(RN) || \
    defined(NR) || defined(TR) || defined(CR) || defined(RR)
#ifndef COMPLEX
#define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  FUSED_GEMM_KERNEL_N(M, N, K, ALPHA[0], SA, SB, \
                      (FLOAT *)(B) + ((L) + (J) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
#else
#define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  FUSED_GEMM_KERNEL_N(M, N, K, ALPHA[0], ALPHA[1], SA, SB, \
                      (FLOAT *)(B) + ((L) + (J) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
#endif
#else
#ifndef COMPLEX
#define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  FUSED_GEMM_KERNEL_T(M, N, K, ALPHA[0], SA, SB, \
                      (FLOAT *)(B) + ((J) + (L) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
#else
#define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  FUSED_GEMM_KERNEL_T(M, N, K, ALPHA[0], ALPHA[1], SA, SB, \
                      (FLOAT *)(B) + ((J) + (L) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
#endif
#endif
#endif
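
/* Unless the including driver overrides them, the operand macros below
   default to the corresponding fields of the blas_arg_t descriptor passed
   to CNAME. */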
#ifndef A
#define A args -> a
#endif
#ifndef LDA
#define LDA args -> lda
#endif
#ifndef B
#define B args -> b
#endif
#ifndef LDB
#define LDB args -> ldb
#endif
#ifndef C
#define C args -> c
#endif
#ifndef LDC
#define LDC args -> ldc
#endif
#ifndef M
#define M args -> m
#endif
#ifndef N
#define N args -> n
#endif
#ifndef K
#define K args -> k
#endif

#ifdef TIMING
#define START_RPCC()         rpcc_counter = rpcc()
#define STOP_RPCC(COUNTER)   COUNTER += rpcc() - rpcc_counter
#else
#define START_RPCC()
#define STOP_RPCC(COUNTER)
#endif
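
/* Single-threaded driver for one level 3 operation.  range_m / range_n,
   when non-NULL, restrict the update to a [m_from, m_to) x [n_from, n_to)
   sub-block of C (typically used to partition work across threads); sa and
   sb are the preallocated packing buffers for panels of A and B. */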
int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
          XFLOAT *sa, XFLOAT *sb, BLASLONG dummy){

  BLASLONG k, lda, ldb, ldc;
  FLOAT *alpha, *beta;
  IFLOAT *a, *b;
  FLOAT *c;
  BLASLONG m_from, m_to, n_from, n_to;

  BLASLONG ls, is, js;
  BLASLONG min_l, min_i, min_j;
#if !defined(FUSED_GEMM) || defined(TIMING)
  BLASLONG jjs, min_jj;
#endif
  BLASLONG l1stride, gemm_p, l2size;
#if defined(XDOUBLE) && defined(QUAD_PRECISION)
  xidouble xalpha;
#endif

#ifdef TIMING
  unsigned long long rpcc_counter;
  unsigned long long innercost  = 0;
  unsigned long long outercost  = 0;
  unsigned long long kernelcost = 0;
  double total;
#endif

  k = K;

  a = (IFLOAT *)A;
  b = (IFLOAT *)B;
  c = (FLOAT *)C;

  lda = LDA;
  ldb = LDB;
  ldc = LDC;

  alpha = (FLOAT *)args -> alpha;
  beta  = (FLOAT *)args -> beta;

  m_from = 0;
  m_to   = M;

  if (range_m) {
    m_from = *(((BLASLONG *)range_m) + 0);
    m_to   = *(((BLASLONG *)range_m) + 1);
  }

  n_from = 0;
  n_to   = N;

  if (range_n) {
    n_from = *(((BLASLONG *)range_n) + 0);
    n_to   = *(((BLASLONG *)range_n) + 1);
  }

  if (beta) {
#if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
#ifndef COMPLEX
    if (beta[0] != ONE
#else
    if ((beta[0] != ONE) || (beta[1] != ZERO)
#endif
#else
    if (((beta[0].x[1] != 0x3fff000000000000UL) || beta[0].x[0] != 0)
#ifdef COMPLEX
        && (((beta[1].x[0] | beta[1].x[1]) << 1) != 0)
#endif
#endif
        ) {
#if defined(XDOUBLE) && defined(QUAD_PRECISION)
      xidouble xbeta;
      qtox(&xbeta, beta);
#endif
      BETA_OPERATION(m_from, m_to, n_from, n_to, beta, c, ldc);
    }
  }

  if ((k == 0) || (alpha == NULL)) return 0;

#if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
  if (alpha[0] == ZERO
#ifdef COMPLEX
      && alpha[1] == ZERO
#endif
      ) return 0;
#else
  if (((alpha[0].x[0] | alpha[0].x[1]
#ifdef COMPLEX
        | alpha[1].x[0] | alpha[1].x[1]
#endif
        ) << 1) == 0) return 0;
#endif

#if defined(XDOUBLE) && defined(QUAD_PRECISION)
  qtox(&xalpha, alpha);
#endif

  l2size = GEMM_P * GEMM_Q;

#if 0
  fprintf(stderr, "GEMM(Single): M_from : %ld M_to : %ld N_from : %ld N_to : %ld k : %ld\n", m_from, m_to, n_from, n_to, k);
  fprintf(stderr, "GEMM(Single):: P = %4ld Q = %4ld R = %4ld\n", (BLASLONG)GEMM_P, (BLASLONG)GEMM_Q, (BLASLONG)GEMM_R);
  // fprintf(stderr, "GEMM: SA .. %p SB .. %p\n", sa, sb);
  // fprintf(stderr, "A = %p B = %p C = %p\n\tlda = %ld ldb = %ld ldc = %ld\n", a, b, c, lda, ldb, ldc);
#endif

#ifdef TIMING
  innercost  = 0;
  outercost  = 0;
  kernelcost = 0;
#endif
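
  /* Blocked loop nest: js walks the columns of C in strips of at most
     GEMM_R, ls walks the inner (k) dimension in chunks of at most GEMM_Q,
     and the rows of C are handled in panels of at most GEMM_P (the first
     panel directly below, the remaining ones in the `is` loop).  Chunk
     sizes are rounded to multiples of GEMM_UNROLL_M. */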
  for(js = n_from; js < n_to; js += GEMM_R){
    min_j = n_to - js;
    if (min_j > GEMM_R) min_j = GEMM_R;

    for(ls = 0; ls < k; ls += min_l){

      min_l = k - ls;

      if (min_l >= GEMM_Q * 2) {
        // gemm_p = GEMM_P;
        min_l = GEMM_Q;
      } else {
        if (min_l > GEMM_Q) {
          min_l = ((min_l / 2 + GEMM_UNROLL_M - 1)/GEMM_UNROLL_M) * GEMM_UNROLL_M;
        }
        gemm_p = ((l2size / min_l + GEMM_UNROLL_M - 1)/GEMM_UNROLL_M) * GEMM_UNROLL_M;
        while (gemm_p * min_l > l2size) gemm_p -= GEMM_UNROLL_M;
      }

      BLASLONG pad_min_l = min_l;

#if defined(BFLOAT16)
#if defined(DYNAMIC_ARCH)
      pad_min_l = (min_l + gotoblas->sbgemm_align_k - 1) & ~(gotoblas->sbgemm_align_k - 1);
#else
      pad_min_l = (min_l + SBGEMM_ALIGN_K - 1) & ~(SBGEMM_ALIGN_K - 1);
#endif
#endif
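
      /* For BFLOAT16 (sbgemm) the packed k dimension is padded up to the
         kernel's required alignment (sbgemm_align_k / SBGEMM_ALIGN_K), so
         pad_min_l rather than min_l is used as the stride into sb below. */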
      /* First, we have to move data A to L2 cache */
      min_i = m_to - m_from;
      l1stride = 1;

      if (min_i >= GEMM_P * 2) {
        min_i = GEMM_P;
      } else {
        if (min_i > GEMM_P) {
          min_i = ((min_i / 2 + GEMM_UNROLL_M - 1)/GEMM_UNROLL_M) * GEMM_UNROLL_M;
        } else {
          l1stride = 0;
        }
      }

      START_RPCC();

      ICOPY_OPERATION(min_l, min_i, a, lda, ls, m_from, sa);

      STOP_RPCC(innercost);

#if defined(FUSED_GEMM) && !defined(TIMING)

      FUSED_KERNEL_OPERATION(min_i, min_j, min_l, alpha,
                             sa, sb, b, ldb, c, ldc, m_from, js, ls);

#else
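      /* Non-fused path: pack B in strips of at most a few GEMM_UNROLL_N
         columns (jjs loop) and run the kernel on the first row panel of A
         against each freshly packed strip of sb, while that strip is still
         cache-resident. */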
      for(jjs = js; jjs < js + min_j; jjs += min_jj){
        min_jj = min_j + js - jjs;

#if defined(SKYLAKEX) || defined(COOPERLAKE) || defined(SAPPHIRERAPIDS)
        /* the current AVX512 s/d/c/z GEMM kernel requires n>=6*GEMM_UNROLL_N to achieve best performance */
        if (min_jj >= 6*GEMM_UNROLL_N) min_jj = 6*GEMM_UNROLL_N;
#else
        if (min_jj >= 3*GEMM_UNROLL_N) min_jj = 3*GEMM_UNROLL_N;
        else
/*
        if (min_jj >= 2*GEMM_UNROLL_N) min_jj = 2*GEMM_UNROLL_N;
        else
*/
          if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N;
#endif

        START_RPCC();

        OCOPY_OPERATION(min_l, min_jj, b, ldb, ls, jjs,
                        sb + pad_min_l * (jjs - js) * COMPSIZE * l1stride);

        STOP_RPCC(outercost);

        START_RPCC();

#if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
        KERNEL_OPERATION(min_i, min_jj, min_l, alpha,
                         sa, sb + pad_min_l * (jjs - js) * COMPSIZE * l1stride, c, ldc, m_from, jjs);
#else
        KERNEL_OPERATION(min_i, min_jj, min_l, (void *)&xalpha,
                         sa, sb + pad_min_l * (jjs - js) * COMPSIZE * l1stride, c, ldc, m_from, jjs);
#endif

        STOP_RPCC(kernelcost);

      }
#endif
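
      /* Remaining row panels of A: repack each panel into sa and run the
         kernel against the full packed B (sb) already prepared above. */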
      for(is = m_from + min_i; is < m_to; is += min_i){
        min_i = m_to - is;

        if (min_i >= GEMM_P * 2) {
          min_i = GEMM_P;
        } else
          if (min_i > GEMM_P) {
            min_i = ((min_i / 2 + GEMM_UNROLL_M - 1)/GEMM_UNROLL_M) * GEMM_UNROLL_M;
          }

        START_RPCC();

        ICOPY_OPERATION(min_l, min_i, a, lda, ls, is, sa);

        STOP_RPCC(innercost);

        START_RPCC();

#if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
        KERNEL_OPERATION(min_i, min_j, min_l, alpha, sa, sb, c, ldc, is, js);
#else
        KERNEL_OPERATION(min_i, min_j, min_l, (void *)&xalpha, sa, sb, c, ldc, is, js);
#endif

        STOP_RPCC(kernelcost);

      } /* end of is */
    } /* end of ls */
  } /* end of js */
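
  /* With TIMING enabled, report how the cycle budget splits between packing
     A (innercost), packing B (outercost) and the compute kernel, plus a
     rough efficiency figure derived from the m*n*k operation count. */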
#ifdef TIMING
  total = (double)outercost + (double)innercost + (double)kernelcost;

  printf( "Copy A : %5.2f Copy B: %5.2f Kernel : %5.2f kernel Effi. : %5.2f Total Effi. : %5.2f\n",
          innercost / total * 100., outercost / total * 100.,
          kernelcost / total * 100.,
          (double)(m_to - m_from) * (double)(n_to - n_from) * (double)k / (double)kernelcost * 100. * (double)COMPSIZE / 2.,
          (double)(m_to - m_from) * (double)(n_to - n_from) * (double)k / total * 100. * (double)COMPSIZE / 2.);
#endif

  return 0;
}