
level3_thread.c 23 kB

/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin.           */
/* All rights reserved.                                              */
/*                                                                   */
/* Redistribution and use in source and binary forms, with or        */
/* without modification, are permitted provided that the following   */
/* conditions are met:                                               */
/*                                                                   */
/*   1. Redistributions of source code must retain the above         */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer.                                                  */
/*                                                                   */
/*   2. Redistributions in binary form must reproduce the above      */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer in the documentation and/or other materials       */
/*      provided with the distribution.                              */
/*                                                                   */
/*    THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT        */
/*    AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,        */
/*    INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF       */
/*    MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE       */
/*    DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT       */
/*    AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,     */
/*    INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES       */
/*    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE      */
/*    GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR           */
/*    BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF     */
/*    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      */
/*    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT     */
/*    OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE            */
/*    POSSIBILITY OF SUCH DAMAGE.                                    */
/*                                                                   */
/* The views and conclusions contained in the software and           */
/* documentation are those of the authors and should not be          */
/* interpreted as representing official policies, either expressed   */
/* or implied, of The University of Texas at Austin.                 */
/*********************************************************************/

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 8
#endif

#ifndef DIVIDE_RATE
#define DIVIDE_RATE 2
#endif

#ifndef SWITCH_RATIO
#define SWITCH_RATIO 2
#endif
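
/* A sketch of the roles these tunables play below:
 *   CACHE_LINE_SIZE - stride (in BLASLONGs) between per-buffer flags in
 *                     job_t, so each flag sits on its own cache line and
 *                     spinning threads do not false-share.
 *   DIVIDE_RATE     - number of sub-panels each thread's copy of B is
 *                     split into, letting consumers start early.
 *   SWITCH_RATIO    - minimum useful partition width; partitions in n
 *                     are never made narrower than this. */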

// The array of job_t may overflow the stack when MAX_CPU_NUMBER is large.
// In that case, allocate the job_t array on the heap with malloc instead.
#if MAX_CPU_NUMBER > BLAS3_MEM_ALLOC_THRESHOLD
#define USE_ALLOC_HEAP
#endif

#ifndef GEMM_LOCAL
#if defined(NN)
#define GEMM_LOCAL GEMM_NN
#elif defined(NT)
#define GEMM_LOCAL GEMM_NT
#elif defined(NR)
#define GEMM_LOCAL GEMM_NR
#elif defined(NC)
#define GEMM_LOCAL GEMM_NC
#elif defined(TN)
#define GEMM_LOCAL GEMM_TN
#elif defined(TT)
#define GEMM_LOCAL GEMM_TT
#elif defined(TR)
#define GEMM_LOCAL GEMM_TR
#elif defined(TC)
#define GEMM_LOCAL GEMM_TC
#elif defined(RN)
#define GEMM_LOCAL GEMM_RN
#elif defined(RT)
#define GEMM_LOCAL GEMM_RT
#elif defined(RR)
#define GEMM_LOCAL GEMM_RR
#elif defined(RC)
#define GEMM_LOCAL GEMM_RC
#elif defined(CN)
#define GEMM_LOCAL GEMM_CN
#elif defined(CT)
#define GEMM_LOCAL GEMM_CT
#elif defined(CR)
#define GEMM_LOCAL GEMM_CR
#elif defined(CC)
#define GEMM_LOCAL GEMM_CC
#endif
#endif

typedef struct {
#if __STDC_VERSION__ >= 201112L
  _Atomic
#else
  volatile
#endif
  BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE];
} job_t;
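
/* job[i].working[j][CACHE_LINE_SIZE * buf] is the handshake cell between
 * producer thread i and consumer thread j for sub-panel buf: it holds the
 * buffer address (nonzero) while thread j may still read thread i's copy
 * of B, and zero once the panel is free for reuse.  The CACHE_LINE_SIZE
 * stride keeps the cells on separate cache lines. */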

#ifndef BETA_OPERATION
#ifndef COMPLEX
#define BETA_OPERATION(M_FROM, M_TO, N_FROM, N_TO, BETA, C, LDC) \
  GEMM_BETA((M_TO) - (M_FROM), (N_TO) - (N_FROM), 0, \
            BETA[0], NULL, 0, NULL, 0, \
            (FLOAT *)(C) + ((M_FROM) + (N_FROM) * (LDC)) * COMPSIZE, LDC)
#else
#define BETA_OPERATION(M_FROM, M_TO, N_FROM, N_TO, BETA, C, LDC) \
  GEMM_BETA((M_TO) - (M_FROM), (N_TO) - (N_FROM), 0, \
            BETA[0], BETA[1], NULL, 0, NULL, 0, \
            (FLOAT *)(C) + ((M_FROM) + (N_FROM) * (LDC)) * COMPSIZE, LDC)
#endif
#endif

#ifndef ICOPY_OPERATION
#if defined(NN) || defined(NT) || defined(NC) || defined(NR) || \
    defined(RN) || defined(RT) || defined(RC) || defined(RR)
#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER);
#else
#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER);
#endif
#endif

#ifndef OCOPY_OPERATION
#if defined(NN) || defined(TN) || defined(CN) || defined(RN) || \
    defined(NR) || defined(TR) || defined(CR) || defined(RR)
#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER);
#else
#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER);
#endif
#endif

#ifndef KERNEL_FUNC
#if defined(NN) || defined(NT) || defined(TN) || defined(TT)
#define KERNEL_FUNC GEMM_KERNEL_N
#endif
#if defined(CN) || defined(CT) || defined(RN) || defined(RT)
#define KERNEL_FUNC GEMM_KERNEL_L
#endif
#if defined(NC) || defined(TC) || defined(NR) || defined(TR)
#define KERNEL_FUNC GEMM_KERNEL_R
#endif
#if defined(CC) || defined(CR) || defined(RC) || defined(RR)
#define KERNEL_FUNC GEMM_KERNEL_B
#endif
#endif

#ifndef KERNEL_OPERATION
#ifndef COMPLEX
#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  KERNEL_FUNC(M, N, K, ALPHA[0], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC)
#else
#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  KERNEL_FUNC(M, N, K, ALPHA[0], ALPHA[1], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC)
#endif
#endif

#ifndef FUSED_KERNEL_OPERATION
#if defined(NN) || defined(TN) || defined(CN) || defined(RN) || \
    defined(NR) || defined(TR) || defined(CR) || defined(RR)
#ifndef COMPLEX
#define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  FUSED_GEMM_KERNEL_N(M, N, K, ALPHA[0], SA, SB, \
    (FLOAT *)(B) + ((L) + (J) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
#else
#define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  FUSED_GEMM_KERNEL_N(M, N, K, ALPHA[0], ALPHA[1], SA, SB, \
    (FLOAT *)(B) + ((L) + (J) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
#endif
#else
#ifndef COMPLEX
#define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  FUSED_GEMM_KERNEL_T(M, N, K, ALPHA[0], SA, SB, \
    (FLOAT *)(B) + ((J) + (L) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
#else
#define FUSED_KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, B, LDB, C, LDC, I, J, L) \
  FUSED_GEMM_KERNEL_T(M, N, K, ALPHA[0], ALPHA[1], SA, SB, \
    (FLOAT *)(B) + ((J) + (L) * LDB) * COMPSIZE, LDB, (FLOAT *)(C) + ((I) + (J) * LDC) * COMPSIZE, LDC)
#endif
#endif
#endif

#ifndef A
#define A args -> a
#endif
#ifndef LDA
#define LDA args -> lda
#endif
#ifndef B
#define B args -> b
#endif
#ifndef LDB
#define LDB args -> ldb
#endif
#ifndef C
#define C args -> c
#endif
#ifndef LDC
#define LDC args -> ldc
#endif
#ifndef M
#define M args -> m
#endif
#ifndef N
#define N args -> n
#endif
#ifndef K
#define K args -> k
#endif

#ifdef TIMING
#define START_RPCC()        rpcc_counter = rpcc()
#define STOP_RPCC(COUNTER)  COUNTER += rpcc() - rpcc_counter
#else
#define START_RPCC()
#define STOP_RPCC(COUNTER)
#endif
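
/* With -DTIMING each thread accumulates cycle counts (via rpcc()) for the
 * copy routines, the kernel, and the three wait loops, and prints the
 * percentage breakdown on exit; otherwise these macros compile to nothing. */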

static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
                        FLOAT *sa, FLOAT *sb, BLASLONG mypos) {

  FLOAT *buffer[DIVIDE_RATE];

  BLASLONG k, lda, ldb, ldc;
  BLASLONG m_from, m_to, n_from, n_to;

  FLOAT *alpha, *beta;
  FLOAT *a, *b, *c;
  job_t *job = (job_t *)args -> common;

  BLASLONG nthreads_m;
  BLASLONG mypos_m, mypos_n;

  BLASLONG is, js, ls, bufferside, jjs;
  BLASLONG min_i, min_l, div_n, min_jj;
  BLASLONG i, current;
  BLASLONG l1stride;

#ifdef TIMING
  BLASULONG rpcc_counter;
  BLASULONG copy_A = 0;
  BLASULONG copy_B = 0;
  BLASULONG kernel = 0;
  BLASULONG waiting1 = 0;
  BLASULONG waiting2 = 0;
  BLASULONG waiting3 = 0;
  BLASULONG waiting6[MAX_CPU_NUMBER];
  BLASULONG ops = 0;

  for (i = 0; i < args -> nthreads; i++) waiting6[i] = 0;
#endif

  k = K;

  a = (FLOAT *)A;
  b = (FLOAT *)B;
  c = (FLOAT *)C;

  lda = LDA;
  ldb = LDB;
  ldc = LDC;

  alpha = (FLOAT *)args -> alpha;
  beta  = (FLOAT *)args -> beta;

  /* Initialize 2D CPU distribution */
  nthreads_m = args -> nthreads;
  if (range_m) {
    nthreads_m = range_m[-1];
  }
  mypos_n = blas_quickdivide(mypos, nthreads_m);  /* mypos_n = mypos / nthreads_m */
  mypos_m = mypos - mypos_n * nthreads_m;         /* mypos_m = mypos % nthreads_m */
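
  /* Illustration (not from the source): with nthreads_m = 2 and
   * nthreads_n = 2, threads 0..3 form a 2x2 grid over C:
   *   mypos            : 0      1      2      3
   *   (mypos_m, mypos_n): (0,0)  (1,0)  (0,1)  (1,1)
   * e.g. mypos = 3 gives mypos_n = 3 / 2 = 1 and mypos_m = 3 - 1*2 = 1. */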

  /* Initialize m and n */
  m_from = 0;
  m_to   = M;
  if (range_m) {
    m_from = range_m[mypos_m + 0];
    m_to   = range_m[mypos_m + 1];
  }
  n_from = 0;
  n_to   = N;
  if (range_n) {
    n_from = range_n[mypos + 0];
    n_to   = range_n[mypos + 1];
  }

  /* Multiply C by beta if needed */
  if (beta) {
#ifndef COMPLEX
    if (beta[0] != ONE)
#else
    if ((beta[0] != ONE) || (beta[1] != ZERO))
#endif
      BETA_OPERATION(m_from, m_to, range_n[mypos_n * nthreads_m], range_n[(mypos_n + 1) * nthreads_m], beta, c, ldc);
  }

  /* Return early if no more computation is needed */
  if ((k == 0) || (alpha == NULL)) return 0;
  if (alpha[0] == ZERO
#ifdef COMPLEX
      && alpha[1] == ZERO
#endif
      ) return 0;

  /* Initialize workspace for local region of B */
  div_n = (n_to - n_from + DIVIDE_RATE - 1) / DIVIDE_RATE;
  buffer[0] = sb;
  for (i = 1; i < DIVIDE_RATE; i++) {
    buffer[i] = buffer[i - 1] + GEMM_Q * ((div_n + GEMM_UNROLL_N - 1) / GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE;
  }
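
  /* sb is carved into DIVIDE_RATE equal slices; each slice holds one
   * GEMM_Q x div_n sub-panel of B, with div_n rounded up to a multiple of
   * GEMM_UNROLL_N so every slice starts on an unroll boundary. */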

  /* Iterate through steps of k */
  for (ls = 0; ls < k; ls += min_l) {

    /* Determine step size in k */
    min_l = k - ls;
    if (min_l >= GEMM_Q * 2) {
      min_l = GEMM_Q;
    } else {
      if (min_l > GEMM_Q) min_l = (min_l + 1) / 2;
    }
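
    /* A remainder between GEMM_Q and 2*GEMM_Q is split into two nearly
     * equal steps rather than GEMM_Q plus a sliver, so the final
     * iteration still has enough work to amortize the copies. */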

    /* Determine step size in m
     * Note: We are currently on the first step in m */
    l1stride = 1;
    min_i = m_to - m_from;
    if (min_i >= GEMM_P * 2) {
      min_i = GEMM_P;
    } else {
      if (min_i > GEMM_P) {
        min_i = ((min_i / 2 + GEMM_UNROLL_M - 1) / GEMM_UNROLL_M) * GEMM_UNROLL_M;
      } else {
        if (args -> nthreads == 1) l1stride = 0;
      }
    }

    /* Copy local region of A into workspace */
    START_RPCC();
    ICOPY_OPERATION(min_l, min_i, a, lda, ls, m_from, sa);
    STOP_RPCC(copy_A);

    /* Copy local region of B into workspace and apply kernel */
    div_n = (n_to - n_from + DIVIDE_RATE - 1) / DIVIDE_RATE;
    for (js = n_from, bufferside = 0; js < n_to; js += div_n, bufferside++) {

      /* Make sure no other thread is still reading this workspace slice */
      START_RPCC();
      for (i = 0; i < args -> nthreads; i++)
        while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;};
      STOP_RPCC(waiting1);

#if defined(FUSED_GEMM) && !defined(TIMING)
      /* Fused operation to copy region of B into workspace and apply kernel */
      FUSED_KERNEL_OPERATION(min_i, MIN(n_to, js + div_n) - js, min_l, alpha,
                             sa, buffer[bufferside], b, ldb, c, ldc, m_from, js, ls);
#else
      /* Split local region of B into parts */
      for (jjs = js; jjs < MIN(n_to, js + div_n); jjs += min_jj) {
        min_jj = MIN(n_to, js + div_n) - jjs;
        if (min_jj >= 3 * GEMM_UNROLL_N) min_jj = 3 * GEMM_UNROLL_N;
        else
          if (min_jj >= 2 * GEMM_UNROLL_N) min_jj = 2 * GEMM_UNROLL_N;
          else
            if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N;

        /* Copy part of local region of B into workspace */
        START_RPCC();
        OCOPY_OPERATION(min_l, min_jj, b, ldb, ls, jjs,
                        buffer[bufferside] + min_l * (jjs - js) * COMPSIZE * l1stride);
        STOP_RPCC(copy_B);

        /* Apply kernel with local region of A and part of local region of B */
        START_RPCC();
        KERNEL_OPERATION(min_i, min_jj, min_l, alpha,
                         sa, buffer[bufferside] + min_l * (jjs - js) * COMPSIZE * l1stride,
                         c, ldc, m_from, jjs);
        STOP_RPCC(kernel);

#ifdef TIMING
        ops += 2 * min_i * min_jj * min_l;
#endif
      }
#endif

      /* Publish this slice: set the flag so other threads can access this
       * region of B.  The WMB (write memory barrier) makes the copied data
       * globally visible before any thread observes the nonzero flag. */
      for (i = mypos_n * nthreads_m; i < (mypos_n + 1) * nthreads_m; i++)
        job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside];
      WMB;
    }

    /* Get regions of B from other threads and apply kernel */
    current = mypos;
    do {

      /* This thread accesses regions of B from threads in the range
       * [ mypos_n * nthreads_m, (mypos_n + 1) * nthreads_m ) */
      current++;
      if (current >= (mypos_n + 1) * nthreads_m) current = mypos_n * nthreads_m;

      /* Split other region of B into parts */
      div_n = (range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE;
      for (js = range_n[current], bufferside = 0; js < range_n[current + 1]; js += div_n, bufferside++) {

        if (current != mypos) {

          /* Wait until other region of B is initialized */
          START_RPCC();
          while (job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;};
          STOP_RPCC(waiting2);

          /* Apply kernel with local region of A and part of other region of B */
          START_RPCC();
          KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - js, div_n), min_l, alpha,
                           sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside],
                           c, ldc, m_from, js);
          STOP_RPCC(kernel);

#ifdef TIMING
          ops += 2 * min_i * MIN(range_n[current + 1] - js, div_n) * min_l;
#endif
        }

        /* Clear synchronization flag if this thread is done with other region of B */
        if (m_to - m_from == min_i) {
          job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0;
        }
      }
    } while (current != mypos);

    /* Iterate through steps of m
     * Note: First step has already been finished */
    for (is = m_from + min_i; is < m_to; is += min_i) {
      min_i = m_to - is;
      if (min_i >= GEMM_P * 2) {
        min_i = GEMM_P;
      } else
        if (min_i > GEMM_P) {
          min_i = (((min_i + 1) / 2 + GEMM_UNROLL_M - 1) / GEMM_UNROLL_M) * GEMM_UNROLL_M;
        }

      /* Copy local region of A into workspace */
      START_RPCC();
      ICOPY_OPERATION(min_l, min_i, a, lda, ls, is, sa);
      STOP_RPCC(copy_A);

      /* Get regions of B and apply kernel */
      current = mypos;
      do {

        /* Split region of B into parts and apply kernel */
        div_n = (range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE;
        for (js = range_n[current], bufferside = 0; js < range_n[current + 1]; js += div_n, bufferside++) {

          /* Apply kernel with local region of A and part of region of B */
          START_RPCC();
          KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - js, div_n), min_l, alpha,
                           sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside],
                           c, ldc, is, js);
          STOP_RPCC(kernel);

#ifdef TIMING
          ops += 2 * min_i * MIN(range_n[current + 1] - js, div_n) * min_l;
#endif

          /* Clear synchronization flag if this thread is done with region of B */
          if (is + min_i >= m_to) {
            job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0;
            WMB;
          }
        }

        /* This thread accesses regions of B from threads in the range
         * [ mypos_n * nthreads_m, (mypos_n + 1) * nthreads_m ) */
        current++;
        if (current >= (mypos_n + 1) * nthreads_m) current = mypos_n * nthreads_m;
      } while (current != mypos);
    }
  }

  /* Wait until all other threads are done with local region of B */
  START_RPCC();
  for (i = 0; i < args -> nthreads; i++) {
    for (js = 0; js < DIVIDE_RATE; js++) {
      while (job[mypos].working[i][CACHE_LINE_SIZE * js]) {YIELDING;};
    }
  }
  STOP_RPCC(waiting3);

#ifdef TIMING
  BLASLONG waiting = waiting1 + waiting2 + waiting3;
  BLASLONG total = copy_A + copy_B + kernel + waiting;

  fprintf(stderr, "GEMM [%2ld] Copy_A : %6.2f Copy_B : %6.2f Wait1 : %6.2f Wait2 : %6.2f Wait3 : %6.2f Kernel : %6.2f",
          mypos, (double)copy_A / (double)total * 100., (double)copy_B / (double)total * 100.,
          (double)waiting1 / (double)total * 100.,
          (double)waiting2 / (double)total * 100.,
          (double)waiting3 / (double)total * 100.,
          (double)ops / (double)kernel / 4. * 100.);
  fprintf(stderr, "\n");
#endif

  return 0;
}

static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
                       FLOAT *sa, FLOAT *sb,
                       BLASLONG nthreads_m, BLASLONG nthreads_n) {

  blas_arg_t newarg;

#ifndef USE_ALLOC_HEAP
  job_t job[MAX_CPU_NUMBER];
#else
  job_t *job = NULL;
#endif

  blas_queue_t queue[MAX_CPU_NUMBER];

  BLASLONG range_M_buffer[MAX_CPU_NUMBER + 2];
  BLASLONG range_N_buffer[MAX_CPU_NUMBER + 2];
  BLASLONG *range_M, *range_N;
  BLASLONG num_parts;
  BLASLONG nthreads = args -> nthreads;
  BLASLONG width, i, j, k, js;
  BLASLONG m, n, n_from, n_to;
  int mode;

  /* Get execution mode */
#ifndef COMPLEX
#ifdef XDOUBLE
  mode = BLAS_XDOUBLE | BLAS_REAL | BLAS_NODE;
#elif defined(DOUBLE)
  mode = BLAS_DOUBLE | BLAS_REAL | BLAS_NODE;
#else
  mode = BLAS_SINGLE | BLAS_REAL | BLAS_NODE;
#endif
#else
#ifdef XDOUBLE
  mode = BLAS_XDOUBLE | BLAS_COMPLEX | BLAS_NODE;
#elif defined(DOUBLE)
  mode = BLAS_DOUBLE | BLAS_COMPLEX | BLAS_NODE;
#else
  mode = BLAS_SINGLE | BLAS_COMPLEX | BLAS_NODE;
#endif
#endif

#ifdef USE_ALLOC_HEAP
  /* Dynamically allocate workspace */
  job = (job_t *)malloc(MAX_CPU_NUMBER * sizeof(job_t));
  if (job == NULL) {
    fprintf(stderr, "OpenBLAS: malloc failed in %s\n", __func__);
    exit(1);
  }
#endif

  /* Initialize struct for arguments */
  newarg.m = args -> m;
  newarg.n = args -> n;
  newarg.k = args -> k;
  newarg.a = args -> a;
  newarg.b = args -> b;
  newarg.c = args -> c;
  newarg.lda = args -> lda;
  newarg.ldb = args -> ldb;
  newarg.ldc = args -> ldc;
  newarg.alpha = args -> alpha;
  newarg.beta = args -> beta;
  newarg.nthreads = args -> nthreads;
  newarg.common = (void *)job;

#ifdef PARAMTEST
  newarg.gemm_p = args -> gemm_p;
  newarg.gemm_q = args -> gemm_q;
  newarg.gemm_r = args -> gemm_r;
#endif

  /* Initialize partitions in m and n
   * Note: The number of CPU partitions is stored in the -1 entry */
  range_M = &range_M_buffer[1];
  range_N = &range_N_buffer[1];

  range_M[-1] = nthreads_m;
  range_N[-1] = nthreads_n;

  if (!range_m) {
    range_M[0] = 0;
    m = args -> m;
  } else {
    range_M[0] = range_m[0];
    m = range_m[1] - range_m[0];
  }

  /* Partition m into nthreads_m regions */
  num_parts = 0;
  while (m > 0) {
    width = blas_quickdivide(m + nthreads_m - num_parts - 1, nthreads_m - num_parts);
    m -= width;
    if (m < 0) width = width + m;
    range_M[num_parts + 1] = range_M[num_parts] + width;
    num_parts++;
  }
  for (i = num_parts; i < MAX_CPU_NUMBER; i++) {
    range_M[i + 1] = range_M[num_parts];
  }
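
  /* Each pass hands out ceil(remaining_m / remaining_parts) rows; e.g.
   * m = 10 over nthreads_m = 4 gives widths 3, 3, 2, 2.  Unused slots are
   * padded with zero-width ranges. */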

  /* Initialize parameters for parallel execution */
  for (i = 0; i < nthreads; i++) {
    queue[i].mode = mode;
    queue[i].routine = inner_thread;
    queue[i].args = &newarg;
    queue[i].range_m = range_M;
    queue[i].range_n = range_N;
    queue[i].sa = NULL;
    queue[i].sb = NULL;
    queue[i].next = &queue[i + 1];
  }

  queue[0].sa = sa;
  queue[0].sb = sb;
  queue[nthreads - 1].next = NULL;
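
  /* The queue entries form a NULL-terminated linked list consumed by
   * exec_blas.  Only entry 0 reuses the caller's sa/sb workspace; entries
   * left with NULL sa/sb are expected to receive per-thread buffers on the
   * worker side (an assumption about the scheduler, not shown here). */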

  /* Iterate through steps of n */
  if (!range_n) {
    n_from = 0;
    n_to   = args -> n;
  } else {
    n_from = range_n[0];
    n_to   = range_n[1];
  }

  for (js = n_from; js < n_to; js += GEMM_R * nthreads) {
    n = n_to - js;
    if (n > GEMM_R * nthreads) n = GEMM_R * nthreads;

    /* Partition (a step of) n into nthreads regions */
    range_N[0] = js;
    num_parts = 0;
    while (n > 0) {
      width = blas_quickdivide(n + nthreads - num_parts - 1, nthreads - num_parts);
      if (width < SWITCH_RATIO) {
        width = SWITCH_RATIO;
      }
      n -= width;
      if (n < 0) width = width + n;
      range_N[num_parts + 1] = range_N[num_parts] + width;
      num_parts++;
    }
    for (j = num_parts; j < MAX_CPU_NUMBER; j++) {
      range_N[j + 1] = range_N[num_parts];
    }

    /* Clear synchronization flags */
    for (i = 0; i < MAX_CPU_NUMBER; i++) {
      for (j = 0; j < MAX_CPU_NUMBER; j++) {
        for (k = 0; k < DIVIDE_RATE; k++) {
          job[i].working[j][CACHE_LINE_SIZE * k] = 0;
        }
      }
    }

    /* Execute parallel computation */
    exec_blas(nthreads, queue);
  }

#ifdef USE_ALLOC_HEAP
  free(job);
#endif

  return 0;
}

int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos) {

  BLASLONG m = args -> m;
  BLASLONG n = args -> n;
  BLASLONG nthreads_m, nthreads_n;

  /* Get dimensions from index ranges if available */
  if (range_m) {
    m = range_m[1] - range_m[0];
  }
  if (range_n) {
    n = range_n[1] - range_n[0];
  }

  /* Partitions in m should have at least SWITCH_RATIO rows */
  if (m < 2 * SWITCH_RATIO) {
    nthreads_m = 1;
  } else {
    nthreads_m = args -> nthreads;
    while (m < nthreads_m * SWITCH_RATIO) {
      nthreads_m = nthreads_m / 2;
    }
  }
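
  /* e.g. with args->nthreads = 8, SWITCH_RATIO = 2, m = 10: since
   * 10 < 8 * 2, nthreads_m halves to 4, so every m-partition keeps at
   * least SWITCH_RATIO rows. */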

  /* Partitions in n should have at most SWITCH_RATIO * nthreads_m columns */
  if (n < SWITCH_RATIO * nthreads_m) {
    nthreads_n = 1;
  } else {
    nthreads_n = (n + SWITCH_RATIO * nthreads_m - 1) / (SWITCH_RATIO * nthreads_m);
    if (nthreads_m * nthreads_n > args -> nthreads) {
      nthreads_n = blas_quickdivide(args -> nthreads, nthreads_m);
    }
  }

  /* Execute serial or parallel computation */
  if (nthreads_m * nthreads_n <= 1) {
    GEMM_LOCAL(args, range_m, range_n, sa, sb, 0);
  } else {
    args -> nthreads = nthreads_m * nthreads_n;
    gemm_driver(args, range_m, range_n, sa, sb, nthreads_m, nthreads_n);
  }

  return 0;
}