You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; can include dashes ('-'); and can be up to 35 characters long.

level3_syrk_threaded.c 18 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710
  1. /*********************************************************************/
  2. /* Copyright 2009, 2010 The University of Texas at Austin. */
  3. /* Copyright 2023 The OpenBLAS Project. */
  4. /* All rights reserved. */
  5. /* */
  6. /* Redistribution and use in source and binary forms, with or */
  7. /* without modification, are permitted provided that the following */
  8. /* conditions are met: */
  9. /* */
  10. /* 1. Redistributions of source code must retain the above */
  11. /* copyright notice, this list of conditions and the following */
  12. /* disclaimer. */
  13. /* */
  14. /* 2. Redistributions in binary form must reproduce the above */
  15. /* copyright notice, this list of conditions and the following */
  16. /* disclaimer in the documentation and/or other materials */
  17. /* provided with the distribution. */
  18. /* */
  19. /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
  20. /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
  21. /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
  22. /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
  23. /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
  24. /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
  25. /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
  26. /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
  27. /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
  28. /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
  29. /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
  30. /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
  31. /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
  32. /* POSSIBILITY OF SUCH DAMAGE. */
  33. /* */
  34. /* The views and conclusions contained in the software and */
  35. /* documentation are those of the authors and should not be */
  36. /* interpreted as representing official policies, either expressed */
  37. /* or implied, of The University of Texas at Austin. */
  38. /*********************************************************************/
  39. #ifndef CACHE_LINE_SIZE
  40. #define CACHE_LINE_SIZE 8
  41. #endif
  42. #ifndef DIVIDE_RATE
  43. #define DIVIDE_RATE 2
  44. #endif
  45. //The array of job_t may overflow the stack.
  46. //Instead, use malloc to alloc job_t.
  47. #if MAX_CPU_NUMBER > BLAS3_MEM_ALLOC_THRESHOLD
  48. #define USE_ALLOC_HEAP
  49. #endif
  50. #ifndef SYRK_LOCAL
  51. #if !defined(LOWER) && !defined(TRANS)
  52. #define SYRK_LOCAL SYRK_UN
  53. #elif !defined(LOWER) && defined(TRANS)
  54. #define SYRK_LOCAL SYRK_UT
  55. #elif defined(LOWER) && !defined(TRANS)
  56. #define SYRK_LOCAL SYRK_LN
  57. #else
  58. #define SYRK_LOCAL SYRK_LT
  59. #endif
  60. #endif
  61. typedef struct {
  62. #ifdef HAVE_C11
  63. _Atomic
  64. #else
  65. volatile
  66. #endif
  67. BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE];
  68. } job_t;
  69. #ifndef KERNEL_OPERATION
  70. #ifndef COMPLEX
  71. #define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  72. KERNEL_FUNC(M, N, K, ALPHA[0], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC, (X) - (Y))
  73. #else
  74. #define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  75. KERNEL_FUNC(M, N, K, ALPHA[0], ALPHA[1], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC, (X) - (Y))
  76. #endif
  77. #endif
  78. #ifndef ICOPY_OPERATION
  79. #ifndef TRANS
  80. #define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER);
  81. #else
  82. #define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER);
  83. #endif
  84. #endif
  85. #ifndef OCOPY_OPERATION
  86. #ifdef TRANS
  87. #define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER);
  88. #else
  89. #define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER);
  90. #endif
  91. #endif
  92. #ifndef A
  93. #define A args -> a
  94. #endif
  95. #ifndef LDA
  96. #define LDA args -> lda
  97. #endif
  98. #ifndef C
  99. #define C args -> c
  100. #endif
  101. #ifndef LDC
  102. #define LDC args -> ldc
  103. #endif
  104. #ifndef M
  105. #define M args -> m
  106. #endif
  107. #ifndef N
  108. #define N args -> n
  109. #endif
  110. #ifndef K
  111. #define K args -> k
  112. #endif
  113. #undef TIMING
  114. #ifdef TIMING
  115. #define START_RPCC() rpcc_counter = rpcc()
  116. #define STOP_RPCC(COUNTER) COUNTER += rpcc() - rpcc_counter
  117. #else
  118. #define START_RPCC()
  119. #define STOP_RPCC(COUNTER)
  120. #endif
  121. static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos){
  122. FLOAT *buffer[DIVIDE_RATE];
  123. BLASLONG k, lda, ldc;
  124. BLASLONG m_from, m_to, n_from, n_to;
  125. FLOAT *alpha, *beta;
  126. FLOAT *a, *c;
  127. job_t *job = (job_t *)args -> common;
  128. BLASLONG xxx, bufferside;
  129. BLASLONG ls, min_l, jjs, min_jj;
  130. BLASLONG is, min_i, div_n;
  131. BLASLONG i, current;
  132. #ifdef LOWER
  133. BLASLONG start_i;
  134. #endif
  135. #ifdef TIMING
  136. BLASLONG rpcc_counter;
  137. BLASLONG copy_A = 0;
  138. BLASLONG copy_B = 0;
  139. BLASLONG kernel = 0;
  140. BLASLONG waiting1 = 0;
  141. BLASLONG waiting2 = 0;
  142. BLASLONG waiting3 = 0;
  143. BLASLONG waiting6[MAX_CPU_NUMBER];
  144. BLASLONG ops = 0;
  145. for (i = 0; i < args -> nthreads; i++) waiting6[i] = 0;
  146. #endif
  147. k = K;
  148. a = (FLOAT *)A;
  149. c = (FLOAT *)C;
  150. lda = LDA;
  151. ldc = LDC;
  152. alpha = (FLOAT *)args -> alpha;
  153. beta = (FLOAT *)args -> beta;
  154. m_from = 0;
  155. m_to = N;
  156. /* Global Range */
  157. n_from = 0;
  158. n_to = N;
  159. if (range_n) {
  160. m_from = range_n[mypos + 0];
  161. m_to = range_n[mypos + 1];
  162. n_from = range_n[0];
  163. n_to = range_n[args -> nthreads];
  164. }
  165. if (beta) {
  166. #if !defined(COMPLEX) || defined(HERK)
  167. if (beta[0] != ONE)
  168. #else
  169. if ((beta[0] != ONE) || (beta[1] != ZERO))
  170. #endif
  171. syrk_beta(m_from, m_to, n_from, n_to, beta, c, ldc);
  172. }
  173. if ((k == 0) || (alpha == NULL)) return 0;
  174. if (alpha[0] == ZERO
  175. #if defined(COMPLEX) && !defined(HERK)
  176. && alpha[1] == ZERO
  177. #endif
  178. ) return 0;
  179. #if 0
  180. fprintf(stderr, "Thread[%ld] m_from : %ld m_to : %ld n_from : %ld n_to : %ld\n", mypos, m_from, m_to, n_from, n_to);
  181. #endif
  182. div_n = (((m_to - m_from + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;
  183. buffer[0] = sb;
  184. for (i = 1; i < DIVIDE_RATE; i++) {
  185. buffer[i] = buffer[i - 1] + GEMM_Q * div_n * COMPSIZE;
  186. }
  187. for(ls = 0; ls < k; ls += min_l){
  188. min_l = k - ls;
  189. if (min_l >= GEMM_Q * 2) {
  190. min_l = GEMM_Q;
  191. } else {
  192. if (min_l > GEMM_Q) min_l = (min_l + 1) / 2;
  193. }
  194. min_i = m_to - m_from;
  195. if (min_i >= GEMM_P * 2) {
  196. min_i = GEMM_P;
  197. } else {
  198. if (min_i > GEMM_P) {
  199. min_i = ((min_i / 2 + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;
  200. }
  201. }
  202. #ifdef LOWER
  203. xxx = (m_to - m_from - min_i) % GEMM_P;
  204. if (xxx) min_i -= GEMM_P - xxx;
  205. #endif
  206. START_RPCC();
  207. #ifndef LOWER
  208. ICOPY_OPERATION(min_l, min_i, a, lda, ls, m_from, sa);
  209. #else
  210. ICOPY_OPERATION(min_l, min_i, a, lda, ls, m_to - min_i, sa);
  211. #endif
  212. STOP_RPCC(copy_A);
  213. div_n = (((m_to - m_from + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;
  214. for (xxx = m_from, bufferside = 0; xxx < m_to; xxx += div_n, bufferside ++) {
  215. START_RPCC();
  216. /* Make sure if no one is using buffer */
  217. #ifndef LOWER
  218. for (i = 0; i < mypos; i++)
  219. #else
  220. for (i = mypos + 1; i < args -> nthreads; i++)
  221. #endif
  222. while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;};
  223. STOP_RPCC(waiting1);
  224. #ifndef LOWER
  225. for(jjs = xxx; jjs < MIN(m_to, xxx + div_n); jjs += min_jj){
  226. min_jj = MIN(m_to, xxx + div_n) - jjs;
  227. if (xxx == m_from) {
  228. if (min_jj > min_i) min_jj = min_i;
  229. } else {
  230. if (min_jj > GEMM_UNROLL_MN) min_jj = GEMM_UNROLL_MN;
  231. }
  232. START_RPCC();
  233. OCOPY_OPERATION(min_l, min_jj, a, lda, ls, jjs,
  234. buffer[bufferside] + min_l * (jjs - xxx) * COMPSIZE);
  235. STOP_RPCC(copy_B);
  236. START_RPCC();
  237. KERNEL_OPERATION(min_i, min_jj, min_l, alpha,
  238. sa, buffer[bufferside] + min_l * (jjs - xxx) * COMPSIZE,
  239. c, ldc, m_from, jjs);
  240. STOP_RPCC(kernel);
  241. #ifdef TIMING
  242. ops += 2 * min_i * min_jj * min_l;
  243. #endif
  244. }
  245. #else
  246. for(jjs = xxx; jjs < MIN(m_to, xxx + div_n); jjs += min_jj){
  247. min_jj = MIN(m_to, xxx + div_n) - jjs;
  248. if (min_jj > GEMM_UNROLL_MN) min_jj = GEMM_UNROLL_MN;
  249. START_RPCC();
  250. OCOPY_OPERATION(min_l, min_jj, a, lda, ls, jjs,
  251. buffer[bufferside] + min_l * (jjs - xxx) * COMPSIZE);
  252. STOP_RPCC(copy_B);
  253. START_RPCC();
  254. KERNEL_OPERATION(min_i, min_jj, min_l, alpha,
  255. sa, buffer[bufferside] + min_l * (jjs - xxx) * COMPSIZE,
  256. c, ldc, m_to - min_i, jjs);
  257. STOP_RPCC(kernel);
  258. #ifdef TIMING
  259. ops += 2 * min_i * min_jj * min_l;
  260. #endif
  261. }
  262. #endif
  263. #ifndef LOWER
  264. for (i = 0; i <= mypos; i++)
  265. #else
  266. for (i = mypos; i < args -> nthreads; i++)
  267. #endif
  268. job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside];
  269. WMB;
  270. }
  271. #ifndef LOWER
  272. current = mypos + 1;
  273. while (current < args -> nthreads) {
  274. #else
  275. current = mypos - 1;
  276. while (current >= 0) {
  277. #endif
  278. div_n = (((range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;
  279. for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) {
  280. START_RPCC();
  281. /* thread has to wait */
  282. while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;};
  283. STOP_RPCC(waiting2);
  284. START_RPCC();
  285. #ifndef LOWER
  286. KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), min_l, alpha,
  287. sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside],
  288. c, ldc,
  289. m_from,
  290. xxx);
  291. #else
  292. KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), min_l, alpha,
  293. sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside],
  294. c, ldc,
  295. m_to - min_i,
  296. xxx);
  297. #endif
  298. STOP_RPCC(kernel);
  299. #ifdef TIMING
  300. ops += 2 * min_i * MIN(range_n[current + 1] - xxx, div_n) * min_l;
  301. #endif
  302. if (m_to - m_from == min_i) {
  303. job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0;
  304. }
  305. }
  306. #ifndef LOWER
  307. current ++;
  308. #else
  309. current --;
  310. #endif
  311. }
  312. #ifndef LOWER
  313. for(is = m_from + min_i; is < m_to; is += min_i){
  314. min_i = m_to - is;
  315. #else
  316. start_i = min_i;
  317. for(is = m_from; is < m_to - start_i; is += min_i){
  318. min_i = m_to - start_i - is;
  319. #endif
  320. if (min_i >= GEMM_P * 2) {
  321. min_i = GEMM_P;
  322. } else
  323. if (min_i > GEMM_P) {
  324. min_i = (((min_i + 1) / 2 + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;
  325. }
  326. START_RPCC();
  327. ICOPY_OPERATION(min_l, min_i, a, lda, ls, is, sa);
  328. STOP_RPCC(copy_A);
  329. current = mypos;
  330. do {
  331. div_n = (((range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;
  332. for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) {
  333. START_RPCC();
  334. KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), min_l, alpha,
  335. sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside],
  336. c, ldc, is, xxx);
  337. STOP_RPCC(kernel);
  338. #ifdef TIMING
  339. ops += 2 * min_i * MIN(range_n[current + 1] - xxx, div_n) * min_l;
  340. #endif
  341. #ifndef LOWER
  342. if (is + min_i >= m_to) {
  343. #else
  344. if (is + min_i >= m_to - start_i) {
  345. #endif
  346. /* Thread doesn't need this buffer any more */
  347. job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0;
  348. WMB;
  349. }
  350. }
  351. #ifndef LOWER
  352. current ++;
  353. } while (current != args -> nthreads);
  354. #else
  355. current --;
  356. } while (current >= 0);
  357. #endif
  358. }
  359. }
  360. START_RPCC();
  361. for (i = 0; i < args -> nthreads; i++) {
  362. if (i != mypos) {
  363. for (xxx = 0; xxx < DIVIDE_RATE; xxx++) {
  364. while (job[mypos].working[i][CACHE_LINE_SIZE * xxx] ) {YIELDING;};
  365. }
  366. }
  367. }
  368. STOP_RPCC(waiting3);
  369. #ifdef TIMING
  370. BLASLONG waiting = waiting1 + waiting2 + waiting3;
  371. BLASLONG total = copy_A + copy_B + kernel + waiting;
  372. fprintf(stderr, "GEMM [%2ld] Copy_A : %6.2f Copy_B : %6.2f Wait1 : %6.2f Wait2 : %6.2f Wait3 : %6.2f Kernel : %6.2f",
  373. mypos, (double)copy_A /(double)total * 100., (double)copy_B /(double)total * 100.,
  374. (double)waiting1 /(double)total * 100.,
  375. (double)waiting2 /(double)total * 100.,
  376. (double)waiting3 /(double)total * 100.,
  377. (double)ops/(double)kernel / 4. * 100.);
  378. #if 0
  379. fprintf(stderr, "GEMM [%2ld] Copy_A : %6.2ld Copy_B : %6.2ld Wait : %6.2ld\n",
  380. mypos, copy_A, copy_B, waiting);
  381. fprintf(stderr, "Waiting[%2ld] %6.2f %6.2f %6.2f\n",
  382. mypos,
  383. (double)waiting1/(double)waiting * 100.,
  384. (double)waiting2/(double)waiting * 100.,
  385. (double)waiting3/(double)waiting * 100.);
  386. #endif
  387. fprintf(stderr, "\n");
  388. #endif
  389. return 0;
  390. }
  391. int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos){
  392. blas_arg_t newarg;
  393. #ifndef USE_ALLOC_HEAP
  394. job_t job[MAX_CPU_NUMBER];
  395. #else
  396. job_t * job = NULL;
  397. #endif
  398. blas_queue_t queue[MAX_CPU_NUMBER];
  399. BLASLONG range[MAX_CPU_NUMBER + 100];
  400. BLASLONG num_cpu;
  401. BLASLONG nthreads = args -> nthreads;
  402. BLASLONG width, i, j, k;
  403. BLASLONG n, n_from, n_to;
  404. int mode, mask;
  405. double dnum, di, dinum;
  406. #if defined(DYNAMIC_ARCH)
  407. int switch_ratio = gotoblas->switch_ratio;
  408. #else
  409. int switch_ratio = SWITCH_RATIO;
  410. #endif
  411. if ((nthreads == 1) || (args->n < nthreads * switch_ratio)) {
  412. SYRK_LOCAL(args, range_m, range_n, sa, sb, 0);
  413. return 0;
  414. }
  415. #ifndef COMPLEX
  416. #ifdef XDOUBLE
  417. mode = BLAS_XDOUBLE | BLAS_REAL;
  418. mask = MAX(QGEMM_UNROLL_M, QGEMM_UNROLL_N) - 1;
  419. #elif defined(DOUBLE)
  420. mode = BLAS_DOUBLE | BLAS_REAL;
  421. mask = DGEMM_UNROLL_MN - 1;
  422. #else
  423. mode = BLAS_SINGLE | BLAS_REAL;
  424. mask = SGEMM_UNROLL_MN - 1;
  425. #endif
  426. #else
  427. #ifdef XDOUBLE
  428. mode = BLAS_XDOUBLE | BLAS_COMPLEX;
  429. mask = MAX(XGEMM_UNROLL_M, XGEMM_UNROLL_N) - 1;
  430. #elif defined(DOUBLE)
  431. mode = BLAS_DOUBLE | BLAS_COMPLEX;
  432. mask = ZGEMM_UNROLL_MN - 1;
  433. #else
  434. mode = BLAS_SINGLE | BLAS_COMPLEX;
  435. mask = CGEMM_UNROLL_MN - 1;
  436. #endif
  437. #endif
  438. newarg.m = args -> m;
  439. newarg.n = args -> n;
  440. newarg.k = args -> k;
  441. newarg.a = args -> a;
  442. newarg.b = args -> b;
  443. newarg.c = args -> c;
  444. newarg.lda = args -> lda;
  445. newarg.ldb = args -> ldb;
  446. newarg.ldc = args -> ldc;
  447. newarg.alpha = args -> alpha;
  448. newarg.beta = args -> beta;
  449. #ifdef USE_ALLOC_HEAP
  450. job = (job_t*)malloc(MAX_CPU_NUMBER * sizeof(job_t));
  451. if(job==NULL){
  452. fprintf(stderr, "OpenBLAS: malloc failed in %s\n", __func__);
  453. exit(1);
  454. }
  455. #endif
  456. newarg.common = (void *)job;
  457. if (!range_n) {
  458. n_from = 0;
  459. n_to = args -> n;
  460. } else {
  461. n_from = range_n[0];
  462. n_to = range_n[1] - range_n[0];
  463. }
  464. #ifndef LOWER
  465. range[MAX_CPU_NUMBER] = n_to - n_from;
  466. range[0] = 0;
  467. num_cpu = 0;
  468. i = 0;
  469. n = n_to - n_from;
  470. dnum = (double)n * (double)n /(double)nthreads;
  471. while (i < n){
  472. if (nthreads - num_cpu > 1) {
  473. di = (double)i;
  474. dinum = di * di + dnum;
  475. if (dinum > 0)
  476. width = (((BLASLONG)((sqrt(dinum) - di) + mask)/(mask+1)) * (mask+1) );
  477. else
  478. width = (((BLASLONG)(- di + mask)/(mask+1)) * (mask+1) );
  479. if (num_cpu == 0) width = n - (((n - width)/(mask+1)) * (mask+1) );
  480. if ((width > n - i) || (width < mask)) width = n - i;
  481. } else {
  482. width = n - i;
  483. }
  484. range[MAX_CPU_NUMBER - num_cpu - 1] = range[MAX_CPU_NUMBER - num_cpu] - width;
  485. queue[num_cpu].mode = mode;
  486. queue[num_cpu].routine = inner_thread;
  487. queue[num_cpu].args = &newarg;
  488. queue[num_cpu].range_m = range_m;
  489. queue[num_cpu].sa = NULL;
  490. queue[num_cpu].sb = NULL;
  491. queue[num_cpu].next = &queue[num_cpu + 1];
  492. num_cpu ++;
  493. i += width;
  494. }
  495. for (i = 0; i < num_cpu; i ++) queue[i].range_n = &range[MAX_CPU_NUMBER - num_cpu];
  496. #else
  497. range[0] = 0;
  498. num_cpu = 0;
  499. i = 0;
  500. n = n_to - n_from;
  501. dnum = (double)n * (double)n /(double)nthreads;
  502. while (i < n){
  503. if (nthreads - num_cpu > 1) {
  504. di = (double)i;
  505. dinum = di * di +dnum;
  506. if (dinum > 0)
  507. width = (((BLASLONG)((sqrt(di * di + dnum) - di) + mask)/(mask+1)) * (mask+1));
  508. else
  509. width = (((BLASLONG)(- di + mask)/(mask+1)) * (mask+1));
  510. if ((width > n - i) || (width < mask)) width = n - i;
  511. } else {
  512. width = n - i;
  513. }
  514. range[num_cpu + 1] = range[num_cpu] + width;
  515. queue[num_cpu].mode = mode;
  516. queue[num_cpu].routine = inner_thread;
  517. queue[num_cpu].args = &newarg;
  518. queue[num_cpu].range_m = range_m;
  519. queue[num_cpu].range_n = range;
  520. queue[num_cpu].sa = NULL;
  521. queue[num_cpu].sb = NULL;
  522. queue[num_cpu].next = &queue[num_cpu + 1];
  523. num_cpu ++;
  524. i += width;
  525. }
  526. #endif
  527. newarg.nthreads = num_cpu;
  528. if (num_cpu) {
  529. for (j = 0; j < num_cpu; j++) {
  530. for (i = 0; i < num_cpu; i++) {
  531. for (k = 0; k < DIVIDE_RATE; k++) {
  532. job[j].working[i][CACHE_LINE_SIZE * k] = 0;
  533. }
  534. }
  535. }
  536. queue[0].sa = sa;
  537. queue[0].sb = sb;
  538. queue[num_cpu - 1].next = NULL;
  539. exec_blas(num_cpu, queue);
  540. }
  541. #ifdef USE_ALLOC_HEAP
  542. free(job);
  543. #endif
  544. return 0;
  545. }