
potrf_parallel.c

/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin.           */
/* Copyright 2025 The OpenBLAS Project.                              */
/* All rights reserved.                                              */
/*                                                                   */
/* Redistribution and use in source and binary forms, with or        */
/* without modification, are permitted provided that the following   */
/* conditions are met:                                               */
/*                                                                   */
/*   1. Redistributions of source code must retain the above         */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer.                                                  */
/*                                                                   */
/*   2. Redistributions in binary form must reproduce the above      */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer in the documentation and/or other materials       */
/*      provided with the distribution.                              */
/*                                                                   */
/*    THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT        */
/*    AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,        */
/*    INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF       */
/*    MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE       */
/*    DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT       */
/*    AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,     */
/*    INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES       */
/*    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE      */
/*    GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR           */
/*    BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF     */
/*    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      */
/*    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT     */
/*    OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE            */
/*    POSSIBILITY OF SUCH DAMAGE.                                    */
/*                                                                   */
/* The views and conclusions contained in the software and           */
/* documentation are those of the authors and should not be          */
/* interpreted as representing official policies, either expressed   */
/* or implied, of The University of Texas at Austin.                 */
/*********************************************************************/
#include <stdio.h>
#include "common.h"

#ifndef USE_SIMPLE_THREADED_LEVEL3

// The array of job_t may overflow the stack.
// Instead, use malloc to alloc job_t.
#if MAX_CPU_NUMBER > BLAS3_MEM_ALLOC_THRESHOLD
#define USE_ALLOC_HEAP
#endif

static FLOAT dm1 = -1.;
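
/* Kernel selection for the two variants: the upper (U) factorization uses
   the SYRK_*_U update kernel together with a left-side (conjugate-)transposed
   TRSM kernel, while the lower (L) factorization uses the SYRK_*_L kernel
   with a right-side TRSM kernel. */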
#ifndef KERNEL_FUNC
#ifndef LOWER
#define KERNEL_FUNC SYRK_KERNEL_U
#else
#define KERNEL_FUNC SYRK_KERNEL_L
#endif
#endif

#ifndef LOWER
#ifndef COMPLEX
#define TRSM_KERNEL TRSM_KERNEL_LT
#else
#define TRSM_KERNEL TRSM_KERNEL_LC
#endif
#else
#ifndef COMPLEX
#define TRSM_KERNEL TRSM_KERNEL_RN
#else
#define TRSM_KERNEL TRSM_KERNEL_RR
#endif
#endif

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 8
#endif

#ifndef DIVIDE_RATE
#define DIVIDE_RATE 2
#endif

#ifndef LOWER
#define TRANS
#endif

#ifndef SYRK_LOCAL
#if   !defined(LOWER) && !defined(TRANS)
#define SYRK_LOCAL SYRK_UN
#elif !defined(LOWER) &&  defined(TRANS)
#define SYRK_LOCAL SYRK_UT
#elif  defined(LOWER) && !defined(TRANS)
#define SYRK_LOCAL SYRK_LN
#else
#define SYRK_LOCAL SYRK_LT
#endif
#endif
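
/* Per-thread hand-off table.  job[p].working[q][...] holds the address of the
   packed panel that thread p has published for thread q, and is reset to zero
   once q has consumed it.  Slots are spaced CACHE_LINE_SIZE entries apart so
   that the flags of different threads land on separate cache lines. */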
typedef struct {
#ifdef HAVE_C11
  _Atomic
#else
  volatile
#endif
  BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE];
} job_t;
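
/* Relaxed atomic load/store helpers for the working flags; without HAVE_C11
   they fall back to plain volatile accesses. */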
#ifdef HAVE_C11
#define atomic_load_long(p)     __atomic_load_n(p, __ATOMIC_RELAXED)
#define atomic_store_long(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)
#else
#define atomic_load_long(p)     (BLASLONG)(*(volatile BLASLONG*)(p))
#define atomic_store_long(p, v) (*(volatile BLASLONG *)(p)) = (v)
#endif
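
/* Wrappers that translate block coordinates (X, Y) into the packing and
   kernel calls used below: ICOPY/OCOPY pack a panel of A into a work buffer,
   and KERNEL_OPERATION applies the SYRK update kernel to the corresponding
   block of C. */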
#ifndef KERNEL_OPERATION
#ifndef COMPLEX
#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  KERNEL_FUNC(M, N, K, ALPHA[0], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC, (X) - (Y))
#else
#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \
  KERNEL_FUNC(M, N, K, ALPHA[0], ALPHA[1], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC, (X) - (Y))
#endif
#endif

#ifndef ICOPY_OPERATION
#ifndef TRANS
#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER);
#else
#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER);
#endif
#endif

#ifndef OCOPY_OPERATION
#ifdef TRANS
#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER);
#else
#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER);
#endif
#endif
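
/* Aliases for the blas_arg_t fields as used in this file: S is the already
   factored diagonal block, A the panel being solved and updated, C the
   trailing submatrix, N its dimension and K the current block size. */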
#ifndef S
#define S args -> a
#endif
#ifndef A
#define A args -> b
#endif
#ifndef C
#define C args -> c
#endif
#ifndef LDA
#define LDA args -> lda
#endif
#ifndef N
#define N args -> m
#endif
#ifndef K
#define K args -> k
#endif
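
/* Body executed by each worker thread.  Every thread solves the triangular
   systems for its own slice of the panel against the diagonal block packed
   into sb, publishes the packed result through job[].working, and then
   performs its share of the rank-k update of the trailing matrix, consuming
   the packed panels produced by the other threads. */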
static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos){

  FLOAT *buffer[DIVIDE_RATE];

  BLASLONG k, lda;
  BLASLONG m_from, m_to;

  FLOAT *alpha;
  FLOAT *a, *c;
  job_t *job = (job_t *)args -> common;
  BLASLONG xxx, bufferside;

  BLASLONG jjs, min_jj;
  BLASLONG is, min_i, div_n;

  BLASLONG i, current;
  BLASLONG jw;  /* last value read from a job[].working flag */

  k = K;

  a = (FLOAT *)A;
  c = (FLOAT *)C;

  lda = LDA;

  alpha = (FLOAT *)args -> alpha;

  m_from = range_n[mypos + 0];
  m_to   = range_n[mypos + 1];

#if 0
  fprintf(stderr, "Thread[%ld] m_from : %ld m_to : %ld\n", mypos, m_from, m_to);
#endif

  div_n = (((m_to - m_from + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;

  buffer[0] = (FLOAT *)((((BLASULONG)(sb + k * k * COMPSIZE) + GEMM_ALIGN) & ~GEMM_ALIGN) + GEMM_OFFSET_B);
  for (i = 1; i < DIVIDE_RATE; i++) {
    buffer[i] = buffer[i - 1] + GEMM_Q * div_n * COMPSIZE;
  }
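
  /* Pack the factored diagonal block into sb, then solve the triangular
     systems for this thread's slice of the panel, packing each stripe into
     its per-bufferside work buffer and publishing it to the other threads. */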
#ifndef LOWER
  TRSM_IUNCOPY(k, k, (FLOAT *)S, lda, 0, sb);
#else
  TRSM_OLTCOPY(k, k, (FLOAT *)S, lda, 0, sb);
#endif

  for (xxx = m_from, bufferside = 0; xxx < m_to; xxx += div_n, bufferside ++) {

    for (jjs = xxx; jjs < MIN(m_to, xxx + div_n); jjs += min_jj) {

      min_jj = MIN(m_to, xxx + div_n) - jjs;

#ifndef LOWER
      if (min_jj > GEMM_UNROLL_MN) min_jj = GEMM_UNROLL_MN;
#else
      if (min_jj > GEMM_P) min_jj = GEMM_P;
#endif

#ifndef LOWER
      OCOPY_OPERATION (k, min_jj, a, lda, 0, jjs, buffer[bufferside] + k * (jjs - xxx) * COMPSIZE);

      TRSM_KERNEL (k, min_jj, k, dm1,
#ifdef COMPLEX
                   ZERO,
#endif
                   sb,
                   buffer[bufferside] + k * (jjs - xxx) * COMPSIZE,
                   a + jjs * lda * COMPSIZE, lda, 0);
#else
      ICOPY_OPERATION (k, min_jj, a, lda, 0, jjs, buffer[bufferside] + k * (jjs - xxx) * COMPSIZE);

      TRSM_KERNEL (min_jj, k, k, dm1,
#ifdef COMPLEX
                   ZERO,
#endif
                   buffer[bufferside] + k * (jjs - xxx) * COMPSIZE,
                   sb,
                   a + jjs * COMPSIZE, lda, 0);
#endif
    }

#ifndef LOWER
    MB;
    for (i = 0; i <= mypos; i++)
      atomic_store_long(&job[mypos].working[i][CACHE_LINE_SIZE * bufferside], (BLASLONG)buffer[bufferside]);
    // job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside];
#else
    MB;
    for (i = mypos; i < args -> nthreads; i++)
      atomic_store_long(&job[mypos].working[i][CACHE_LINE_SIZE * bufferside], (BLASLONG)buffer[bufferside]);
    // job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside];
#endif

    // WMB;
  }
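
  /* First pass of the rank-k update: process the leading stripe of this
     thread's rows, spinning on job[].working until the owning thread has
     published each packed panel before applying the update kernel. */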
  min_i = m_to - m_from;

  if (min_i >= GEMM_P * 2) {
    min_i = GEMM_P;
  } else
    if (min_i > GEMM_P) {
      min_i = (((min_i + 1) / 2 + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;
    }

#ifndef LOWER
  ICOPY_OPERATION(k, min_i, a, lda, 0, m_from, sa);
#else
  OCOPY_OPERATION(k, min_i, a, lda, 0, m_from, sa);
#endif

  current = mypos;

#ifndef LOWER
  while (current < args -> nthreads)
#else
  while (current >= 0)
#endif
  {
    div_n = (((range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;

    for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) {

      /* thread has to wait */
      if (current != mypos)
        do {
          jw = atomic_load_long(&job[current].working[mypos][CACHE_LINE_SIZE * bufferside]);
        } while (jw == 0);
      MB;
      //while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;};

      KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), k, alpha,
                       sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside],
                       c, lda, m_from, xxx);

      if (m_from + min_i >= m_to) {
        atomic_store_long(&job[current].working[mypos][CACHE_LINE_SIZE * bufferside], job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0);
        // job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0;
        WMB;
      }
    }

#ifndef LOWER
    current ++;
#else
    current --;
#endif
  }

  for (is = m_from + min_i; is < m_to; is += min_i) {

    min_i = m_to - is;

    if (min_i >= GEMM_P * 2) {
      min_i = GEMM_P;
    } else
      if (min_i > GEMM_P) {
        min_i = (((min_i + 1) / 2 + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;
      }

#ifndef LOWER
    ICOPY_OPERATION(k, min_i, a, lda, 0, is, sa);
#else
    OCOPY_OPERATION(k, min_i, a, lda, 0, is, sa);
#endif

    current = mypos;

#ifndef LOWER
    while (current < args -> nthreads)
#else
    while (current >= 0)
#endif
    {
      div_n = (((range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN;

      for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) {

        KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), k, alpha,
                         sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside],
                         c, lda, is, xxx);

        if (is + min_i >= m_to) {
          atomic_store_long(&job[current].working[mypos][CACHE_LINE_SIZE * bufferside], job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0);
          // job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0;
          WMB;
        }
      }

#ifndef LOWER
      current ++;
#else
      current --;
#endif
    }
  }
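
  /* Wait until every other thread has consumed (zeroed) the panels this
     thread published, so its work buffers can safely be reused. */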
  for (i = 0; i < args -> nthreads; i++) {
    if (i != mypos) {
      for (xxx = 0; xxx < DIVIDE_RATE; xxx++)
#if 1
      {
        do {
          jw = atomic_load_long(&job[mypos].working[i][CACHE_LINE_SIZE * xxx]);
        } while (jw);
        MB;
      }
#else
      while (job[mypos].working[i][CACHE_LINE_SIZE * xxx] ) {YIELDING;};
#endif
      // }
    }
  }

  return 0;
}
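
/* Splits the rows of the trailing update among the available threads and
   runs inner_thread on each of them through exec_blas.  The job table used
   for the cross-thread hand-off lives on the stack unless MAX_CPU_NUMBER is
   large enough that USE_ALLOC_HEAP forces a heap allocation. */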
static int thread_driver(blas_arg_t *args, FLOAT *sa, FLOAT *sb){

  blas_arg_t newarg;

#ifndef USE_ALLOC_HEAP
  job_t job[MAX_CPU_NUMBER];
#else
  job_t *job = NULL;
#endif

  blas_queue_t queue[MAX_CPU_NUMBER];

  BLASLONG range[MAX_CPU_NUMBER + 100];

  BLASLONG num_cpu;
  BLASLONG nthreads = args -> nthreads;

  BLASLONG width, i, j, k;
  BLASLONG n, n_from, n_to;
  int mode, mask;
  double dnum;

#ifndef COMPLEX
#ifdef XDOUBLE
  mode = BLAS_XDOUBLE | BLAS_REAL;
  mask = MAX(QGEMM_UNROLL_M, QGEMM_UNROLL_N) - 1;
#elif defined(DOUBLE)
  mode = BLAS_DOUBLE | BLAS_REAL;
  mask = MAX(DGEMM_UNROLL_M, DGEMM_UNROLL_N) - 1;
#elif defined(BFLOAT16)
  mode = BLAS_BFLOAT16 | BLAS_REAL;
  mask = MAX(SBGEMM_UNROLL_M, SBGEMM_UNROLL_N) - 1;
#else
  mode = BLAS_SINGLE | BLAS_REAL;
  mask = MAX(SGEMM_UNROLL_M, SGEMM_UNROLL_N) - 1;
#endif
#else
#ifdef XDOUBLE
  mode = BLAS_XDOUBLE | BLAS_COMPLEX;
  mask = MAX(XGEMM_UNROLL_M, XGEMM_UNROLL_N) - 1;
#elif defined(DOUBLE)
  mode = BLAS_DOUBLE | BLAS_COMPLEX;
  mask = MAX(ZGEMM_UNROLL_M, ZGEMM_UNROLL_N) - 1;
#else
  mode = BLAS_SINGLE | BLAS_COMPLEX;
  mask = MAX(CGEMM_UNROLL_M, CGEMM_UNROLL_N) - 1;
#endif
#endif

  newarg.m     = args -> m;
  newarg.k     = args -> k;
  newarg.a     = args -> a;
  newarg.b     = args -> b;
  newarg.c     = args -> c;
  newarg.lda   = args -> lda;
  newarg.alpha = args -> alpha;

#ifdef USE_ALLOC_HEAP
  job = (job_t*)malloc(MAX_CPU_NUMBER * sizeof(job_t));
  if (job == NULL) {
    fprintf(stderr, "OpenBLAS: malloc failed in %s\n", __func__);
    exit(1);
  }
#endif

  newarg.common = (void *)job;

  n_from = 0;
  n_to   = args -> m;
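
  /* Partition the n rows among the threads so that each slice of the
     triangular update carries roughly the same amount of work: the widths
     follow sqrt(i^2 + n^2/nthreads) - i, rounded to the kernel unroll
     granularity (mask + 1). */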
#ifndef LOWER

  range[MAX_CPU_NUMBER] = n_to - n_from;
  range[0] = 0;
  num_cpu = 0;
  i = 0;
  n = n_to - n_from;

  dnum = (double)n * (double)n / (double)nthreads;

  while (i < n){

    if (nthreads - num_cpu > 1) {

      double di = (double)i;

      width = ((((BLASLONG)(sqrt(di * di + dnum) - di) + mask)/(mask+1)) * (mask+1));

      if (num_cpu == 0) width = n - (((n - width)/(mask+1)) * (mask+1));

      if ((width > n - i) || (width < mask)) width = n - i;

    } else {
      width = n - i;
    }

    range[MAX_CPU_NUMBER - num_cpu - 1] = range[MAX_CPU_NUMBER - num_cpu] - width;

    queue[num_cpu].mode    = mode;
    queue[num_cpu].routine = inner_thread;
    queue[num_cpu].args    = &newarg;
    queue[num_cpu].range_m = NULL;
    queue[num_cpu].sa      = NULL;
    queue[num_cpu].sb      = NULL;
    queue[num_cpu].next    = &queue[num_cpu + 1];

    num_cpu ++;
    i += width;
  }

  for (i = 0; i < num_cpu; i ++) queue[i].range_n = &range[MAX_CPU_NUMBER - num_cpu];

#else

  range[0] = 0;
  num_cpu = 0;
  i = 0;
  n = n_to - n_from;

  dnum = (double)n * (double)n / (double)nthreads;

  while (i < n){

    if (nthreads - num_cpu > 1) {

      double di = (double)i;

      width = ((((BLASLONG)(sqrt(di * di + dnum) - di) + mask)/(mask+1)) * (mask+1));

      if ((width > n - i) || (width < mask)) width = n - i;

    } else {
      width = n - i;
    }

    range[num_cpu + 1] = range[num_cpu] + width;

    queue[num_cpu].mode    = mode;
    queue[num_cpu].routine = inner_thread;
    queue[num_cpu].args    = &newarg;
    queue[num_cpu].range_m = NULL;
    queue[num_cpu].range_n = range;
    queue[num_cpu].sa      = NULL;
    queue[num_cpu].sb      = NULL;
    queue[num_cpu].next    = &queue[num_cpu + 1];

    num_cpu ++;
    i += width;
  }

#endif

  newarg.nthreads = num_cpu;

  if (num_cpu) {

    for (j = 0; j < num_cpu; j++) {
      for (i = 0; i < num_cpu; i++) {
        for (k = 0; k < DIVIDE_RATE; k++) {
          job[j].working[i][CACHE_LINE_SIZE * k] = 0;
        }
      }
    }

    queue[0].sa = sa;
    queue[0].sb = sb;
    queue[num_cpu - 1].next = NULL;

    exec_blas(num_cpu, queue);
  }

#ifdef USE_ALLOC_HEAP
  free(job);
#endif

  return 0;
}

#endif
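
/* Driver for the threaded Cholesky factorization.  Small or single-threaded
   problems fall through to the serial POTRF kernels; otherwise the matrix is
   factored in blocks of size `blocking`, with the panel TRSM and the trailing
   SYRK/HERK update parallelized after each diagonal block. */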
blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG myid) {

  BLASLONG n, bk, i, blocking, lda;
  BLASLONG info;
  int mode;
  blas_arg_t newarg;
  FLOAT *a;
  FLOAT alpha[2] = { -ONE, ZERO};

#ifndef COMPLEX
#ifdef XDOUBLE
  mode = BLAS_XDOUBLE | BLAS_REAL;
#elif defined(DOUBLE)
  mode = BLAS_DOUBLE | BLAS_REAL;
#else
  mode = BLAS_SINGLE | BLAS_REAL;
#endif
#else
#ifdef XDOUBLE
  mode = BLAS_XDOUBLE | BLAS_COMPLEX;
#elif defined(DOUBLE)
  mode = BLAS_DOUBLE | BLAS_COMPLEX;
#else
  mode = BLAS_SINGLE | BLAS_COMPLEX;
#endif
#endif

  if (args -> nthreads == 1) {
#ifndef LOWER
    info = POTRF_U_SINGLE(args, NULL, NULL, sa, sb, 0);
#else
    info = POTRF_L_SINGLE(args, NULL, NULL, sa, sb, 0);
#endif
    return info;
  }

  n = args -> n;
  a = (FLOAT *)args -> a;
  lda = args -> lda;

  if (range_n) n = range_n[1] - range_n[0];

  if (n <= GEMM_UNROLL_N * 2) {
#ifndef LOWER
    info = POTRF_U_SINGLE(args, NULL, range_n, sa, sb, 0);
#else
    info = POTRF_L_SINGLE(args, NULL, range_n, sa, sb, 0);
#endif
    return info;
  }

  newarg.lda = lda;
  newarg.ldb = lda;
  newarg.ldc = lda;
  newarg.alpha = alpha;
  newarg.beta = NULL;
  newarg.nthreads = args -> nthreads;

  blocking = ((n / 2 + GEMM_UNROLL_N - 1)/GEMM_UNROLL_N) * GEMM_UNROLL_N;
  if (blocking > GEMM_Q) blocking = GEMM_Q;
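
  /* Right-looking blocked factorization: factor the bk x bk diagonal block
     (recursive call to CNAME), then solve the panel against it and update
     the remaining trailing submatrix. */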
  for (i = 0; i < n; i += blocking) {

    bk = n - i;
    if (bk > blocking) bk = blocking;

    newarg.m = bk;
    newarg.n = bk;
    newarg.a = a + (i + i * lda) * COMPSIZE;

    info = CNAME(&newarg, NULL, NULL, sa, sb, 0);
    if (info) return info + i;

    if (n - i - bk > 0) {
#ifndef USE_SIMPLE_THREADED_LEVEL3
      newarg.m = n - i - bk;
      newarg.k = bk;
#ifndef LOWER
      newarg.b = a + ( i       + (i + bk) * lda) * COMPSIZE;
#else
      newarg.b = a + ((i + bk) +  i       * lda) * COMPSIZE;
#endif
      newarg.c = a + ((i + bk) + (i + bk) * lda) * COMPSIZE;

      thread_driver(&newarg, sa, sb);
#else
#ifndef LOWER
      newarg.m = bk;
      newarg.n = n - i - bk;
      newarg.a = a + (i + i * lda) * COMPSIZE;
      newarg.b = a + (i + (i + bk) * lda) * COMPSIZE;

      gemm_thread_n(mode | BLAS_TRANSA_T,
                    &newarg, NULL, NULL, (void *)TRSM_LCUN, sa, sb, args -> nthreads);

      newarg.n = n - i - bk;
      newarg.k = bk;
      newarg.a = a + ( i + (i + bk) * lda) * COMPSIZE;
      newarg.c = a + ((i + bk) + (i + bk) * lda) * COMPSIZE;

#if 0
      HERK_THREAD_UC(&newarg, NULL, NULL, sa, sb, 0);
#else
      syrk_thread(mode | BLAS_TRANSA_N | BLAS_TRANSB_T,
                  &newarg, NULL, NULL, (void *)HERK_UC, sa, sb, args -> nthreads);
#endif
#else
      newarg.m = n - i - bk;
      newarg.n = bk;
      newarg.a = a + (i + i * lda) * COMPSIZE;
      newarg.b = a + (i + bk + i * lda) * COMPSIZE;

      gemm_thread_m(mode | BLAS_RSIDE | BLAS_TRANSA_T | BLAS_UPLO,
                    &newarg, NULL, NULL, (void *)TRSM_RCLN, sa, sb, args -> nthreads);

      newarg.n = n - i - bk;
      newarg.k = bk;
      newarg.a = a + (i + bk + i * lda) * COMPSIZE;
      newarg.c = a + (i + bk + (i + bk) * lda) * COMPSIZE;

#if 0
      HERK_THREAD_LN(&newarg, NULL, NULL, sa, sb, 0);
#else
      syrk_thread(mode | BLAS_TRANSA_N | BLAS_TRANSB_T | BLAS_UPLO,
                  &newarg, NULL, NULL, (void *)HERK_LN, sa, sb, args -> nthreads);
#endif
#endif
#endif
    }
  }

  return 0;
}