You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

init.c 18 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731
  1. /*****************************************************************************
  2. Copyright (c) 2011, Lab of Parallel Software and Computational Science,ICSAS
  3. All rights reserved.
  4. Redistribution and use in source and binary forms, with or without
  5. modification, are permitted provided that the following conditions are
  6. met:
  7. 1. Redistributions of source code must retain the above copyright
  8. notice, this list of conditions and the following disclaimer.
  9. 2. Redistributions in binary form must reproduce the above copyright
  10. notice, this list of conditions and the following disclaimer in
  11. the documentation and/or other materials provided with the
  12. distribution.
  13. 3. Neither the name of the ISCAS nor the names of its contributors may
  14. be used to endorse or promote products derived from this software
  15. without specific prior written permission.
  16. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  17. AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  18. IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  19. ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  20. LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  21. DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  22. SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  23. CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  24. OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  25. USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  26. **********************************************************************************/
  27. /*********************************************************************/
  28. /* Copyright 2009, 2010 The University of Texas at Austin. */
  29. /* All rights reserved. */
  30. /* */
  31. /* Redistribution and use in source and binary forms, with or */
  32. /* without modification, are permitted provided that the following */
  33. /* conditions are met: */
  34. /* */
  35. /* 1. Redistributions of source code must retain the above */
  36. /* copyright notice, this list of conditions and the following */
  37. /* disclaimer. */
  38. /* */
  39. /* 2. Redistributions in binary form must reproduce the above */
  40. /* copyright notice, this list of conditions and the following */
  41. /* disclaimer in the documentation and/or other materials */
  42. /* provided with the distribution. */
  43. /* */
  44. /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
  45. /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
  46. /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
  47. /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
  48. /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
  49. /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
  50. /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
  51. /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
  52. /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
  53. /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
  54. /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
  55. /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
  56. /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
  57. /* POSSIBILITY OF SUCH DAMAGE. */
  58. /* */
  59. /* The views and conclusions contained in the software and */
  60. /* documentation are those of the authors and should not be */
  61. /* interpreted as representing official policies, either expressed */
  62. /* or implied, of The University of Texas at Austin. */
  63. /*********************************************************************/
#include "common.h"

#if defined(OS_LINUX) && defined(SMP)

#define _GNU_SOURCE

#include <ctype.h>
#include <string.h>

#include <sys/sysinfo.h>
#include <sys/syscall.h>
#include <sys/shm.h>
#include <fcntl.h>
#include <sched.h>
#include <dirent.h>
#include <dlfcn.h>
/* Upper bounds for the topology tables kept in shared memory. */
#define MAX_NODES	16
#define MAX_CPUS	256

/* Magic value identifying an initialized shared segment (also used
   as the SysV shared-memory key in open_shmem()). */
#define SH_MAGIC	0x510510

/* sysfs paths used to discover the NUMA and cache-sharing topology. */
#define CPUMAP_NAME	"/sys/devices/system/node/node%d/cpumap"
#define SHARE_NAME	"/sys/devices/system/cpu/cpu%d/cache/index%d/shared_cpu_map"
#define NODE_DIR	"/sys/devices/system/node"

#undef DEBUG

/* Private variables */

/* Layout of the segment shared by all processes using the library;
   all fields are protected by 'lock'. */
typedef struct {
  unsigned long lock;			/* spin lock guarding this structure */
  unsigned int magic;			/* SH_MAGIC once initialization is complete */
  unsigned int shmid;			/* private shm id of the last initializer */

  int num_nodes;			/* number of NUMA nodes discovered */
  int num_procs;			/* number of online processors */
  int final_num_procs;			/* processors remaining after HT filtering */
  unsigned long avail;			/* bitmask of usable CPUs */

  unsigned long cpu_info   [MAX_CPUS];	/* packed (core,node,cpu) descriptors */
  unsigned long node_info  [MAX_NODES];	/* CPU bitmask per NUMA node */
  int cpu_use[MAX_CPUS];		/* owner shm id per CPU slot (0 = free) */

} shm_t;

/* Affinity mask the process started with (saved before narrowing). */
static cpu_set_t cpu_orig_mask[4];

static int  cpu_mapping[MAX_CPUS];	/* thread slot -> physical CPU id */
static int  node_mapping[MAX_CPUS * 4];	/* WhereAmI() id -> NUMA node */
static int  cpu_sub_mapping[MAX_CPUS];	/* thread slot -> index into cpu_info[] */
static int  disable_mapping;		/* nonzero disables CPU pinning */

/* Number of cores per nodes */
static int  node_cpu[MAX_NODES];
static int  node_equal = 0;		/* nonzero when nodes carry equal thread counts */

static shm_t *common = (void *)-1;	/* shared segment; (void *)-1 = not attached */
static int shmid, pshmid;		/* global and private shm ids */
static void *paddr;			/* attach address of the private segment */

/* Local copies of the chosen CPU and node bitmasks. */
static unsigned long lprocmask, lnodemask;
static int numprocs = 1;
static int numnodes = 1;

/* Accessors for the packed cpu_info word.  Active layout: cpu in
   bits 0-7, node in bits 8-15, core in bits 16-23 (the '#else'
   branch keeps the alternative core/node ordering for reference). */
#if 1
#define READ_CPU(x)	( (x)        & 0xff)
#define READ_NODE(x)	(((x) >>  8) & 0xff)
#define READ_CORE(x)	(((x) >> 16) & 0xff)

#define WRITE_CPU(x)	(x)
#define WRITE_NODE(x)	((x) <<  8)
#define WRITE_CORE(x)	((x) << 16)
#else
#define READ_CPU(x)	( (x)        & 0xff)
#define READ_CORE(x)	(((x) >>  8) & 0xff)
#define READ_NODE(x)	(((x) >> 16) & 0xff)

#define WRITE_CPU(x)	(x)
#define WRITE_CORE(x)	((x) <<  8)
#define WRITE_NODE(x)	((x) << 16)
#endif
  123. static inline int popcount(unsigned long number) {
  124. int count = 0;
  125. while (number > 0) {
  126. if (number & 1) count ++;
  127. number >>= 1;
  128. }
  129. return count;
  130. }
  131. static inline int rcount(unsigned long number) {
  132. int count = -1;
  133. while ((number > 0) && ((number & 0)) == 0) {
  134. count ++;
  135. number >>= 1;
  136. }
  137. return count;
  138. }
  139. static inline unsigned long get_cpumap(int node) {
  140. int infile;
  141. unsigned long affinity;
  142. char name[160];
  143. char *p, *dummy;
  144. sprintf(name, CPUMAP_NAME, node);
  145. infile = open(name, O_RDONLY);
  146. affinity = 0;
  147. if (infile != -1) {
  148. read(infile, name, sizeof(name));
  149. p = name;
  150. while ((*p == '0') || (*p == ',')) p++;
  151. affinity = strtol(p, &dummy, 16);
  152. close(infile);
  153. }
  154. return affinity;
  155. }
  156. static inline unsigned long get_share(int cpu, int level) {
  157. int infile;
  158. unsigned long affinity;
  159. char name[160];
  160. char *p;
  161. sprintf(name, SHARE_NAME, cpu, level);
  162. infile = open(name, O_RDONLY);
  163. affinity = (1UL << cpu);
  164. if (infile != -1) {
  165. read(infile, name, sizeof(name));
  166. p = name;
  167. while ((*p == '0') || (*p == ',')) p++;
  168. affinity = strtol(p, &p, 16);
  169. close(infile);
  170. }
  171. return affinity;
  172. }
  173. static int numa_check(void) {
  174. DIR *dp;
  175. struct dirent *dir;
  176. int node;
  177. common -> num_nodes = 0;
  178. dp = opendir(NODE_DIR);
  179. if (dp == NULL) {
  180. common -> num_nodes = 1;
  181. return 0;
  182. }
  183. for (node = 0; node < MAX_NODES; node ++) common -> node_info[node] = 0;
  184. while ((dir = readdir(dp)) != NULL) {
  185. if (*(unsigned int *) dir -> d_name == 0x065646f6eU) {
  186. node = atoi(&dir -> d_name[4]);
  187. if (node > MAX_NODES) {
  188. fprintf(stderr, "\nGotoBLAS Warining : MAX_NODES (NUMA) is too small. Terminated.\n");
  189. exit(1);
  190. }
  191. common -> num_nodes ++;
  192. common -> node_info[node] = get_cpumap(node);
  193. }
  194. }
  195. closedir(dp);
  196. if (common -> num_nodes == 1) return 1;
  197. #ifdef DEBUG
  198. fprintf(stderr, "Numa found : number of Nodes = %2d\n", common -> num_nodes);
  199. for (node = 0; node < common -> num_nodes; node ++)
  200. fprintf(stderr, "MASK (%2d) : %08lx\n", node, common -> node_info[node]);
  201. #endif
  202. return common -> num_nodes;
  203. }
/*
 * Build common->cpu_info[]: one packed (core, node, cpu) word per
 * available CPU, then sort the table with a Shell sort on the packed
 * value (ordering primarily by core ordinal, then node, then cpu id).
 * The bits of cpu_orig_mask[0] are permuted in lockstep so each mask
 * bit keeps following its CPU entry.
 */
static void numa_mapping(void) {

  int node, cpu, core;
  int i, j, h;
  unsigned long work, bit;
  int count = 0;

  /* Enumerate available CPUs node by node; 'core' is each CPU's
     ordinal within its node. */
  for (node = 0; node < common -> num_nodes; node ++) {
    core = 0;
    for (cpu = 0; cpu < common -> num_procs; cpu ++) {
      if (common -> node_info[node] & common -> avail & (1UL << cpu)) {
	common -> cpu_info[count] = WRITE_CORE(core) | WRITE_NODE(node) | WRITE_CPU(cpu);
	count ++;
	core ++;
      }
    }
  }

#ifdef DEBUG
  fprintf(stderr, "\nFrom /sys ...\n\n");

  for (cpu = 0; cpu < count; cpu++)
    fprintf(stderr, "CPU (%2d) : %08lx\n", cpu, common -> cpu_info[cpu]);
#endif

  /* Shell sort, gap sequence h = 2h + 1 (Knuth). */
  h = 1;

  while (h < count) h = 2 * h + 1;

  while (h > 1) {
    h /= 2;
    for (i = h; i < count; i++) {
      work = common -> cpu_info[i];
      /* Carry this entry's affinity bit along with the value. */
      bit  = CPU_ISSET(i, &cpu_orig_mask[0]);
      j = i - h;
      /* Insertion step: shift larger entries (and their mask bits)
	 up by one gap. */
      while (work < common -> cpu_info[j]) {
	common -> cpu_info[j + h] = common -> cpu_info[j];
	if (CPU_ISSET(j, &cpu_orig_mask[0])) {
	  CPU_SET(j + h, &cpu_orig_mask[0]);
	} else {
	  CPU_CLR(j + h, &cpu_orig_mask[0]);
	}
	j -= h;
	if (j < 0) break;
      }
      /* Drop the saved entry and its bit into the opened slot. */
      common -> cpu_info[j + h] = work;
      if (bit) {
	CPU_SET(j + h, &cpu_orig_mask[0]);
      } else {
	CPU_CLR(j + h, &cpu_orig_mask[0]);
      }
    }
  }

#ifdef DEBUG
  fprintf(stderr, "\nSorting ...\n\n");

  for (cpu = 0; cpu < count; cpu++)
    fprintf(stderr, "CPU (%2d) : %08lx\n", cpu, common -> cpu_info[cpu]);
#endif
}
/*
 * Build the initial mask of available CPUs, then clear all but one
 * CPU of every level-1-cache sharing group - i.e. keep a single
 * logical CPU per physical core, disabling hyper-threading siblings.
 */
static void disable_hyperthread(void) {

  unsigned long share;
  int cpu;

  /* One bit per processor.  NOTE(review): shifting by num_procs is
     undefined for num_procs >= 64 on LP64; this file assumes at most
     64 processors throughout - confirm against MAX_CPUS usage. */
  common -> avail = (1UL << common -> num_procs) - 1;

#ifdef DEBUG
  fprintf(stderr, "\nAvail CPUs : %04lx.\n", common -> avail);
#endif

  for (cpu = 0; cpu < common -> num_procs; cpu ++) {

    /* CPUs sharing an L1 cache with 'cpu' (including itself),
       restricted to those still available. */
    share = (get_share(cpu, 1) & common -> avail);

    if (popcount(share) > 1) {

#ifdef DEBUG
      fprintf(stderr, "Detected Hyper Threading on CPU %4x; disabled CPU %04lx.\n",
	      cpu, share & ~(1UL << cpu));
#endif

      /* Keep this CPU; drop its still-available siblings. */
      common -> avail &= ~((share & ~(1UL << cpu)));
    }
  }
}
/*
 * Compute lprocmask, the local bitmask of CPUs this process may use:
 * all filtered processors, intersected (except under OpenMP) with the
 * affinity mask the process started with.
 */
static void disable_affinity(void) {

#ifdef DEBUG
  fprintf(stderr, "Final all available CPUs : %04lx.\n\n", common -> avail);
  fprintf(stderr, "CPU mask : %04lx.\n\n", *(unsigned long *)&cpu_orig_mask[0]);
#endif

  /* One bit per usable processor.  NOTE(review): undefined when
     final_num_procs >= 64, like the other mask shifts in this file. */
  lprocmask = (1UL << common -> final_num_procs) - 1;

#ifndef USE_OPENMP
  /* Respect the original process affinity (reading the cpu_set_t
     through an unsigned long only covers the first word of CPUs). */
  lprocmask &= *(unsigned long *)&cpu_orig_mask[0];
#endif

#ifdef DEBUG
  fprintf(stderr, "I choose these CPUs : %04lx.\n\n", lprocmask);
#endif
}
/*
 * Derive the set of NUMA nodes actually used by the chosen CPUs,
 * install an interleaved memory policy across them, and record in
 * node_equal whether every used node carries the same thread count.
 */
static void setup_mempolicy(void) {

  int cpu, mynode, maxcpu;

  for (cpu = 0; cpu < MAX_NODES; cpu ++) node_cpu[cpu] = 0;

  maxcpu = 0;

  /* Tally threads per node and track the largest per-node count. */
  for (cpu = 0; cpu < numprocs; cpu ++) {
    mynode = READ_NODE(common -> cpu_info[cpu_sub_mapping[cpu]]);

    lnodemask |= (1UL << mynode);

    node_cpu[mynode] ++;

    if (maxcpu < node_cpu[mynode]) maxcpu = node_cpu[mynode];
  }

  /* node_equal holds iff every used node has exactly maxcpu threads. */
  node_equal = 1;

  for (cpu = 0; cpu < MAX_NODES; cpu ++) if ((node_cpu[cpu] != 0) && (node_cpu[cpu] != maxcpu)) node_equal = 0;

  if (lnodemask) {

#ifdef DEBUG
    fprintf(stderr, "Node mask = %lx\n", lnodemask);
#endif

    /* Interleave page allocation across the used nodes; the third
       argument is the mask width in bits. */
    my_set_mempolicy(MPOL_INTERLEAVE, &lnodemask, sizeof(lnodemask) * 8);

    numnodes = popcount(lnodemask);
  }
}
/* Return nonzero (shmctl failure) when shared-memory segment 'id' no
   longer exists - i.e. its owning process has exited, since private
   segments are created pre-marked for removal in create_pshmem(). */
static inline int is_dead(int id) {

  struct shmid_ds ds;

  return shmctl(id, IPC_STAT, &ds);
}
  311. static void open_shmem(void) {
  312. int try = 0;
  313. do {
  314. shmid = shmget(SH_MAGIC, 4096, 0666);
  315. if (shmid == -1) {
  316. shmid = shmget(SH_MAGIC, 4096, IPC_CREAT | 0666);
  317. }
  318. try ++;
  319. } while ((try < 10) && (shmid == -1));
  320. if (shmid == -1) {
  321. fprintf(stderr, "GotoBLAS : Can't open shared memory. Terminated.\n");
  322. exit(1);
  323. }
  324. if (shmid != -1) common = (shm_t *)shmat(shmid, NULL, 0);
  325. #ifdef DEBUG
  326. fprintf(stderr, "Shared Memory id = %x Address = %p\n", shmid, common);
  327. #endif
  328. }
/*
 * Create this process's private (anonymous) SysV shared-memory
 * segment; its id becomes the process's ownership token in
 * common->cpu_use[].  The segment is marked for removal immediately,
 * so it vanishes automatically when the process exits - which is
 * exactly what makes is_dead() detect dead owners.
 */
static void create_pshmem(void) {

  pshmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);

  paddr = shmat(pshmid, NULL, 0);

  shmctl(pshmid, IPC_RMID, 0);

#ifdef DEBUG
  fprintf(stderr, "Private Shared Memory id = %x Address = %p\n", pshmid, paddr);
#endif
}
/*
 * Claim up to numprocs free CPU slots in the shared table and build
 * the local thread->CPU mappings.  Slots held by dead processes are
 * reclaimed along the way.  If not enough slots are free - or only
 * one thread is wanted - every claim made here is rolled back and
 * pinning is disabled for this process.
 */
static void local_cpu_map(void) {

  int cpu, id, mapping;

  cpu = 0;
  mapping = 0;

  do {
    id = common -> cpu_use[cpu];

    /* Reclaim slots whose owner's private shm segment is gone. */
    if (id > 0) {
      if (is_dead(id)) common -> cpu_use[cpu] = 0;
    }

    /* Claim a free slot that lies inside our allowed CPU mask. */
    if ((common -> cpu_use[cpu] == 0) && (lprocmask & (1UL << cpu))) {

      common -> cpu_use[cpu] = pshmid;
      cpu_mapping[mapping] = READ_CPU(common -> cpu_info[cpu]);
      cpu_sub_mapping[mapping] = cpu;

      mapping ++;
    }

    cpu ++;

  } while ((mapping < numprocs) && (cpu < common -> final_num_procs));

  disable_mapping = 0;

  /* Too few free CPUs, or single-threaded: release everything we
     just claimed and fall back to no pinning. */
  if ((mapping < numprocs) || (numprocs == 1)) {
    for (cpu = 0; cpu < common -> final_num_procs; cpu ++) {
      if (common -> cpu_use[cpu] == pshmid) common -> cpu_use[cpu] = 0;
    }
    disable_mapping = 1;
  }

#ifdef DEBUG
  for (cpu = 0; cpu < numprocs; cpu ++) {
    fprintf(stderr, "Local Mapping : %2d --> %2d (%2d)\n", cpu, cpu_mapping[cpu], cpu_sub_mapping[cpu]);
  }
#endif
}
/* Public Functions */

/* Number of threads this process will use. */
int get_num_procs(void) { return numprocs; }

/* Number of NUMA nodes spanned by the chosen CPUs. */
int get_num_nodes(void) { return numnodes; }

/* Nonzero when the active thread count divides evenly across the
   used nodes AND every used node carries the same thread count. */
int get_node_equal(void) {

  return (((blas_cpu_number % numnodes) == 0) && node_equal);

}
/*
 * Pin the calling thread to the CPU assigned to thread slot 'pos' and
 * remember the thread's NUMA node in node_mapping[].
 *
 * pos < 0 restores the process's original affinity mask and returns 0.
 * Otherwise returns the thread's node id, or 1 when mapping is
 * disabled (no pinning performed).
 */
int gotoblas_set_affinity(int pos) {

  cpu_set_t cpu_mask;

  int mynode = 1;

  /* if number of threads is larger than initial condition */
  if (pos < 0) {
    sched_setaffinity(0, sizeof(cpu_orig_mask), &cpu_orig_mask[0]);
    return 0;
  }

  if (!disable_mapping) {

    mynode = READ_NODE(common -> cpu_info[cpu_sub_mapping[pos]]);

#ifdef DEBUG
    fprintf(stderr, "Giving Affinity[%4d %3d] --> %3d My node = %3d\n", getpid(), pos, cpu_mapping[pos], mynode);
#endif

    /* Single-CPU mask: bind this thread to exactly its assigned CPU. */
    CPU_ZERO(&cpu_mask);
    CPU_SET (cpu_mapping[pos], &cpu_mask);

    sched_setaffinity(0, sizeof(cpu_mask), &cpu_mask);

    node_mapping[WhereAmI()] = mynode;

  }

  return mynode;
}
/* NUMA node of the calling thread, as recorded by
   gotoblas_set_affinity(); 1 when mapping is disabled. */
int get_node(void) {

  if (!disable_mapping) return node_mapping[WhereAmI()];

  return 1;
}
/* Guard so the process-wide setup below runs only once. */
static int initialized = 0;

/*
 * Process-wide affinity initialization:
 *  - read the requested thread count from the environment,
 *  - attach to (or create and populate) the shared topology segment,
 *  - claim CPU slots, and - unless built for OpenMP - pin the initial
 *    thread and install the NUMA memory policy.
 */
void gotoblas_affinity_init(void) {

  int cpu, num_avail;
#ifndef USE_OPENMP
  cpu_set_t cpu_mask;
#endif

  if (initialized) return;

  initialized = 1;

  /* Save the affinity we started with so it can be restored later. */
  sched_getaffinity(0, sizeof(cpu_orig_mask), &cpu_orig_mask[0]);

#ifdef USE_OPENMP
  numprocs = 0;
#else
  numprocs = readenv("OPENBLAS_NUM_THREADS");
  if (numprocs == 0) numprocs = readenv("GOTO_NUM_THREADS");
#endif

  if (numprocs == 0) numprocs = readenv("OMP_NUM_THREADS");

  numnodes = 1;

  /* Single-threaded: no pinning or shared state is needed. */
  if (numprocs == 1) {
    disable_mapping = 1;
    return;
  }

  create_pshmem();

  open_shmem();

  /* Spin while another live process holds the lock on a segment that
     is not fully initialized; steal the lock from a dead holder. */
  while ((common -> lock) && (common -> magic != SH_MAGIC)) {
    if (is_dead(common -> shmid)) {
      common -> lock = 0;
      common -> shmid = 0;
      common -> magic = 0;
    } else {
      sched_yield();
    }
  }

  blas_lock(&common -> lock);

  /* Force re-initialization if the last initializer has died. */
  if ((common -> shmid) && is_dead(common -> shmid)) common -> magic = 0;

  common -> shmid = pshmid;

  if (common -> magic != SH_MAGIC) {

#ifdef DEBUG
    fprintf(stderr, "Shared Memory Initialization.\n");
#endif

    /* First (or re-)initialization: discover the machine topology. */
    common -> num_procs = get_nprocs();

    for (cpu = 0; cpu < common -> num_procs; cpu++) common -> cpu_info[cpu] = cpu;

    numa_check();

    disable_hyperthread();

    if (common -> num_nodes > 1) numa_mapping();

    common -> final_num_procs = popcount(common -> avail);

    for (cpu = 0; cpu < common -> final_num_procs; cpu ++) common -> cpu_use[cpu] = 0;

    common -> magic = SH_MAGIC;

  }

  disable_affinity();

  num_avail = popcount(lprocmask);

  /* Clamp the requested thread count to the CPUs we may use. */
  if ((numprocs <= 0) || (numprocs > num_avail)) numprocs = num_avail;

#ifdef DEBUG
  fprintf(stderr, "Number of threads = %d\n", numprocs);
#endif

  local_cpu_map();

  blas_unlock(&common -> lock);

#ifndef USE_OPENMP
  if (!disable_mapping) {

#ifdef DEBUG
    fprintf(stderr, "Giving Affinity[%3d] --> %3d\n", 0, cpu_mapping[0]);
#endif

    /* Pin the initial thread to its assigned CPU. */
    CPU_ZERO(&cpu_mask);
    CPU_SET (cpu_mapping[0], &cpu_mask);

    sched_setaffinity(0, sizeof(cpu_mask), &cpu_mask);

    node_mapping[WhereAmI()] = READ_NODE(common -> cpu_info[cpu_sub_mapping[0]]);

    setup_mempolicy();

    /* Optionally let the main thread float again. */
    if (readenv("OPENBLAS_MAIN_FREE") || readenv("GOTOBLAS_MAIN_FREE")) {
      sched_setaffinity(0, sizeof(cpu_orig_mask), &cpu_orig_mask[0]);
    }
  }
#endif

#ifdef DEBUG
  fprintf(stderr, "Initialization is done.\n");
#endif
}
/*
 * Tear down the affinity state: mark our CPU slots in the shared
 * table, remove the shared segment if we are the last attached
 * process, and detach both the shared and the private segment.
 */
void gotoblas_affinity_quit(void) {

  int i;
  struct shmid_ds ds;

#ifdef DEBUG
  fprintf(stderr, "Terminating ..\n");
#endif

  if ((numprocs == 1) || (initialized == 0)) return;

  if (!disable_mapping) {

    blas_lock(&common -> lock);

    /* NOTE(review): slots were claimed in local_cpu_map() indexed by
       cpu_sub_mapping[], but are marked here via cpu_mapping[]; the
       two differ once numa_mapping() reorders CPUs - verify whether
       cpu_sub_mapping[i] was intended. */
    for (i = 0; i < numprocs; i ++) common -> cpu_use[cpu_mapping[i]] = -1;

    blas_unlock(&common -> lock);

  }

  shmctl(shmid, IPC_STAT, &ds);

  /* The last attached user removes the segment. */
  if (ds.shm_nattch == 1) shmctl(shmid, IPC_RMID, 0);

  shmdt(common);

  shmdt(paddr);

  initialized = 0;
}
  490. #else
/* Fallback stubs for builds without Linux SMP affinity support:
   no pinning is done and the whole machine is treated as one node. */
void gotoblas_affinity_init(void) {};
void gotoblas_set_affinity(int threads) {};
void gotoblas_set_affinity2(int threads) {};
void gotoblas_affinity_reschedule(void) {};
int get_num_procs(void) { return get_nprocs(); }
int get_num_nodes(void) { return 1; }
int get_node(void) { return 1;}
  498. #endif

OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version.