
init.c 26 kB

/*****************************************************************************
Copyright (c) 2011-2014, The OpenBLAS Project
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in
   the documentation and/or other materials provided with the
   distribution.

3. Neither the name of the OpenBLAS project nor the names of
   its contributors may be used to endorse or promote products
   derived from this software without specific prior written
   permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************************/
/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin.           */
/* All rights reserved.                                              */
/*                                                                   */
/* Redistribution and use in source and binary forms, with or        */
/* without modification, are permitted provided that the following   */
/* conditions are met:                                               */
/*                                                                   */
/*   1. Redistributions of source code must retain the above         */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer.                                                  */
/*                                                                   */
/*   2. Redistributions in binary form must reproduce the above      */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer in the documentation and/or other materials       */
/*      provided with the distribution.                              */
/*                                                                   */
/*    THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT        */
/*    AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,        */
/*    INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF       */
/*    MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE       */
/*    DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT       */
/*    AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,     */
/*    INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES       */
/*    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE      */
/*    GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR           */
/*    BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF     */
/*    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT      */
/*    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT     */
/*    OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE            */
/*    POSSIBILITY OF SUCH DAMAGE.                                    */
/*                                                                   */
/* The views and conclusions contained in the software and           */
/* documentation are those of the authors and should not be          */
/* interpreted as representing official policies, either expressed   */
/* or implied, of The University of Texas at Austin.                 */
/*********************************************************************/
#include "common.h"

#if defined(OS_LINUX) && defined(SMP)

#define _GNU_SOURCE

#include <sys/sysinfo.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <errno.h>
#include <sys/shm.h>
#include <fcntl.h>
#include <sched.h>
#include <dirent.h>
#include <dlfcn.h>
#include <unistd.h>
#include <string.h>

#if defined(BIGNUMA)
// max number of nodes as defined in numa.h
// max cpus as defined in most sched.h
// cannot use CPU_SETSIZE directly as some
// Linux distributors set it to 4096
#define MAX_NODES 128
#define MAX_CPUS  1024
#else
#define MAX_NODES 16
#define MAX_CPUS  256
#endif

#define NCPUBITS        (8*sizeof(unsigned long))
#define MAX_BITMASK_LEN (MAX_CPUS/NCPUBITS)
#define CPUELT(cpu)     ((cpu) / NCPUBITS)
#define CPUMASK(cpu)    ((unsigned long) 1UL << ((cpu) % NCPUBITS))

#define SH_MAGIC 0x510510

#define CPUMAP_NAME "/sys/devices/system/node/node%d/cpumap"
#define SHARE_NAME  "/sys/devices/system/cpu/cpu%d/cache/index%d/shared_cpu_map"
#define NODE_DIR    "/sys/devices/system/node"

//#undef DEBUG
/* Private variables */
typedef struct {
  unsigned long lock;
  unsigned int magic;
  unsigned int shmid;

  int num_nodes;
  int num_procs;
  int final_num_procs;
  unsigned long avail [MAX_BITMASK_LEN];
  int avail_count;
  unsigned long cpu_info  [MAX_CPUS];
  unsigned long node_info [MAX_NODES][MAX_BITMASK_LEN];
  int cpu_use[MAX_CPUS];
} shm_t;

static cpu_set_t cpu_orig_mask[4];

static int cpu_mapping[MAX_CPUS];
static int node_mapping[MAX_CPUS * 4];
static int cpu_sub_mapping[MAX_CPUS];
static int disable_mapping;

/* Number of cores per node */
static int node_cpu[MAX_NODES];
static int node_equal = 0;

static shm_t *common = (void *)-1;
static int shmid, pshmid;
static void *paddr;

static unsigned long lprocmask[MAX_BITMASK_LEN], lnodemask;
static int lprocmask_count = 0;
static int numprocs = 1;
static int numnodes = 1;

#if 1
#define READ_CPU(x)   ( (x)        & 0xff)
#define READ_NODE(x)  (((x) >>  8) & 0xff)
#define READ_CORE(x)  (((x) >> 16) & 0xff)

#define WRITE_CPU(x)  (x)
#define WRITE_NODE(x) ((x) <<  8)
#define WRITE_CORE(x) ((x) << 16)
#else
#define READ_CPU(x)   ( (x)        & 0xff)
#define READ_CORE(x)  (((x) >>  8) & 0xff)
#define READ_NODE(x)  (((x) >> 16) & 0xff)

#define WRITE_CPU(x)  (x)
#define WRITE_CORE(x) ((x) <<  8)
#define WRITE_NODE(x) ((x) << 16)
#endif
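/* Count the number of set bits in a bitmask word. */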
static inline int popcount(unsigned long number) {

  int count = 0;

  while (number > 0) {
    if (number & 1) count ++;
    number >>= 1;
  }

  return count;
}
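/* Return the bit position of the highest set bit (-1 if the word is zero).
   Note that "(number & 0) == 0" is always true, so the loop simply walks the
   word down to zero; the result is later used to derive the highest
   available CPU index. */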
static inline int rcount(unsigned long number) {

  int count = -1;

  while ((number > 0) && ((number & 0)) == 0) {
    count ++;
    number >>= 1;
  }

  return count;
}
/***
  Known issue: The number of CPUs/cores should be less than the number
  of bits in an unsigned long. On 64-bit systems the limit is 64; on
  32-bit systems it is 32.
***/
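/* Read /sys/devices/system/node/node<N>/cpumap and parse the comma-separated
   hexadecimal CPU mask into the node_info bitmask words (lowest word first). */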
static inline void get_cpumap(int node, unsigned long * node_info) {

  int infile;
  unsigned long affinity[32];
  char name[160];
  char cpumap[160];
  char *dummy;
  int i=0;
  int count=0;
  int k=0;

  sprintf(name, CPUMAP_NAME, node);

  infile = open(name, O_RDONLY);
  for(i=0; i<32; i++){
    affinity[i] = 0;
  }

  if (infile != -1) {

    read(infile, cpumap, sizeof(cpumap));

    for(i=0; i<160; i++){
      if(cpumap[i] == '\n')
        break;
      if(cpumap[i] != ','){
        name[k++]=cpumap[i];

        //Enough data for Hex
        if(k >= NCPUBITS/4){
          affinity[count++] = strtoul(name, &dummy, 16);
          k=0;
        }
      }
    }
    if(k!=0){
      name[k]='\0';
      affinity[count++] = strtoul(name, &dummy, 16);
      // k=0;
    }
    // 0-63bit -> node_info[0], 64-128bit -> node_info[1] ....
    // reverse the sequence
    for(i=0; i<count && i<MAX_BITMASK_LEN; i++){
      node_info[i]=affinity[count-i-1];
    }
    close(infile);
  }

  return ;
}
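/* Read /sys/devices/system/cpu/cpu<N>/cache/index<level>/shared_cpu_map and
   parse it into the share bitmask; the CPU's own bit is always set. */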
static inline void get_share(int cpu, int level, unsigned long * share) {

  int infile;
  unsigned long affinity[32];
  char cpumap[160];
  char name[160];
  char *dummy;
  int count=0;
  int i=0,k=0;
  int bitmask_idx = 0;

  sprintf(name, SHARE_NAME, cpu, level);

  infile = open(name, O_RDONLY);

  // Init share
  for(i=0; i<MAX_BITMASK_LEN; i++){
    share[i]=0;
  }
  bitmask_idx = CPUELT(cpu);
  share[bitmask_idx] = CPUMASK(cpu);

  if (infile != -1) {

    read(infile, cpumap, sizeof(cpumap));

    for(i=0; i<160; i++){
      if(cpumap[i] == '\n')
        break;
      if(cpumap[i] != ','){
        name[k++]=cpumap[i];

        //Enough data
        if(k >= NCPUBITS/4){
          affinity[count++] = strtoul(name, &dummy, 16);
          k=0;
        }
      }
    }
    if(k!=0){
      name[k]='\0';
      affinity[count++] = strtoul(name, &dummy, 16);
      // k=0;
    }
    // 0-63bit -> share[0], 64-128bit -> share[1] ....
    // reverse the sequence
    for(i=0; i<count && i<MAX_BITMASK_LEN; i++){
      share[i]=affinity[count-i-1];
    }
    close(infile);
  }

  return ;
}
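/* Scan /sys/devices/system/node for node<N> directories, record each node's
   CPU mask in common->node_info, and return the number of NUMA nodes found
   (0 if the node directory does not exist). */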
static int numa_check(void) {

  DIR *dp;
  struct dirent *dir;
  int node;
  int j;

  common -> num_nodes = 0;

  dp = opendir(NODE_DIR);

  if (dp == NULL) {
    common -> num_nodes = 1;
    return 0;
  }

  for (node = 0; node < MAX_NODES; node ++) {
    for (j = 0; j<MAX_BITMASK_LEN; j++) common -> node_info[node][j] = 0;
  }

  while ((dir = readdir(dp)) != NULL) {
    if (strncmp(dir->d_name, "node", 4)==0) {

      node = atoi(&dir -> d_name[4]);

      if (node >= MAX_NODES) {
        fprintf(stderr, "\nOpenBLAS Warning : MAX_NODES (NUMA) is too small. Terminated.\n");
        exit(1);
      }

      common -> num_nodes ++;
      get_cpumap(node, common->node_info[node]);

    }
  }

  closedir(dp);

  if (common -> num_nodes == 1) return 1;

#ifdef DEBUG
  fprintf(stderr, "Numa found : number of Nodes = %2d\n", common -> num_nodes);

  for (node = 0; node < common -> num_nodes; node ++)
    fprintf(stderr, "MASK (%2d) : %08lx\n", node, common -> node_info[node][0]);
#endif

  return common -> num_nodes;
}
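/* Fallback for glibc older than 2.6, which lacks sched_getcpu():
   read the CPU number (field 39) from /proc/self/stat. */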
#if defined(__GLIBC_PREREQ)
#if !__GLIBC_PREREQ(2, 6)
int sched_getcpu(void)
{
  int cpu;
  FILE *fp = NULL;
  if ( (fp = fopen("/proc/self/stat", "r")) == NULL)
    return -1;
  if ( fscanf( fp, "%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%*s%d", &cpu) != 1) {
    fclose (fp);
    return -1;
  }
  fclose (fp);
  return(cpu);
}
#endif
#endif
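/* Build the cpu_info table (CPU, core and node packed into one word per
   entry) for all available CPUs, then order it so that cores of the node the
   caller is currently running on come first; when that node alone cannot
   accommodate all requested threads, fall back to a shell sort of the raw
   entries. */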
static void numa_mapping(void) {

  int node, cpu, core;
  int i, j, h;
  unsigned long work, bit;
  int count = 0;
  int bitmask_idx = 0;
  int current_cpu;
  int current_node = 0;
  int cpu_count = 0;

  for (node = 0; node < common -> num_nodes; node ++) {
    core = 0;
    for (cpu = 0; cpu < common -> num_procs; cpu ++) {
      bitmask_idx = CPUELT(cpu);
      if (common -> node_info[node][bitmask_idx] & common -> avail[bitmask_idx] & CPUMASK(cpu)) {
        common -> cpu_info[count] = WRITE_CORE(core) | WRITE_NODE(node) | WRITE_CPU(cpu);
        count ++;
        core ++;
      }
    }
  }

#ifdef DEBUG
  fprintf(stderr, "\nFrom /sys ...\n\n");

  for (cpu = 0; cpu < count; cpu++)
    fprintf(stderr, "CPU (%2d) : %08lx\n", cpu, common -> cpu_info[cpu]);
#endif

  current_cpu = sched_getcpu();
  for (cpu = 0; cpu < count; cpu++) {
    if (READ_CPU(common -> cpu_info[cpu]) == current_cpu) {
      current_node = READ_NODE(common -> cpu_info[cpu]);
      break;
    }
  }
  for (i = 0; i < MAX_BITMASK_LEN; i++)
    cpu_count += popcount(common -> node_info[current_node][i] & common -> avail[i]);

  /*
   * If all the processes can be accommodated in the
   * current node itself, then bind to cores
   * from the current node only
   */
  if (numprocs <= cpu_count) {
    /*
     * First sort all the cores in order from the current node.
     * Then take remaining nodes one by one in order,
     * and sort their cores in order.
     */
    for (i = 0; i < count; i++) {
      for (j = 0; j < count - 1; j++) {
        int node_1, node_2;
        int core_1, core_2;
        int swap = 0;

        node_1 = READ_NODE(common -> cpu_info[j]);
        node_2 = READ_NODE(common -> cpu_info[j + 1]);
        core_1 = READ_CORE(common -> cpu_info[j]);
        core_2 = READ_CORE(common -> cpu_info[j + 1]);

        if (node_1 == node_2) {
          if (core_1 > core_2)
            swap = 1;
        } else {
          if ((node_2 == current_node) ||
              ((node_1 != current_node) && (node_1 > node_2)))
            swap = 1;
        }
        if (swap) {
          unsigned long temp;
          temp = common->cpu_info[j];
          common->cpu_info[j] = common->cpu_info[j + 1];
          common->cpu_info[j + 1] = temp;
        }
      }
    }
  } else {
    h = 1;

    while (h < count) h = 2 * h + 1;

    while (h > 1) {
      h /= 2;
      for (i = h; i < count; i++) {
        work = common -> cpu_info[i];
        bit  = CPU_ISSET(i, &cpu_orig_mask[0]);
        j = i - h;
        while (work < common -> cpu_info[j]) {
          common -> cpu_info[j + h] = common -> cpu_info[j];
          if (CPU_ISSET(j, &cpu_orig_mask[0])) {
            CPU_SET(j + h, &cpu_orig_mask[0]);
          } else {
            CPU_CLR(j + h, &cpu_orig_mask[0]);
          }
          j -= h;
          if (j < 0) break;
        }
        common -> cpu_info[j + h] = work;
        if (bit) {
          CPU_SET(j + h, &cpu_orig_mask[0]);
        } else {
          CPU_CLR(j + h, &cpu_orig_mask[0]);
        }
      }
    }
  }

#ifdef DEBUG
  fprintf(stderr, "\nSorting ...\n\n");

  for (cpu = 0; cpu < count; cpu++)
    fprintf(stderr, "CPUINFO (%2d) : %08lx (CPU=%3lu CORE=%3lu NODE=%3lu)\n", cpu, common -> cpu_info[cpu],
            READ_CPU(common -> cpu_info[cpu]),
            READ_CORE(common -> cpu_info[cpu]),
            READ_NODE(common -> cpu_info[cpu]));
#endif
}
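/* Mark all configured CPUs as available, then use each CPU's shared_cpu_map
   for cache index 1 to detect hyper-threaded siblings and clear all but one
   sibling per physical core from common->avail. */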
static void disable_hyperthread(void) {

  unsigned long share[MAX_BITMASK_LEN];
  int cpu;
  int bitmask_idx = 0;
  int i=0, count=0;

  bitmask_idx = CPUELT(common -> num_procs);
  for(i=0; i< bitmask_idx; i++){
    common -> avail[count++] = 0xFFFFFFFFFFFFFFFFUL;
  }
  if(CPUMASK(common -> num_procs) != 1){
    common -> avail[count++] = CPUMASK(common -> num_procs) - 1;
  }
  common -> avail_count = count;

  /* if(common->num_procs > 64){ */
  /*   fprintf(stderr, "\nOpenBLAS Warning : The number of CPU/Cores(%d) is beyond the limit(64). Terminated.\n", common->num_procs); */
  /*   exit(1); */
  /* }else if(common->num_procs == 64){ */
  /*   common -> avail = 0xFFFFFFFFFFFFFFFFUL; */
  /* }else */
  /*   common -> avail = (1UL << common -> num_procs) - 1; */

#ifdef DEBUG
  fprintf(stderr, "\nAvail CPUs : ");
  for(i=0; i<count; i++)
    fprintf(stderr, "%04lx ", common -> avail[i]);
  fprintf(stderr, ".\n");
#endif

  for (cpu = 0; cpu < common -> num_procs; cpu ++) {

    get_share(cpu, 1, share);

    // If the shared CPUs fall in different elements of the share and avail arrays, this may be a bug.
    for (i = 0; i < count ; i++){

      share[i] &= common->avail[i];

      if (popcount(share[i]) > 1) {

#ifdef DEBUG
        fprintf(stderr, "Detected Hyper Threading on CPU %4x; disabled CPU %04lx.\n",
                cpu, share[i] & ~(CPUMASK(cpu)));
#endif

        common -> avail[i] &= ~((share[i] & ~ CPUMASK(cpu)));
      }
    }
  }
}
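/* Build lprocmask, the per-process mask of CPUs this process may use;
   without OpenMP it is additionally restricted to common->avail. */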
static void disable_affinity(void) {

  int i=0;
  int bitmask_idx=0;
  int count=0;

#ifdef DEBUG
  fprintf(stderr, "Final all available CPUs : %04lx.\n\n", common -> avail[0]);
  fprintf(stderr, "CPU mask : %04lx.\n\n", *(unsigned long *)&cpu_orig_mask[0]);
#endif

  /* if(common->final_num_procs > 64){ */
  /*   fprintf(stderr, "\nOpenBLAS Warning : The number of CPU/Cores(%d) is beyond the limit(64). Terminated.\n", common->final_num_procs); */
  /*   exit(1); */
  /* }else if(common->final_num_procs == 64){ */
  /*   lprocmask = 0xFFFFFFFFFFFFFFFFUL; */
  /* }else */
  /*   lprocmask = (1UL << common -> final_num_procs) - 1; */

  bitmask_idx = CPUELT(common -> final_num_procs);
  for(i=0; i< bitmask_idx; i++){
    lprocmask[count++] = 0xFFFFFFFFFFFFFFFFUL;
  }
  if(CPUMASK(common -> final_num_procs) != 1){
    lprocmask[count++] = CPUMASK(common -> final_num_procs) - 1;
  }
  lprocmask_count = count;

#ifndef USE_OPENMP
  for(i=0; i< count; i++){
    lprocmask[i] &= common->avail[i];
  }
#endif

#ifdef DEBUG
  fprintf(stderr, "I choose these CPUs : %04lx.\n\n", lprocmask[0]);
#endif
}
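/* Record which NUMA nodes the chosen CPUs belong to, set an interleaved
   memory policy across those nodes, and note whether every used node
   contributes the same number of cores (node_equal). */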
static void setup_mempolicy(void) {

  int cpu, mynode, maxcpu;

  for (cpu = 0; cpu < MAX_NODES; cpu ++) node_cpu[cpu] = 0;

  maxcpu = 0;

  for (cpu = 0; cpu < numprocs; cpu ++) {
    mynode = READ_NODE(common -> cpu_info[cpu_sub_mapping[cpu]]);

    lnodemask |= (1UL << mynode);

    node_cpu[mynode] ++;

    if (maxcpu < node_cpu[mynode]) maxcpu = node_cpu[mynode];
  }

  node_equal = 1;

  for (cpu = 0; cpu < MAX_NODES; cpu ++) if ((node_cpu[cpu] != 0) && (node_cpu[cpu] != maxcpu)) node_equal = 0;

  if (lnodemask) {

#ifdef DEBUG
    fprintf(stderr, "Node mask = %lx\n", lnodemask);
#endif

    my_set_mempolicy(MPOL_INTERLEAVE, &lnodemask, sizeof(lnodemask) * 8);

    numnodes = popcount(lnodemask);
  }
}
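/* A CPU slot in the shared table is tagged with the private shm id of the
   process that claimed it; shmctl(IPC_STAT) failing for that id means the
   owning process is gone. */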
static inline int is_dead(int id) {

  struct shmid_ds ds;

  return shmctl(id, IPC_STAT, &ds);
}
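/* Attach (creating it if necessary) the System V shared memory segment,
   keyed by SH_MAGIC, that holds the CPU usage table shared by all OpenBLAS
   processes on the machine. */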
static int open_shmem(void) {

  int try = 0;
  int err = 0;

  do {

#if defined(BIGNUMA)
    // raised to 32768, enough for 128 nodes and 1024 cpus
    shmid = shmget(SH_MAGIC, 32768, 0666);
#else
    shmid = shmget(SH_MAGIC, 4096, 0666);
#endif

    if (shmid == -1) {
#if defined(BIGNUMA)
      shmid = shmget(SH_MAGIC, 32768, IPC_CREAT | 0666);
#else
      shmid = shmget(SH_MAGIC, 4096, IPC_CREAT | 0666);
#endif
    }

    if (shmid == -1) err = errno;

    try ++;

  } while ((try < 10) && (shmid == -1));

  if (shmid == -1) {
    fprintf (stderr, "Obtaining shared memory segment failed in open_shmem: %s\n",strerror(err));
    fprintf (stderr, "Setting CPU affinity not possible without shared memory access.\n");
    return (1);
  }

  if (shmid != -1) {
    if ( (common = shmat(shmid, NULL, 0)) == (void*)-1) {
      perror ("Attaching shared memory segment failed in open_shmem");
      fprintf (stderr, "Setting CPU affinity not possible without shared memory access.\n");
      return (1);
    }
  }

#ifdef DEBUG
  fprintf(stderr, "Shared Memory id = %x Address = %p\n", shmid, common);
#endif

  return (0);
}
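/* Create a small private shared memory segment whose id serves as this
   process's token in the shared CPU table; it is marked for removal right
   away so it disappears automatically when the process exits. */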
static int create_pshmem(void) {

  pshmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);

  if (pshmid == -1) {
    perror ("Obtaining shared memory segment failed in create_pshmem");
    fprintf (stderr, "Setting CPU affinity not possible without shared memory access.\n");
    return(1);
  }

  if ( (paddr = shmat(pshmid, NULL, 0)) == (void*)-1) {
    perror ("Attaching shared memory segment failed in create_pshmem");
    fprintf (stderr, "Setting CPU affinity not possible without shared memory access.\n");
    return (1);
  }

  if (shmctl(pshmid, IPC_RMID, 0) == -1) return (1);

#ifdef DEBUG
  fprintf(stderr, "Private Shared Memory id = %x Address = %p\n", pshmid, paddr);
#endif

  return(0);
}
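/* Claim up to numprocs free CPUs in the shared table (reclaiming slots whose
   owners have died) and record the local thread-to-CPU mapping; if not
   enough CPUs can be claimed, or only one thread is requested, release them
   again and disable affinity mapping. */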
static void local_cpu_map(void) {

  int cpu, id, mapping;
  int bitmask_idx = 0;

  cpu = 0;
  mapping = 0;

  do {
    id = common -> cpu_use[cpu];

    if (id > 0) {
      if (is_dead(id)) common -> cpu_use[cpu] = 0;
    }

    bitmask_idx = CPUELT(cpu);
    if ((common -> cpu_use[cpu] == 0) && (lprocmask[bitmask_idx] & CPUMASK(cpu))) {

      common -> cpu_use[cpu] = pshmid;
      cpu_mapping[mapping] = READ_CPU(common -> cpu_info[cpu]);
      cpu_sub_mapping[mapping] = cpu;

      mapping ++;
    }

    cpu ++;

  } while ((mapping < numprocs) && (cpu < common -> final_num_procs));

  disable_mapping = 0;

  if ((mapping < numprocs) || (numprocs == 1)) {
    for (cpu = 0; cpu < common -> final_num_procs; cpu ++) {
      if (common -> cpu_use[cpu] == pshmid) common -> cpu_use[cpu] = 0;
    }
    disable_mapping = 1;
  }

#ifdef DEBUG
  for (cpu = 0; cpu < numprocs; cpu ++) {
    fprintf(stderr, "Local Mapping : %2d --> %2d (%2d)\n", cpu, cpu_mapping[cpu], cpu_sub_mapping[cpu]);
  }
#endif
}
/* Public Functions */

int get_num_procs(void) { return numprocs; }
int get_num_nodes(void) { return numnodes; }

int get_node_equal(void) {

  return (((blas_cpu_number % numnodes) == 0) && node_equal);

}
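/* Pin the calling thread to the CPU assigned to slot pos and return its NUMA
   node; a negative pos restores the process's original affinity mask. */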
int gotoblas_set_affinity(int pos) {

  cpu_set_t cpu_mask;

  int mynode = 1;

  /* if number of threads is larger than initial condition */
  if (pos < 0) {
    sched_setaffinity(0, sizeof(cpu_orig_mask), &cpu_orig_mask[0]);
    return 0;
  }

  if (!disable_mapping) {

    mynode = READ_NODE(common -> cpu_info[cpu_sub_mapping[pos]]);

#ifdef DEBUG
    fprintf(stderr, "Giving Affinity[%4d %3d] --> %3d My node = %3d\n", getpid(), pos, cpu_mapping[pos], mynode);
#endif

    CPU_ZERO(&cpu_mask);
    CPU_SET (cpu_mapping[pos], &cpu_mask);

    sched_setaffinity(0, sizeof(cpu_mask), &cpu_mask);

    node_mapping[WhereAmI()] = mynode;
  }

  return mynode;
}

int get_node(void) {
  if (!disable_mapping) return node_mapping[WhereAmI()];

  return 1;
}
static int initialized = 0;
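/* One-time initialization: read the thread count from the environment, set
   up the shared CPU table, detect NUMA topology and hyper-threading, claim
   CPUs for this process and (without OpenMP) bind the main thread to its
   assigned CPU. */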
void gotoblas_affinity_init(void) {

  int cpu, num_avail;
#ifndef USE_OPENMP
  cpu_set_t cpu_mask;
#endif
  int i;

  if (initialized) return;

  initialized = 1;

  sched_getaffinity(0, sizeof(cpu_orig_mask), &cpu_orig_mask[0]);

#ifdef USE_OPENMP
  numprocs = 0;
#else
  numprocs = readenv_atoi("OPENBLAS_NUM_THREADS");
  if (numprocs == 0) numprocs = readenv_atoi("GOTO_NUM_THREADS");
#endif

  if (numprocs == 0) numprocs = readenv_atoi("OMP_NUM_THREADS");

  numnodes = 1;

  if (numprocs == 1) {
    disable_mapping = 1;
    return;
  }

  if (create_pshmem() != 0) {
    disable_mapping = 1;
    return;
  }

  if (open_shmem() != 0) {
    disable_mapping = 1;
    return;
  }

  while ((common -> lock) && (common -> magic != SH_MAGIC)) {
    if (is_dead(common -> shmid)) {
      common -> lock = 0;
      common -> shmid = 0;
      common -> magic = 0;
    } else {
      YIELDING;
    }
  }

  blas_lock(&common -> lock);

  if ((common -> shmid) && is_dead(common -> shmid)) common -> magic = 0;

  common -> shmid = pshmid;

  if (common -> magic != SH_MAGIC) {
    cpu_set_t *cpusetp;
    int nums;
    int ret;

#ifdef DEBUG
    fprintf(stderr, "Shared Memory Initialization.\n");
#endif

    // returns the number of processors configured by the operating system
    nums = sysconf(_SC_NPROCESSORS_CONF);

#if !defined(__GLIBC_PREREQ)
    common->num_procs = nums;
#else

#if !__GLIBC_PREREQ(2, 3)
    common->num_procs = nums;

#elif __GLIBC_PREREQ(2, 7)
    cpusetp = CPU_ALLOC(nums);
    if (cpusetp == NULL) {
      common->num_procs = nums;
    } else {
      size_t size;
      size = CPU_ALLOC_SIZE(nums);
      ret = sched_getaffinity(0,size,cpusetp);
      if (ret!=0)
        common->num_procs = nums;
      else
        common->num_procs = CPU_COUNT_S(size,cpusetp);
    }
    CPU_FREE(cpusetp);
#else
    ret = sched_getaffinity(0,sizeof(cpu_set_t), cpusetp);
    if (ret!=0) {
      common->num_procs = nums;
    } else {
#if !__GLIBC_PREREQ(2, 6)
      int i;
      int n = 0;
      for (i=0;i<nums;i++)
        if (CPU_ISSET(i,cpusetp)) n++;
      common->num_procs = n;
    }
#else
      common->num_procs = CPU_COUNT(sizeof(cpu_set_t),cpusetp);
    }
#endif

#endif
#endif

    if(common -> num_procs > MAX_CPUS) {
      fprintf(stderr, "\nOpenBLAS Warning : The number of CPU/Cores(%d) is beyond the limit(%d). Terminated.\n", common->num_procs, MAX_CPUS);
      exit(1);
    }

    for (cpu = 0; cpu < common -> num_procs; cpu++) common -> cpu_info[cpu] = cpu;

    numa_check();

    disable_hyperthread();

    if (common -> num_nodes > 1) numa_mapping();

    common -> final_num_procs = 0;
    for(i = 0; i < common -> avail_count; i++) common -> final_num_procs += rcount(common -> avail[i]) + 1; //Make the max cpu number.

    for (cpu = 0; cpu < common -> final_num_procs; cpu ++) common -> cpu_use[cpu] = 0;

    common -> magic = SH_MAGIC;
  }

  disable_affinity();

  num_avail = 0;
  for(i=0; i<lprocmask_count; i++) num_avail += popcount(lprocmask[i]);

  if ((numprocs <= 0) || (numprocs > num_avail)) numprocs = num_avail;

#ifdef DEBUG
  fprintf(stderr, "Number of threads = %d\n", numprocs);
#endif

  local_cpu_map();

  blas_unlock(&common -> lock);

#ifndef USE_OPENMP
  if (!disable_mapping) {

#ifdef DEBUG
    fprintf(stderr, "Giving Affinity[%3d] --> %3d\n", 0, cpu_mapping[0]);
#endif

    CPU_ZERO(&cpu_mask);
    CPU_SET (cpu_mapping[0], &cpu_mask);

    sched_setaffinity(0, sizeof(cpu_mask), &cpu_mask);

    node_mapping[WhereAmI()] = READ_NODE(common -> cpu_info[cpu_sub_mapping[0]]);

    setup_mempolicy();

    if (readenv_atoi("OPENBLAS_MAIN_FREE") || readenv_atoi("GOTOBLAS_MAIN_FREE")) {
      sched_setaffinity(0, sizeof(cpu_orig_mask), &cpu_orig_mask[0]);
    }
  }
#endif

#ifdef DEBUG
  fprintf(stderr, "Initialization is done.\n");
#endif
}
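/* Tear down: flag this process's entries in the shared CPU table, detach the
   shared memory segments, and remove the shared segment when this is the
   last process attached to it. */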
void gotoblas_affinity_quit(void) {

  int i;
  struct shmid_ds ds;

#ifdef DEBUG
  fprintf(stderr, "Terminating ..\n");
#endif

  if ((numprocs == 1) || (initialized == 0)) return;

  if (!disable_mapping) {

    blas_lock(&common -> lock);

    for (i = 0; i < numprocs; i ++) common -> cpu_use[cpu_mapping[i]] = -1;

    blas_unlock(&common -> lock);

  }

  shmctl(shmid, IPC_STAT, &ds);

  if (ds.shm_nattch == 1) shmctl(shmid, IPC_RMID, 0);

  shmdt(common);

  shmdt(paddr);

  initialized = 0;
}
#else

void gotoblas_affinity_init(void) {};

void gotoblas_set_affinity(int threads) {};

void gotoblas_set_affinity2(int threads) {};

void gotoblas_affinity_reschedule(void) {};

int get_num_procs(void) { return sysconf(_SC_NPROCESSORS_CONF); }

int get_num_nodes(void) { return 1; }

int get_node(void) { return 1;}

#endif