
memory.c 70 kB

Remove the need for most locking in memory.c.

Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver, since it does an allocation per invocation. Even without threading at all this helps, since even taking an uncontended lock has a cost.

Before this change, no threading:

```
----------------------------------------------------
Benchmark              Time           CPU Iterations
----------------------------------------------------
BM_SGEMM/4           102 ns        102 ns   13504412
BM_SGEMM/6           175 ns        175 ns    7997580
BM_SGEMM/8           205 ns        205 ns    6842073
BM_SGEMM/10          266 ns        266 ns    5294919
BM_SGEMM/16          478 ns        478 ns    2963441
BM_SGEMM/20          690 ns        690 ns    2144755
BM_SGEMM/32         1906 ns       1906 ns     716981
BM_SGEMM/40         2983 ns       2983 ns     473218
BM_SGEMM/64         9421 ns       9422 ns     148450
BM_SGEMM/72        12630 ns      12631 ns     112105
BM_SGEMM/80        15845 ns      15846 ns      89118
BM_SGEMM/90        25675 ns      25676 ns      54332
BM_SGEMM/100       29864 ns      29865 ns      47120
BM_SGEMM/112       37841 ns      37842 ns      36717
BM_SGEMM/128       56531 ns      56532 ns      25361
BM_SGEMM/140       75886 ns      75888 ns      18143
BM_SGEMM/150       98493 ns      98496 ns      14299
BM_SGEMM/160      102620 ns     102622 ns      13381
BM_SGEMM/170      135169 ns     135173 ns      10231
BM_SGEMM/180      146170 ns     146172 ns       9535
BM_SGEMM/189      190226 ns     190231 ns       7397
BM_SGEMM/200      194513 ns     194519 ns       7210
BM_SGEMM/256      396561 ns     396573 ns       3531
```

With this change:

```
----------------------------------------------------
Benchmark              Time           CPU Iterations
----------------------------------------------------
BM_SGEMM/4            95 ns         95 ns   14500387
BM_SGEMM/6           166 ns        166 ns    8381763
BM_SGEMM/8           196 ns        196 ns    7277044
BM_SGEMM/10          256 ns        256 ns    5515721
BM_SGEMM/16          463 ns        463 ns    3025197
BM_SGEMM/20          636 ns        636 ns    2070213
BM_SGEMM/32         1885 ns       1885 ns     739444
BM_SGEMM/40         2969 ns       2969 ns     472152
BM_SGEMM/64         9371 ns       9372 ns     148932
BM_SGEMM/72        12431 ns      12431 ns     112919
BM_SGEMM/80        15615 ns      15616 ns      89978
BM_SGEMM/90        25397 ns      25398 ns      55041
BM_SGEMM/100       29445 ns      29446 ns      47540
BM_SGEMM/112       37530 ns      37531 ns      37286
BM_SGEMM/128       55373 ns      55375 ns      25277
BM_SGEMM/140       76241 ns      76241 ns      18259
BM_SGEMM/150      102196 ns     102200 ns      13736
BM_SGEMM/160      101521 ns     101525 ns      13556
BM_SGEMM/170      136182 ns     136184 ns      10567
BM_SGEMM/180      146861 ns     146864 ns       9035
BM_SGEMM/189      192632 ns     192632 ns       7231
BM_SGEMM/200      198547 ns     198555 ns       6995
BM_SGEMM/256      392316 ns     392330 ns       3539
```

Before, when built with USE_THREAD=1 and GEMM_MULTITHREAD_THRESHOLD=4, the cost of small matrix operations was dominated by thread locking (look at the sizes smaller than 32) even when no threads were explicitly spawned:

```
----------------------------------------------------
Benchmark              Time           CPU Iterations
----------------------------------------------------
BM_SGEMM/4           328 ns        328 ns    4170562
BM_SGEMM/6           396 ns        396 ns    3536400
BM_SGEMM/8           418 ns        418 ns    3330102
BM_SGEMM/10          491 ns        491 ns    2863047
BM_SGEMM/16          710 ns        710 ns    2028314
BM_SGEMM/20          871 ns        871 ns    1581546
BM_SGEMM/32         2132 ns       2132 ns     657089
BM_SGEMM/40         3197 ns       3196 ns     437969
BM_SGEMM/64         9645 ns       9645 ns     144987
BM_SGEMM/72        35064 ns      32881 ns      50264
BM_SGEMM/80        37661 ns      35787 ns      42080
BM_SGEMM/90        36507 ns      36077 ns      40091
BM_SGEMM/100       32513 ns      31850 ns      48607
BM_SGEMM/112       41742 ns      41207 ns      37273
BM_SGEMM/128       67211 ns      65095 ns      21933
BM_SGEMM/140       68263 ns      67943 ns      19245
BM_SGEMM/150      121854 ns     115439 ns      10660
BM_SGEMM/160      116826 ns     115539 ns      10000
BM_SGEMM/170      126566 ns     122798 ns      11960
BM_SGEMM/180      130088 ns     127292 ns      11503
BM_SGEMM/189      120309 ns     116634 ns      13162
BM_SGEMM/200      114559 ns     110993 ns      10000
BM_SGEMM/256      217063 ns     207806 ns       6417
```

And after this change, that overhead is gone (note these numbers include my other change, which reduces calls to num_cpu_avail):

```
----------------------------------------------------
Benchmark              Time           CPU Iterations
----------------------------------------------------
BM_SGEMM/4            95 ns         95 ns   12347650
BM_SGEMM/6           166 ns        166 ns    8259683
BM_SGEMM/8           193 ns        193 ns    7162210
BM_SGEMM/10          258 ns        258 ns    5415657
BM_SGEMM/16          471 ns        471 ns    2981009
BM_SGEMM/20          666 ns        666 ns    2148002
BM_SGEMM/32         1903 ns       1903 ns     738245
BM_SGEMM/40         2969 ns       2969 ns     473239
BM_SGEMM/64         9440 ns       9440 ns     148442
BM_SGEMM/72        37239 ns      33330 ns      46813
BM_SGEMM/80        57350 ns      55949 ns      32251
BM_SGEMM/90        36275 ns      36249 ns      42259
BM_SGEMM/100       31111 ns      31008 ns      45270
BM_SGEMM/112       43782 ns      40912 ns      34749
BM_SGEMM/128       67375 ns      64406 ns      22443
BM_SGEMM/140       76389 ns      67003 ns      21430
BM_SGEMM/150       72952 ns      71830 ns      19793
BM_SGEMM/160       97039 ns      96858 ns      11498
BM_SGEMM/170      123272 ns     122007 ns      11855
BM_SGEMM/180      126828 ns     126505 ns      11567
BM_SGEMM/189      115179 ns     114665 ns      11044
BM_SGEMM/200       89289 ns      87259 ns      16147
BM_SGEMM/256      226252 ns     222677 ns       7375
```

I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than its neighbors; we must be hitting some optimal cache size or something.
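To make the idea concrete, here is a minimal sketch of the approach, not the actual memory.c implementation: instead of guarding a global allocation table with a mutex, each thread keeps its own table in thread local storage, so allocating and freeing a buffer never takes a lock. The names `alloc_entry`, `local_table`, `tls_memory_alloc`, and `tls_memory_free` are illustrative only, and C11 `_Thread_local` is assumed:

```c
/* Illustrative sketch only; not the real OpenBLAS memory.c code. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_ALLOCATIONS 64

/* One slot per outstanding allocation owned by this thread. */
struct alloc_entry {
  void *addr;
  int   used;
};

/* Each thread gets its own table. No other thread can ever touch it,
 * so finding, claiming, and releasing a slot needs no mutex at all. */
static _Thread_local struct alloc_entry local_table[MAX_ALLOCATIONS];

/* Hand out a buffer and record it in the caller's thread-local table. */
static void *tls_memory_alloc(size_t bytes) {
  for (int i = 0; i < MAX_ALLOCATIONS; i++) {
    if (!local_table[i].used) {
      local_table[i].addr = malloc(bytes);
      local_table[i].used = (local_table[i].addr != NULL);
      return local_table[i].addr;
    }
  }
  return NULL; /* table full */
}

/* Release a buffer previously returned by tls_memory_alloc. */
static void tls_memory_free(void *addr) {
  for (int i = 0; i < MAX_ALLOCATIONS; i++) {
    if (local_table[i].used && local_table[i].addr == addr) {
      free(addr);
      local_table[i].used = 0;
      return;
    }
  }
}

int main(void) {
  /* One allocation per invocation, as the gemm driver does. */
  void *buf = tls_memory_alloc(1024);
  printf("allocated %p without taking any lock\n", buf);
  tls_memory_free(buf);
  return 0;
}
```

In the locked design, every call would acquire and release a global mutex around the table scan even with a single thread, which is exactly the uncontended-lock cost the first pair of benchmark tables measures.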
7 years ago
7 years ago
7 years ago
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than it's neighbors, we must be hitting some optimal cache size or something.
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than it's neighbors, we must be hitting some optimal cache size or something.
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than it's neighbors, we must be hitting some optimal cache size or something.
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than it's neighbors, we must be hitting some optimal cache size or something.
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than it's neighbors, we must be hitting some optimal cache size or something.
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than it's neighbors, we must be hitting some optimal cache size or something.
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than it's neighbors, we must be hitting some optimal cache size or something.
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than it's neighbors, we must be hitting some optimal cache size or something.
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than it's neighbors, we must be hitting some optimal cache size or something.
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than it's neighbors, we must be hitting some optimal cache size or something.
7 years ago
Remove the need for most locking in memory.c. Using thread local storage for tracking memory allocations means that threads no longer have to lock at all when doing memory allocations / frees. This particularly helps the gemm driver since it does an allocation per invocation. Even without threading at all, this helps, since even calling a lock with no contention has a cost: Before this change, no threading: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 102 ns 102 ns 13504412 BM_SGEMM/6 175 ns 175 ns 7997580 BM_SGEMM/8 205 ns 205 ns 6842073 BM_SGEMM/10 266 ns 266 ns 5294919 BM_SGEMM/16 478 ns 478 ns 2963441 BM_SGEMM/20 690 ns 690 ns 2144755 BM_SGEMM/32 1906 ns 1906 ns 716981 BM_SGEMM/40 2983 ns 2983 ns 473218 BM_SGEMM/64 9421 ns 9422 ns 148450 BM_SGEMM/72 12630 ns 12631 ns 112105 BM_SGEMM/80 15845 ns 15846 ns 89118 BM_SGEMM/90 25675 ns 25676 ns 54332 BM_SGEMM/100 29864 ns 29865 ns 47120 BM_SGEMM/112 37841 ns 37842 ns 36717 BM_SGEMM/128 56531 ns 56532 ns 25361 BM_SGEMM/140 75886 ns 75888 ns 18143 BM_SGEMM/150 98493 ns 98496 ns 14299 BM_SGEMM/160 102620 ns 102622 ns 13381 BM_SGEMM/170 135169 ns 135173 ns 10231 BM_SGEMM/180 146170 ns 146172 ns 9535 BM_SGEMM/189 190226 ns 190231 ns 7397 BM_SGEMM/200 194513 ns 194519 ns 7210 BM_SGEMM/256 396561 ns 396573 ns 3531 ``` with this change: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 14500387 BM_SGEMM/6 166 ns 166 ns 8381763 BM_SGEMM/8 196 ns 196 ns 7277044 BM_SGEMM/10 256 ns 256 ns 5515721 BM_SGEMM/16 463 ns 463 ns 3025197 BM_SGEMM/20 636 ns 636 ns 2070213 BM_SGEMM/32 1885 ns 1885 ns 739444 BM_SGEMM/40 2969 ns 2969 ns 472152 BM_SGEMM/64 9371 ns 9372 ns 148932 BM_SGEMM/72 12431 ns 12431 ns 112919 BM_SGEMM/80 15615 ns 15616 ns 89978 BM_SGEMM/90 25397 ns 25398 ns 55041 BM_SGEMM/100 29445 ns 29446 ns 47540 BM_SGEMM/112 37530 ns 37531 ns 37286 BM_SGEMM/128 55373 ns 55375 ns 25277 BM_SGEMM/140 76241 ns 76241 ns 18259 BM_SGEMM/150 102196 ns 102200 ns 13736 BM_SGEMM/160 101521 ns 101525 ns 13556 BM_SGEMM/170 136182 ns 136184 ns 10567 BM_SGEMM/180 146861 ns 146864 ns 9035 BM_SGEMM/189 192632 ns 192632 ns 7231 BM_SGEMM/200 198547 ns 198555 ns 6995 BM_SGEMM/256 392316 ns 392330 ns 3539 ``` Before, when built with USE_THREAD=1, GEMM_MULTITHREAD_THRESHOLD = 4, the cost of small matrix operations was overshadowed by thread locking (look smaller than 32) even when not explicitly spawning threads: ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 328 ns 328 ns 4170562 BM_SGEMM/6 396 ns 396 ns 3536400 BM_SGEMM/8 418 ns 418 ns 3330102 BM_SGEMM/10 491 ns 491 ns 2863047 BM_SGEMM/16 710 ns 710 ns 2028314 BM_SGEMM/20 871 ns 871 ns 1581546 BM_SGEMM/32 2132 ns 2132 ns 657089 BM_SGEMM/40 3197 ns 3196 ns 437969 BM_SGEMM/64 9645 ns 9645 ns 144987 BM_SGEMM/72 35064 ns 32881 ns 50264 BM_SGEMM/80 37661 ns 35787 ns 42080 BM_SGEMM/90 36507 ns 36077 ns 40091 BM_SGEMM/100 32513 ns 31850 ns 48607 BM_SGEMM/112 41742 ns 41207 ns 37273 BM_SGEMM/128 67211 ns 65095 ns 21933 BM_SGEMM/140 68263 ns 67943 ns 19245 BM_SGEMM/150 121854 ns 115439 ns 10660 BM_SGEMM/160 116826 ns 115539 ns 10000 BM_SGEMM/170 126566 ns 122798 ns 11960 BM_SGEMM/180 130088 ns 127292 ns 11503 BM_SGEMM/189 120309 ns 116634 ns 13162 BM_SGEMM/200 114559 ns 110993 ns 10000 BM_SGEMM/256 217063 ns 207806 ns 
6417 ``` and after, it's gone (note this includes my other change which reduces calls to num_cpu_avail): ``` ---------------------------------------------------- Benchmark Time CPU Iterations ---------------------------------------------------- BM_SGEMM/4 95 ns 95 ns 12347650 BM_SGEMM/6 166 ns 166 ns 8259683 BM_SGEMM/8 193 ns 193 ns 7162210 BM_SGEMM/10 258 ns 258 ns 5415657 BM_SGEMM/16 471 ns 471 ns 2981009 BM_SGEMM/20 666 ns 666 ns 2148002 BM_SGEMM/32 1903 ns 1903 ns 738245 BM_SGEMM/40 2969 ns 2969 ns 473239 BM_SGEMM/64 9440 ns 9440 ns 148442 BM_SGEMM/72 37239 ns 33330 ns 46813 BM_SGEMM/80 57350 ns 55949 ns 32251 BM_SGEMM/90 36275 ns 36249 ns 42259 BM_SGEMM/100 31111 ns 31008 ns 45270 BM_SGEMM/112 43782 ns 40912 ns 34749 BM_SGEMM/128 67375 ns 64406 ns 22443 BM_SGEMM/140 76389 ns 67003 ns 21430 BM_SGEMM/150 72952 ns 71830 ns 19793 BM_SGEMM/160 97039 ns 96858 ns 11498 BM_SGEMM/170 123272 ns 122007 ns 11855 BM_SGEMM/180 126828 ns 126505 ns 11567 BM_SGEMM/189 115179 ns 114665 ns 11044 BM_SGEMM/200 89289 ns 87259 ns 16147 BM_SGEMM/256 226252 ns 222677 ns 7375 ``` I've also tested this with ThreadSanitizer and found no data races during execution. I'm not sure why 200 is always faster than its neighbors; we must be hitting some optimal cache size or something.
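The core of the change, in miniature: each thread keeps its own table of buffer slots behind a thread-local-storage key, so buffer lookup and release touch only thread-private state and need no global lock. The sketch below is illustrative only (the names `demo_key`, `demo_get_table` and `DEMO_NUM_BUFFERS` are invented for the example, not taken from memory.c); it shows the pthread thread-specific-data pattern the new code relies on, not the actual OpenBLAS implementation.
```
#include <pthread.h>
#include <stdlib.h>

#define DEMO_NUM_BUFFERS 8          /* hypothetical slot count, for the sketch only */

static pthread_key_t  demo_key;     /* one TLS key, created once per process */
static pthread_once_t demo_once = PTHREAD_ONCE_INIT;

static void demo_make_key(void) {
  /* free() is registered as the destructor, so a thread's table is
     released automatically when that thread exits. */
  pthread_key_create(&demo_key, free);
}

/* Return this thread's private slot table, creating it on first use.
   No lock is needed: every thread only ever touches its own table. */
static void **demo_get_table(void) {
  pthread_once(&demo_once, demo_make_key);
  void **table = pthread_getspecific(demo_key);
  if (table == NULL) {
    table = calloc(DEMO_NUM_BUFFERS, sizeof(void *));
    pthread_setspecific(demo_key, table);
  }
  return table;
}
```
The real get_memory_table() further down follows the same shape, except that it also supports Windows TLS (TlsGetValue/TlsSetValue) and only locks around the bookkeeping of the TLS key itself, never around the buffers.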
/*****************************************************************************
Copyright (c) 2011-2014, The OpenBLAS Project
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in
   the documentation and/or other materials provided with the
   distribution.

3. Neither the name of the OpenBLAS project nor the names of
   its contributors may be used to endorse or promote products
   derived from this software without specific prior written
   permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************************/

/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin.           */
/* All rights reserved.                                              */
/*                                                                   */
/* Redistribution and use in source and binary forms, with or        */
/* without modification, are permitted provided that the following   */
/* conditions are met:                                               */
/*                                                                   */
/*   1. Redistributions of source code must retain the above         */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer.                                                  */
/*                                                                   */
/*   2. Redistributions in binary form must reproduce the above      */
/*      copyright notice, this list of conditions and the following  */
/*      disclaimer in the documentation and/or other materials       */
/*      provided with the distribution.                              */
/*                                                                   */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT           */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,           */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF          */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE          */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT          */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,        */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES          */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE         */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR              */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF        */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT         */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT        */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE               */
/* POSSIBILITY OF SUCH DAMAGE.                                       */
/*                                                                   */
/* The views and conclusions contained in the software and           */
/* documentation are those of the authors and should not be          */
/* interpreted as representing official policies, either expressed   */
/* or implied, of The University of Texas at Austin.                 */
/*********************************************************************/
//#undef DEBUG

#include "common.h"

#if defined(USE_TLS) && defined(SMP)
#define COMPILE_TLS

#if USE_TLS != 1
#undef COMPILE_TLS
#endif

#if defined(__GLIBC_PREREQ)
#if !__GLIBC_PREREQ(2,20)
#undef COMPILE_TLS
#endif
#endif
#endif

#if defined(COMPILE_TLS)

#include <errno.h>

#if defined(OS_WINDOWS) && !defined(OS_CYGWIN_NT)
#define ALLOC_WINDOWS
#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif
#else
#define ALLOC_MMAP
#define ALLOC_MALLOC
#endif

#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>

#if !defined(OS_WINDOWS) || defined(OS_CYGWIN_NT)
#include <sys/mman.h>
#ifndef NO_SYSV_IPC
#include <sys/shm.h>
#endif
#include <sys/ipc.h>
#endif

#include <sys/types.h>

#ifdef OS_LINUX
#include <sys/sysinfo.h>
#include <sched.h>
#include <errno.h>
#include <linux/unistd.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/resource.h>
#endif

#ifdef OS_HAIKU
#include <unistd.h>
#endif

#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN)
#include <sys/sysctl.h>
#include <sys/resource.h>
#endif

#if defined(OS_WINDOWS) && (defined(__MINGW32__) || defined(__MINGW64__))
#include <conio.h>
#undef printf
#define printf _cprintf
#endif

#ifdef OS_LINUX
#ifndef MPOL_PREFERRED
#define MPOL_PREFERRED 1
#endif
#endif

#if (defined(PPC440) || !defined(OS_LINUX) || defined(HPL)) && !defined(NO_WARMUP)
#define NO_WARMUP
#endif

#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000
#endif

#ifndef FIXED_PAGESIZE
#define FIXED_PAGESIZE 4096
#endif

#define BITMASK(a, b, c) ((((a) >> (b)) & (c)))

#if defined(_MSC_VER) && !defined(__clang__)
#define CONSTRUCTOR __cdecl
#define DESTRUCTOR __cdecl
#elif (defined(OS_DARWIN) || defined(OS_SUNOS)) && defined(C_GCC)
#define CONSTRUCTOR __attribute__ ((constructor))
#define DESTRUCTOR __attribute__ ((destructor))
#elif __GNUC__ && INIT_PRIORITY && ((GCC_VERSION >= 40300) || (CLANG_VERSION >= 20900))
#define CONSTRUCTOR __attribute__ ((constructor(101)))
#define DESTRUCTOR __attribute__ ((destructor(101)))
#else
#define CONSTRUCTOR __attribute__ ((constructor))
#define DESTRUCTOR __attribute__ ((destructor))
#endif
#ifdef DYNAMIC_ARCH
gotoblas_t *gotoblas = NULL;
#endif

extern void openblas_warning(int verbose, const char * msg);

#ifndef SMP

#define blas_cpu_number 1
#define blas_num_threads 1

/* Dummy Function */
int goto_get_num_procs (void) { return 1;};
void goto_set_num_threads(int num_threads) {};

#else

#if defined(OS_LINUX) || defined(OS_SUNOS) || defined(OS_NETBSD)
#ifndef NO_AFFINITY
int get_num_procs(void);
#else
int get_num_procs(void) {
  static int nums = 0;
  cpu_set_t *cpusetp;
  size_t size;
  int ret;
  int i,n;

  if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
#if !defined(OS_LINUX)
  return nums;
#endif

#if !defined(__GLIBC_PREREQ)
  return nums;
#else
#if !__GLIBC_PREREQ(2, 3)
  return nums;
#endif

#if !__GLIBC_PREREQ(2, 7)
  ret = sched_getaffinity(0,sizeof(cpu_set_t), cpusetp);
  if (ret!=0) return nums;
  n=0;
#if !__GLIBC_PREREQ(2, 6)
  for (i=0;i<nums;i++)
    if (CPU_ISSET(i,cpusetp)) n++;
  nums=n;
#else
  nums = CPU_COUNT(sizeof(cpu_set_t),cpusetp);
#endif
  return nums;
#else
  cpusetp = CPU_ALLOC(nums);
  if (cpusetp == NULL) return nums;
  size = CPU_ALLOC_SIZE(nums);
  ret = sched_getaffinity(0,size,cpusetp);
  if (ret!=0) return nums;
  ret = CPU_COUNT_S(size,cpusetp);
  if (ret > 0 && ret < nums) nums = ret;
  CPU_FREE(cpusetp);
  return nums;
#endif
#endif
}
#endif
#endif

#ifdef OS_ANDROID
int get_num_procs(void) {
  static int nums = 0;
  if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
  return nums;
}
#endif

#ifdef OS_HAIKU
int get_num_procs(void) {
  static int nums = 0;
  if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
  return nums;
}
#endif

#ifdef OS_AIX
int get_num_procs(void) {
  static int nums = 0;
  if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
  return nums;
}
#endif

#ifdef OS_WINDOWS
int get_num_procs(void) {
  static int nums = 0;
  if (nums == 0) {
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);
    nums = sysinfo.dwNumberOfProcessors;
  }
  return nums;
}
#endif

#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY)
int get_num_procs(void) {
  static int nums = 0;
  int m[2];
  size_t len;
  if (nums == 0) {
    m[0] = CTL_HW;
    m[1] = HW_NCPU;
    len = sizeof(int);
    sysctl(m, 2, &nums, &len, NULL, 0);
  }
  return nums;
}
#endif

#if defined(OS_DARWIN)
int get_num_procs(void) {
  static int nums = 0;
  size_t len;
  if (nums == 0){
    len = sizeof(int);
    sysctlbyname("hw.physicalcpu", &nums, &len, NULL, 0);
  }
  return nums;
}
/*
void set_stack_limit(int limitMB){
  int result=0;
  struct rlimit rl;
  rlim_t StackSize;

  StackSize=limitMB*1024*1024;
  result=getrlimit(RLIMIT_STACK, &rl);
  if(result==0){
    if(rl.rlim_cur < StackSize){
      rl.rlim_cur=StackSize;
      result=setrlimit(RLIMIT_STACK, &rl);
      if(result !=0){
        fprintf(stderr, "OpenBLAS: set stack limit error =%d\n", result);
      }
    }
  }
}
*/
#endif
/*
OpenBLAS uses the number of CPU cores for multithreading.
It can be set by openblas_set_num_threads(int num_threads);
*/
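/* Illustrative use (caller side, not part of this file): calling
   openblas_set_num_threads(4) before the first BLAS call limits the pool
   to four threads, and openblas_get_num_threads() reports the value
   currently in effect. */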
int blas_cpu_number  = 0;
/*
The number of threads in the thread pool.
This value is equal to or larger than blas_cpu_number; the extra threads are sleeping.
*/
int blas_num_threads = 0;

int goto_get_num_procs (void) {
  return blas_cpu_number;
}

static void blas_memory_init();

void openblas_fork_handler()
{
  // This handler shuts down the OpenBLAS-managed PTHREAD pool when OpenBLAS is
  // built with "make USE_OPENMP=0".
  // Hanging can still happen when OpenBLAS is built against the libgomp
  // implementation of OpenMP. The problem is tracked at:
  //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60035
  // In the meantime build with USE_OPENMP=0 or link against another
  // implementation of OpenMP.
#if !((defined(OS_WINDOWS) && !defined(OS_CYGWIN_NT)) || defined(OS_ANDROID)) && defined(SMP_SERVER)
  int err;
  err = pthread_atfork ((void (*)(void)) BLASFUNC(blas_thread_shutdown), NULL, blas_memory_init);
  if(err != 0)
    openblas_warning(0, "OpenBLAS Warning ... cannot install fork handler. You may meet hang after fork.\n");
#endif
}

extern int openblas_num_threads_env();
extern int openblas_goto_num_threads_env();
extern int openblas_omp_num_threads_env();

int blas_get_cpu_number(void){
#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID)
  int max_num;
#endif
  int blas_goto_num = 0;
  int blas_omp_num  = 0;

  if (blas_num_threads) return blas_num_threads;

#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID)
  max_num = get_num_procs();
#endif

  // blas_goto_num = 0;
#ifndef USE_OPENMP_UNUSED
  blas_goto_num=openblas_num_threads_env();
  if (blas_goto_num < 0) blas_goto_num = 0;
  if (blas_goto_num == 0) {
    blas_goto_num=openblas_goto_num_threads_env();
    if (blas_goto_num < 0) blas_goto_num = 0;
  }
#endif

  // blas_omp_num = 0;
  blas_omp_num=openblas_omp_num_threads_env();
  if (blas_omp_num < 0) blas_omp_num = 0;

  if (blas_goto_num > 0) blas_num_threads = blas_goto_num;
  else if (blas_omp_num > 0) blas_num_threads = blas_omp_num;
  else blas_num_threads = MAX_CPU_NUMBER;

#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID)
  if (blas_num_threads > max_num) blas_num_threads = max_num;
#endif

  if (blas_num_threads > MAX_CPU_NUMBER) blas_num_threads = MAX_CPU_NUMBER;

#ifdef DEBUG
  printf( "Adjusted number of threads : %3d\n", blas_num_threads);
#endif

  blas_cpu_number = blas_num_threads;

  return blas_num_threads;
}
#endif
int openblas_get_num_procs(void) {
#ifndef SMP
  return 1;
#else
  return get_num_procs();
#endif
}

int openblas_get_num_threads(void) {
#ifndef SMP
  return 1;
#else
  // init blas_cpu_number if needed
  blas_get_cpu_number();
  return blas_cpu_number;
#endif
}

int hugetlb_allocated = 0;

#if defined(OS_WINDOWS)
#define LIKELY_ONE(x) (x)
#else
#define LIKELY_ONE(x) (__builtin_expect(x, 1))
#endif
/* Stores information about the allocation and how to release it */
struct alloc_t {
  /* Whether this allocation is being used */
  int used;
  /* Any special attributes needed when releasing this allocation */
  int attr;
  /* Function that can properly release this memory */
  void (*release_func)(struct alloc_t *);
  /* Pad to 64-byte alignment */
  char pad[64 - 2 * sizeof(int) - sizeof(void (*)(struct alloc_t *))];
};

/* Convenience macros for storing release funcs */
#define STORE_RELEASE_FUNC(address, func)                    \
  if (address != (void *)-1) {                               \
    struct alloc_t *alloc_info = (struct alloc_t *)address;  \
    alloc_info->release_func = func;                         \
  }

#define STORE_RELEASE_FUNC_WITH_ATTR(address, func, attr)    \
  if (address != (void *)-1) {                               \
    struct alloc_t *alloc_info = (struct alloc_t *)address;  \
    alloc_info->release_func = func;                         \
    alloc_info->attr = attr;                                 \
  }

/* The number of bytes that will be allocated for each buffer. When allocating
   memory, we store an alloc_t followed by the actual buffer memory. This means
   that each allocation always has its associated alloc_t, without the need
   for an auxiliary tracking structure. */
static const int allocation_block_size = BUFFER_SIZE + sizeof(struct alloc_t);
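/* Resulting layout of one block (sketch): the alloc_t header occupies the
   first sizeof(struct alloc_t) bytes of the mapping and the usable buffer
   follows directly behind it, so the caller-visible pointer is
   (char *)alloc_info + sizeof(struct alloc_t), and the header can always be
   recovered again by subtracting the same offset. */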
#if defined(SMP)
#  if defined(OS_WINDOWS)
static DWORD local_storage_key = 0;
DWORD lsk;
#  else
static pthread_key_t local_storage_key = 0;
pthread_key_t lsk;
#  endif /* defined(OS_WINDOWS) */
#endif /* defined(SMP) */

#if defined(OS_LINUX) && !defined(NO_WARMUP)
static int hot_alloc = 0;
#endif

/* Global lock for memory allocation */
#if   defined(USE_PTHREAD_LOCK)
static pthread_mutex_t    alloc_lock = PTHREAD_MUTEX_INITIALIZER;
#elif defined(USE_PTHREAD_SPINLOCK)
static pthread_spinlock_t alloc_lock = 0;
#else
static BLASULONG alloc_lock = 0UL;
#endif

#if   defined(USE_PTHREAD_LOCK)
static pthread_mutex_t    key_lock = PTHREAD_MUTEX_INITIALIZER;
#elif defined(USE_PTHREAD_SPINLOCK)
static pthread_spinlock_t key_lock = 0;
#else
static BLASULONG key_lock = 0UL;
#endif
/* Returns a pointer to the start of the per-thread memory allocation data */
static __inline struct alloc_t ** get_memory_table() {
#if defined(SMP)
  LOCK_COMMAND(&key_lock);
  lsk=local_storage_key;
  UNLOCK_COMMAND(&key_lock);
  if (!lsk) {
    blas_memory_init();
  }
#  if defined(OS_WINDOWS)
  struct alloc_t ** local_memory_table = (struct alloc_t **)TlsGetValue(local_storage_key);
#  else
  struct alloc_t ** local_memory_table = (struct alloc_t **)pthread_getspecific(local_storage_key);
#  endif /* defined(OS_WINDOWS) */
#else
  static struct alloc_t ** local_memory_table = NULL;
#endif /* defined(SMP) */
#if defined (SMP)
  LOCK_COMMAND(&key_lock);
  lsk=local_storage_key;
  UNLOCK_COMMAND(&key_lock);
  if (lsk && !local_memory_table) {
#else
  if (!local_memory_table) {
#endif /* defined(SMP) */
    local_memory_table = (struct alloc_t **)malloc(sizeof(struct alloc_t *) * NUM_BUFFERS);
    memset(local_memory_table, 0, sizeof(struct alloc_t *) * NUM_BUFFERS);
#if defined(SMP)
#  if defined(OS_WINDOWS)
    LOCK_COMMAND(&key_lock);
    TlsSetValue(local_storage_key, (void*)local_memory_table);
    UNLOCK_COMMAND(&key_lock);
#  else
    LOCK_COMMAND(&key_lock);
    pthread_setspecific(local_storage_key, (void*)local_memory_table);
    UNLOCK_COMMAND(&key_lock);
#  endif /* defined(OS_WINDOWS) */
#endif /* defined(SMP) */
  }
  return local_memory_table;
}
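/* Note: each thread therefore owns an independent NUM_BUFFERS-entry table of
   alloc_t pointers (an entry stays NULL until its slot has been mapped), so
   buffer lookup and reuse need no global lock; key_lock above only serializes
   creation and publication of the TLS key itself. */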
#ifdef ALLOC_MMAP

static void alloc_mmap_free(struct alloc_t *alloc_info){

  if (munmap(alloc_info, allocation_block_size)) {
    printf("OpenBLAS : munmap failed\n");
  }
}

#ifdef NO_WARMUP

static void *alloc_mmap(void *address){
  void *map_address;

  if (address){
    map_address = mmap(address,
                       allocation_block_size,
                       MMAP_ACCESS, MMAP_POLICY | MAP_FIXED, -1, 0);
  } else {
    map_address = mmap(address,
                       allocation_block_size,
                       MMAP_ACCESS, MMAP_POLICY, -1, 0);
  }

  STORE_RELEASE_FUNC(map_address, alloc_mmap_free);

#ifdef OS_LINUX
  my_mbind(map_address, allocation_block_size, MPOL_PREFERRED, NULL, 0, 0);
#endif

  return map_address;
}

#else

#define BENCH_ITERATION 4
#define SCALING 2
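/* run_bench() below times a pointer-chasing walk over `size` bytes (one
   dependent load per page) and returns the fastest of BENCH_ITERATION passes;
   the warmed-up alloc_mmap() uses it to pick, inside an over-sized mapping,
   the page-aligned region with the lowest access latency before trimming the
   rest away with munmap(). */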
static inline BLASULONG run_bench(BLASULONG address, BLASULONG size) {

  BLASULONG original, *p;
  BLASULONG start, stop, min;
  int iter, i, count;

  min = (BLASULONG)-1;

  original = *(BLASULONG *)(address + size - PAGESIZE);

  *(BLASULONG *)(address + size - PAGESIZE) = (BLASULONG)address;

  for (iter = 0; iter < BENCH_ITERATION; iter ++ ) {

    p = (BLASULONG *)address;

    count = size / PAGESIZE;

    start = rpcc();

    for (i = 0; i < count; i ++) {
      p = (BLASULONG *)(*p);
    }

    stop = rpcc();

    if (min > stop - start) min = stop - start;
  }

  *(BLASULONG *)(address + size - PAGESIZE + 0) = original;
  *(BLASULONG *)(address + size - PAGESIZE + 8) = (BLASULONG)p;

  return min;
}

static void *alloc_mmap(void *address){
  void *map_address, *best_address;
  BLASULONG best, start, current, original;
  BLASULONG allocsize;

  if (address){
    /* Just give up use advanced operation */
    map_address = mmap(address, allocation_block_size, MMAP_ACCESS, MMAP_POLICY | MAP_FIXED, -1, 0);
#ifdef OS_LINUX
    my_mbind(map_address, allocation_block_size, MPOL_PREFERRED, NULL, 0, 0);
#endif
  } else {
#if defined(OS_LINUX) && !defined(NO_WARMUP)
    if (hot_alloc == 0) {
      map_address = mmap(NULL, allocation_block_size, MMAP_ACCESS, MMAP_POLICY, -1, 0);
#ifdef OS_LINUX
      my_mbind(map_address, allocation_block_size, MPOL_PREFERRED, NULL, 0, 0);
#endif
    } else {
#endif

    map_address = mmap(NULL, allocation_block_size * SCALING,
                       MMAP_ACCESS, MMAP_POLICY, -1, 0);

    if (map_address != (void *)-1) {

#ifdef OS_LINUX
#ifdef DEBUG
      int ret=0;
      ret=my_mbind(map_address, allocation_block_size * SCALING, MPOL_PREFERRED, NULL, 0, 0);
      if(ret==-1){
        int errsv=errno;
        perror("OpenBLAS alloc_mmap:");
        printf("error code=%d,\tmap_address=%lx\n",errsv,map_address);
      }
#else
      my_mbind(map_address, allocation_block_size * SCALING, MPOL_PREFERRED, NULL, 0, 0);
#endif
#endif

      allocsize = DGEMM_P * DGEMM_Q * sizeof(double);

      start    = (BLASULONG)map_address;
      current  = (SCALING - 1) * allocation_block_size;
      original = current;

      while(current > 0 && current <= original) {
        *(BLASLONG *)start = (BLASLONG)start + PAGESIZE;
        start   += PAGESIZE;
        current -= PAGESIZE;
      }

      *(BLASLONG *)(start - PAGESIZE) = (BLASULONG)map_address;

      start = (BLASULONG)map_address;

      best = (BLASULONG)-1;
      best_address = map_address;

      while ((start + allocsize < (BLASULONG)map_address + (SCALING - 1) * allocation_block_size)) {

        current = run_bench(start, allocsize);

        if (best > current) {
          best = current;
          best_address = (void *)start;
        }

        start += PAGESIZE;
      }

      if ((BLASULONG)best_address > (BLASULONG)map_address)
        munmap(map_address, (BLASULONG)best_address - (BLASULONG)map_address);

      munmap((void *)((BLASULONG)best_address + allocation_block_size), (SCALING - 1) * allocation_block_size + (BLASULONG)map_address - (BLASULONG)best_address);

      map_address = best_address;

#if defined(OS_LINUX) && !defined(NO_WARMUP)
      hot_alloc = 2;
#endif
    }
  }
#if defined(OS_LINUX) && !defined(NO_WARMUP)
  }
#endif

  STORE_RELEASE_FUNC(map_address, alloc_mmap_free);

  return map_address;
}

#endif

#endif
#ifdef ALLOC_MALLOC

static void alloc_malloc_free(struct alloc_t *alloc_info){

  free(alloc_info);
}

static void *alloc_malloc(void *address){
  void *map_address;

  map_address = (void *)malloc(allocation_block_size + FIXED_PAGESIZE);

  if (map_address == (void *)NULL) map_address = (void *)-1;

  STORE_RELEASE_FUNC(map_address, alloc_malloc_free);

  return map_address;
}

#endif

#ifdef ALLOC_QALLOC

void *qalloc(int flags, size_t bytes);
void *qfree (void *address);

#define QNONCACHE 0x1
#define QCOMMS    0x2
#define QFAST     0x4

static void alloc_qalloc_free(struct alloc_t *alloc_info){

  qfree(alloc_info);
}

static void *alloc_qalloc(void *address){
  void *map_address;

  map_address = (void *)qalloc(QCOMMS | QFAST, allocation_block_size + FIXED_PAGESIZE);

  if (map_address == (void *)NULL) map_address = (void *)-1;

  STORE_RELEASE_FUNC(map_address, alloc_qalloc_free);

  return (void *)(((BLASULONG)map_address + FIXED_PAGESIZE - 1) & ~(FIXED_PAGESIZE - 1));
}

#endif

#ifdef ALLOC_WINDOWS

static void alloc_windows_free(struct alloc_t *alloc_info){

  VirtualFree(alloc_info, allocation_block_size, MEM_DECOMMIT);
}

static void *alloc_windows(void *address){
  void *map_address;

  map_address = VirtualAlloc(address,
                             allocation_block_size,
                             MEM_RESERVE | MEM_COMMIT,
                             PAGE_READWRITE);

  if (map_address == (void *)NULL) map_address = (void *)-1;

  STORE_RELEASE_FUNC(map_address, alloc_windows_free);

  return map_address;
}

#endif
#ifdef ALLOC_DEVICEDRIVER
#ifndef DEVICEDRIVER_NAME
#define DEVICEDRIVER_NAME "/dev/mapper"
#endif

static void alloc_devicedirver_free(struct alloc_t *alloc_info){

  int attr = alloc_info -> attr;
  if (munmap(alloc_info, allocation_block_size)) {
    printf("OpenBLAS : Bugphysarea unmap failed.\n");
  }

  if (close(attr)) {
    printf("OpenBLAS : Bugphysarea close failed.\n");
  }
}

static void *alloc_devicedirver(void *address){

  int fd;
  void *map_address;

  if ((fd = open(DEVICEDRIVER_NAME, O_RDWR | O_SYNC)) < 0) {
    return (void *)-1;
  }

  map_address = mmap(address, allocation_block_size,
                     PROT_READ | PROT_WRITE,
                     MAP_FILE | MAP_SHARED,
                     fd, 0);

  STORE_RELEASE_FUNC_WITH_ATTR(map_address, alloc_devicedirver_free, fd);

  return map_address;
}

#endif
  661. #ifdef ALLOC_SHM
  662. static void alloc_shm_free(struct alloc_t *alloc_info){
  663. if (shmdt(alloc_info)) {
  664. printf("OpenBLAS : Shared memory unmap failed.\n");
  665. }
  666. }
  667. static void *alloc_shm(void *address){
  668. void *map_address;
  669. int shmid;
  670. shmid = shmget(IPC_PRIVATE, allocation_block_size,IPC_CREAT | 0600);
  671. map_address = (void *)shmat(shmid, address, 0);
  672. if (map_address != (void *)-1){
  673. #ifdef OS_LINUX
  674. my_mbind(map_address, allocation_block_size, MPOL_PREFERRED, NULL, 0, 0);
  675. #endif
  676. shmctl(shmid, IPC_RMID, 0);
  677. struct alloc_t *alloc_info = (struct alloc_t *)map_address;
  678. alloc_info->release_func = alloc_shm_free;
  679. alloc_info->attr = shmid;
  680. }
  681. return map_address;
  682. }
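/*
 * Note on alloc_shm: calling shmctl(IPC_RMID) right after a successful
 * shmat() marks the System V segment for deletion, so the kernel reclaims it
 * automatically once the last attachment is gone.  The segment therefore
 * cannot leak even if the process exits without calling the release function.
 */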
  683. #if defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS
  684. static void alloc_hugetlb_free(struct alloc_t *alloc_info){
  685. #if defined(OS_LINUX) || defined(OS_AIX)
  686. if (shmdt(alloc_info)) {
  687. printf("OpenBLAS : Hugepage unmap failed.\n");
  688. }
  689. #endif
  690. #ifdef __sun__
  691. munmap(alloc_info, allocation_block_size);
  692. #endif
  693. #ifdef OS_WINDOWS
  694. VirtualFree(alloc_info, allocation_block_size, MEM_LARGE_PAGES | MEM_DECOMMIT);
  695. #endif
  696. }
  697. static void *alloc_hugetlb(void *address){
  698. void *map_address = (void *)-1;
  699. #if defined(OS_LINUX) || defined(OS_AIX)
  700. int shmid;
  701. shmid = shmget(IPC_PRIVATE, allocation_block_size,
  702. #ifdef OS_LINUX
  703. SHM_HUGETLB |
  704. #endif
  705. #ifdef OS_AIX
  706. SHM_LGPAGE | SHM_PIN |
  707. #endif
  708. IPC_CREAT | SHM_R | SHM_W);
  709. if (shmid != -1) {
  710. map_address = (void *)shmat(shmid, address, SHM_RND);
  711. #ifdef OS_LINUX
  712. my_mbind(map_address, allocation_block_size, MPOL_PREFERRED, NULL, 0, 0);
  713. #endif
  714. if (map_address != (void *)-1){
  715. shmctl(shmid, IPC_RMID, 0);
  716. }
  717. }
  718. #endif
  719. #ifdef __sun__
  720. struct memcntl_mha mha;
  721. mha.mha_cmd = MHA_MAPSIZE_BSSBRK;
  722. mha.mha_flags = 0;
  723. mha.mha_pagesize = HUGE_PAGESIZE;
  724. memcntl(NULL, 0, MC_HAT_ADVISE, (char *)&mha, 0, 0);
725. map_address = (void *)memalign(HUGE_PAGESIZE, allocation_block_size);
  726. #endif
  727. #ifdef OS_WINDOWS
  728. HANDLE hToken;
  729. TOKEN_PRIVILEGES tp;
  730. if (OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &hToken) != TRUE) return (void *) -1;
  731. tp.PrivilegeCount = 1;
  732. tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
  733. if (LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &tp.Privileges[0].Luid) != TRUE) {
  734. CloseHandle(hToken);
  735. return (void*)-1;
  736. }
  737. if (AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL) != TRUE) {
  738. CloseHandle(hToken);
  739. return (void*)-1;
  740. }
  741. map_address = (void *)VirtualAlloc(address,
  742. allocation_block_size,
  743. MEM_LARGE_PAGES | MEM_RESERVE | MEM_COMMIT,
  744. PAGE_READWRITE);
  745. tp.Privileges[0].Attributes = 0;
  746. AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL);
  747. if (map_address == (void *)NULL) map_address = (void *)-1;
  748. #endif
  749. STORE_RELEASE_FUNC(map_address, alloc_hugetlb_free);
  750. return map_address;
  751. }
  752. #endif
  753. #endif
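/*
 * alloc_hugetlb summary: on Linux/AIX huge pages come from a SysV segment
 * created with SHM_HUGETLB / SHM_LGPAGE, on Solaris from memalign() after
 * advising the kernel via memcntl(MC_HAT_ADVISE), and on Windows from
 * VirtualAlloc(MEM_LARGE_PAGES), which first requires temporarily enabling
 * SeLockMemoryPrivilege on the process token.  Any failure simply leaves
 * map_address at (void *)-1, so blas_memory_alloc() falls through to the
 * next allocator in its list.
 */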
  754. #ifdef ALLOC_HUGETLBFILE
  755. static int hugetlb_pid = 0;
  756. static void alloc_hugetlbfile_free(struct alloc_t *alloc_info){
  757. int attr = alloc_info -> attr;
  758. if (munmap(alloc_info, allocation_block_size)) {
  759. printf("OpenBLAS : HugeTLBfs unmap failed.\n");
  760. }
  761. if (close(attr)) {
  762. printf("OpenBLAS : HugeTLBfs close failed.\n");
  763. }
  764. }
  765. static void *alloc_hugetlbfile(void *address){
  766. void *map_address = (void *)-1;
  767. int fd;
  768. char filename[64];
  769. if (!hugetlb_pid) hugetlb_pid = getpid();
  770. sprintf(filename, "%s/gotoblas.%d", HUGETLB_FILE_NAME, hugetlb_pid);
  771. if ((fd = open(filename, O_RDWR | O_CREAT, 0700)) < 0) {
  772. return (void *)-1;
  773. }
  774. unlink(filename);
  775. map_address = mmap(address, allocation_block_size,
  776. PROT_READ | PROT_WRITE,
  777. MAP_SHARED,
  778. fd, 0);
  779. STORE_RELEASE_FUNC_WITH_ATTR(map_address, alloc_hugetlbfile_free, fd);
  780. return map_address;
  781. }
  782. #endif
  783. #ifdef SEEK_ADDRESS
  784. static BLASULONG base_address = 0UL;
  785. #else
  786. static BLASULONG base_address = BASE_ADDRESS;
  787. #endif
  788. #if __STDC_VERSION__ >= 201112L
  789. static _Atomic int memory_initialized = 0;
  790. #else
  791. static volatile int memory_initialized = 0;
  792. #endif
  793. /* Memory allocation routine */
  794. /* procpos ... indicates where it comes from */
  795. /* 0 : Level 3 functions */
  796. /* 1 : Level 2 functions */
  797. /* 2 : Thread */
  798. static void blas_memory_cleanup(void* ptr){
  799. if (ptr) {
  800. struct alloc_t ** table = (struct alloc_t **)ptr;
  801. int pos;
  802. for (pos = 0; pos < NUM_BUFFERS; pos ++){
  803. struct alloc_t *alloc_info = table[pos];
  804. if (alloc_info) {
  805. alloc_info->release_func(alloc_info);
  806. table[pos] = (void *)0;
  807. }
  808. }
  809. free(table);
  810. }
  811. }
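/*
 * blas_memory_cleanup() is the destructor for the per-thread buffer table:
 * it releases every block the thread ever mapped by invoking the release
 * function stored in each block's alloc_t header, then frees the table
 * itself.  It runs automatically on thread exit (via the pthread key created
 * below) and is also called explicitly from blas_shutdown() and, on Windows,
 * from DllMain() on DLL_THREAD_DETACH.
 */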
  812. static void blas_memory_init(){
  813. #if defined(SMP)
  814. # if defined(OS_WINDOWS)
  815. local_storage_key = TlsAlloc();
  816. # else
  817. pthread_key_create(&local_storage_key, blas_memory_cleanup);
  818. # endif /* defined(OS_WINDOWS) */
  819. #endif /* defined(SMP) */
  820. }
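/*
 * blas_memory_init() runs once (guarded by memory_initialized below) and just
 * creates the thread-local-storage key.  On POSIX systems the key carries
 * blas_memory_cleanup as its destructor; TlsAlloc() on Windows has no such
 * callback, which is why the DllMain() handler further down performs the
 * per-thread cleanup on DLL_THREAD_DETACH instead.
 */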
  821. void *blas_memory_alloc(int procpos){
  822. int position;
  823. void *map_address;
  824. void *(*memoryalloc[])(void *address) = {
  825. #ifdef ALLOC_DEVICEDRIVER
  826. alloc_devicedirver,
  827. #endif
  828. /* Hugetlb implicitly assumes ALLOC_SHM */
  829. #ifdef ALLOC_SHM
  830. alloc_shm,
  831. #endif
  832. #if ((defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS))
  833. alloc_hugetlb,
  834. #endif
  835. #ifdef ALLOC_MMAP
  836. alloc_mmap,
  837. #endif
  838. #ifdef ALLOC_QALLOC
  839. alloc_qalloc,
  840. #endif
  841. #ifdef ALLOC_WINDOWS
  842. alloc_windows,
  843. #endif
  844. #ifdef ALLOC_MALLOC
  845. alloc_malloc,
  846. #endif
  847. NULL,
  848. };
  849. void *(**func)(void *address);
  850. struct alloc_t * alloc_info;
  851. struct alloc_t ** alloc_table;
  852. #if defined(SMP) && !defined(USE_OPENMP)
  853. int mi;
  854. LOCK_COMMAND(&alloc_lock);
  855. mi=memory_initialized;
  856. UNLOCK_COMMAND(&alloc_lock);
  857. if (!LIKELY_ONE(mi)) {
  858. #else
  859. if (!LIKELY_ONE(memory_initialized)) {
  860. #endif
  861. #if defined(SMP) && !defined(USE_OPENMP)
  862. /* Only allow a single thread to initialize memory system */
  863. LOCK_COMMAND(&alloc_lock);
  864. if (!memory_initialized) {
  865. #endif
  866. blas_memory_init();
  867. #ifdef DYNAMIC_ARCH
  868. gotoblas_dynamic_init();
  869. #endif
  870. #if defined(SMP) && defined(OS_LINUX) && !defined(NO_AFFINITY)
  871. gotoblas_affinity_init();
  872. #endif
  873. #ifdef SMP
  874. if (!blas_num_threads) blas_cpu_number = blas_get_cpu_number();
  875. #endif
  876. #if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_IA64) || defined(ARCH_MIPS64) || defined(ARCH_ARM64)
  877. #ifndef DYNAMIC_ARCH
  878. blas_set_parameter();
  879. #endif
  880. #endif
  881. memory_initialized = 1;
  882. #if defined(SMP) && !defined(USE_OPENMP)
  883. }
  884. UNLOCK_COMMAND(&alloc_lock);
  885. #endif
  886. }
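/*
 * The block above is a double-checked initialization: the flag is read first
 * (under alloc_lock when built for threading without OpenMP), and only a
 * thread that still sees it unset re-checks and performs the one-time setup
 * while holding the lock.  After this point the allocation fast path below
 * touches only the calling thread's own table and needs no lock at all.
 */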
  887. #ifdef DEBUG
  888. printf("Alloc Start ...\n");
  889. #endif
  890. position = 0;
  891. alloc_table = get_memory_table();
  892. do {
  893. if (!alloc_table[position] || !alloc_table[position]->used) goto allocation;
  894. position ++;
  895. } while (position < NUM_BUFFERS);
  896. goto error;
  897. allocation :
  898. #ifdef DEBUG
  899. printf(" Position -> %d\n", position);
  900. #endif
  901. alloc_info = alloc_table[position];
  902. if (!alloc_info) {
  903. do {
  904. #ifdef DEBUG
  905. printf("Allocation Start : %lx\n", base_address);
  906. #endif
  907. map_address = (void *)-1;
  908. func = &memoryalloc[0];
  909. while ((func != NULL) && (map_address == (void *) -1)) {
  910. map_address = (*func)((void *)base_address);
  911. #ifdef ALLOC_DEVICEDRIVER
  912. if ((*func == alloc_devicedirver) && (map_address == (void *)-1)) {
  913. fprintf(stderr, "OpenBLAS Warning ... Physically contiguous allocation failed.\n");
  914. }
  915. #endif
  916. #ifdef ALLOC_HUGETLBFILE
  917. if ((*func == alloc_hugetlbfile) && (map_address == (void *)-1)) {
  918. #ifndef OS_WINDOWS
  919. fprintf(stderr, "OpenBLAS Warning ... HugeTLB(File) allocation failed.\n");
  920. #endif
  921. }
  922. #endif
  923. #if (defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)
  924. if ((*func == alloc_hugetlb) && (map_address != (void *)-1)) hugetlb_allocated = 1;
  925. #endif
  926. func ++;
  927. }
  928. #ifdef DEBUG
  929. printf(" Success -> %08lx\n", map_address);
  930. #endif
  931. if (((BLASLONG) map_address) == -1) base_address = 0UL;
  932. if (base_address) base_address += allocation_block_size + FIXED_PAGESIZE;
  933. } while ((BLASLONG)map_address == -1);
  934. alloc_table[position] = alloc_info = map_address;
  935. #ifdef DEBUG
  936. printf(" Mapping Succeeded. %p(%d)\n", (void *)alloc_info, position);
  937. #endif
  938. }
  939. #ifdef DEBUG
  940. printf("Mapped : %p %3d\n\n", (void *)alloc_info, position);
  941. #endif
  942. alloc_info->used = 1;
  943. return (void *)(((char *)alloc_info) + sizeof(struct alloc_t));
  944. error:
  945. printf("OpenBLAS : Program will terminate because you tried to allocate too many memory regions.\n");
  946. return NULL;
  947. }
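/*
 * Illustrative sketch of how callers are expected to use this pair (the
 * workspace use shown is hypothetical):
 *
 *   void *buffer = blas_memory_alloc(0);   // claim or map a free slot in
 *                                          // this thread's table, no locking
 *   ...use buffer as workspace...
 *   blas_memory_free(buffer);              // just clears the slot's used flag
 *
 * The pointer handed back is offset by sizeof(struct alloc_t) so the header
 * stays in front of the usable area; blas_memory_free() undoes that offset.
 */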
  948. void blas_memory_free(void *buffer){
  949. #ifdef DEBUG
  950. int position;
  951. struct alloc_t ** alloc_table;
  952. #endif
  953. /* Since we passed an offset pointer to the caller, get back to the actual allocation */
  954. struct alloc_t *alloc_info = (void *)(((char *)buffer) - sizeof(struct alloc_t));
  955. #ifdef DEBUG
  956. printf("Unmapped Start : %p ...\n", alloc_info);
  957. #endif
  958. alloc_info->used = 0;
  959. #ifdef DEBUG
  960. printf("Unmap Succeeded.\n\n");
  961. #endif
  962. return;
  963. #ifdef DEBUG
  964. alloc_table = get_memory_table();
  965. for (position = 0; position < NUM_BUFFERS; position++){
  966. if (alloc_table[position]) {
  967. printf("%4ld %p : %d\n", position, alloc_table[position], alloc_table[position]->used);
  968. }
  969. }
  970. #endif
  971. return;
  972. }
  973. void *blas_memory_alloc_nolock(int unused) {
  974. void *map_address;
  975. map_address = (void *)malloc(BUFFER_SIZE + FIXED_PAGESIZE);
  976. return map_address;
  977. }
  978. void blas_memory_free_nolock(void * map_address) {
  979. free(map_address);
  980. }
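/*
 * The *_nolock variants bypass the buffer table entirely and hand out a plain
 * malloc()ed region; presumably they exist for callers that need scratch
 * memory outside the normal per-thread buffer bookkeeping.
 */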
  981. void blas_shutdown(void){
  982. #ifdef SMP
  983. BLASFUNC(blas_thread_shutdown)();
  984. #endif
  985. #ifdef SMP
986. /* Only clean up if we were built for threading and TLS was initialized */
  987. if (local_storage_key)
  988. #endif
  989. blas_memory_cleanup((void*)get_memory_table());
  990. #ifdef SEEK_ADDRESS
  991. base_address = 0UL;
  992. #else
  993. base_address = BASE_ADDRESS;
  994. #endif
  995. return;
  996. }
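/*
 * Note that blas_shutdown() can only release the calling thread's buffer
 * table here; tables belonging to other threads are released by the TLS key
 * destructor (or by DllMain on Windows) when those threads exit.
 */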
  997. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  998. #ifdef SMP
  999. #if defined(USE_PTHREAD_LOCK)
  1000. static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
  1001. #elif defined(USE_PTHREAD_SPINLOCK)
  1002. static pthread_spinlock_t init_lock = 0;
  1003. #else
  1004. static BLASULONG init_lock = 0UL;
  1005. #endif
  1006. #endif
  1007. static void _touch_memory(blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n,
  1008. void *sa, void *sb, BLASLONG pos) {
  1009. #if !defined(ARCH_POWER) && !defined(ARCH_SPARC)
  1010. size_t size;
  1011. BLASULONG buffer;
  1012. size = allocation_block_size - PAGESIZE;
  1013. buffer = (BLASULONG)sa + GEMM_OFFSET_A;
  1014. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  1015. if (hot_alloc != 2) {
  1016. #endif
  1017. #ifdef SMP
  1018. LOCK_COMMAND(&init_lock);
  1019. #endif
  1020. while (size > 0) {
  1021. *(int *)buffer = size;
  1022. buffer += PAGESIZE;
  1023. size -= PAGESIZE;
  1024. }
  1025. #ifdef SMP
  1026. UNLOCK_COMMAND(&init_lock);
  1027. #endif
  1028. size = MIN((allocation_block_size - PAGESIZE), L2_SIZE);
  1029. buffer = (BLASULONG)sa + GEMM_OFFSET_A;
  1030. while (size > 0) {
  1031. *(int *)buffer = size;
  1032. buffer += 64;
  1033. size -= 64;
  1034. }
  1035. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  1036. }
  1037. #endif
  1038. #endif
  1039. }
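/*
 * _touch_memory() is the warm-up helper: the first loop writes one word per
 * page so every page of the buffer is faulted in and backed by physical
 * memory, and the second loop streams through up to L2_SIZE bytes in 64-byte
 * (cache-line) steps to pull the working set into the cache.  It is skipped
 * when hot_alloc == 2, i.e. when alloc_mmap already benchmarked and touched
 * the region.
 */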
  1040. #ifdef SMP
  1041. static void _init_thread_memory(void *buffer) {
  1042. blas_queue_t queue[MAX_CPU_NUMBER];
  1043. int num_cpu;
  1044. for (num_cpu = 0; num_cpu < blas_num_threads; num_cpu++) {
  1045. blas_queue_init(&queue[num_cpu]);
  1046. queue[num_cpu].mode = BLAS_DOUBLE | BLAS_REAL;
  1047. queue[num_cpu].routine = &_touch_memory;
  1048. queue[num_cpu].args = NULL;
  1049. queue[num_cpu].next = &queue[num_cpu + 1];
  1050. }
  1051. queue[num_cpu - 1].next = NULL;
  1052. queue[0].sa = buffer;
  1053. exec_blas(num_cpu, queue);
  1054. }
  1055. #endif
  1056. static void gotoblas_memory_init(void) {
  1057. void *buffer;
  1058. hot_alloc = 1;
  1059. buffer = (void *)blas_memory_alloc(0);
  1060. #ifdef SMP
  1061. if (blas_cpu_number == 0) blas_get_cpu_number();
  1062. #ifdef SMP_SERVER
  1063. if (blas_server_avail == 0) blas_thread_init();
  1064. #endif
  1065. _init_thread_memory((void *)((BLASULONG)buffer + GEMM_OFFSET_A));
  1066. #else
  1067. _touch_memory(NULL, NULL, NULL, (void *)((BLASULONG)buffer + GEMM_OFFSET_A), NULL, 0);
  1068. #endif
  1069. blas_memory_free(buffer);
  1070. }
  1071. #endif
1072. /* Initialization for all functions; this function should be called before main */
  1073. static int gotoblas_initialized = 0;
  1074. extern void openblas_read_env();
  1075. void CONSTRUCTOR gotoblas_init(void) {
  1076. if (gotoblas_initialized) return;
  1077. #ifdef SMP
  1078. openblas_fork_handler();
  1079. #endif
  1080. openblas_read_env();
  1081. #ifdef PROFILE
  1082. moncontrol (0);
  1083. #endif
  1084. #ifdef DYNAMIC_ARCH
  1085. gotoblas_dynamic_init();
  1086. #endif
  1087. #if defined(SMP) && defined(OS_LINUX) && !defined(NO_AFFINITY)
  1088. gotoblas_affinity_init();
  1089. #endif
  1090. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  1091. gotoblas_memory_init();
  1092. #endif
  1093. //#if defined(OS_LINUX)
  1094. #if 0
  1095. struct rlimit curlimit;
  1096. if ( getrlimit(RLIMIT_STACK, &curlimit ) == 0 )
  1097. {
  1098. if ( curlimit.rlim_cur != curlimit.rlim_max )
  1099. {
  1100. curlimit.rlim_cur = curlimit.rlim_max;
  1101. setrlimit(RLIMIT_STACK, &curlimit);
  1102. }
  1103. }
  1104. #endif
  1105. #ifdef SMP
  1106. if (blas_cpu_number == 0) blas_get_cpu_number();
  1107. #ifdef SMP_SERVER
  1108. if (blas_server_avail == 0) blas_thread_init();
  1109. #endif
  1110. #endif
  1111. #ifdef FUNCTION_PROFILE
  1112. gotoblas_profile_init();
  1113. #endif
  1114. gotoblas_initialized = 1;
  1115. #ifdef PROFILE
  1116. moncontrol (1);
  1117. #endif
  1118. }
  1119. void DESTRUCTOR gotoblas_quit(void) {
  1120. if (gotoblas_initialized == 0) return;
  1121. blas_shutdown();
  1122. #if defined(SMP)
  1123. #if defined(OS_WINDOWS)
  1124. TlsFree(local_storage_key);
  1125. #else
  1126. pthread_key_delete(local_storage_key);
  1127. #endif
  1128. #endif
  1129. #ifdef PROFILE
  1130. moncontrol (0);
  1131. #endif
  1132. #ifdef FUNCTION_PROFILE
  1133. gotoblas_profile_quit();
  1134. #endif
  1135. #if defined(SMP) && defined(OS_LINUX) && !defined(NO_AFFINITY)
  1136. gotoblas_affinity_quit();
  1137. #endif
  1138. #ifdef DYNAMIC_ARCH
  1139. gotoblas_dynamic_quit();
  1140. #endif
  1141. gotoblas_initialized = 0;
  1142. #ifdef PROFILE
  1143. moncontrol (1);
  1144. #endif
  1145. }
  1146. #if defined(_MSC_VER) && !defined(__clang__)
  1147. BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved)
  1148. {
  1149. switch (ul_reason_for_call)
  1150. {
  1151. case DLL_PROCESS_ATTACH:
  1152. gotoblas_init();
  1153. break;
  1154. case DLL_THREAD_ATTACH:
  1155. break;
  1156. case DLL_THREAD_DETACH:
  1157. #if defined(SMP)
  1158. blas_memory_cleanup((void*)get_memory_table());
  1159. #endif
  1160. break;
  1161. case DLL_PROCESS_DETACH:
  1162. gotoblas_quit();
  1163. break;
  1164. default:
  1165. break;
  1166. }
  1167. return TRUE;
  1168. }
  1169. /*
  1170. This is to allow static linking.
  1171. Code adapted from Google performance tools:
  1172. https://gperftools.googlecode.com/git-history/perftools-1.0/src/windows/port.cc
  1173. Reference:
  1174. https://sourceware.org/ml/pthreads-win32/2008/msg00028.html
  1175. http://ci.boost.org/svn-trac/browser/trunk/libs/thread/src/win32/tss_pe.cpp
  1176. */
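/*
 * Mechanism sketch (see the references above): placing a pointer to DllMain
 * in the ".CRT$XLB" section registers it as a TLS callback, and placing
 * on_process_term in ".CRT$XTU" adds it to the CRT termination table, so
 * initialization and cleanup still run when OpenBLAS is linked statically
 * and the loader never calls a real DllMain.
 */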
  1177. static int on_process_term(void)
  1178. {
  1179. gotoblas_quit();
  1180. return 0;
  1181. }
  1182. #ifdef _WIN64
  1183. #pragma comment(linker, "/INCLUDE:_tls_used")
  1184. #else
  1185. #pragma comment(linker, "/INCLUDE:__tls_used")
  1186. #endif
  1187. #ifdef _WIN64
  1188. #pragma const_seg(".CRT$XLB")
  1189. #else
  1190. #pragma data_seg(".CRT$XLB")
  1191. #endif
  1192. static void (APIENTRY *dll_callback)(HINSTANCE h, DWORD ul_reason_for_call, PVOID pv) = DllMain;
  1193. #ifdef _WIN64
  1194. #pragma const_seg()
  1195. #else
  1196. #pragma data_seg()
  1197. #endif
  1198. #ifdef _WIN64
  1199. #pragma const_seg(".CRT$XTU")
  1200. #else
  1201. #pragma data_seg(".CRT$XTU")
  1202. #endif
  1203. static int(*p_process_term)(void) = on_process_term;
  1204. #ifdef _WIN64
  1205. #pragma const_seg()
  1206. #else
  1207. #pragma data_seg()
  1208. #endif
  1209. #endif
  1210. #if (defined(C_PGI) || (!defined(C_SUN) && defined(F_INTERFACE_SUN))) && (defined(ARCH_X86) || defined(ARCH_X86_64))
1211. /* Don't call me; this is just a workaround for a PGI / Sun bug */
  1212. void gotoblas_dummy_for_PGI(void) {
  1213. gotoblas_init();
  1214. gotoblas_quit();
  1215. #if 0
  1216. asm ("\t.section\t.ctors,\"aw\",@progbits; .align 8; .quad gotoblas_init; .section .text");
  1217. asm ("\t.section\t.dtors,\"aw\",@progbits; .align 8; .quad gotoblas_quit; .section .text");
  1218. #else
  1219. asm (".section .init,\"ax\"; call gotoblas_init@PLT; .section .text");
  1220. asm (".section .fini,\"ax\"; call gotoblas_quit@PLT; .section .text");
  1221. #endif
  1222. }
  1223. #endif
  1224. #else
  1225. #include <errno.h>
  1226. #ifdef OS_WINDOWS
  1227. #define ALLOC_WINDOWS
  1228. #ifndef MEM_LARGE_PAGES
  1229. #define MEM_LARGE_PAGES 0x20000000
  1230. #endif
  1231. #else
  1232. #define ALLOC_MMAP
  1233. #define ALLOC_MALLOC
  1234. #endif
  1235. #include <stdlib.h>
  1236. #include <stdio.h>
  1237. #include <fcntl.h>
  1238. #ifndef OS_WINDOWS
  1239. #include <sys/mman.h>
  1240. #ifndef NO_SYSV_IPC
  1241. #include <sys/shm.h>
  1242. #endif
  1243. #include <sys/ipc.h>
  1244. #endif
  1245. #include <sys/types.h>
  1246. #ifdef OS_LINUX
  1247. #include <sys/sysinfo.h>
  1248. #include <sched.h>
  1249. #include <errno.h>
  1250. #include <linux/unistd.h>
  1251. #include <sys/syscall.h>
  1252. #include <sys/time.h>
  1253. #include <sys/resource.h>
  1254. #endif
  1255. #if defined(OS_FREEBSD) || defined(OS_DARWIN)
  1256. #include <sys/sysctl.h>
  1257. #include <sys/resource.h>
  1258. #endif
  1259. #if defined(OS_WINDOWS) && (defined(__MINGW32__) || defined(__MINGW64__))
  1260. #include <conio.h>
  1261. #undef printf
  1262. #define printf _cprintf
  1263. #endif
  1264. #ifdef OS_LINUX
  1265. #ifndef MPOL_PREFERRED
  1266. #define MPOL_PREFERRED 1
  1267. #endif
  1268. #endif
  1269. #if (defined(PPC440) || !defined(OS_LINUX) || defined(HPL)) && !defined(NO_WARMUP)
  1270. #define NO_WARMUP
  1271. #endif
  1272. #ifndef SHM_HUGETLB
  1273. #define SHM_HUGETLB 04000
  1274. #endif
  1275. #ifndef FIXED_PAGESIZE
  1276. #define FIXED_PAGESIZE 4096
  1277. #endif
  1278. #define BITMASK(a, b, c) ((((a) >> (b)) & (c)))
  1279. #if defined(_MSC_VER) && !defined(__clang__)
  1280. #define CONSTRUCTOR __cdecl
  1281. #define DESTRUCTOR __cdecl
  1282. #elif (defined(OS_DARWIN) || defined(OS_SUNOS)) && defined(C_GCC)
  1283. #define CONSTRUCTOR __attribute__ ((constructor))
  1284. #define DESTRUCTOR __attribute__ ((destructor))
  1285. #else
  1286. #define CONSTRUCTOR __attribute__ ((constructor(101)))
  1287. #define DESTRUCTOR __attribute__ ((destructor(101)))
  1288. #endif
  1289. #ifdef DYNAMIC_ARCH
  1290. gotoblas_t *gotoblas = NULL;
  1291. #endif
  1292. extern void openblas_warning(int verbose, const char * msg);
  1293. #ifndef SMP
  1294. #define blas_cpu_number 1
  1295. #define blas_num_threads 1
  1296. /* Dummy Function */
  1297. int goto_get_num_procs (void) { return 1;};
  1298. void goto_set_num_threads(int num_threads) {};
  1299. #else
  1300. #if defined(OS_LINUX) || defined(OS_SUNOS) || defined(OS_NETBSD)
  1301. #ifndef NO_AFFINITY
  1302. int get_num_procs(void);
  1303. #else
  1304. int get_num_procs(void) {
  1305. static int nums = 0;
1306. cpu_set_t cpuset, *cpusetp;
  1307. size_t size;
  1308. int ret;
  1309. int i,n;
  1310. if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
  1311. #if !defined(OS_LINUX)
  1312. return nums;
  1313. #endif
  1314. #if !defined(__GLIBC_PREREQ)
  1315. return nums;
  1316. #else
  1317. #if !__GLIBC_PREREQ(2, 3)
  1318. return nums;
  1319. #endif
  1320. #if !__GLIBC_PREREQ(2, 7)
1321. ret = sched_getaffinity(0, sizeof(cpuset), &cpuset);
1322. if (ret!=0) return nums;
1323. n=0;
1324. #if !__GLIBC_PREREQ(2, 6)
1325. for (i=0;i<nums;i++)
1326. if (CPU_ISSET(i,&cpuset)) n++;
1327. nums=n;
1328. #else
1329. nums = CPU_COUNT(&cpuset);
  1330. #endif
  1331. return nums;
  1332. #else
  1333. cpusetp = CPU_ALLOC(nums);
  1334. if (cpusetp == NULL) return nums;
  1335. size = CPU_ALLOC_SIZE(nums);
  1336. ret = sched_getaffinity(0,size,cpusetp);
  1337. if (ret!=0) return nums;
  1338. nums = CPU_COUNT_S(size,cpusetp);
  1339. CPU_FREE(cpusetp);
  1340. return nums;
  1341. #endif
  1342. #endif
  1343. }
  1344. #endif
  1345. #endif
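/*
 * The glibc version ladder above exists because CPU_ALLOC / CPU_COUNT_S only
 * appeared in glibc 2.7 and CPU_COUNT in 2.6; older libraries fall back to
 * counting set bits with CPU_ISSET, and anything before 2.3 (no affinity API)
 * simply reports sysconf(_SC_NPROCESSORS_CONF).
 */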
  1346. #ifdef OS_ANDROID
  1347. int get_num_procs(void) {
  1348. static int nums = 0;
  1349. if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
  1350. return nums;
  1351. }
  1352. #endif
  1353. #ifdef OS_HAIKU
  1354. int get_num_procs(void) {
  1355. static int nums = 0;
  1356. if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
  1357. return nums;
  1358. }
  1359. #endif
  1360. #ifdef OS_AIX
  1361. int get_num_procs(void) {
  1362. static int nums = 0;
  1363. if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
  1364. return nums;
  1365. }
  1366. #endif
  1367. #ifdef OS_WINDOWS
  1368. int get_num_procs(void) {
  1369. static int nums = 0;
  1370. if (nums == 0) {
  1371. SYSTEM_INFO sysinfo;
  1372. GetSystemInfo(&sysinfo);
  1373. nums = sysinfo.dwNumberOfProcessors;
  1374. }
  1375. return nums;
  1376. }
  1377. #endif
  1378. #if defined(OS_FREEBSD)
  1379. int get_num_procs(void) {
  1380. static int nums = 0;
  1381. int m[2];
  1382. size_t len;
  1383. if (nums == 0) {
  1384. m[0] = CTL_HW;
  1385. m[1] = HW_NCPU;
  1386. len = sizeof(int);
  1387. sysctl(m, 2, &nums, &len, NULL, 0);
  1388. }
  1389. return nums;
  1390. }
  1391. #endif
  1392. #if defined(OS_DARWIN)
  1393. int get_num_procs(void) {
  1394. static int nums = 0;
  1395. size_t len;
  1396. if (nums == 0){
  1397. len = sizeof(int);
  1398. sysctlbyname("hw.physicalcpu", &nums, &len, NULL, 0);
  1399. }
  1400. return nums;
  1401. }
  1402. /*
  1403. void set_stack_limit(int limitMB){
  1404. int result=0;
  1405. struct rlimit rl;
  1406. rlim_t StackSize;
  1407. StackSize=limitMB*1024*1024;
  1408. result=getrlimit(RLIMIT_STACK, &rl);
  1409. if(result==0){
  1410. if(rl.rlim_cur < StackSize){
  1411. rl.rlim_cur=StackSize;
  1412. result=setrlimit(RLIMIT_STACK, &rl);
  1413. if(result !=0){
  1414. fprintf(stderr, "OpenBLAS: set stack limit error =%d\n", result);
  1415. }
  1416. }
  1417. }
  1418. }
  1419. */
  1420. #endif
  1421. /*
1422. OpenBLAS uses the number of CPU cores for multithreading.
1423. It can be set with openblas_set_num_threads(int num_threads);
  1424. */
  1425. int blas_cpu_number = 0;
  1426. /*
1427. The number of threads in the thread pool.
1428. This value is greater than or equal to blas_cpu_number, which means some threads may be idle.
  1429. */
  1430. int blas_num_threads = 0;
  1431. int goto_get_num_procs (void) {
  1432. return blas_cpu_number;
  1433. }
  1434. void openblas_fork_handler()
  1435. {
  1436. // This handler shuts down the OpenBLAS-managed PTHREAD pool when OpenBLAS is
  1437. // built with "make USE_OPENMP=0".
  1438. // Hanging can still happen when OpenBLAS is built against the libgomp
  1439. // implementation of OpenMP. The problem is tracked at:
  1440. // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60035
1441. // In the meantime, build with USE_OPENMP=0 or link against another
1442. // implementation of OpenMP.
  1443. #if !(defined(OS_WINDOWS) || defined(OS_ANDROID)) && defined(SMP_SERVER)
  1444. int err;
  1445. err = pthread_atfork ((void (*)(void)) BLASFUNC(blas_thread_shutdown), NULL, NULL);
  1446. if(err != 0)
1447. openblas_warning(0, "OpenBLAS Warning ... cannot install fork handler. You may encounter hangs after fork.\n");
  1448. #endif
  1449. }
  1450. extern int openblas_num_threads_env();
  1451. extern int openblas_goto_num_threads_env();
  1452. extern int openblas_omp_num_threads_env();
  1453. int blas_get_cpu_number(void){
  1454. #if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_DARWIN) || defined(OS_ANDROID)
  1455. int max_num;
  1456. #endif
  1457. int blas_goto_num = 0;
  1458. int blas_omp_num = 0;
  1459. if (blas_num_threads) return blas_num_threads;
  1460. #if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_DARWIN) || defined(OS_ANDROID)
  1461. max_num = get_num_procs();
  1462. #endif
  1463. blas_goto_num = 0;
  1464. #ifndef USE_OPENMP
  1465. blas_goto_num=openblas_num_threads_env();
  1466. if (blas_goto_num < 0) blas_goto_num = 0;
  1467. if (blas_goto_num == 0) {
  1468. blas_goto_num=openblas_goto_num_threads_env();
  1469. if (blas_goto_num < 0) blas_goto_num = 0;
  1470. }
  1471. #endif
  1472. blas_omp_num = 0;
  1473. blas_omp_num=openblas_omp_num_threads_env();
  1474. if (blas_omp_num < 0) blas_omp_num = 0;
  1475. if (blas_goto_num > 0) blas_num_threads = blas_goto_num;
  1476. else if (blas_omp_num > 0) blas_num_threads = blas_omp_num;
  1477. else blas_num_threads = MAX_CPU_NUMBER;
  1478. #if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_DARWIN) || defined(OS_ANDROID)
  1479. if (blas_num_threads > max_num) blas_num_threads = max_num;
  1480. #endif
  1481. if (blas_num_threads > MAX_CPU_NUMBER) blas_num_threads = MAX_CPU_NUMBER;
  1482. #ifdef DEBUG
  1483. printf( "Adjusted number of threads : %3d\n", blas_num_threads);
  1484. #endif
  1485. blas_cpu_number = blas_num_threads;
  1486. return blas_num_threads;
  1487. }
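/*
 * Thread-count selection order (as implemented above): the OpenBLAS-specific
 * setting wins, then the GOTO one, then the OpenMP one, and finally the
 * compile-time MAX_CPU_NUMBER; the result is clamped to the processor count
 * reported by get_num_procs() where that is available.  The openblas_*_env()
 * helpers are defined elsewhere and presumably read the OPENBLAS_NUM_THREADS /
 * GOTO_NUM_THREADS / OMP_NUM_THREADS environment variables.
 */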
  1488. #endif
  1489. int openblas_get_num_procs(void) {
  1490. #ifndef SMP
  1491. return 1;
  1492. #else
  1493. return get_num_procs();
  1494. #endif
  1495. }
  1496. int openblas_get_num_threads(void) {
  1497. #ifndef SMP
  1498. return 1;
  1499. #else
  1500. // init blas_cpu_number if needed
  1501. blas_get_cpu_number();
  1502. return blas_cpu_number;
  1503. #endif
  1504. }
  1505. struct release_t {
  1506. void *address;
  1507. void (*func)(struct release_t *);
  1508. long attr;
  1509. };
  1510. int hugetlb_allocated = 0;
  1511. static struct release_t release_info[NUM_BUFFERS];
  1512. static int release_pos = 0;
  1513. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  1514. static int hot_alloc = 0;
  1515. #endif
  1516. /* Global lock for memory allocation */
  1517. #if defined(USE_PTHREAD_LOCK)
  1518. static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
  1519. #elif defined(USE_PTHREAD_SPINLOCK)
  1520. static pthread_spinlock_t alloc_lock = 0;
  1521. #else
  1522. static BLASULONG alloc_lock = 0UL;
  1523. #endif
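/*
 * In this lock-based variant of the allocator every allocation, free and
 * shutdown path serializes on alloc_lock, which protects both the
 * release_info[] list and the memory[] slot table defined further below.
 * LOCK_COMMAND / UNLOCK_COMMAND are defined elsewhere and presumably map onto
 * whichever primitive was selected above (pthread mutex, spinlock, or a
 * simple flag).
 */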
  1524. #ifdef ALLOC_MMAP
  1525. static void alloc_mmap_free(struct release_t *release){
  1526. if (munmap(release -> address, BUFFER_SIZE)) {
  1527. printf("OpenBLAS : munmap failed\n");
  1528. }
  1529. }
  1530. #ifdef NO_WARMUP
  1531. static void *alloc_mmap(void *address){
  1532. void *map_address;
  1533. if (address){
  1534. map_address = mmap(address,
  1535. BUFFER_SIZE,
  1536. MMAP_ACCESS, MMAP_POLICY | MAP_FIXED, -1, 0);
  1537. } else {
  1538. map_address = mmap(address,
  1539. BUFFER_SIZE,
  1540. MMAP_ACCESS, MMAP_POLICY, -1, 0);
  1541. }
  1542. if (map_address != (void *)-1) {
  1543. LOCK_COMMAND(&alloc_lock);
  1544. release_info[release_pos].address = map_address;
  1545. release_info[release_pos].func = alloc_mmap_free;
  1546. release_pos ++;
  1547. UNLOCK_COMMAND(&alloc_lock);
  1548. }
  1549. #ifdef OS_LINUX
  1550. my_mbind(map_address, BUFFER_SIZE, MPOL_PREFERRED, NULL, 0, 0);
  1551. #endif
  1552. return map_address;
  1553. }
  1554. #else
  1555. #define BENCH_ITERATION 4
  1556. #define SCALING 2
  1557. static inline BLASULONG run_bench(BLASULONG address, BLASULONG size) {
  1558. BLASULONG original, *p;
  1559. BLASULONG start, stop, min;
  1560. int iter, i, count;
  1561. min = (BLASULONG)-1;
  1562. original = *(BLASULONG *)(address + size - PAGESIZE);
  1563. *(BLASULONG *)(address + size - PAGESIZE) = (BLASULONG)address;
  1564. for (iter = 0; iter < BENCH_ITERATION; iter ++ ) {
  1565. p = (BLASULONG *)address;
  1566. count = size / PAGESIZE;
  1567. start = rpcc();
  1568. for (i = 0; i < count; i ++) {
  1569. p = (BLASULONG *)(*p);
  1570. }
  1571. stop = rpcc();
  1572. if (min > stop - start) min = stop - start;
  1573. }
  1574. *(BLASULONG *)(address + size - PAGESIZE + 0) = original;
  1575. *(BLASULONG *)(address + size - PAGESIZE + 8) = (BLASULONG)p;
  1576. return min;
  1577. }
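/*
 * run_bench() measures how quickly a candidate window of the over-sized
 * mapping can be walked: it follows a pointer chain threaded through the
 * region one page at a time (set up by the caller below), chases it
 * BENCH_ITERATION times using the rpcc() cycle counter, and returns the best
 * observed time.  alloc_mmap() slides this window across the SCALING-times
 * larger mapping, keeps the fastest starting address, and unmaps the rest.
 */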
  1578. static void *alloc_mmap(void *address){
  1579. void *map_address, *best_address;
  1580. BLASULONG best, start, current;
  1581. BLASULONG allocsize;
  1582. if (address){
1583. /* An address was requested explicitly, so skip the advanced placement and map it directly */
  1584. map_address = mmap(address, BUFFER_SIZE, MMAP_ACCESS, MMAP_POLICY | MAP_FIXED, -1, 0);
  1585. #ifdef OS_LINUX
  1586. my_mbind(map_address, BUFFER_SIZE, MPOL_PREFERRED, NULL, 0, 0);
  1587. #endif
  1588. } else {
  1589. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  1590. if (hot_alloc == 0) {
  1591. map_address = mmap(NULL, BUFFER_SIZE, MMAP_ACCESS, MMAP_POLICY, -1, 0);
  1592. #ifdef OS_LINUX
  1593. my_mbind(map_address, BUFFER_SIZE, MPOL_PREFERRED, NULL, 0, 0);
  1594. #endif
  1595. } else {
  1596. #endif
  1597. map_address = mmap(NULL, BUFFER_SIZE * SCALING,
  1598. MMAP_ACCESS, MMAP_POLICY, -1, 0);
  1599. if (map_address != (void *)-1) {
  1600. #ifdef OS_LINUX
  1601. #ifdef DEBUG
  1602. int ret=0;
  1603. ret=my_mbind(map_address, BUFFER_SIZE * SCALING, MPOL_PREFERRED, NULL, 0, 0);
  1604. if(ret==-1){
  1605. int errsv=errno;
  1606. perror("OpenBLAS alloc_mmap:");
  1607. printf("error code=%d,\tmap_address=%lx\n",errsv,map_address);
  1608. }
  1609. #else
  1610. my_mbind(map_address, BUFFER_SIZE * SCALING, MPOL_PREFERRED, NULL, 0, 0);
  1611. #endif
  1612. #endif
  1613. allocsize = DGEMM_P * DGEMM_Q * sizeof(double);
  1614. start = (BLASULONG)map_address;
  1615. current = (SCALING - 1) * BUFFER_SIZE;
  1616. while(current > 0) {
  1617. *(BLASLONG *)start = (BLASLONG)start + PAGESIZE;
  1618. start += PAGESIZE;
  1619. current -= PAGESIZE;
  1620. }
  1621. *(BLASLONG *)(start - PAGESIZE) = (BLASULONG)map_address;
  1622. start = (BLASULONG)map_address;
  1623. best = (BLASULONG)-1;
  1624. best_address = map_address;
  1625. while ((start + allocsize < (BLASULONG)map_address + (SCALING - 1) * BUFFER_SIZE)) {
  1626. current = run_bench(start, allocsize);
  1627. if (best > current) {
  1628. best = current;
  1629. best_address = (void *)start;
  1630. }
  1631. start += PAGESIZE;
  1632. }
  1633. if ((BLASULONG)best_address > (BLASULONG)map_address)
  1634. munmap(map_address, (BLASULONG)best_address - (BLASULONG)map_address);
  1635. munmap((void *)((BLASULONG)best_address + BUFFER_SIZE), (SCALING - 1) * BUFFER_SIZE + (BLASULONG)map_address - (BLASULONG)best_address);
  1636. map_address = best_address;
  1637. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  1638. hot_alloc = 2;
  1639. #endif
  1640. }
  1641. }
  1642. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  1643. }
  1644. #endif
  1645. LOCK_COMMAND(&alloc_lock);
  1646. if (map_address != (void *)-1) {
  1647. release_info[release_pos].address = map_address;
  1648. release_info[release_pos].func = alloc_mmap_free;
  1649. release_pos ++;
  1650. }
  1651. UNLOCK_COMMAND(&alloc_lock);
  1652. return map_address;
  1653. }
  1654. #endif
  1655. #endif
  1656. #ifdef ALLOC_MALLOC
  1657. static void alloc_malloc_free(struct release_t *release){
  1658. free(release -> address);
  1659. }
  1660. static void *alloc_malloc(void *address){
  1661. void *map_address;
  1662. map_address = (void *)malloc(BUFFER_SIZE + FIXED_PAGESIZE);
  1663. if (map_address == (void *)NULL) map_address = (void *)-1;
  1664. if (map_address != (void *)-1) {
  1665. release_info[release_pos].address = map_address;
  1666. release_info[release_pos].func = alloc_malloc_free;
  1667. release_pos ++;
  1668. }
  1669. return map_address;
  1670. }
  1671. #endif
  1672. #ifdef ALLOC_QALLOC
  1673. void *qalloc(int flags, size_t bytes);
  1674. void *qfree (void *address);
  1675. #define QNONCACHE 0x1
  1676. #define QCOMMS 0x2
  1677. #define QFAST 0x4
  1678. static void alloc_qalloc_free(struct release_t *release){
  1679. qfree(release -> address);
  1680. }
  1681. static void *alloc_qalloc(void *address){
  1682. void *map_address;
  1683. map_address = (void *)qalloc(QCOMMS | QFAST, BUFFER_SIZE + FIXED_PAGESIZE);
  1684. if (map_address == (void *)NULL) map_address = (void *)-1;
  1685. if (map_address != (void *)-1) {
  1686. release_info[release_pos].address = map_address;
  1687. release_info[release_pos].func = alloc_qalloc_free;
  1688. release_pos ++;
  1689. }
  1690. return (void *)(((BLASULONG)map_address + FIXED_PAGESIZE - 1) & ~(FIXED_PAGESIZE - 1));
  1691. }
  1692. #endif
  1693. #ifdef ALLOC_WINDOWS
  1694. static void alloc_windows_free(struct release_t *release){
  1695. VirtualFree(release -> address, BUFFER_SIZE, MEM_DECOMMIT);
  1696. }
  1697. static void *alloc_windows(void *address){
  1698. void *map_address;
  1699. map_address = VirtualAlloc(address,
  1700. BUFFER_SIZE,
  1701. MEM_RESERVE | MEM_COMMIT,
  1702. PAGE_READWRITE);
  1703. if (map_address == (void *)NULL) map_address = (void *)-1;
  1704. if (map_address != (void *)-1) {
  1705. release_info[release_pos].address = map_address;
  1706. release_info[release_pos].func = alloc_windows_free;
  1707. release_pos ++;
  1708. }
  1709. return map_address;
  1710. }
  1711. #endif
  1712. #ifdef ALLOC_DEVICEDRIVER
  1713. #ifndef DEVICEDRIVER_NAME
  1714. #define DEVICEDRIVER_NAME "/dev/mapper"
  1715. #endif
  1716. static void alloc_devicedirver_free(struct release_t *release){
  1717. if (munmap(release -> address, BUFFER_SIZE)) {
  1718. printf("OpenBLAS : Bugphysarea unmap failed.\n");
  1719. }
  1720. if (close(release -> attr)) {
  1721. printf("OpenBLAS : Bugphysarea close failed.\n");
  1722. }
  1723. }
  1724. static void *alloc_devicedirver(void *address){
  1725. int fd;
  1726. void *map_address;
  1727. if ((fd = open(DEVICEDRIVER_NAME, O_RDWR | O_SYNC)) < 0) {
  1728. return (void *)-1;
  1729. }
  1730. map_address = mmap(address, BUFFER_SIZE,
  1731. PROT_READ | PROT_WRITE,
  1732. MAP_FILE | MAP_SHARED,
  1733. fd, 0);
  1734. if (map_address != (void *)-1) {
  1735. release_info[release_pos].address = map_address;
  1736. release_info[release_pos].attr = fd;
  1737. release_info[release_pos].func = alloc_devicedirver_free;
  1738. release_pos ++;
  1739. }
  1740. return map_address;
  1741. }
  1742. #endif
  1743. #ifdef ALLOC_SHM
  1744. static void alloc_shm_free(struct release_t *release){
  1745. if (shmdt(release -> address)) {
  1746. printf("OpenBLAS : Shared memory unmap failed.\n");
  1747. }
  1748. }
  1749. static void *alloc_shm(void *address){
  1750. void *map_address;
  1751. int shmid;
  1752. shmid = shmget(IPC_PRIVATE, BUFFER_SIZE,IPC_CREAT | 0600);
  1753. map_address = (void *)shmat(shmid, address, 0);
  1754. if (map_address != (void *)-1){
  1755. #ifdef OS_LINUX
  1756. my_mbind(map_address, BUFFER_SIZE, MPOL_PREFERRED, NULL, 0, 0);
  1757. #endif
  1758. shmctl(shmid, IPC_RMID, 0);
  1759. release_info[release_pos].address = map_address;
  1760. release_info[release_pos].attr = shmid;
  1761. release_info[release_pos].func = alloc_shm_free;
  1762. release_pos ++;
  1763. }
  1764. return map_address;
  1765. }
  1766. #if defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS
  1767. static void alloc_hugetlb_free(struct release_t *release){
  1768. #if defined(OS_LINUX) || defined(OS_AIX)
  1769. if (shmdt(release -> address)) {
  1770. printf("OpenBLAS : Hugepage unmap failed.\n");
  1771. }
  1772. #endif
  1773. #ifdef __sun__
  1774. munmap(release -> address, BUFFER_SIZE);
  1775. #endif
  1776. #ifdef OS_WINDOWS
  1777. VirtualFree(release -> address, BUFFER_SIZE, MEM_LARGE_PAGES | MEM_DECOMMIT);
  1778. #endif
  1779. }
  1780. static void *alloc_hugetlb(void *address){
  1781. void *map_address = (void *)-1;
  1782. #if defined(OS_LINUX) || defined(OS_AIX)
  1783. int shmid;
  1784. shmid = shmget(IPC_PRIVATE, BUFFER_SIZE,
  1785. #ifdef OS_LINUX
  1786. SHM_HUGETLB |
  1787. #endif
  1788. #ifdef OS_AIX
  1789. SHM_LGPAGE | SHM_PIN |
  1790. #endif
  1791. IPC_CREAT | SHM_R | SHM_W);
  1792. if (shmid != -1) {
  1793. map_address = (void *)shmat(shmid, address, SHM_RND);
  1794. #ifdef OS_LINUX
  1795. my_mbind(map_address, BUFFER_SIZE, MPOL_PREFERRED, NULL, 0, 0);
  1796. #endif
  1797. if (map_address != (void *)-1){
  1798. shmctl(shmid, IPC_RMID, 0);
  1799. }
  1800. }
  1801. #endif
  1802. #ifdef __sun__
  1803. struct memcntl_mha mha;
  1804. mha.mha_cmd = MHA_MAPSIZE_BSSBRK;
  1805. mha.mha_flags = 0;
  1806. mha.mha_pagesize = HUGE_PAGESIZE;
  1807. memcntl(NULL, 0, MC_HAT_ADVISE, (char *)&mha, 0, 0);
1808. map_address = (void *)memalign(HUGE_PAGESIZE, BUFFER_SIZE);
  1809. #endif
  1810. #ifdef OS_WINDOWS
  1811. HANDLE hToken;
  1812. TOKEN_PRIVILEGES tp;
  1813. if (OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &hToken) != TRUE) return (void *) -1;
  1814. tp.PrivilegeCount = 1;
  1815. tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
  1816. if (LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &tp.Privileges[0].Luid) != TRUE) {
  1817. CloseHandle(hToken);
  1818. return (void*)-1;
  1819. }
  1820. if (AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL) != TRUE) {
  1821. CloseHandle(hToken);
  1822. return (void*)-1;
  1823. }
  1824. map_address = (void *)VirtualAlloc(address,
  1825. BUFFER_SIZE,
  1826. MEM_LARGE_PAGES | MEM_RESERVE | MEM_COMMIT,
  1827. PAGE_READWRITE);
  1828. tp.Privileges[0].Attributes = 0;
  1829. AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL);
  1830. if (map_address == (void *)NULL) map_address = (void *)-1;
  1831. #endif
  1832. if (map_address != (void *)-1){
  1833. release_info[release_pos].address = map_address;
  1834. release_info[release_pos].func = alloc_hugetlb_free;
  1835. release_pos ++;
  1836. }
  1837. return map_address;
  1838. }
  1839. #endif
  1840. #endif
  1841. #ifdef ALLOC_HUGETLBFILE
  1842. static int hugetlb_pid = 0;
  1843. static void alloc_hugetlbfile_free(struct release_t *release){
  1844. if (munmap(release -> address, BUFFER_SIZE)) {
  1845. printf("OpenBLAS : HugeTLBfs unmap failed.\n");
  1846. }
  1847. if (close(release -> attr)) {
  1848. printf("OpenBLAS : HugeTLBfs close failed.\n");
  1849. }
  1850. }
  1851. static void *alloc_hugetlbfile(void *address){
  1852. void *map_address = (void *)-1;
  1853. int fd;
  1854. char filename[64];
  1855. if (!hugetlb_pid) hugetlb_pid = getpid();
  1856. sprintf(filename, "%s/gotoblas.%d", HUGETLB_FILE_NAME, hugetlb_pid);
  1857. if ((fd = open(filename, O_RDWR | O_CREAT, 0700)) < 0) {
  1858. return (void *)-1;
  1859. }
  1860. unlink(filename);
  1861. map_address = mmap(address, BUFFER_SIZE,
  1862. PROT_READ | PROT_WRITE,
  1863. MAP_SHARED,
  1864. fd, 0);
  1865. if (map_address != (void *)-1) {
  1866. release_info[release_pos].address = map_address;
  1867. release_info[release_pos].attr = fd;
  1868. release_info[release_pos].func = alloc_hugetlbfile_free;
  1869. release_pos ++;
  1870. }
  1871. return map_address;
  1872. }
  1873. #endif
  1874. #ifdef SEEK_ADDRESS
  1875. static BLASULONG base_address = 0UL;
  1876. #else
  1877. static BLASULONG base_address = BASE_ADDRESS;
  1878. #endif
  1879. static volatile struct {
  1880. BLASULONG lock;
  1881. void *addr;
  1882. #if defined(WHEREAMI) && !defined(USE_OPENMP)
  1883. int pos;
  1884. #endif
  1885. int used;
  1886. #ifndef __64BIT__
  1887. char dummy[48];
  1888. #else
  1889. char dummy[40];
  1890. #endif
  1891. } memory[NUM_BUFFERS];
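/*
 * Each memory[] entry is padded with the dummy[] field so that one slot
 * (lock, addr, used flag) occupies roughly its own cache line; this is
 * presumably intended to keep threads working on different slots from
 * falsely sharing a line.
 */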
  1892. static int memory_initialized = 0;
  1893. /* Memory allocation routine */
  1894. /* procpos ... indicates where it comes from */
  1895. /* 0 : Level 3 functions */
  1896. /* 1 : Level 2 functions */
  1897. /* 2 : Thread */
  1898. void *blas_memory_alloc(int procpos){
  1899. int position;
  1900. #if defined(WHEREAMI) && !defined(USE_OPENMP)
  1901. int mypos;
  1902. #endif
  1903. void *map_address;
  1904. void *(*memoryalloc[])(void *address) = {
  1905. #ifdef ALLOC_DEVICEDRIVER
  1906. alloc_devicedirver,
  1907. #endif
  1908. /* Hugetlb implicitly assumes ALLOC_SHM */
  1909. #ifdef ALLOC_SHM
  1910. alloc_shm,
  1911. #endif
  1912. #if ((defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS))
  1913. alloc_hugetlb,
  1914. #endif
  1915. #ifdef ALLOC_MMAP
  1916. alloc_mmap,
  1917. #endif
  1918. #ifdef ALLOC_QALLOC
  1919. alloc_qalloc,
  1920. #endif
  1921. #ifdef ALLOC_WINDOWS
  1922. alloc_windows,
  1923. #endif
  1924. #ifdef ALLOC_MALLOC
  1925. alloc_malloc,
  1926. #endif
  1927. NULL,
  1928. };
  1929. void *(**func)(void *address);
  1930. LOCK_COMMAND(&alloc_lock);
  1931. if (!memory_initialized) {
  1932. #if defined(WHEREAMI) && !defined(USE_OPENMP)
  1933. for (position = 0; position < NUM_BUFFERS; position ++){
  1934. memory[position].addr = (void *)0;
  1935. memory[position].pos = -1;
  1936. memory[position].used = 0;
  1937. memory[position].lock = 0;
  1938. }
  1939. #endif
  1940. #ifdef DYNAMIC_ARCH
  1941. gotoblas_dynamic_init();
  1942. #endif
  1943. #if defined(SMP) && defined(OS_LINUX) && !defined(NO_AFFINITY)
  1944. gotoblas_affinity_init();
  1945. #endif
  1946. #ifdef SMP
  1947. if (!blas_num_threads) blas_cpu_number = blas_get_cpu_number();
  1948. #endif
  1949. #if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_IA64) || defined(ARCH_MIPS64) || defined(ARCH_ARM64)
  1950. #ifndef DYNAMIC_ARCH
  1951. blas_set_parameter();
  1952. #endif
  1953. #endif
  1954. memory_initialized = 1;
  1955. }
  1956. UNLOCK_COMMAND(&alloc_lock);
  1957. #ifdef DEBUG
  1958. printf("Alloc Start ...\n");
  1959. #endif
  1960. /* #if defined(WHEREAMI) && !defined(USE_OPENMP)
  1961. mypos = WhereAmI();
  1962. position = mypos;
  1963. while (position >= NUM_BUFFERS) position >>= 1;
  1964. do {
  1965. if (!memory[position].used && (memory[position].pos == mypos)) {
  1966. LOCK_COMMAND(&alloc_lock);
  1967. // blas_lock(&memory[position].lock);
  1968. if (!memory[position].used) goto allocation;
  1969. UNLOCK_COMMAND(&alloc_lock);
  1970. // blas_unlock(&memory[position].lock);
  1971. }
  1972. position ++;
  1973. } while (position < NUM_BUFFERS);
  1974. #endif */
  1975. position = 0;
  1976. LOCK_COMMAND(&alloc_lock);
  1977. do {
  1978. /* if (!memory[position].used) { */
  1979. /* blas_lock(&memory[position].lock);*/
  1980. if (!memory[position].used) goto allocation;
  1981. /* blas_unlock(&memory[position].lock);*/
  1982. /* } */
  1983. position ++;
  1984. } while (position < NUM_BUFFERS);
  1985. UNLOCK_COMMAND(&alloc_lock);
  1986. goto error;
  1987. allocation :
  1988. #ifdef DEBUG
  1989. printf(" Position -> %d\n", position);
  1990. #endif
  1991. memory[position].used = 1;
  1992. UNLOCK_COMMAND(&alloc_lock);
  1993. /* blas_unlock(&memory[position].lock);*/
  1994. if (!memory[position].addr) {
  1995. do {
  1996. #ifdef DEBUG
  1997. printf("Allocation Start : %lx\n", base_address);
  1998. #endif
  1999. map_address = (void *)-1;
  2000. func = &memoryalloc[0];
  2001. while ((func != NULL) && (map_address == (void *) -1)) {
  2002. map_address = (*func)((void *)base_address);
  2003. #ifdef ALLOC_DEVICEDRIVER
  2004. if ((*func == alloc_devicedirver) && (map_address == (void *)-1)) {
  2005. fprintf(stderr, "OpenBLAS Warning ... Physically contigous allocation was failed.\n");
  2006. }
  2007. #endif
  2008. #ifdef ALLOC_HUGETLBFILE
  2009. if ((*func == alloc_hugetlbfile) && (map_address == (void *)-1)) {
  2010. #ifndef OS_WINDOWS
  2011. fprintf(stderr, "OpenBLAS Warning ... HugeTLB(File) allocation was failed.\n");
  2012. #endif
  2013. }
  2014. #endif
  2015. #if (defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)
  2016. if ((*func == alloc_hugetlb) && (map_address != (void *)-1)) hugetlb_allocated = 1;
  2017. #endif
  2018. func ++;
  2019. }
  2020. #ifdef DEBUG
  2021. printf(" Success -> %08lx\n", map_address);
  2022. #endif
  2023. if (((BLASLONG) map_address) == -1) base_address = 0UL;
  2024. if (base_address) base_address += BUFFER_SIZE + FIXED_PAGESIZE;
  2025. } while ((BLASLONG)map_address == -1);
  2026. LOCK_COMMAND(&alloc_lock);
  2027. memory[position].addr = map_address;
  2028. UNLOCK_COMMAND(&alloc_lock);
  2029. #ifdef DEBUG
  2030. printf(" Mapping Succeeded. %p(%d)\n", (void *)memory[position].addr, position);
  2031. #endif
  2032. }
  2033. #if defined(WHEREAMI) && !defined(USE_OPENMP)
  2034. if (memory[position].pos == -1) memory[position].pos = mypos;
  2035. #endif
  2036. #ifdef DYNAMIC_ARCH
  2037. if (memory_initialized == 1) {
  2038. LOCK_COMMAND(&alloc_lock);
  2039. if (memory_initialized == 1) {
  2040. if (!gotoblas) gotoblas_dynamic_init();
  2041. memory_initialized = 2;
  2042. }
  2043. UNLOCK_COMMAND(&alloc_lock);
  2044. }
  2045. #endif
  2046. #ifdef DEBUG
  2047. printf("Mapped : %p %3d\n\n",
  2048. (void *)memory[position].addr, position);
  2049. #endif
  2050. return (void *)memory[position].addr;
  2051. error:
  2052. printf("BLAS : Program is Terminated. Because you tried to allocate too many memory regions.\n");
  2053. return NULL;
  2054. }
  2055. void blas_memory_free(void *free_area){
  2056. int position;
  2057. #ifdef DEBUG
  2058. printf("Unmapped Start : %p ...\n", free_area);
  2059. #endif
  2060. position = 0;
  2061. LOCK_COMMAND(&alloc_lock);
  2062. while ((position < NUM_BUFFERS) && (memory[position].addr != free_area))
  2063. position++;
  2064. if (memory[position].addr != free_area) goto error;
  2065. #ifdef DEBUG
  2066. printf(" Position : %d\n", position);
  2067. #endif
2068. // ARM: ensure all writes are finished before another thread takes this memory
  2069. WMB;
  2070. memory[position].used = 0;
  2071. UNLOCK_COMMAND(&alloc_lock);
  2072. #ifdef DEBUG
  2073. printf("Unmap Succeeded.\n\n");
  2074. #endif
  2075. return;
  2076. error:
  2077. printf("BLAS : Bad memory unallocation! : %4d %p\n", position, free_area);
  2078. #ifdef DEBUG
  2079. for (position = 0; position < NUM_BUFFERS; position++)
  2080. printf("%4ld %p : %d\n", position, memory[position].addr, memory[position].used);
  2081. #endif
  2082. UNLOCK_COMMAND(&alloc_lock);
  2083. return;
  2084. }
  2085. void *blas_memory_alloc_nolock(int unused) {
  2086. void *map_address;
  2087. map_address = (void *)malloc(BUFFER_SIZE + FIXED_PAGESIZE);
  2088. return map_address;
  2089. }
  2090. void blas_memory_free_nolock(void * map_address) {
  2091. free(map_address);
  2092. }
  2093. void blas_shutdown(void){
  2094. int pos;
  2095. #ifdef SMP
  2096. BLASFUNC(blas_thread_shutdown)();
  2097. #endif
  2098. LOCK_COMMAND(&alloc_lock);
  2099. for (pos = 0; pos < release_pos; pos ++) {
  2100. release_info[pos].func(&release_info[pos]);
  2101. }
  2102. #ifdef SEEK_ADDRESS
  2103. base_address = 0UL;
  2104. #else
  2105. base_address = BASE_ADDRESS;
  2106. #endif
  2107. for (pos = 0; pos < NUM_BUFFERS; pos ++){
  2108. memory[pos].addr = (void *)0;
  2109. memory[pos].used = 0;
  2110. #if defined(WHEREAMI) && !defined(USE_OPENMP)
  2111. memory[pos].pos = -1;
  2112. #endif
  2113. memory[pos].lock = 0;
  2114. }
  2115. UNLOCK_COMMAND(&alloc_lock);
  2116. return;
  2117. }
  2118. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  2119. #ifdef SMP
  2120. #if defined(USE_PTHREAD_LOCK)
  2121. static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
  2122. #elif defined(USE_PTHREAD_SPINLOCK)
  2123. static pthread_spinlock_t init_lock = 0;
  2124. #else
  2125. static BLASULONG init_lock = 0UL;
  2126. #endif
  2127. #endif
  2128. static void _touch_memory(blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n,
  2129. void *sa, void *sb, BLASLONG pos) {
  2130. #if !defined(ARCH_POWER) && !defined(ARCH_SPARC)
  2131. size_t size;
  2132. BLASULONG buffer;
  2133. size = BUFFER_SIZE - PAGESIZE;
  2134. buffer = (BLASULONG)sa + GEMM_OFFSET_A;
  2135. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  2136. if (hot_alloc != 2) {
  2137. #endif
  2138. #ifdef SMP
  2139. LOCK_COMMAND(&init_lock);
  2140. #endif
  2141. while (size > 0) {
  2142. *(int *)buffer = size;
  2143. buffer += PAGESIZE;
  2144. size -= PAGESIZE;
  2145. }
  2146. #ifdef SMP
  2147. UNLOCK_COMMAND(&init_lock);
  2148. #endif
  2149. size = MIN((BUFFER_SIZE - PAGESIZE), L2_SIZE);
  2150. buffer = (BLASULONG)sa + GEMM_OFFSET_A;
  2151. while (size > 0) {
  2152. *(int *)buffer = size;
  2153. buffer += 64;
  2154. size -= 64;
  2155. }
  2156. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  2157. }
  2158. #endif
  2159. #endif
  2160. }
  2161. #ifdef SMP
  2162. static void _init_thread_memory(void *buffer) {
  2163. blas_queue_t queue[MAX_CPU_NUMBER];
  2164. int num_cpu;
  2165. for (num_cpu = 0; num_cpu < blas_num_threads; num_cpu++) {
  2166. blas_queue_init(&queue[num_cpu]);
  2167. queue[num_cpu].mode = BLAS_DOUBLE | BLAS_REAL;
  2168. queue[num_cpu].routine = &_touch_memory;
  2169. queue[num_cpu].args = NULL;
  2170. queue[num_cpu].next = &queue[num_cpu + 1];
  2171. }
  2172. queue[num_cpu - 1].next = NULL;
  2173. queue[0].sa = buffer;
  2174. exec_blas(num_cpu, queue);
  2175. }
  2176. #endif
  2177. static void gotoblas_memory_init(void) {
  2178. void *buffer;
  2179. hot_alloc = 1;
  2180. buffer = (void *)blas_memory_alloc(0);
  2181. #ifdef SMP
  2182. if (blas_cpu_number == 0) blas_get_cpu_number();
  2183. #ifdef SMP_SERVER
  2184. if (blas_server_avail == 0) blas_thread_init();
  2185. #endif
  2186. _init_thread_memory((void *)((BLASULONG)buffer + GEMM_OFFSET_A));
  2187. #else
  2188. _touch_memory(NULL, NULL, NULL, (void *)((BLASULONG)buffer + GEMM_OFFSET_A), NULL, 0);
  2189. #endif
  2190. blas_memory_free(buffer);
  2191. }
  2192. #endif
2193. /* Initialization for all functions; this function should be called before main */
  2194. static int gotoblas_initialized = 0;
  2195. extern void openblas_read_env();
  2196. void CONSTRUCTOR gotoblas_init(void) {
  2197. if (gotoblas_initialized) return;
  2198. #ifdef SMP
  2199. openblas_fork_handler();
  2200. #endif
  2201. openblas_read_env();
  2202. #ifdef PROFILE
  2203. moncontrol (0);
  2204. #endif
  2205. #ifdef DYNAMIC_ARCH
  2206. gotoblas_dynamic_init();
  2207. #endif
  2208. #if defined(SMP) && defined(OS_LINUX) && !defined(NO_AFFINITY)
  2209. gotoblas_affinity_init();
  2210. #endif
  2211. #if defined(OS_LINUX) && !defined(NO_WARMUP)
  2212. gotoblas_memory_init();
  2213. #endif
  2214. //#if defined(OS_LINUX)
  2215. #if 0
  2216. struct rlimit curlimit;
  2217. if ( getrlimit(RLIMIT_STACK, &curlimit ) == 0 )
  2218. {
  2219. if ( curlimit.rlim_cur != curlimit.rlim_max )
  2220. {
  2221. curlimit.rlim_cur = curlimit.rlim_max;
  2222. setrlimit(RLIMIT_STACK, &curlimit);
  2223. }
  2224. }
  2225. #endif
  2226. #ifdef SMP
  2227. if (blas_cpu_number == 0) blas_get_cpu_number();
  2228. #ifdef SMP_SERVER
  2229. if (blas_server_avail == 0) blas_thread_init();
  2230. #endif
  2231. #endif
  2232. #ifdef FUNCTION_PROFILE
  2233. gotoblas_profile_init();
  2234. #endif
  2235. gotoblas_initialized = 1;
  2236. #ifdef PROFILE
  2237. moncontrol (1);
  2238. #endif
  2239. }
  2240. void DESTRUCTOR gotoblas_quit(void) {
  2241. if (gotoblas_initialized == 0) return;
  2242. blas_shutdown();
  2243. #ifdef PROFILE
  2244. moncontrol (0);
  2245. #endif
  2246. #ifdef FUNCTION_PROFILE
  2247. gotoblas_profile_quit();
  2248. #endif
  2249. #if defined(SMP) && defined(OS_LINUX) && !defined(NO_AFFINITY)
  2250. gotoblas_affinity_quit();
  2251. #endif
  2252. #ifdef DYNAMIC_ARCH
  2253. gotoblas_dynamic_quit();
  2254. #endif
  2255. gotoblas_initialized = 0;
  2256. #ifdef PROFILE
  2257. moncontrol (1);
  2258. #endif
  2259. }
  2260. #if defined(_MSC_VER) && !defined(__clang__)
  2261. BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved)
  2262. {
  2263. switch (ul_reason_for_call)
  2264. {
  2265. case DLL_PROCESS_ATTACH:
  2266. gotoblas_init();
  2267. break;
  2268. case DLL_THREAD_ATTACH:
  2269. break;
  2270. case DLL_THREAD_DETACH:
  2271. break;
  2272. case DLL_PROCESS_DETACH:
  2273. gotoblas_quit();
  2274. break;
  2275. default:
  2276. break;
  2277. }
  2278. return TRUE;
  2279. }
  2280. /*
  2281. This is to allow static linking.
  2282. Code adapted from Google performance tools:
  2283. https://gperftools.googlecode.com/git-history/perftools-1.0/src/windows/port.cc
  2284. Reference:
  2285. https://sourceware.org/ml/pthreads-win32/2008/msg00028.html
  2286. http://ci.boost.org/svn-trac/browser/trunk/libs/thread/src/win32/tss_pe.cpp
  2287. */
  2288. static int on_process_term(void)
  2289. {
  2290. gotoblas_quit();
  2291. return 0;
  2292. }
  2293. #ifdef _WIN64
  2294. #pragma comment(linker, "/INCLUDE:_tls_used")
  2295. #else
  2296. #pragma comment(linker, "/INCLUDE:__tls_used")
  2297. #endif
  2298. #ifdef _WIN64
  2299. #pragma const_seg(".CRT$XLB")
  2300. #else
  2301. #pragma data_seg(".CRT$XLB")
  2302. #endif
  2303. static void (APIENTRY *dll_callback)(HINSTANCE h, DWORD ul_reason_for_call, PVOID pv) = DllMain;
  2304. #ifdef _WIN64
  2305. #pragma const_seg()
  2306. #else
  2307. #pragma data_seg()
  2308. #endif
  2309. #ifdef _WIN64
  2310. #pragma const_seg(".CRT$XTU")
  2311. #else
  2312. #pragma data_seg(".CRT$XTU")
  2313. #endif
  2314. static int(*p_process_term)(void) = on_process_term;
  2315. #ifdef _WIN64
  2316. #pragma const_seg()
  2317. #else
  2318. #pragma data_seg()
  2319. #endif
  2320. #endif
  2321. #if (defined(C_PGI) || (!defined(C_SUN) && defined(F_INTERFACE_SUN))) && (defined(ARCH_X86) || defined(ARCH_X86_64))
2322. /* Don't call me; this is just a workaround for a PGI / Sun bug */
  2323. void gotoblas_dummy_for_PGI(void) {
  2324. gotoblas_init();
  2325. gotoblas_quit();
  2326. #if 0
  2327. asm ("\t.section\t.ctors,\"aw\",@progbits; .align 8; .quad gotoblas_init; .section .text");
  2328. asm ("\t.section\t.dtors,\"aw\",@progbits; .align 8; .quad gotoblas_quit; .section .text");
  2329. #else
  2330. asm (".section .init,\"ax\"; call gotoblas_init@PLT; .section .text");
  2331. asm (".section .fini,\"ax\"; call gotoblas_quit@PLT; .section .text");
  2332. #endif
  2333. }
  2334. #endif
  2335. #endif