
Merge branch 'develop' into risc-v

tags/v0.3.13^2
Xianyi Zhang, 5 years ago · commit 7037849498
10 changed files with 218 additions and 46 deletions:

  1. cmake/cc.cmake (+3, -0)
  2. driver/others/blas_server_omp.c (+22, -26)
  3. kernel/arm/sum.c (+18, -17)
  4. kernel/simd/intrin.h (+1, -1)
  5. kernel/simd/intrin_avx.h (+10, -0)
  6. kernel/simd/intrin_avx512.h (+5, -0)
  7. kernel/simd/intrin_neon.h (+10, -0)
  8. kernel/simd/intrin_sse.h (+13, -0)
  9. kernel/x86_64/drot.c (+67, -1)
 10. kernel/x86_64/srot.c (+69, -1)

cmake/cc.cmake (+3, -0)

@@ -124,6 +124,9 @@ if (NOT DYNAMIC_ARCH)
   if (HAVE_AVX)
     set (CCOMMON_OPT "${CCOMMON_OPT} -mavx")
   endif ()
+  if (HAVE_FMA3)
+    set (CCOMMON_OPT "${CCOMMON_OPT} -mfma")
+  endif ()
   if (HAVE_SSE)
     set (CCOMMON_OPT "${CCOMMON_OPT} -msse")
   endif ()
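
This is the build-system half of the FMA changes below: when the feature probe sets HAVE_FMA3, `-mfma` is appended so the compiler accepts the FMA intrinsics used in kernel/simd/ and predefines `__FMA__`. A quick probe of what a given flag set enables, as a sketch for illustration, not part of the commit:

```c
#include <stdio.h>

/* Compile with and without -mfma to see the effect of the new
 * CCOMMON_OPT flag. __FMA__ is the compiler's predefined macro. */
int main(void)
{
#ifdef __FMA__
    puts("FMA3 code generation is enabled");
#else
    puts("FMA3 code generation is NOT enabled");
#endif
    return 0;
}
```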


driver/others/blas_server_omp.c (+22, -26)

@@ -76,10 +76,28 @@ static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
 static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
 #endif
 
-void goto_set_num_threads(int num_threads) {
+static void adjust_thread_buffers() {
 
   int i=0, j=0;
 
+  //adjust buffer for each thread
+  for(i=0; i < MAX_PARALLEL_NUMBER; i++) {
+    for(j=0; j < blas_cpu_number; j++){
+      if(blas_thread_buffer[i][j] == NULL){
+        blas_thread_buffer[i][j] = blas_memory_alloc(2);
+      }
+    }
+    for(; j < MAX_CPU_NUMBER; j++){
+      if(blas_thread_buffer[i][j] != NULL){
+        blas_memory_free(blas_thread_buffer[i][j]);
+        blas_thread_buffer[i][j] = NULL;
+      }
+    }
+  }
+}
+
+void goto_set_num_threads(int num_threads) {
+
   if (num_threads < 1) num_threads = blas_num_threads;
 
   if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;
@@ -92,20 +110,7 @@ void goto_set_num_threads(int num_threads) {
 
   omp_set_num_threads(blas_cpu_number);
 
-  //adjust buffer for each thread
-  for(i=0; i<MAX_PARALLEL_NUMBER; i++) {
-    for(j=0; j<blas_cpu_number; j++){
-      if(blas_thread_buffer[i][j]==NULL){
-        blas_thread_buffer[i][j]=blas_memory_alloc(2);
-      }
-    }
-    for(; j<MAX_CPU_NUMBER; j++){
-      if(blas_thread_buffer[i][j]!=NULL){
-        blas_memory_free(blas_thread_buffer[i][j]);
-        blas_thread_buffer[i][j]=NULL;
-      }
-    }
-  }
+  adjust_thread_buffers();
 #if defined(ARCH_MIPS64)
   //set parameters for different number of threads.
   blas_set_parameter();
@@ -119,20 +124,11 @@ void openblas_set_num_threads(int num_threads) {
 
 int blas_thread_init(void){
 
-  int i=0, j=0;
-
   blas_get_cpu_number();
 
-  blas_server_avail = 1;
+  adjust_thread_buffers();
 
-  for(i=0; i<MAX_PARALLEL_NUMBER; i++) {
-    for(j=0; j<blas_num_threads; j++){
-      blas_thread_buffer[i][j]=blas_memory_alloc(2);
-    }
-    for(; j<MAX_CPU_NUMBER; j++){
-      blas_thread_buffer[i][j]=NULL;
-    }
-  }
+  blas_server_avail = 1;
 
   return 0;
 }
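
The duplicated buffer bookkeeping from `goto_set_num_threads()` and `blas_thread_init()` now lives in one helper, `adjust_thread_buffers()`, which allocates only the slots that are missing and frees only the slots beyond the active thread count, so repeated calls are cheap and idempotent. A standalone sketch of that lazy-pool pattern (`adjust_pool`, `MAX_SLOTS`, and the 4 KiB size are illustrative stand-ins, not OpenBLAS API):

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_SLOTS 8

static void *pool[MAX_SLOTS];

/* Hypothetical stand-in for adjust_thread_buffers(): grow or shrink
 * the pool so exactly `active` slots hold a live allocation. */
static void adjust_pool(int active)
{
    for (int j = 0; j < active; j++)
        if (pool[j] == NULL)
            pool[j] = malloc(4096);   /* allocate only what is missing */
    for (int j = active; j < MAX_SLOTS; j++)
        if (pool[j] != NULL) {
            free(pool[j]);            /* release slots no longer needed */
            pool[j] = NULL;
        }
}

int main(void)
{
    adjust_pool(6);   /* init: 6 live buffers                 */
    adjust_pool(2);   /* shrink: frees slots 2..5, keeps 0..1 */
    adjust_pool(4);   /* grow: reallocates only slots 2..3    */
    for (int j = 0; j < MAX_SLOTS; j++)
        printf("slot %d: %s\n", j, pool[j] ? "live" : "empty");
    adjust_pool(0);   /* cleanup */
    return 0;
}
```

In the real server, `blas_memory_alloc(2)` and `blas_memory_free()` play the role of malloc/free, and `blas_cpu_number` the role of `active`.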


kernel/arm/sum.c (+18, -17)

@@ -42,24 +42,27 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
   n *= inc_x;
   if (inc_x == 1)
   {
-#if V_SIMD
+#if V_SIMD && (!defined(DOUBLE) || (defined(DOUBLE) && V_SIMD_F64 && V_SIMD > 128))
 #ifdef DOUBLE
     const int vstep = v_nlanes_f64;
-    const int unrollx2 = n & (-vstep * 2);
+    const int unrollx4 = n & (-vstep * 4);
     const int unrollx = n & -vstep;
     v_f64 vsum0 = v_zero_f64();
     v_f64 vsum1 = v_zero_f64();
-    while (i < unrollx2)
+    v_f64 vsum2 = v_zero_f64();
+    v_f64 vsum3 = v_zero_f64();
+    for (; i < unrollx4; i += vstep * 4)
     {
-        vsum0 = v_add_f64(vsum0, v_loadu_f64(x));
-        vsum1 = v_add_f64(vsum1, v_loadu_f64(x + vstep));
-        i += vstep * 2;
+        vsum0 = v_add_f64(vsum0, v_loadu_f64(x + i));
+        vsum1 = v_add_f64(vsum1, v_loadu_f64(x + i + vstep));
+        vsum2 = v_add_f64(vsum2, v_loadu_f64(x + i + vstep * 2));
+        vsum3 = v_add_f64(vsum3, v_loadu_f64(x + i + vstep * 3));
     }
-    vsum0 = v_add_f64(vsum0, vsum1);
-    while (i < unrollx)
+    vsum0 = v_add_f64(
+        v_add_f64(vsum0, vsum1), v_add_f64(vsum2, vsum3));
+    for (; i < unrollx; i += vstep)
     {
         vsum0 = v_add_f64(vsum0, v_loadu_f64(x + i));
-        i += vstep;
     }
     sumf = v_sum_f64(vsum0);
 #else
@@ -70,20 +73,18 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
     v_f32 vsum1 = v_zero_f32();
     v_f32 vsum2 = v_zero_f32();
     v_f32 vsum3 = v_zero_f32();
-    while (i < unrollx4)
+    for (; i < unrollx4; i += vstep * 4)
     {
-        vsum0 = v_add_f32(vsum0, v_loadu_f32(x));
-        vsum1 = v_add_f32(vsum1, v_loadu_f32(x + vstep));
-        vsum2 = v_add_f32(vsum2, v_loadu_f32(x + vstep * 2));
-        vsum3 = v_add_f32(vsum3, v_loadu_f32(x + vstep * 3));
-        i += vstep * 4;
+        vsum0 = v_add_f32(vsum0, v_loadu_f32(x + i));
+        vsum1 = v_add_f32(vsum1, v_loadu_f32(x + i + vstep));
+        vsum2 = v_add_f32(vsum2, v_loadu_f32(x + i + vstep * 2));
+        vsum3 = v_add_f32(vsum3, v_loadu_f32(x + i + vstep * 3));
     }
     vsum0 = v_add_f32(
         v_add_f32(vsum0, vsum1), v_add_f32(vsum2, vsum3));
-    while (i < unrollx)
+    for (; i < unrollx; i += vstep)
     {
         vsum0 = v_add_f32(vsum0, v_loadu_f32(x + i));
-        i += vstep;
     }
     sumf = v_sum_f32(vsum0);
 #endif
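
Two things change here. First, a correctness fix in the double-precision path: the old loop advanced `i` but always loaded from `x` and `x + vstep`, so it summed the same leading elements repeatedly; every load is now indexed with `x + i`. Second, both paths unroll by four with independent accumulators, which lets the adds from different iterations overlap instead of serializing on one register. A scalar sketch of the same shape (plain C, with vector lanes replaced by consecutive elements; `n & -4` works like `n & -vstep` because the step is a power of two, e.g. 103 & -4 = 100):

```c
/* Unrolled sum with four independent dependency chains, mirroring
 * the structure of the vector kernel. */
double sum_unrolled(const double *x, long n)
{
    const long n4 = n & -4L;   /* round n down to a multiple of 4 */
    double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
    long i = 0;
    for (; i < n4; i += 4) {   /* four chains: adds can overlap */
        s0 += x[i];
        s1 += x[i + 1];
        s2 += x[i + 2];
        s3 += x[i + 3];
    }
    double s = (s0 + s1) + (s2 + s3);  /* combine partial sums */
    for (; i < n; i++)                 /* scalar tail */
        s += x[i];
    return s;
}
```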


kernel/simd/intrin.h (+1, -1)

@@ -47,7 +47,7 @@ extern "C" {
 #endif
 
 /** AVX **/
-#ifdef HAVE_AVX
+#if defined(HAVE_AVX) || defined(HAVE_FMA3)
 #include <immintrin.h>
 #endif



kernel/simd/intrin_avx.h (+10, -0)

@@ -12,6 +12,8 @@ typedef __m256d v_f64;
 ***************************/
 #define v_add_f32 _mm256_add_ps
 #define v_add_f64 _mm256_add_pd
+#define v_sub_f32 _mm256_sub_ps
+#define v_sub_f64 _mm256_sub_pd
 #define v_mul_f32 _mm256_mul_ps
 #define v_mul_f64 _mm256_mul_pd
 
@@ -19,12 +21,20 @@ typedef __m256d v_f64;
 // multiply and add, a*b + c
 #define v_muladd_f32 _mm256_fmadd_ps
 #define v_muladd_f64 _mm256_fmadd_pd
+// multiply and subtract, a*b - c
+#define v_mulsub_f32 _mm256_fmsub_ps
+#define v_mulsub_f64 _mm256_fmsub_pd
 #else
 // multiply and add, a*b + c
 BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c)
 { return v_add_f32(v_mul_f32(a, b), c); }
 BLAS_FINLINE v_f64 v_muladd_f64(v_f64 a, v_f64 b, v_f64 c)
 { return v_add_f64(v_mul_f64(a, b), c); }
+// multiply and subtract, a*b - c
+BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c)
+{ return v_sub_f32(v_mul_f32(a, b), c); }
+BLAS_FINLINE v_f64 v_mulsub_f64(v_f64 a, v_f64 b, v_f64 c)
+{ return v_sub_f64(v_mul_f64(a, b), c); }
 #endif // !HAVE_FMA3
 
 // Horizontal add: Calculates the sum of all vector elements.
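
`v_mulsub(a, b, c)` computes `a*b - c`. On the FMA3 path it maps to a single fused instruction with one rounding; the fallback multiplies then subtracts with two roundings, so the two builds can legitimately differ in the last bit. A scalar demonstration of that gap (the helper names are invented for this sketch; link with -lm):

```c
#include <math.h>   /* fma() */
#include <stdio.h>

static double mulsub_fallback(double a, double b, double c)
{ return a * b - c; }        /* two roundings, like v_sub(v_mul(a,b), c) */

static double mulsub_fused(double a, double b, double c)
{ return fma(a, b, -c); }    /* one rounding, like _mm256_fmsub_pd */

int main(void)
{
    /* a*b = 1 + 2^-29 + 2^-60 is not exactly representable in a
     * double, so the intermediate rounding changes the result. */
    double a = 1.0 + 0x1p-30, b = 1.0 + 0x1p-30, c = 1.0;
    printf("fallback: %.17g\nfused:    %.17g\n",
           mulsub_fallback(a, b, c), mulsub_fused(a, b, c));
    return 0;
}
```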


kernel/simd/intrin_avx512.h (+5, -0)

@@ -12,11 +12,16 @@ typedef __m512d v_f64;
 ***************************/
 #define v_add_f32 _mm512_add_ps
 #define v_add_f64 _mm512_add_pd
+#define v_sub_f32 _mm512_sub_ps
+#define v_sub_f64 _mm512_sub_pd
 #define v_mul_f32 _mm512_mul_ps
 #define v_mul_f64 _mm512_mul_pd
 // multiply and add, a*b + c
 #define v_muladd_f32 _mm512_fmadd_ps
 #define v_muladd_f64 _mm512_fmadd_pd
+// multiply and subtract, a*b - c
+#define v_mulsub_f32 _mm512_fmsub_ps
+#define v_mulsub_f64 _mm512_fmsub_pd
 BLAS_FINLINE float v_sum_f32(v_f32 a)
 {
     __m512 h64 = _mm512_shuffle_f32x4(a, a, _MM_SHUFFLE(3, 2, 3, 2));


kernel/simd/intrin_neon.h (+10, -0)

@@ -18,6 +18,8 @@ typedef float32x4_t v_f32;
 ***************************/
 #define v_add_f32 vaddq_f32
 #define v_add_f64 vaddq_f64
+#define v_sub_f32 vsubq_f32
+#define v_sub_f64 vsubq_f64
 #define v_mul_f32 vmulq_f32
 #define v_mul_f64 vmulq_f64
 
@@ -26,16 +28,24 @@ typedef float32x4_t v_f32;
 // multiply and add, a*b + c
 BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c)
 { return vfmaq_f32(c, a, b); }
+// multiply and subtract, a*b - c
+BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c)
+{ return vfmaq_f32(vnegq_f32(c), a, b); }
 #else
 // multiply and add, a*b + c
 BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c)
 { return vmlaq_f32(c, a, b); }
+// multiply and subtract, a*b - c
+BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c)
+{ return vmlaq_f32(vnegq_f32(c), a, b); }
 #endif
 
 // FUSED F64
 #if V_SIMD_F64
 BLAS_FINLINE v_f64 v_muladd_f64(v_f64 a, v_f64 b, v_f64 c)
 { return vfmaq_f64(c, a, b); }
+BLAS_FINLINE v_f64 v_mulsub_f64(v_f64 a, v_f64 b, v_f64 c)
+{ return vfmaq_f64(vnegq_f64(c), a, b); }
 #endif
 
 // Horizontal add: Calculates the sum of all vector elements.
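
NEON has no direct `a*b - c` form, so the header builds it from multiply-add by negating the addend: `vfmaq_f32(c, a, b)` computes `c + a*b`, hence `vfmaq_f32(vnegq_f32(c), a, b)` yields `a*b - c` (the non-fused `vmlaq_f32` variant uses the same identity). A minimal sketch of that trick, assuming an AArch64 or NEON-capable toolchain:

```c
#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
    float32x4_t a = vdupq_n_f32(2.0f);
    float32x4_t b = vdupq_n_f32(3.0f);
    float32x4_t c = vdupq_n_f32(1.0f);
    /* a*b - c as a fused multiply-add onto a negated accumulator */
    float32x4_t r = vfmaq_f32(vnegq_f32(c), a, b);
    float out[4];
    vst1q_f32(out, r);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 5 5 5 5 */
    return 0;
}
```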


kernel/simd/intrin_sse.h (+13, -0)

@@ -12,22 +12,35 @@ typedef __m128d v_f64;
 ***************************/
 #define v_add_f32 _mm_add_ps
 #define v_add_f64 _mm_add_pd
+#define v_sub_f32 _mm_sub_ps
+#define v_sub_f64 _mm_sub_pd
 #define v_mul_f32 _mm_mul_ps
 #define v_mul_f64 _mm_mul_pd
 #ifdef HAVE_FMA3
 // multiply and add, a*b + c
 #define v_muladd_f32 _mm_fmadd_ps
 #define v_muladd_f64 _mm_fmadd_pd
+// multiply and subtract, a*b - c
+#define v_mulsub_f32 _mm_fmsub_ps
+#define v_mulsub_f64 _mm_fmsub_pd
 #elif defined(HAVE_FMA4)
 // multiply and add, a*b + c
 #define v_muladd_f32 _mm_macc_ps
 #define v_muladd_f64 _mm_macc_pd
+// multiply and subtract, a*b - c
+#define v_mulsub_f32 _mm_msub_ps
+#define v_mulsub_f64 _mm_msub_pd
 #else
 // multiply and add, a*b + c
 BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c)
 { return v_add_f32(v_mul_f32(a, b), c); }
 BLAS_FINLINE v_f64 v_muladd_f64(v_f64 a, v_f64 b, v_f64 c)
 { return v_add_f64(v_mul_f64(a, b), c); }
+// multiply and subtract, a*b - c
+BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c)
+{ return v_sub_f32(v_mul_f32(a, b), c); }
+BLAS_FINLINE v_f64 v_mulsub_f64(v_f64 a, v_f64 b, v_f64 c)
+{ return v_sub_f64(v_mul_f64(a, b), c); }
 #endif // HAVE_FMA3
 
 // Horizontal add: Calculates the sum of all vector elements.
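
The SSE header picks between three encodings of the same operation: FMA3 (`_mm_fmadd_ps`/`_mm_fmsub_ps`), the older AMD FMA4 spelling (`_mm_macc_ps`/`_mm_msub_ps`), and a two-instruction fallback. A self-contained sketch of the same dispatch, using the compiler's own `__FMA__` macro in place of OpenBLAS's HAVE_FMA3 build flag:

```c
#include <immintrin.h>
#include <stdio.h>

/* a*b + c with the same fused/non-fused split as the header. */
static inline __m128 muladd(__m128 a, __m128 b, __m128 c)
{
#ifdef __FMA__
    return _mm_fmadd_ps(a, b, c);           /* one rounding */
#else
    return _mm_add_ps(_mm_mul_ps(a, b), c); /* two roundings */
#endif
}

int main(void)
{
    __m128 a = _mm_set1_ps(2.0f), b = _mm_set1_ps(3.0f), c = _mm_set1_ps(1.0f);
    float out[4];
    _mm_storeu_ps(out, muladd(a, b, c));
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 7 7 7 7 */
    return 0;
}
```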


kernel/x86_64/drot.c (+67, -1)

@@ -7,10 +7,76 @@
 #endif
 
 #ifndef HAVE_DROT_KERNEL
+#include "../simd/intrin.h"
 
 static void drot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
 {
     BLASLONG i = 0;
+#if V_SIMD_F64 && V_SIMD > 256
+    const int vstep = v_nlanes_f64;
+    const int unrollx4 = n & (-vstep * 4);
+    const int unrollx = n & -vstep;
+
+    v_f64 __c = v_setall_f64(c);
+    v_f64 __s = v_setall_f64(s);
+    v_f64 vx0, vx1, vx2, vx3;
+    v_f64 vy0, vy1, vy2, vy3;
+    v_f64 vt0, vt1, vt2, vt3;
+
+    for (; i < unrollx4; i += vstep * 4) {
+        vx0 = v_loadu_f64(x + i);
+        vx1 = v_loadu_f64(x + i + vstep);
+        vx2 = v_loadu_f64(x + i + vstep * 2);
+        vx3 = v_loadu_f64(x + i + vstep * 3);
+        vy0 = v_loadu_f64(y + i);
+        vy1 = v_loadu_f64(y + i + vstep);
+        vy2 = v_loadu_f64(y + i + vstep * 2);
+        vy3 = v_loadu_f64(y + i + vstep * 3);
+
+        vt0 = v_mul_f64(__s, vy0);
+        vt1 = v_mul_f64(__s, vy1);
+        vt2 = v_mul_f64(__s, vy2);
+        vt3 = v_mul_f64(__s, vy3);
+
+        vt0 = v_muladd_f64(__c, vx0, vt0);
+        vt1 = v_muladd_f64(__c, vx1, vt1);
+        vt2 = v_muladd_f64(__c, vx2, vt2);
+        vt3 = v_muladd_f64(__c, vx3, vt3);
+
+        v_storeu_f64(x + i, vt0);
+        v_storeu_f64(x + i + vstep, vt1);
+        v_storeu_f64(x + i + vstep * 2, vt2);
+        v_storeu_f64(x + i + vstep * 3, vt3);
+
+        vt0 = v_mul_f64(__s, vx0);
+        vt1 = v_mul_f64(__s, vx1);
+        vt2 = v_mul_f64(__s, vx2);
+        vt3 = v_mul_f64(__s, vx3);
+
+        vt0 = v_mulsub_f64(__c, vy0, vt0);
+        vt1 = v_mulsub_f64(__c, vy1, vt1);
+        vt2 = v_mulsub_f64(__c, vy2, vt2);
+        vt3 = v_mulsub_f64(__c, vy3, vt3);
+
+        v_storeu_f64(y + i, vt0);
+        v_storeu_f64(y + i + vstep, vt1);
+        v_storeu_f64(y + i + vstep * 2, vt2);
+        v_storeu_f64(y + i + vstep * 3, vt3);
+    }
+
+    for (; i < unrollx; i += vstep) {
+        vx0 = v_loadu_f64(x + i);
+        vy0 = v_loadu_f64(y + i);
+
+        vt0 = v_mul_f64(__s, vy0);
+        vt0 = v_muladd_f64(__c, vx0, vt0);
+        v_storeu_f64(x + i, vt0);
+
+        vt0 = v_mul_f64(__s, vx0);
+        vt0 = v_mulsub_f64(__c, vy0, vt0);
+        v_storeu_f64(y + i, vt0);
+    }
+#else
     FLOAT f0, f1, f2, f3;
     FLOAT x0, x1, x2, x3;
     FLOAT g0, g1, g2, g3;
@@ -53,7 +119,7 @@ static void drot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
         yp += 4;
         i += 4;
     }
-
+#endif
     while (i < n) {
         FLOAT temp = c*x[i] + s*y[i];
         y[i] = c*y[i] - s*x[i];
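
The new vector body is a Givens plane rotation: for each element, `x' = c*x + s*y` and `y' = c*y - s*x`. The kernel computes the first as `v_muladd(__c, vx, __s*vy)` and the second as `v_mulsub(__c, vy, __s*vx)`, reusing the original `vx`/`vy` loads so the store to `x` cannot corrupt `y`'s update. A scalar reference, equivalent to the tail loop above (`rot_ref` is a name invented for this sketch):

```c
/* Reference rotation: x'[i] = c*x[i] + s*y[i], y'[i] = c*y[i] - s*x[i]. */
static void rot_ref(long n, double *x, double *y, double c, double s)
{
    for (long i = 0; i < n; i++) {
        double xi = x[i];        /* keep the original x for y's update */
        x[i] = c * xi + s * y[i];
        y[i] = c * y[i] - s * xi;
    }
}
```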


kernel/x86_64/srot.c (+69, -1)

@@ -7,10 +7,78 @@
 #endif
 
 #ifndef HAVE_SROT_KERNEL
+#include"../simd/intrin.h"
+
 static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
 {
     BLASLONG i = 0;
+#if V_SIMD && (defined(HAVE_FMA3) || V_SIMD > 128)
+    const int vstep = v_nlanes_f32;
+    const int unrollx4 = n & (-vstep * 4);
+    const int unrollx = n & -vstep;
+
+    v_f32 __c = v_setall_f32(c);
+    v_f32 __s = v_setall_f32(s);
+    v_f32 vx0, vx1, vx2, vx3;
+    v_f32 vy0, vy1, vy2, vy3;
+    v_f32 vt0, vt1, vt2, vt3;
+
+    for (; i < unrollx4; i += vstep * 4) {
+        vx0 = v_loadu_f32(x + i);
+        vx1 = v_loadu_f32(x + i + vstep);
+        vx2 = v_loadu_f32(x + i + vstep * 2);
+        vx3 = v_loadu_f32(x + i + vstep * 3);
+        vy0 = v_loadu_f32(y + i);
+        vy1 = v_loadu_f32(y + i + vstep);
+        vy2 = v_loadu_f32(y + i + vstep * 2);
+        vy3 = v_loadu_f32(y + i + vstep * 3);
+
+        vt0 = v_mul_f32(__s, vy0);
+        vt1 = v_mul_f32(__s, vy1);
+        vt2 = v_mul_f32(__s, vy2);
+        vt3 = v_mul_f32(__s, vy3);
+
+        vt0 = v_muladd_f32(__c, vx0, vt0);
+        vt1 = v_muladd_f32(__c, vx1, vt1);
+        vt2 = v_muladd_f32(__c, vx2, vt2);
+        vt3 = v_muladd_f32(__c, vx3, vt3);
+
+        v_storeu_f32(x + i, vt0);
+        v_storeu_f32(x + i + vstep, vt1);
+        v_storeu_f32(x + i + vstep * 2, vt2);
+        v_storeu_f32(x + i + vstep * 3, vt3);
+
+        vt0 = v_mul_f32(__s, vx0);
+        vt1 = v_mul_f32(__s, vx1);
+        vt2 = v_mul_f32(__s, vx2);
+        vt3 = v_mul_f32(__s, vx3);
+
+        vt0 = v_mulsub_f32(__c, vy0, vt0);
+        vt1 = v_mulsub_f32(__c, vy1, vt1);
+        vt2 = v_mulsub_f32(__c, vy2, vt2);
+        vt3 = v_mulsub_f32(__c, vy3, vt3);
+
+        v_storeu_f32(y + i, vt0);
+        v_storeu_f32(y + i + vstep, vt1);
+        v_storeu_f32(y + i + vstep * 2, vt2);
+        v_storeu_f32(y + i + vstep * 3, vt3);
+
+    }
+
+    for (; i < unrollx; i += vstep) {
+        vx0 = v_loadu_f32(x + i);
+        vy0 = v_loadu_f32(y + i);
+
+        vt0 = v_mul_f32(__s, vy0);
+        vt0 = v_muladd_f32(__c, vx0, vt0);
+        v_storeu_f32(x + i, vt0);
+
+        vt0 = v_mul_f32(__s, vx0);
+        vt0 = v_mulsub_f32(__c, vy0, vt0);
+        v_storeu_f32(y + i, vt0);
+    }
+#else
     FLOAT f0, f1, f2, f3;
     FLOAT x0, x1, x2, x3;
     FLOAT g0, g1, g2, g3;
@@ -20,7 +88,6 @@ static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
     FLOAT* yp = y;
 
     BLASLONG n1 = n & (~7);
-
     while (i < n1) {
         x0 = xp[0];
         y0 = yp[0];
@@ -53,6 +120,7 @@ static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
         yp += 4;
         i += 4;
     }
+#endif
 
     while (i < n) {
         FLOAT temp = c*x[i] + s*y[i];
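
Same rotation in single precision, with a looser gate than drot's: any SIMD width qualifies if FMA3 is present, otherwise the width must exceed 128 bits. The `n & (-vstep * 4)` and `n & -vstep` bounds split the work into a 4x-unrolled main loop, a one-vector-at-a-time loop, and a scalar tail; a worked example of that arithmetic (vstep = 8 assumes 256-bit single-precision vectors):

```c
#include <stdio.h>

int main(void)
{
    const int vstep = 8;              /* assumed lanes per vector */
    const int n = 110;
    int unrollx4 = n & (-vstep * 4);  /* 96: end of the 4x-unrolled loop */
    int unrollx  = n & -vstep;        /* 104: end of the one-vector loop */
    printf("unrolled: [0, %d)  vector tail: [%d, %d)  scalar tail: [%d, %d)\n",
           unrollx4, unrollx4, unrollx, unrollx, n);
    return 0;
}
```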

