@@ -36,12 +36,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define VSSEV_FLOAT __riscv_vsse32_v_f32m4
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f32m4_tu
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMULVV_FLOAT __riscv_vfmul_vv_f32m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f32m4_tu
#else
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m1()
@@ -52,12 +54,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define VSSEV_FLOAT __riscv_vsse64_v_f64m4
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f64m4_tu
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMULVV_FLOAT __riscv_vfmul_vv_f64m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f64m4_tu
#endif
int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG incx, FLOAT *y, BLASLONG incy, FLOAT *buffer){
@@ -143,49 +147,45 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, B
iy += inc_yv;
ia += inc_av;
}
v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
temp_r2 = VFMVFS_FLOAT(v_res);
v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
temp_i2 = VFMVFS_FLOAT(v_res);
if(i < m){
gvl = VSETVL(m-i);
va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl);
va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl);
vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl);
vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl);
unsigned int gvl_rem = VSETVL(m-i);
va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl_rem);
va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl_rem);
vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl_rem);
vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl_rem);
#ifndef HEMVREV
vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl);
vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl);
vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl_rem);
vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl_rem);
vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl_rem);
vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl_rem);
#else
vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl);
vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl);
vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl_rem);
vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl_rem);
vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl_rem);
vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl_rem);
#endif
VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl);
VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl);
VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl_rem);
VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl_rem);
vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl);
vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl);
vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl_rem);
vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl_rem);
#ifndef HEMVREV
vr0 = VFMULVV_FLOAT(vx0, va0, gvl);
vr0 = VFMACCVV_FLOAT(vr0, vx1, va1, gvl);
vr1 = VFMULVV_FLOAT(vx1, va0, gvl);
vr1 = VFNMSACVV_FLOAT(vr1, vx0, va1, gvl);
vr0 = VFMACCVV_FLOAT_TU(vr0, vx0, va0, gvl_rem);
vr0 = VFMACCVV_FLOAT_TU(vr0, vx1, va1, gvl_rem);
vr1 = VFMACCVV_FLOAT_TU(vr1, vx1, va0, gvl_rem);
vr1 = VFNMSACVV_FLOAT_TU(vr1, vx0, va1, gvl_rem);
#else
vr0 = VFMULVV_FLOAT(vx0, va0, gvl);
vr0 = VFNMSACVV_FLOAT(vr0, vx1, va1, gvl);
vr1 = VFMULVV_FLOAT(vx1, va0, gvl);
vr1 = VFMACCVV_FLOAT(vr1, vx0, va1, gvl);
vr0 = VFMACCVV_FLOAT_TU(vr0, vx0, va0, gvl_rem);
vr0 = VFNMSACVV_FLOAT_TU(vr0, vx1, va1, gvl_rem);
vr1 = VFMACCVV_FLOAT_TU(vr1, vx1, va0, gvl_rem);
vr1 = VFMACCVV_FLOAT_TU(vr1, vx0, va1, gvl_rem);
#endif
v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
temp_r2 += VFMVFS_FLOAT(v_res);
v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
temp_i2 += VFMVFS_FLOAT(v_res);
}
v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
temp_r2 = VFMVFS_FLOAT(v_res);
v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
temp_i2 = VFMVFS_FLOAT(v_res);
}
y[jy] += alpha_r * temp_r2 - alpha_i * temp_i2;
y[jy+1] += alpha_r * temp_i2 + alpha_i * temp_r2;
@@ -36,12 +36,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define VSSEV_FLOAT __riscv_vsse32_v_f32m4
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f32m4_tu
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMULVV_FLOAT __riscv_vfmul_vv_f32m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f32m4_tu
#else
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m1()
@@ -52,12 +54,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define VSSEV_FLOAT __riscv_vsse64_v_f64m4
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f64m4_tu
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMULVV_FLOAT __riscv_vfmul_vv_f64m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f64m4_tu
#endif
int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG incx, FLOAT *y, BLASLONG incy, FLOAT *buffer){
@@ -142,49 +146,45 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, B
iy += inc_yv;
ia += inc_av;
}
v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
temp_r2 = VFMVFS_FLOAT(v_res);
v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
temp_i2 = VFMVFS_FLOAT(v_res);
if(i < j){
gvl = VSETVL(j-i);
va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl);
va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl);
vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl);
vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl);
unsigned int gvl_rem = VSETVL(j-i);
va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl_rem);
va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl_rem);
vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl_rem);
vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl_rem);
#ifndef HEMVREV
vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl);
vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl);
vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl_rem);
vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl_rem);
vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl_rem);
vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl_rem);
#else
vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl);
vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl);
vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl_rem);
vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl_rem);
vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl_rem);
vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl_rem);
#endif
VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl);
VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl);
VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl_rem);
VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl_rem);
vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl);
vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl);
vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl_rem);
vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl_rem);
#ifndef HEMVREV
vr0 = VFMULVV_FLOAT(vx0, va0, gvl);
vr0 = VFMACCVV_FLOAT(vr0, vx1, va1, gvl);
vr1 = VFMULVV_FLOAT(vx1, va0, gvl);
vr1 = VFNMSACVV_FLOAT(vr1, vx0, va1, gvl);
vr0 = VFMACCVV_FLOAT_TU(vr0, vx0, va0, gvl_rem);
vr0 = VFMACCVV_FLOAT_TU(vr0, vx1, va1, gvl_rem);
vr1 = VFMACCVV_FLOAT_TU(vr1, vx1, va0, gvl_rem);
vr1 = VFNMSACVV_FLOAT_TU(vr1, vx0, va1, gvl_rem);
#else
vr0 = VFMULVV_FLOAT(vx0, va0, gvl);
vr0 = VFNMSACVV_FLOAT(vr0, vx1, va1, gvl);
vr1 = VFMULVV_FLOAT(vx1, va0, gvl);
vr1 = VFMACCVV_FLOAT(vr1, vx0, va1, gvl);
vr0 = VFMACCVV_FLOAT_TU(vr0, vx0, va0, gvl_rem);
vr0 = VFNMSACVV_FLOAT_TU(vr0, vx1, va1, gvl_rem);
vr1 = VFMACCVV_FLOAT_TU(vr1, vx1, va0, gvl_rem);
vr1 = VFMACCVV_FLOAT_TU(vr1, vx0, va1, gvl_rem);
#endif
v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
temp_r2 += VFMVFS_FLOAT(v_res);
v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
temp_i2 += VFMVFS_FLOAT(v_res);
}
v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
temp_r2 = VFMVFS_FLOAT(v_res);
v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
temp_i2 = VFMVFS_FLOAT(v_res);
}
y[jy] += temp_r1 * a_ptr[ja];
y[jy+1] += temp_i1 * a_ptr[ja];
@@ -38,6 +38,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f32m4_tu
#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f32m4_tu
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
@@ -57,6 +59,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f64m4_tu
#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f64m4_tu
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
@@ -133,38 +137,34 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i,
ix += inc_xv;
iy += inc_yv;
}
v_res = VFREDSUM_FLOAT(vr_r, v_z0, gvl);
temp2[0] = VFMVFS_FLOAT_M1(v_res);
v_res = VFREDSUM_FLOAT(vr_i, v_z0, gvl);
temp2[1] = VFMVFS_FLOAT_M1(v_res);
if(i < m){
gvl = VSETVL(m-i);
vy_r = VLSEV_FLOAT(&y[2 * iy], stride_y, gvl);
vy_i = VLSEV_FLOAT(&y[2 * iy + 1], stride_y, gvl);
va_r = VLSEV_FLOAT(&a_ptr[2 * i], 2 * sizeof(FLOAT), gvl);
va_i = VLSEV_FLOAT(&a_ptr[2 * i + 1], 2 * sizeof(FLOAT), gvl);
unsigned int gvl_rem = VSETVL(m-i);
vy_r = VLSEV_FLOAT(&y[2 * iy], stride_y, gvl_rem);
vy_i = VLSEV_FLOAT(&y[2 * iy + 1], stride_y, gvl_rem);
va_r = VLSEV_FLOAT(&a_ptr[2 * i], 2 * sizeof(FLOAT), gvl_rem);
va_i = VLSEV_FLOAT(&a_ptr[2 * i + 1], 2 * sizeof(FLOAT), gvl_rem);
vy_r = VFMACCVF_FLOAT(vy_r, temp1[0], va_r, gvl);
vy_r = VFNMSACVF_FLOAT(vy_r, temp1[1], va_i, gvl);
vy_i = VFMACCVF_FLOAT(vy_i, temp1[0], va_i, gvl);
vy_i = VFMACCVF_FLOAT(vy_i, temp1[1], va_r, gvl);
vy_r = VFMACCVF_FLOAT(vy_r, temp1[0], va_r, gvl_rem);
vy_r = VFNMSACVF_FLOAT(vy_r, temp1[1], va_i, gvl_rem);
vy_i = VFMACCVF_FLOAT(vy_i, temp1[0], va_i, gvl_rem);
vy_i = VFMACCVF_FLOAT(vy_i, temp1[1], va_r, gvl_rem);
VSSEV_FLOAT(&y[2 * iy], stride_y, vy_r, gvl);
VSSEV_FLOAT(&y[2 * iy + 1], stride_y, vy_i, gvl);
VSSEV_FLOAT(&y[2 * iy], stride_y, vy_r, gvl_rem);
VSSEV_FLOAT(&y[2 * iy + 1], stride_y, vy_i, gvl_rem);
vx_r = VLSEV_FLOAT(&x[2 * ix], stride_x, gvl);
vx_i = VLSEV_FLOAT(&x[2 * ix + 1], stride_x, gvl);
vr_r = VFMULVV_FLOAT(vx_r, va_r, gvl);
vr_r = VFNMSACVV_FLOAT(vr_r, vx_i, va_i, gvl);
vr_i = VFMULVV_FLOAT(vx_r, va_i, gvl);
vr_i = VFMACCVV_FLOAT(vr_i, vx_i, va_r, gvl);
vx_r = VLSEV_FLOAT(&x[2 * ix], stride_x, gvl_rem);
vx_i = VLSEV_FLOAT(&x[2 * ix + 1], stride_x, gvl_rem);
vr_r = VFMACCVV_FLOAT_TU(vr_r, vx_r, va_r, gvl_rem);
vr_r = VFNMSACVV_FLOAT_TU(vr_r, vx_i, va_i, gvl_rem);
vr_i = VFMACCVV_FLOAT_TU(vr_i, vx_r, va_i, gvl_rem);
vr_i = VFMACCVV_FLOAT_TU(vr_i, vx_i, va_r, gvl_rem);
v_res = VFREDSUM_FLOAT(vr_r, v_z0, gvl);
temp2[0] += VFMVFS_FLOAT_M1(v_res);
v_res = VFREDSUM_FLOAT(vr_i, v_z0, gvl);
temp2[1] += VFMVFS_FLOAT_M1(v_res);
}
v_res = VFREDSUM_FLOAT(vr_r, v_z0, gvl);
temp2[0] = VFMVFS_FLOAT_M1(v_res);
v_res = VFREDSUM_FLOAT(vr_i, v_z0, gvl);
temp2[1] = VFMVFS_FLOAT_M1(v_res);
}
y[2 * jy] += alpha_r * temp2[0] - alpha_i * temp2[1];
y[2 * jy + 1] += alpha_r * temp2[1] + alpha_i * temp2[0];
@@ -38,6 +38,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f32m4_tu
#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f32m4_tu
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
@@ -56,6 +58,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f64m4_tu
#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f64m4_tu
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
@@ -129,39 +133,35 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i,
ix += inc_xv;
iy += inc_yv;
}
v_res = VFREDSUM_FLOAT(vr_r, v_z0, gvl);
temp2[0] = VFMVFS_FLOAT_M1(v_res);
v_res = VFREDSUM_FLOAT(vr_i, v_z0, gvl);
temp2[1] = VFMVFS_FLOAT_M1(v_res);
if(i < j){
gvl = VSETVL(j-i);
vy_r = VLSEV_FLOAT(&y[2 * iy], stride_y, gvl);
vy_i = VLSEV_FLOAT(&y[2 * iy + 1], stride_y, gvl);
unsigned int gvl_rem = VSETVL(j-i);
vy_r = VLSEV_FLOAT(&y[2 * iy], stride_y, gvl_rem);
vy_i = VLSEV_FLOAT(&y[2 * iy + 1], stride_y, gvl_rem);
va_r = VLSEV_FLOAT(&a_ptr[2 * i], 2 * sizeof(FLOAT), gvl);
va_i = VLSEV_FLOAT(&a_ptr[2 * i + 1], 2 * sizeof(FLOAT), gvl);
va_r = VLSEV_FLOAT(&a_ptr[2 * i], 2 * sizeof(FLOAT), gvl_rem);
va_i = VLSEV_FLOAT(&a_ptr[2 * i + 1], 2 * sizeof(FLOAT), gvl_rem);
vy_r = VFMACCVF_FLOAT(vy_r, temp1[0], va_r, gvl);
vy_r = VFNMSACVF_FLOAT(vy_r, temp1[1], va_i, gvl);
vy_i = VFMACCVF_FLOAT(vy_i, temp1[0], va_i, gvl);
vy_i = VFMACCVF_FLOAT(vy_i, temp1[1], va_r, gvl);
vy_r = VFMACCVF_FLOAT(vy_r, temp1[0], va_r, gvl_rem);
vy_r = VFNMSACVF_FLOAT(vy_r, temp1[1], va_i, gvl_rem);
vy_i = VFMACCVF_FLOAT(vy_i, temp1[0], va_i, gvl_rem);
vy_i = VFMACCVF_FLOAT(vy_i, temp1[1], va_r, gvl_rem);
VSSEV_FLOAT(&y[2 * iy], stride_y, vy_r, gvl);
VSSEV_FLOAT(&y[2 * iy + 1], stride_y, vy_i, gvl);
VSSEV_FLOAT(&y[2 * iy], stride_y, vy_r, gvl_rem);
VSSEV_FLOAT(&y[2 * iy + 1], stride_y, vy_i, gvl_rem);
vx_r = VLSEV_FLOAT(&x[2 * ix], stride_x, gvl);
vx_i = VLSEV_FLOAT(&x[2 * ix + 1], stride_x, gvl);
vr_r = VFMULVV_FLOAT(vx_r, va_r, gvl);
vr_r = VFNMSACVV_FLOAT(vr_r, vx_i, va_i, gvl);
vr_i = VFMULVV_FLOAT(vx_r, va_i, gvl);
vr_i = VFMACCVV_FLOAT(vr_i, vx_i, va_r, gvl);
vx_r = VLSEV_FLOAT(&x[2 * ix], stride_x, gvl_rem);
vx_i = VLSEV_FLOAT(&x[2 * ix + 1], stride_x, gvl_rem);
vr_r = VFMACCVV_FLOAT_TU(vr_r, vx_r, va_r, gvl_rem);
vr_r = VFNMSACVV_FLOAT_TU(vr_r, vx_i, va_i, gvl_rem);
vr_i = VFMACCVV_FLOAT_TU(vr_i, vx_r, va_i, gvl_rem);
vr_i = VFMACCVV_FLOAT_TU(vr_i, vx_i, va_r, gvl_rem);
v_res = VFREDSUM_FLOAT(vr_r, v_z0, gvl);
temp2[0] += VFMVFS_FLOAT_M1(v_res);
v_res = VFREDSUM_FLOAT(vr_i, v_z0, gvl);
temp2[1] += VFMVFS_FLOAT_M1(v_res);
}
}
v_res = VFREDSUM_FLOAT(vr_r, v_z0, gvl);
temp2[0] = VFMVFS_FLOAT_M1(v_res);
v_res = VFREDSUM_FLOAT(vr_i, v_z0, gvl);
temp2[1] = VFMVFS_FLOAT_M1(v_res);
}
y[2 * jy] += temp1[0] * a_ptr[j * 2] - temp1[1] * a_ptr[j * 2 + 1] + alpha_r * temp2[0] - alpha_i * temp2[1];