@@ -75,6 +75,12 @@ DNRM2KERNEL = dnrm2_lsx.S
CNRM2KERNEL = cnrm2_lsx.S
ZNRM2KERNEL = znrm2_lsx.S
CSWAPKERNEL = cswap_lsx.S
ZSWAPKERNEL = cswap_lsx.S
CSUMKERNEL = csum_lsx.S
ZSUMKERNEL = csum_lsx.S
DGEMMKERNEL = dgemm_kernel_8x4.S
DGEMMINCOPY = dgemm_ncopy_8_lsx.S
DGEMMITCOPY = dgemm_tcopy_8_lsx.S
@@ -75,6 +75,12 @@ DNRM2KERNEL = dnrm2_lasx.S
CNRM2KERNEL = cnrm2_lasx.S
ZNRM2KERNEL = znrm2_lasx.S
CSWAPKERNEL = cswap_lasx.S
ZSWAPKERNEL = cswap_lasx.S
CSUMKERNEL = csum_lasx.S
ZSUMKERNEL = csum_lasx.S
DGEMMKERNEL = dgemm_kernel_16x4.S
DGEMMINCOPY = dgemm_ncopy_16.S
DGEMMITCOPY = dgemm_tcopy_16.S
@@ -0,0 +1,274 @@
/*******************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17
#define TEMP $r18
#define t1 $r15
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define a1 $f12
#define a2 $f13
#define a3 $f14
#define a4 $f15
#define s1 $f16
#define VX0 $xr12
#define VX1 $xr13
#define VX2 $xr14
#define VX3 $xr15
#define res1 $xr16
#define res2 $xr17
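// CSUM/ZSUM kernel (LASX): returns the plain (unscaled) sum of the real
// and imaginary parts of the N complex elements of X. res1 holds the
// vector partial sums; the scalar tail accumulates into s1 ($f16), which
// aliases the low lane of res1 ($xr16).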
PROLOGUE
xvxor.v res1, res1, res1
xvxor.v res2, res2, res2
bge $r0, N, .L999
bge $r0, INCX, .L999
li.d TEMP, 1
slli.d TEMP, TEMP, ZBASE_SHIFT
slli.d INCX, INCX, ZBASE_SHIFT
srai.d I, N, 3
bne INCX, TEMP, .L20
bge $r0, I, .L13
.align 3
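// contiguous path (INCX == 1): sums 8 complex elements per iteration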
.L11:
#ifdef DOUBLE
xvld VX0, X, 0 * SIZE
xvld VX1, X, 4 * SIZE
xvfadd.d res2, VX0, VX1
xvfadd.d res1, res1, res2
xvld VX2, X, 8 * SIZE
xvld VX3, X, 12 * SIZE
xvfadd.d res2, VX2, VX3
xvfadd.d res1, res1, res2
#else
xvld VX0, X, 0 * SIZE
xvld VX1, X, 8 * SIZE
xvfadd.s res2, VX0, VX1
xvfadd.s res1, res2, res1
#endif
addi.d X, X, 16 * SIZE
addi.d I, I, -1
blt $r0, I, .L11
.align 3
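// horizontal reduction: fold every lane of res1 into lane 0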
.L12:
#ifdef DOUBLE
xvpickve.d VX1, res1, 1
xvpickve.d VX2, res1, 2
xvpickve.d VX3, res1, 3
xvfadd.d res1, VX1, res1
xvfadd.d res1, VX2, res1
xvfadd.d res1, VX3, res1
#else
// res2 was already folded into res1 on every loop iteration, so only
// res1 needs to be reduced here. xvpickve zeroes the untouched lanes of
// its destination, so lanes 4..7 of res1 stay valid across the first adds.
xvpickve.w VX1, res1, 1
xvpickve.w VX2, res1, 2
xvpickve.w VX3, res1, 3
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
xvpickve.w VX0, res1, 4
xvpickve.w VX1, res1, 5
xvpickve.w VX2, res1, 6
xvpickve.w VX3, res1, 7
xvfadd.s res1, VX0, res1
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
#endif
.align 3
.L13:
andi I, N, 7
bge $r0, I, .L999
.align 3
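// scalar tail: handle the remaining N % 8 complex elements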
.L14:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
ADD a1, a1, a2
ADD s1, a1, s1
addi.d I, I, -1
addi.d X, X, 2 * SIZE
blt $r0, I, .L14
b .L999
.align 3
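// strided path (INCX != 1): gather 8 complex elements per iteration with scalar loads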
.L20:
bge $r0, I, .L23
.align 3
.L21:
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.d VX0, t1, 0
xvinsgr2vr.d VX0, t2, 1
xvinsgr2vr.d VX0, t3, 2
xvinsgr2vr.d VX0, t4, 3
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.d VX1, t1, 0
xvinsgr2vr.d VX1, t2, 1
xvinsgr2vr.d VX1, t3, 2
xvinsgr2vr.d VX1, t4, 3
xvfadd.d res2, VX0, VX1
xvfadd.d res1, res1, res2
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.d VX0, t1, 0
xvinsgr2vr.d VX0, t2, 1
xvinsgr2vr.d VX0, t3, 2
xvinsgr2vr.d VX0, t4, 3
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.d VX1, t1, 0
xvinsgr2vr.d VX1, t2, 1
xvinsgr2vr.d VX1, t3, 2
xvinsgr2vr.d VX1, t4, 3
xvfadd.d res2, VX0, VX1
xvfadd.d res1, res1, res2
#else
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX1, t1, 0
xvinsgr2vr.w VX1, t2, 1
xvinsgr2vr.w VX1, t3, 2
xvinsgr2vr.w VX1, t4, 3
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.w VX1, t1, 4
xvinsgr2vr.w VX1, t2, 5
xvinsgr2vr.w VX1, t3, 6
xvinsgr2vr.w VX1, t4, 7
xvfadd.s res2, VX0, VX1
xvfadd.s res1, res2, res1
#endif
addi.d I, I, -1
blt $r0, I, .L21
.align 3
.L22:
#ifdef DOUBLE
xvpickve.d VX1, res1, 1
xvpickve.d VX2, res1, 2
xvpickve.d VX3, res1, 3
xvfadd.d res1, VX1, res1
xvfadd.d res1, VX2, res1
xvfadd.d res1, VX3, res1
#else
// as in .L12: reduce res1 alone (res2 is already folded in) and add
// VX3 last so lane 7 is not dropped
xvpickve.w VX1, res1, 1
xvpickve.w VX2, res1, 2
xvpickve.w VX3, res1, 3
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
xvpickve.w VX0, res1, 4
xvpickve.w VX1, res1, 5
xvpickve.w VX2, res1, 6
xvpickve.w VX3, res1, 7
xvfadd.s res1, VX0, res1
xvfadd.s res1, VX1, res1
xvfadd.s res1, VX2, res1
xvfadd.s res1, VX3, res1
#endif
.align 3
.L23:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L24:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
ADD a1, a1, a2
ADD s1, a1, s1
addi.d I, I, -1
add.d X, X, INCX
blt $r0, I, .L24
.align 3
.L999:
#ifdef DOUBLE
fmov.d $f0, $f16 // move the double result to $f0 with the right width
#else
fmov.s $f0, $f16
#endif
jirl $r0, $r1, 0x0
.align 3
EPILOGUE
@@ -0,0 +1,266 @@
/*******************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r17
#define TEMP $r18
#define t1 $r15
#define t2 $r12
#define t3 $r13
#define t4 $r14
#define a1 $f12
#define a2 $f13
#define a3 $f14
#define a4 $f15
#define s1 $f16
#define VX0 $vr12
#define VX1 $vr13
#define VX2 $vr14
#define VX3 $vr15
#define res1 $vr16
#define res2 $vr17
#define res3 $vr18
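// CSUM/ZSUM kernel (LSX): 128-bit counterpart of csum_lasx.S with the
// same register conventions (s1/$f16 aliases the low lane of res1/$vr16).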
PROLOGUE
vxor.v res1, res1, res1
vxor.v res2, res2, res2
bge $r0, N, .L999
bge $r0, INCX, .L999
li.d TEMP, 1
slli.d TEMP, TEMP, ZBASE_SHIFT
slli.d INCX, INCX, ZBASE_SHIFT
srai.d I, N, 3
bne INCX, TEMP, .L20
bge $r0, I, .L13
.align 3
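// contiguous path (INCX == 1): sums 8 complex elements per iteration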
.L11:
#ifdef DOUBLE
vld VX0, X, 0 * SIZE
vld VX1, X, 2 * SIZE
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
vld VX2, X, 4 * SIZE
vld VX3, X, 6 * SIZE
vfadd.d res2, VX2, VX3
vfadd.d res1, res1, res2
vld VX0, X, 8 * SIZE
vld VX1, X, 10 * SIZE
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
vld VX2, X, 12 * SIZE
vld VX3, X, 14 * SIZE
vfadd.d res2, VX2, VX3
vfadd.d res1, res1, res2
#else
vld VX0, X, 0 * SIZE
vld VX1, X, 4 * SIZE
vfadd.s res2, VX0, VX1
vld VX2, X, 8 * SIZE
vld VX3, X, 12 * SIZE
vfadd.s res3, VX2, VX3
vfadd.s res2, res3, res2
vfadd.s res1, res1, res2
#endif
addi.d I, I, -1
addi.d X, X, 16 * SIZE
blt $r0, I, .L11
.align 3
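// horizontal reduction: fold every lane of res1 into lane 0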
.L12:
#ifdef DOUBLE
vreplvei.d VX1, res1, 1
vfadd.d res1, VX1, res1
#else
vreplvei.w VX1, res1, 1
vreplvei.w VX2, res1, 2
vreplvei.w VX3, res1, 3
vfadd.s res1, VX1, res1
vfadd.s res1, VX2, res1
vfadd.s res1, VX3, res1
#endif
.align 3
.L13:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L14:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
ADD a1, a1, a2
ADD s1, a1, s1
addi.d I, I, -1
addi.d X, X, 2 * SIZE
blt $r0, I, .L14
b .L999
.align 3
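// strided path (INCX != 1): gather 8 complex elements per iteration with scalar loads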
.L20:
bge $r0, I, .L23
.align 3
.L21:
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.d VX0, t1, 0
vinsgr2vr.d VX0, t2, 1
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
vinsgr2vr.d VX1, t1, 0
vinsgr2vr.d VX1, t2, 1
add.d X, X, INCX
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.d VX0, t3, 0
vinsgr2vr.d VX0, t4, 1
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
vinsgr2vr.d VX1, t3, 0
vinsgr2vr.d VX1, t4, 1
add.d X, X, INCX
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.d VX0, t1, 0
vinsgr2vr.d VX0, t2, 1
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
vinsgr2vr.d VX1, t1, 0
vinsgr2vr.d VX1, t2, 1
add.d X, X, INCX
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.d VX0, t3, 0
vinsgr2vr.d VX0, t4, 1
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
vinsgr2vr.d VX1, t3, 0
vinsgr2vr.d VX1, t4, 1
add.d X, X, INCX
vfadd.d res2, VX0, VX1
vfadd.d res1, res1, res2
#else
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
vfadd.s res2, VX0, VX1
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX2, t1, 0
vinsgr2vr.w VX2, t2, 1
vinsgr2vr.w VX2, t3, 2
vinsgr2vr.w VX2, t4, 3
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.w VX3, t1, 0
vinsgr2vr.w VX3, t2, 1
vinsgr2vr.w VX3, t3, 2
vinsgr2vr.w VX3, t4, 3
vfadd.s res3, VX2, VX3
vfadd.s res2, res3, res2
vfadd.s res1, res1, res2
#endif
addi.d I, I, -1
blt $r0, I, .L21
.align 3
.L22:
#ifdef DOUBLE
vreplvei.d VX1, res1, 1
vfadd.d res1, VX1, res1
#else
vreplvei.w VX1, res1, 1
vreplvei.w VX2, res1, 2
vreplvei.w VX3, res1, 3
vfadd.s res1, VX1, res1
vfadd.s res1, VX2, res1
vfadd.s res1, VX3, res1
#endif
.align 3
.L23:
andi I, N, 7
bge $r0, I, .L999
.align 3
.L24:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
ADD a1, a1, a2
ADD s1, a1, s1
addi.d I, I, -1
add.d X, X, INCX
blt $r0, I, .L24
.align 3
.L999:
#ifdef DOUBLE
fmov.d $f0, $f16 // move the double result to $f0 with the right width
#else
fmov.s $f0, $f16
#endif
jirl $r0, $r1, 0x0
.align 3
EPILOGUE
@@ -0,0 +1,394 @@
/*******************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r7
#define INCX $r8
#define Y $r9
#define INCY $r10
#define I $r17
#define TEMP $r18
#define XX $r5
#define YY $r6
#define t1 $r14
#define t2 $r15
#define t3 $r16
#define t4 $r19
#define a1 $f12
#define a2 $f13
#define a3 $f14
#define a4 $f15
#define b1 $f16
#define b2 $f17
#define b3 $f18
#define b4 $f19
#define VX0 $xr12
#define VX1 $xr13
#define VX2 $xr14
#define VX3 $xr15
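// CSWAP/ZSWAP kernel (LASX): exchanges the N complex elements of X and Y.
// Four code paths are dispatched on whether INCX and INCY are unit strides.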
PROLOGUE
bge $r0, N, .L999
li.d TEMP, 1
slli.d TEMP, TEMP, ZBASE_SHIFT
slli.d INCX, INCX, ZBASE_SHIFT
slli.d INCY, INCY, ZBASE_SHIFT
srai.d I, N, 2
bne INCX, TEMP, .L20
bne INCY, TEMP, .L12 // INCX==1 and INCY!=1
b .L11 // INCX==1 and INCY==1
.L20:
bne INCY, TEMP, .L22 // INCX!=1 and INCY!=1
b .L21 // INCX!=1 and INCY==1
.L11:
bge $r0, I, .L112
.align 3
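// unit strides: swap 4 complex elements per iteration with full-vector loads and stores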
.L111:
#ifdef DOUBLE
xvld VX0, X, 0 * SIZE
xvld VX1, X, 4 * SIZE
xvld VX2, Y, 0 * SIZE
xvld VX3, Y, 4 * SIZE
xvst VX2, X, 0 * SIZE
xvst VX3, X, 4 * SIZE
xvst VX0, Y, 0 * SIZE
xvst VX1, Y, 4 * SIZE
#else
xvld VX0, X, 0 * SIZE
xvld VX2, Y, 0 * SIZE
xvst VX2, X, 0 * SIZE
xvst VX0, Y, 0 * SIZE
#endif
addi.d X, X, 8 * SIZE
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L111
.align 3
.L112:
andi I, N, 3
bge $r0, I, .L999
.align 3
.L113:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
LD a3, Y, 0 * SIZE
LD a4, Y, 1 * SIZE
ST a1, Y, 0 * SIZE
ST a2, Y, 1 * SIZE
ST a3, X, 0 * SIZE
ST a4, X, 1 * SIZE
addi.d I, I, -1
addi.d X, X, 2 * SIZE
addi.d Y, Y, 2 * SIZE
blt $r0, I, .L113
b .L999
.align 3
.L12: // INCX==1 and INCY!=1
bge $r0, I, .L122
.align 3
.L121:
#ifdef DOUBLE
xvld VX0, X, 0 * SIZE
ld.d t1, Y, 0 * SIZE
xvstelm.d VX0, Y, 0 * SIZE, 0
ld.d t2, Y, 1 * SIZE
xvstelm.d VX0, Y, 1 * SIZE, 1
add.d Y, Y, INCY
ld.d t3, Y, 0 * SIZE
xvstelm.d VX0, Y, 0 * SIZE, 2
ld.d t4, Y, 1 * SIZE
xvstelm.d VX0, Y, 1 * SIZE, 3
xvinsgr2vr.d VX2, t1, 0
xvinsgr2vr.d VX2, t2, 1
xvinsgr2vr.d VX2, t3, 2
xvinsgr2vr.d VX2, t4, 3
add.d Y, Y, INCY
xvst VX2, X, 0 * SIZE
xvld VX1, X, 4 * SIZE
ld.d t1, Y, 0 * SIZE
xvstelm.d VX1, Y, 0 * SIZE, 0
ld.d t2, Y, 1 * SIZE
xvstelm.d VX1, Y, 1 * SIZE, 1
add.d Y, Y, INCY
ld.d t3, Y, 0 * SIZE
xvstelm.d VX1, Y, 0 * SIZE, 2
ld.d t4, Y, 1 * SIZE
xvstelm.d VX1, Y, 1 * SIZE, 3
xvinsgr2vr.d VX3, t1, 0
xvinsgr2vr.d VX3, t2, 1
xvinsgr2vr.d VX3, t3, 2
xvinsgr2vr.d VX3, t4, 3
add.d Y, Y, INCY
xvst VX3, X, 4 * SIZE
#else
xvld VX0, X, 0 * SIZE
ld.w t1, Y, 0 * SIZE
xvstelm.w VX0, Y, 0 * SIZE, 0
ld.w t2, Y, 1 * SIZE
xvstelm.w VX0, Y, 1 * SIZE, 1
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
xvstelm.w VX0, Y, 0 * SIZE, 2
ld.w t4, Y, 1 * SIZE
xvstelm.w VX0, Y, 1 * SIZE, 3
xvinsgr2vr.w VX2, t1, 0
xvinsgr2vr.w VX2, t2, 1
xvinsgr2vr.w VX2, t3, 2
xvinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
ld.w t1, Y, 0 * SIZE
xvstelm.w VX0, Y, 0 * SIZE, 4
ld.w t2, Y, 1 * SIZE
xvstelm.w VX0, Y, 1 * SIZE, 5
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
xvstelm.w VX0, Y, 0 * SIZE, 6
ld.w t4, Y, 1 * SIZE
xvstelm.w VX0, Y, 1 * SIZE, 7
xvinsgr2vr.w VX2, t1, 4
xvinsgr2vr.w VX2, t2, 5
xvinsgr2vr.w VX2, t3, 6
xvinsgr2vr.w VX2, t4, 7
add.d Y, Y, INCY
xvst VX2, X, 0 * SIZE
#endif
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L121
.align 3
.L122:
andi I, N, 3
bge $r0, I, .L999
.align 3
.L123:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
LD a3, Y, 0 * SIZE
LD a4, Y, 1 * SIZE
ST a1, Y, 0 * SIZE
ST a2, Y, 1 * SIZE
ST a3, X, 0 * SIZE
ST a4, X, 1 * SIZE
addi.d I, I, -1
addi.d X, X, 2 * SIZE
add.d Y, Y, INCY
blt $r0, I, .L123
b .L999
.align 3
.L21: // INCX!=1 and INCY==1
bge $r0, I, .L212
.align 3
.L211:
#ifdef DOUBLE
xvld VX2, Y, 0 * SIZE
ld.d t1, X, 0 * SIZE
xvstelm.d VX2, X, 0 * SIZE, 0
ld.d t2, X, 1 * SIZE
xvstelm.d VX2, X, 1 * SIZE, 1
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
xvstelm.d VX2, X, 0 * SIZE, 2
ld.d t4, X, 1 * SIZE
xvstelm.d VX2, X, 1 * SIZE, 3
xvinsgr2vr.d VX0, t1, 0
xvinsgr2vr.d VX0, t2, 1
xvinsgr2vr.d VX0, t3, 2
xvinsgr2vr.d VX0, t4, 3
add.d X, X, INCX
xvst VX0, Y, 0 * SIZE
xvld VX3, Y, 4 * SIZE
ld.d t1, X, 0 * SIZE
xvstelm.d VX3, X, 0 * SIZE, 0
ld.d t2, X, 1 * SIZE
xvstelm.d VX3, X, 1 * SIZE, 1
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
xvstelm.d VX3, X, 0 * SIZE, 2
ld.d t4, X, 1 * SIZE
xvstelm.d VX3, X, 1 * SIZE, 3
xvinsgr2vr.d VX1, t1, 0
xvinsgr2vr.d VX1, t2, 1
xvinsgr2vr.d VX1, t3, 2
xvinsgr2vr.d VX1, t4, 3
add.d X, X, INCX
xvst VX1, Y, 4 * SIZE
#else
xvld VX2, Y, 0 * SIZE
ld.w t1, X, 0 * SIZE
xvstelm.w VX2, X, 0 * SIZE, 0
ld.w t2, X, 1 * SIZE
xvstelm.w VX2, X, 1 * SIZE, 1
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
xvstelm.w VX2, X, 0 * SIZE, 2
ld.w t4, X, 1 * SIZE
xvstelm.w VX2, X, 1 * SIZE, 3
xvinsgr2vr.w VX0, t1, 0
xvinsgr2vr.w VX0, t2, 1
xvinsgr2vr.w VX0, t3, 2
xvinsgr2vr.w VX0, t4, 3
add.d X, X, INCX
ld.w t1, X, 0 * SIZE
xvstelm.w VX2, X, 0 * SIZE, 4
ld.w t2, X, 1 * SIZE
xvstelm.w VX2, X, 1 * SIZE, 5
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
xvstelm.w VX2, X, 0 * SIZE, 6
ld.w t4, X, 1 * SIZE
xvstelm.w VX2, X, 1 * SIZE, 7
xvinsgr2vr.w VX0, t1, 4
xvinsgr2vr.w VX0, t2, 5
xvinsgr2vr.w VX0, t3, 6
xvinsgr2vr.w VX0, t4, 7
add.d X, X, INCX
xvst VX0, Y, 0 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L211
.align 3
.L212:
andi I, N, 3
bge $r0, I, .L999
.align 3
.L213:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
LD a3, Y, 0 * SIZE
LD a4, Y, 1 * SIZE
ST a1, Y, 0 * SIZE
ST a2, Y, 1 * SIZE
ST a3, X, 0 * SIZE
ST a4, X, 1 * SIZE
addi.d I, I, -1
add.d X, X, INCX
addi.d Y, Y, 2 * SIZE
blt $r0, I, .L213
b .L999
.align 3
.L22: // INCX!=1 and INCY!=1
bge $r0, I, .L223
move XX, X
.align 3
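// non-unit strides: scalar swap of 4 complex elements per iteration;
// XX trails X so values read from Y land in X's already-read slots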
.L222:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
add.d X, X, INCX
LD a3, X, 0 * SIZE
LD a4, X, 1 * SIZE
add.d X, X, INCX
LD b1, Y, 0 * SIZE
ST a1, Y, 0 * SIZE
LD b2, Y, 1 * SIZE
ST a2, Y, 1 * SIZE
add.d Y, Y, INCY
LD b3, Y, 0 * SIZE
ST a3, Y, 0 * SIZE
LD b4, Y, 1 * SIZE
ST a4, Y, 1 * SIZE
add.d Y, Y, INCY
LD a1, X, 0 * SIZE
ST b1, XX, 0 * SIZE
LD a2, X, 1 * SIZE
add.d X, X, INCX
ST b2, XX, 1 * SIZE
add.d XX, XX, INCX
LD a3, X, 0 * SIZE
ST b3, XX, 0 * SIZE
LD a4, X, 1 * SIZE
add.d X, X, INCX
ST b4, XX, 1 * SIZE
add.d XX, XX, INCX
LD b1, Y, 0 * SIZE
ST a1, Y, 0 * SIZE
LD b2, Y, 1 * SIZE
ST a2, Y, 1 * SIZE
add.d Y, Y, INCY
LD b3, Y, 0 * SIZE
ST a3, Y, 0 * SIZE
LD b4, Y, 1 * SIZE
ST a4, Y, 1 * SIZE
add.d Y, Y, INCY
ST b1, XX, 0 * SIZE
ST b2, XX, 1 * SIZE
add.d XX, XX, INCX
ST b3, XX, 0 * SIZE
ST b4, XX, 1 * SIZE
add.d XX, XX, INCX
addi.d I, I, -1
blt $r0, I, .L222
.align 3
.L223:
andi I, N, 3
bge $r0, I, .L999
.align 3
.L224:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
LD a3, Y, 0 * SIZE
LD a4, Y, 1 * SIZE
ST a1, Y, 0 * SIZE
ST a2, Y, 1 * SIZE
ST a3, X, 0 * SIZE
ST a4, X, 1 * SIZE
addi.d I, I, -1
add.d X, X, INCX
add.d Y, Y, INCY
blt $r0, I, .L224
.align 3
.L999:
move $r4, $r0 // return 0 ($r12 was never set in this kernel)
jirl $r0, $r1, 0x0
.align 3
EPILOGUE
@@ -0,0 +1,421 @@
/*******************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r7
#define INCX $r8
#define Y $r9
#define INCY $r10
#define I $r17
#define TEMP $r18
#define XX $r5
#define YY $r6
#define t1 $r14
#define t2 $r15
#define t3 $r16
#define t4 $r19
#define a1 $f12
#define a2 $f13
#define a3 $f14
#define a4 $f15
#define b1 $f16
#define b2 $f17
#define b3 $f18
#define b4 $f19
#define VX0 $vr12
#define VX1 $vr13
#define VX2 $vr14
#define VX3 $vr15
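// CSWAP/ZSWAP kernel (LSX): 128-bit counterpart of cswap_lasx.S.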
PROLOGUE
bge $r0, N, .L999
li.d TEMP, 1
slli.d TEMP, TEMP, ZBASE_SHIFT
slli.d INCX, INCX, ZBASE_SHIFT
slli.d INCY, INCY, ZBASE_SHIFT
srai.d I, N, 2
bne INCX, TEMP, .L20
bne INCY, TEMP, .L12 // INCX==1 and INCY!=1
b .L11 // INCX==1 and INCY==1
.L20:
bne INCY, TEMP, .L22 // INCX!=1 and INCY!=1
b .L21 // INCX!=1 and INCY==1
.L11:
bge $r0, I, .L112
.align 3
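// unit strides: swap 4 complex elements per iteration with full-vector loads and stores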
.L111:
#ifdef DOUBLE
vld VX0, X, 0 * SIZE
vld VX1, X, 2 * SIZE
vld VX2, Y, 0 * SIZE
vld VX3, Y, 2 * SIZE
vst VX2, X, 0 * SIZE
vst VX3, X, 2 * SIZE
vst VX0, Y, 0 * SIZE
vst VX1, Y, 2 * SIZE
vld VX0, X, 4 * SIZE
vld VX1, X, 6 * SIZE
vld VX2, Y, 4 * SIZE
vld VX3, Y, 6 * SIZE
vst VX2, X, 4 * SIZE
vst VX3, X, 6 * SIZE
vst VX0, Y, 4 * SIZE
vst VX1, Y, 6 * SIZE
#else
vld VX0, X, 0 * SIZE
vld VX1, X, 4 * SIZE
vld VX2, Y, 0 * SIZE
vld VX3, Y, 4 * SIZE
vst VX2, X, 0 * SIZE
vst VX3, X, 4 * SIZE
vst VX0, Y, 0 * SIZE
vst VX1, Y, 4 * SIZE
#endif
addi.d I, I, -1
addi.d X, X, 8 * SIZE
addi.d Y, Y, 8 * SIZE
blt $r0, I, .L111
.align 3
.L112:
andi I, N, 3
bge $r0, I, .L999
.align 3
.L113:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
LD a3, Y, 0 * SIZE
LD a4, Y, 1 * SIZE
ST a1, Y, 0 * SIZE
ST a2, Y, 1 * SIZE
ST a3, X, 0 * SIZE
ST a4, X, 1 * SIZE
addi.d I, I, -1
addi.d X, X, 2 * SIZE
addi.d Y, Y, 2 * SIZE
blt $r0, I, .L113
b .L999
.align 3
.L12: // INCX==1 and INCY!=1
bge $r0, I, .L122
.align 3
.L121:
#ifdef DOUBLE
vld VX0, X, 0 * SIZE
ld.d t1, Y, 0 * SIZE
vstelm.d VX0, Y, 0 * SIZE, 0
ld.d t2, Y, 1 * SIZE
vstelm.d VX0, Y, 1 * SIZE, 1
vinsgr2vr.d VX2, t1, 0
vinsgr2vr.d VX2, t2, 1
add.d Y, Y, INCY
vst VX2, X, 0 * SIZE
vld VX1, X, 2 * SIZE
ld.d t3, Y, 0 * SIZE
vstelm.d VX1, Y, 0 * SIZE, 0
ld.d t4, Y, 1 * SIZE
vstelm.d VX1, Y, 1 * SIZE, 1
vinsgr2vr.d VX3, t3, 0
vinsgr2vr.d VX3, t4, 1
add.d Y, Y, INCY
vst VX3, X, 2 * SIZE
vld VX0, X, 4 * SIZE
ld.d t1, Y, 0 * SIZE
vstelm.d VX0, Y, 0 * SIZE, 0
ld.d t2, Y, 1 * SIZE
vstelm.d VX0, Y, 1 * SIZE, 1
vinsgr2vr.d VX2, t1, 0
vinsgr2vr.d VX2, t2, 1
add.d Y, Y, INCY
vst VX2, X, 4 * SIZE
vld VX1, X, 6 * SIZE
ld.d t3, Y, 0 * SIZE
vstelm.d VX1, Y, 0 * SIZE, 0
ld.d t4, Y, 1 * SIZE
vstelm.d VX1, Y, 1 * SIZE, 1
vinsgr2vr.d VX3, t3, 0
vinsgr2vr.d VX3, t4, 1
add.d Y, Y, INCY
vst VX3, X, 6 * SIZE
#else
vld VX0, X, 0 * SIZE
ld.w t1, Y, 0 * SIZE
vstelm.w VX0, Y, 0 * SIZE, 0
ld.w t2, Y, 1 * SIZE
vstelm.w VX0, Y, 1 * SIZE, 1
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
vstelm.w VX0, Y, 0 * SIZE, 2
ld.w t4, Y, 1 * SIZE
vstelm.w VX0, Y, 1 * SIZE, 3
vinsgr2vr.w VX2, t1, 0
vinsgr2vr.w VX2, t2, 1
vinsgr2vr.w VX2, t3, 2
vinsgr2vr.w VX2, t4, 3
add.d Y, Y, INCY
vst VX2, X, 0 * SIZE
vld VX1, X, 4 * SIZE
ld.w t1, Y, 0 * SIZE
vstelm.w VX1, Y, 0 * SIZE, 0
ld.w t2, Y, 1 * SIZE
vstelm.w VX1, Y, 1 * SIZE, 1
add.d Y, Y, INCY
ld.w t3, Y, 0 * SIZE
vstelm.w VX1, Y, 0 * SIZE, 2
ld.w t4, Y, 1 * SIZE
vstelm.w VX1, Y, 1 * SIZE, 3
vinsgr2vr.w VX3, t1, 0
vinsgr2vr.w VX3, t2, 1
vinsgr2vr.w VX3, t3, 2
vinsgr2vr.w VX3, t4, 3
add.d Y, Y, INCY
vst VX3, X, 4 * SIZE
#endif
addi.d X, X, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L121
.align 3
.L122:
andi I, N, 3
bge $r0, I, .L999
.align 3
.L123:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
LD a3, Y, 0 * SIZE
LD a4, Y, 1 * SIZE
ST a1, Y, 0 * SIZE
ST a2, Y, 1 * SIZE
ST a3, X, 0 * SIZE
ST a4, X, 1 * SIZE
addi.d I, I, -1
addi.d X, X, 2 * SIZE
add.d Y, Y, INCY
blt $r0, I, .L123
b .L999
.align 3
.L21: // INCX!=1 and INCY==1
bge $r0, I, .L212
.align 3
.L211:
#ifdef DOUBLE
vld VX2, Y, 0 * SIZE
ld.d t1, X, 0 * SIZE
vstelm.d VX2, X, 0 * SIZE, 0
ld.d t2, X, 1 * SIZE
vstelm.d VX2, X, 1 * SIZE, 1
vinsgr2vr.d VX0, t1, 0
vinsgr2vr.d VX0, t2, 1
add.d X, X, INCX
vst VX0, Y, 0 * SIZE
vld VX3, Y, 2 * SIZE
ld.d t3, X, 0 * SIZE
vstelm.d VX3, X, 0 * SIZE, 0
ld.d t4, X, 1 * SIZE
vstelm.d VX3, X, 1 * SIZE, 1
vinsgr2vr.d VX1, t3, 0
vinsgr2vr.d VX1, t4, 1
add.d X, X, INCX
vst VX1, Y, 2 * SIZE
vld VX2, Y, 4 * SIZE
ld.d t1, X, 0 * SIZE
vstelm.d VX2, X, 0 * SIZE, 0
ld.d t2, X, 1 * SIZE
vstelm.d VX2, X, 1 * SIZE, 1
vinsgr2vr.d VX0, t1, 0
vinsgr2vr.d VX0, t2, 1
add.d X, X, INCX
vst VX0, Y, 4 * SIZE
vld VX3, Y, 6 * SIZE
ld.d t3, X, 0 * SIZE
vstelm.d VX3, X, 0 * SIZE, 0
ld.d t4, X, 1 * SIZE
vstelm.d VX3, X, 1 * SIZE, 1
vinsgr2vr.d VX1, t3, 0
vinsgr2vr.d VX1, t4, 1
add.d X, X, INCX
vst VX1, Y, 6 * SIZE
#else
vld VX2, Y, 0 * SIZE
ld.w t1, X, 0 * SIZE
vstelm.w VX2, X, 0 * SIZE, 0
ld.w t2, X, 1 * SIZE
vstelm.w VX2, X, 1 * SIZE, 1
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
vstelm.w VX2, X, 0 * SIZE, 2
ld.w t4, X, 1 * SIZE
vstelm.w VX2, X, 1 * SIZE, 3
vinsgr2vr.w VX0, t1, 0
vinsgr2vr.w VX0, t2, 1
vinsgr2vr.w VX0, t3, 2
vinsgr2vr.w VX0, t4, 3
add.d X, X, INCX
vst VX0, Y, 0 * SIZE
vld VX3, Y, 4 * SIZE
ld.w t1, X, 0 * SIZE
vstelm.w VX3, X, 0 * SIZE, 0
ld.w t2, X, 1 * SIZE
vstelm.w VX3, X, 1 * SIZE, 1
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
vstelm.w VX3, X, 0 * SIZE, 2
ld.w t4, X, 1 * SIZE
vstelm.w VX3, X, 1 * SIZE, 3
vinsgr2vr.w VX1, t1, 0
vinsgr2vr.w VX1, t2, 1
vinsgr2vr.w VX1, t3, 2
vinsgr2vr.w VX1, t4, 3
add.d X, X, INCX
vst VX1, Y, 4 * SIZE
#endif
addi.d Y, Y, 8 * SIZE
addi.d I, I, -1
blt $r0, I, .L211
.align 3
.L212:
andi I, N, 3
bge $r0, I, .L999
.align 3
.L213:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
LD a3, Y, 0 * SIZE
LD a4, Y, 1 * SIZE
ST a1, Y, 0 * SIZE
ST a2, Y, 1 * SIZE
ST a3, X, 0 * SIZE
ST a4, X, 1 * SIZE
addi.d I, I, -1
add.d X, X, INCX
addi.d Y, Y, 2 * SIZE
blt $r0, I, .L213
b .L999
.align 3
.L22: // INCX!=1 and INCY!=1
bge $r0, I, .L223
move XX, X
.align 3
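// non-unit strides: scalar swap of 4 complex elements per iteration;
// XX trails X so values read from Y land in X's already-read slots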
.L222:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
add.d X, X, INCX
LD a3, X, 0 * SIZE
LD a4, X, 1 * SIZE
add.d X, X, INCX
LD b1, Y, 0 * SIZE
ST a1, Y, 0 * SIZE
LD b2, Y, 1 * SIZE
ST a2, Y, 1 * SIZE
add.d Y, Y, INCY
LD b3, Y, 0 * SIZE
ST a3, Y, 0 * SIZE
LD b4, Y, 1 * SIZE
ST a4, Y, 1 * SIZE
add.d Y, Y, INCY
LD a1, X, 0 * SIZE
ST b1, XX, 0 * SIZE
LD a2, X, 1 * SIZE
add.d X, X, INCX
ST b2, XX, 1 * SIZE
add.d XX, XX, INCX
LD a3, X, 0 * SIZE
ST b3, XX, 0 * SIZE
LD a4, X, 1 * SIZE
add.d X, X, INCX
ST b4, XX, 1 * SIZE
add.d XX, XX, INCX
LD b1, Y, 0 * SIZE
ST a1, Y, 0 * SIZE
LD b2, Y, 1 * SIZE
ST a2, Y, 1 * SIZE
add.d Y, Y, INCY
LD b3, Y, 0 * SIZE
ST a3, Y, 0 * SIZE
LD b4, Y, 1 * SIZE
ST a4, Y, 1 * SIZE
add.d Y, Y, INCY
ST b1, XX, 0 * SIZE
ST b2, XX, 1 * SIZE
add.d XX, XX, INCX
ST b3, XX, 0 * SIZE
ST b4, XX, 1 * SIZE
add.d XX, XX, INCX
addi.d I, I, -1
blt $r0, I, .L222
.align 3
.L223:
andi I, N, 3
bge $r0, I, .L999
.align 3
.L224:
LD a1, X, 0 * SIZE
LD a2, X, 1 * SIZE
LD a3, Y, 0 * SIZE
LD a4, Y, 1 * SIZE
ST a1, Y, 0 * SIZE
ST a2, Y, 1 * SIZE
ST a3, X, 0 * SIZE
ST a4, X, 1 * SIZE
addi.d I, I, -1
add.d X, X, INCX
add.d Y, Y, INCY
blt $r0, I, .L224
.align 3
.L999:
move $r4, $r0 // return 0 ($r12 was never set in this kernel)
jirl $r0, $r1, 0x0
.align 3
EPILOGUE