/***************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
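
/* AXPBY kernel for LoongArch64 LASX (256-bit SIMD):
 *
 *     y[i] = alpha * x[i] + beta * y[i],   i = 0 .. n-1
 *
 * The register assignments below follow the usual OpenBLAS axpby kernel
 * interface; as an illustrative sketch (an assumption, not verified against
 * this particular build) that corresponds to:
 *
 *     int CNAME(BLASLONG n, FLOAT alpha, FLOAT *x, BLASLONG incx,
 *               FLOAT beta, FLOAT *y, BLASLONG incy);
 *
 * Scalar reference semantics, exactly as implemented by the tail loop
 * at .L998 below:
 *
 *     for (BLASLONG i = 0; i < n; i++) {
 *         *y = alpha * (*x) + beta * (*y);
 *         x += incx;
 *         y += incy;
 *     }
 *
 * The vector paths specialize on unit/non-unit strides and on whether
 * alpha and/or beta are zero.
 */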

#define ASSEMBLER
#include "common.h"

#define N     $r4
#define ALPHA $f0
#define X     $r5
#define INCX  $r6
#define BETA  $f1
#define Y     $r7
#define INCY  $r8

#define I     $r12
#define TEMP  $r13
#define t1    $r14
#define t2    $r16
#define t3    $r15
#define t4    $r17
#define XX    $r18
#define YY    $r19
#define a1    $f12
#define a2    $f13
#define VX0   $xr8
#define VX1   $xr20
#define VX2   $xr21
#define VX3   $xr22
#define VXA   $xr23
#define VXB   $xr9
#define VXZ   $xr19
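
// Each 256-bit LASX register ($xr*) holds 8 floats or 4 doubles, so every
// vector iteration below covers 8 elements: one xvld/xvst per vector in the
// single-precision build, two in the double-precision build.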

    PROLOGUE

    bge $r0, N, .L999
    movgr2fr.d a1, $r0
    ffint.s.l a1, a1                // a1 = 0.0, used for the alpha/beta tests
    slli.d INCX, INCX, BASE_SHIFT   // convert strides to byte offsets
    slli.d INCY, INCY, BASE_SHIFT
    MTG t1, ALPHA
    MTG t2, BETA
    MTG t3, a1
#ifdef DOUBLE
    xvreplgr2vr.d VXA, t1
    xvreplgr2vr.d VXB, t2
    xvreplgr2vr.d VXZ, t3
#else
    xvreplgr2vr.w VXA, t1
    xvreplgr2vr.w VXB, t2
    xvreplgr2vr.w VXZ, t3
#endif
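    // VXA, VXB and VXZ now hold alpha, beta and 0.0 broadcast across all lanes.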
    // If incx == 0 or incy == 0, the bitwise AND below is zero and we fall
    // back to the element-by-element loop at .L998. (INCX & INCY can also be
    // zero for some nonzero stride pairs; that only costs speed, not
    // correctness.)
    and TEMP, INCX, INCY
    or I, N, N                      // I = N
    beqz TEMP, .L998

    li.d TEMP, 1
    slli.d TEMP, TEMP, BASE_SHIFT   // TEMP = element size in bytes
    srai.d I, N, 3                  // I = N / 8 vector iterations
    bne INCX, TEMP, .L20
    bne INCY, TEMP, .L12            // INCX==1 and INCY!=1
    b .L11                          // INCX==1 and INCY==1
.L20:
    bne INCY, TEMP, .L22            // INCX!=1 and INCY!=1
    b .L21                          // INCX!=1 and INCY==1
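
// Dispatch: four stride paths, each split into four (alpha, beta) cases.
//   .L11x: incx == 1, incy == 1      .L12x: incx == 1, incy != 1
//   .L21x: incx != 1, incy == 1      .L22x: incx != 1, incy != 1
// Case suffix 1: alpha != 0, beta != 0    2: alpha != 0, beta == 0
//             3: alpha == 0, beta != 0    4: alpha == 0, beta == 0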

.L11:
    bge $r0, I, .L997
    CMPEQ $fcc0, ALPHA, a1
    bcnez $fcc0, .L110
    CMPEQ $fcc0, BETA, a1
    bcnez $fcc0, .L112              // ALPHA!=0 BETA==0
    b .L111                         // ALPHA!=0 BETA!=0
    .align 3

.L110:
    CMPEQ $fcc0, BETA, a1
    bcnez $fcc0, .L114              // ALPHA==0 BETA==0
    b .L113                         // ALPHA==0 BETA!=0
    .align 3

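// Contiguous x and y: full-width loads/stores; y = alpha*x + beta*y is one
// xvfmul plus one xvfmadd per vector, 8 elements per iteration.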
.L111: // ALPHA!=0 BETA!=0
    xvld VX0, X, 0 * SIZE
#ifdef DOUBLE
    xvld VX2, Y, 0 * SIZE
    xvld VX1, X, 4 * SIZE
    xvld VX3, Y, 4 * SIZE
    xvfmul.d VX0, VX0, VXA
    xvfmul.d VX1, VX1, VXA
    xvfmadd.d VX2, VX2, VXB, VX0
    xvfmadd.d VX3, VX3, VXB, VX1
    addi.d I, I, -1
    xvst VX2, Y, 0 * SIZE
    xvst VX3, Y, 4 * SIZE
#else
    xvld VX2, Y, 0 * SIZE
    xvfmul.s VX0, VX0, VXA
    addi.d I, I, -1
    xvfmadd.s VX2, VX2, VXB, VX0
    xvst VX2, Y, 0 * SIZE
#endif
    addi.d X, X, 8 * SIZE
    addi.d Y, Y, 8 * SIZE
    blt $r0, I, .L111
    b .L997
    .align 3

.L112: // ALPHA!=0 BETA==0
    xvld VX0, X, 0 * SIZE
#ifdef DOUBLE
    xvld VX1, X, 4 * SIZE
    xvfmul.d VX0, VX0, VXA
    xvfmul.d VX1, VX1, VXA
    xvst VX0, Y, 0 * SIZE
    xvst VX1, Y, 4 * SIZE
#else
    xvfmul.s VX0, VX0, VXA
    xvst VX0, Y, 0 * SIZE
#endif
    addi.d I, I, -1
    addi.d X, X, 8 * SIZE
    addi.d Y, Y, 8 * SIZE
    blt $r0, I, .L112
    b .L997
    .align 3

.L113: // ALPHA==0 BETA!=0
    xvld VX2, Y, 0 * SIZE
#ifdef DOUBLE
    xvld VX3, Y, 4 * SIZE
    xvfmul.d VX2, VX2, VXB
    xvfmul.d VX3, VX3, VXB
    xvst VX2, Y, 0 * SIZE
    xvst VX3, Y, 4 * SIZE
#else
    xvfmul.s VX2, VX2, VXB
    xvst VX2, Y, 0 * SIZE
#endif
    addi.d I, I, -1
    addi.d Y, Y, 8 * SIZE
    blt $r0, I, .L113
    b .L997
    .align 3

.L114: // ALPHA==0 BETA==0
    xvst VXZ, Y, 0 * SIZE
#ifdef DOUBLE
    xvst VXZ, Y, 4 * SIZE
#endif
    addi.d Y, Y, 8 * SIZE
    addi.d I, I, -1
    blt $r0, I, .L114
    b .L997
    .align 3

.L12: // INCX==1 and INCY!=1
    bge $r0, I, .L997
    move YY, Y
    CMPEQ $fcc0, ALPHA, a1
    bcnez $fcc0, .L120
    CMPEQ $fcc0, BETA, a1
    bcnez $fcc0, .L122              // ALPHA!=0 BETA==0
    b .L121                         // ALPHA!=0 BETA!=0
    .align 3

.L120:
    CMPEQ $fcc0, BETA, a1
    bcnez $fcc0, .L124              // ALPHA==0 BETA==0
    b .L123                         // ALPHA==0 BETA!=0
    .align 3

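// Y advances with the gather loads while YY tracks the store position, so
// the beta*y element loads can run ahead of the xvstelm scatter stores.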
.L121: // ALPHA!=0 BETA!=0
    xvld VX0, X, 0 * SIZE
#ifdef DOUBLE
    ld.d t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t4, Y, 0 * SIZE
    xvinsgr2vr.d VX2, t1, 0
    xvinsgr2vr.d VX2, t2, 1
    xvinsgr2vr.d VX2, t3, 2
    xvinsgr2vr.d VX2, t4, 3
    add.d Y, Y, INCY
    xvfmul.d VX0, VX0, VXA
    xvld VX1, X, 4 * SIZE
    xvfmadd.d VX2, VX2, VXB, VX0
    ld.d t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t4, Y, 0 * SIZE
    add.d Y, Y, INCY
    xvinsgr2vr.d VX3, t1, 0
    xvinsgr2vr.d VX3, t2, 1
    xvinsgr2vr.d VX3, t3, 2
    xvinsgr2vr.d VX3, t4, 3
    xvstelm.d VX2, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 3
    add.d YY, YY, INCY
    xvfmul.d VX1, VX1, VXA
    xvfmadd.d VX3, VX3, VXB, VX1
    xvstelm.d VX3, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 3
#else
    ld.w t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t4, Y, 0 * SIZE
    add.d Y, Y, INCY
    xvinsgr2vr.w VX2, t1, 0
    xvinsgr2vr.w VX2, t2, 1
    xvinsgr2vr.w VX2, t3, 2
    xvinsgr2vr.w VX2, t4, 3
    ld.w t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t4, Y, 0 * SIZE
    xvinsgr2vr.w VX2, t1, 4
    xvinsgr2vr.w VX2, t2, 5
    xvinsgr2vr.w VX2, t3, 6
    xvinsgr2vr.w VX2, t4, 7
    add.d Y, Y, INCY
    xvfmul.s VX0, VX0, VXA
    xvfmadd.s VX2, VX2, VXB, VX0
    xvstelm.w VX2, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 4
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 5
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 6
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 7
#endif
    add.d YY, YY, INCY
    addi.d X, X, 8 * SIZE
    addi.d I, I, -1                 // exactly one decrement per 8-element iteration
    blt $r0, I, .L121
    move Y, YY
    b .L997
    .align 3

.L122: // ALPHA!=0 BETA==0
    xvld VX0, X, 0 * SIZE
#ifdef DOUBLE
    xvld VX1, X, 4 * SIZE
    xvfmul.d VX0, VX0, VXA
    xvfmul.d VX1, VX1, VXA
    xvstelm.d VX0, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX0, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX0, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX0, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.d VX1, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX1, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX1, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX1, YY, 0, 3
#else
    xvfmul.s VX0, VX0, VXA
    xvstelm.w VX0, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 4
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 5
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 6
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 7
#endif
    add.d YY, YY, INCY
    addi.d X, X, 8 * SIZE
    addi.d I, I, -1                 // shared decrement so both precisions make progress
    blt $r0, I, .L122
    move Y, YY
    b .L997
    .align 3

.L123: // ALPHA==0 BETA!=0
#ifdef DOUBLE
    ld.d t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t4, Y, 0 * SIZE
    xvinsgr2vr.d VX2, t1, 0
    xvinsgr2vr.d VX2, t2, 1
    xvinsgr2vr.d VX2, t3, 2
    xvinsgr2vr.d VX2, t4, 3
    add.d Y, Y, INCY
    xvfmul.d VX2, VX2, VXB
    ld.d t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t4, Y, 0 * SIZE
    add.d Y, Y, INCY
    xvinsgr2vr.d VX3, t1, 0
    xvinsgr2vr.d VX3, t2, 1
    xvinsgr2vr.d VX3, t3, 2
    xvinsgr2vr.d VX3, t4, 3
    xvstelm.d VX2, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 3
    add.d YY, YY, INCY
    xvfmul.d VX3, VX3, VXB
    xvstelm.d VX3, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 3
#else
    ld.w t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t4, Y, 0 * SIZE
    add.d Y, Y, INCY
    xvinsgr2vr.w VX2, t1, 0
    xvinsgr2vr.w VX2, t2, 1
    xvinsgr2vr.w VX2, t3, 2
    xvinsgr2vr.w VX2, t4, 3
    ld.w t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t4, Y, 0 * SIZE
    xvinsgr2vr.w VX2, t1, 4
    xvinsgr2vr.w VX2, t2, 5
    xvinsgr2vr.w VX2, t3, 6
    xvinsgr2vr.w VX2, t4, 7
    add.d Y, Y, INCY
    xvfmul.s VX2, VX2, VXB
    xvstelm.w VX2, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 4
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 5
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 6
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 7
#endif
    add.d YY, YY, INCY
    addi.d I, I, -1
    blt $r0, I, .L123
    move Y, YY
    b .L997
    .align 3

.L124: // ALPHA==0 BETA==0
#ifdef DOUBLE
    xvstelm.d VXZ, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 3
#else
    xvstelm.w VXZ, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 4
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 5
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 6
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 7
#endif
    add.d YY, YY, INCY
    addi.d I, I, -1
    blt $r0, I, .L124
    move Y, YY
    b .L997
    .align 3

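// incx != 1, incy == 1: x is gathered element-wise into vector lanes with
// ld + xvinsgr2vr; y keeps full-width vector loads and stores.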
.L21: // INCX!=1 and INCY==1
    bge $r0, I, .L997
    CMPEQ $fcc0, ALPHA, a1
    bcnez $fcc0, .L210
    CMPEQ $fcc0, BETA, a1
    bcnez $fcc0, .L212              // ALPHA!=0 BETA==0
    b .L211                         // ALPHA!=0 BETA!=0
    .align 3

.L210:
    CMPEQ $fcc0, BETA, a1
    bcnez $fcc0, .L214              // ALPHA==0 BETA==0
    b .L213                         // ALPHA==0 BETA!=0
    .align 3

.L211: // ALPHA!=0 BETA!=0
    xvld VX2, Y, 0 * SIZE
#ifdef DOUBLE
    ld.d t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t4, X, 0 * SIZE
    xvinsgr2vr.d VX0, t1, 0
    xvinsgr2vr.d VX0, t2, 1
    xvinsgr2vr.d VX0, t3, 2
    xvinsgr2vr.d VX0, t4, 3
    add.d X, X, INCX
    xvfmul.d VX0, VXA, VX0
    xvfmadd.d VX2, VX2, VXB, VX0
    xvld VX3, Y, 4 * SIZE
    xvst VX2, Y, 0 * SIZE
    ld.d t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t4, X, 0 * SIZE
    xvinsgr2vr.d VX1, t1, 0
    xvinsgr2vr.d VX1, t2, 1
    xvinsgr2vr.d VX1, t3, 2
    xvinsgr2vr.d VX1, t4, 3
    add.d X, X, INCX
    xvfmul.d VX1, VX1, VXA
    xvfmadd.d VX3, VX3, VXB, VX1
    addi.d I, I, -1
    xvst VX3, Y, 4 * SIZE
#else
    ld.w t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t4, X, 0 * SIZE
    add.d X, X, INCX
    xvinsgr2vr.w VX0, t1, 0
    xvinsgr2vr.w VX0, t2, 1
    xvinsgr2vr.w VX0, t3, 2
    xvinsgr2vr.w VX0, t4, 3
    ld.w t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t4, X, 0 * SIZE
    xvinsgr2vr.w VX0, t1, 4
    xvinsgr2vr.w VX0, t2, 5
    xvinsgr2vr.w VX0, t3, 6
    xvinsgr2vr.w VX0, t4, 7
    add.d X, X, INCX
    xvfmul.s VX0, VXA, VX0
    xvfmadd.s VX2, VX2, VXB, VX0
    addi.d I, I, -1
    xvst VX2, Y, 0 * SIZE
#endif
    addi.d Y, Y, 8 * SIZE
    blt $r0, I, .L211
    b .L997
    .align 3

.L212: // ALPHA!=0 BETA==0
#ifdef DOUBLE
    ld.d t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t4, X, 0 * SIZE
    xvinsgr2vr.d VX0, t1, 0
    xvinsgr2vr.d VX0, t2, 1
    xvinsgr2vr.d VX0, t3, 2
    xvinsgr2vr.d VX0, t4, 3
    add.d X, X, INCX
    xvfmul.d VX0, VXA, VX0
    ld.d t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t4, X, 0 * SIZE
    add.d X, X, INCX
    xvinsgr2vr.d VX1, t1, 0
    xvinsgr2vr.d VX1, t2, 1
    xvinsgr2vr.d VX1, t3, 2
    xvinsgr2vr.d VX1, t4, 3
    xvst VX0, Y, 0 * SIZE
    xvfmul.d VX1, VX1, VXA
    addi.d I, I, -1
    xvst VX1, Y, 4 * SIZE
#else
    ld.w t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t4, X, 0 * SIZE
    add.d X, X, INCX
    xvinsgr2vr.w VX0, t1, 0
    xvinsgr2vr.w VX0, t2, 1
    xvinsgr2vr.w VX0, t3, 2
    xvinsgr2vr.w VX0, t4, 3
    ld.w t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t4, X, 0 * SIZE
    xvinsgr2vr.w VX0, t1, 4
    xvinsgr2vr.w VX0, t2, 5
    xvinsgr2vr.w VX0, t3, 6
    xvinsgr2vr.w VX0, t4, 7
    add.d X, X, INCX
    xvfmul.s VX0, VXA, VX0
    addi.d I, I, -1
    xvst VX0, Y, 0 * SIZE
#endif
    addi.d Y, Y, 8 * SIZE
    blt $r0, I, .L212
    b .L997
    .align 3

.L213: // ALPHA==0 BETA!=0
    xvld VX2, Y, 0 * SIZE
#ifdef DOUBLE
    xvld VX3, Y, 4 * SIZE
    xvfmul.d VX2, VX2, VXB
    xvfmul.d VX3, VX3, VXB
    xvst VX2, Y, 0 * SIZE
    xvst VX3, Y, 4 * SIZE
#else
    xvfmul.s VX2, VX2, VXB
    xvst VX2, Y, 0 * SIZE
#endif
    addi.d Y, Y, 8 * SIZE
    addi.d I, I, -1
    blt $r0, I, .L213
    b .L997
    .align 3

.L214: // ALPHA==0 BETA==0
    xvst VXZ, Y, 0 * SIZE
#ifdef DOUBLE
    xvst VXZ, Y, 4 * SIZE
#endif
    addi.d Y, Y, 8 * SIZE
    addi.d I, I, -1
    blt $r0, I, .L214
    b .L997
    .align 3

.L22: // INCX!=1 and INCY!=1
    bge $r0, I, .L997
    move YY, Y
    CMPEQ $fcc0, ALPHA, a1
    bcnez $fcc0, .L220
    CMPEQ $fcc0, BETA, a1
    bcnez $fcc0, .L222              // ALPHA!=0 BETA==0
    b .L221                         // ALPHA!=0 BETA!=0
    .align 3

.L220:
    CMPEQ $fcc0, BETA, a1
    bcnez $fcc0, .L224              // ALPHA==0 BETA==0
    b .L223                         // ALPHA==0 BETA!=0
    .align 3

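// Both strides non-unit: gather x and y element-wise, compute in vectors,
// then scatter the result through YY with xvstelm.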
.L221: // ALPHA!=0 BETA!=0
#ifdef DOUBLE
    ld.d t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t4, X, 0 * SIZE
    add.d X, X, INCX
    xvinsgr2vr.d VX0, t1, 0
    xvinsgr2vr.d VX0, t2, 1
    xvinsgr2vr.d VX0, t3, 2
    xvinsgr2vr.d VX0, t4, 3
    ld.d t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t4, Y, 0 * SIZE
    xvinsgr2vr.d VX2, t1, 0
    xvinsgr2vr.d VX2, t2, 1
    xvinsgr2vr.d VX2, t3, 2
    xvinsgr2vr.d VX2, t4, 3
    add.d Y, Y, INCY
    xvfmul.d VX0, VX0, VXA
    ld.d t1, X, 0 * SIZE
    add.d X, X, INCX
    xvfmadd.d VX2, VX2, VXB, VX0
    ld.d t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t4, X, 0 * SIZE
    add.d X, X, INCX
    xvinsgr2vr.d VX1, t1, 0
    xvinsgr2vr.d VX1, t2, 1
    xvinsgr2vr.d VX1, t3, 2
    xvinsgr2vr.d VX1, t4, 3
    xvstelm.d VX2, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 3
    add.d YY, YY, INCY
    ld.d t1, Y, 0 * SIZE
    xvinsgr2vr.d VX3, t1, 0
    add.d Y, Y, INCY
    ld.d t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t4, Y, 0 * SIZE
    xvinsgr2vr.d VX3, t2, 1
    xvinsgr2vr.d VX3, t3, 2
    xvinsgr2vr.d VX3, t4, 3
    add.d Y, Y, INCY
    xvfmul.d VX1, VX1, VXA
    xvfmadd.d VX3, VX3, VXB, VX1
    addi.d I, I, -1
    xvstelm.d VX3, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 3
#else
    ld.w t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t4, X, 0 * SIZE
    add.d X, X, INCX
    xvinsgr2vr.w VX0, t1, 0
    xvinsgr2vr.w VX0, t2, 1
    xvinsgr2vr.w VX0, t3, 2
    xvinsgr2vr.w VX0, t4, 3
    ld.w t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t4, X, 0 * SIZE
    xvinsgr2vr.w VX0, t1, 4
    xvinsgr2vr.w VX0, t2, 5
    xvinsgr2vr.w VX0, t3, 6
    xvinsgr2vr.w VX0, t4, 7
    add.d X, X, INCX
    ld.w t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t4, Y, 0 * SIZE
    xvinsgr2vr.w VX2, t1, 0
    xvinsgr2vr.w VX2, t2, 1
    xvinsgr2vr.w VX2, t3, 2
    xvinsgr2vr.w VX2, t4, 3
    add.d Y, Y, INCY
    ld.w t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t4, Y, 0 * SIZE
    xvinsgr2vr.w VX2, t1, 4
    xvinsgr2vr.w VX2, t2, 5
    xvinsgr2vr.w VX2, t3, 6
    xvinsgr2vr.w VX2, t4, 7
    add.d Y, Y, INCY
    xvfmul.s VX0, VX0, VXA
    xvfmadd.s VX2, VX2, VXB, VX0
    addi.d I, I, -1
    xvstelm.w VX2, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 4
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 5
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 6
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 7
#endif
    add.d YY, YY, INCY
    blt $r0, I, .L221
    move Y, YY
    b .L997
    .align 3

.L222: // ALPHA!=0 BETA==0
#ifdef DOUBLE
    ld.d t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t4, X, 0 * SIZE
    xvinsgr2vr.d VX0, t1, 0
    xvinsgr2vr.d VX0, t2, 1
    xvinsgr2vr.d VX0, t3, 2
    xvinsgr2vr.d VX0, t4, 3
    add.d X, X, INCX
    xvfmul.d VX0, VX0, VXA
    ld.d t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.d t4, X, 0 * SIZE
    add.d X, X, INCX
    xvinsgr2vr.d VX1, t1, 0
    xvinsgr2vr.d VX1, t2, 1
    xvinsgr2vr.d VX1, t3, 2
    xvinsgr2vr.d VX1, t4, 3
    xvstelm.d VX0, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX0, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX0, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX0, YY, 0, 3
    add.d YY, YY, INCY
    xvfmul.d VX1, VX1, VXA
    addi.d I, I, -1
    xvstelm.d VX1, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX1, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX1, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX1, YY, 0, 3
#else
    ld.w t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t4, X, 0 * SIZE
    xvinsgr2vr.w VX0, t1, 0
    xvinsgr2vr.w VX0, t2, 1
    xvinsgr2vr.w VX0, t3, 2
    xvinsgr2vr.w VX0, t4, 3
    add.d X, X, INCX
    ld.w t1, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t2, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t3, X, 0 * SIZE
    add.d X, X, INCX
    ld.w t4, X, 0 * SIZE
    xvinsgr2vr.w VX0, t1, 4
    xvinsgr2vr.w VX0, t2, 5
    xvinsgr2vr.w VX0, t3, 6
    xvinsgr2vr.w VX0, t4, 7
    add.d X, X, INCX
    xvfmul.s VX0, VX0, VXA
    addi.d I, I, -1
    xvstelm.w VX0, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 4
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 5
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 6
    add.d YY, YY, INCY
    xvstelm.w VX0, YY, 0, 7
#endif
    add.d YY, YY, INCY
    blt $r0, I, .L222
    move Y, YY
    b .L997
    .align 3

.L223: // ALPHA==0 BETA!=0
#ifdef DOUBLE
    ld.d t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t4, Y, 0 * SIZE
    xvinsgr2vr.d VX2, t1, 0
    xvinsgr2vr.d VX2, t2, 1
    xvinsgr2vr.d VX2, t3, 2
    xvinsgr2vr.d VX2, t4, 3
    add.d Y, Y, INCY
    xvfmul.d VX2, VX2, VXB
    ld.d t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.d t4, Y, 0 * SIZE
    add.d Y, Y, INCY
    xvinsgr2vr.d VX3, t1, 0
    xvinsgr2vr.d VX3, t2, 1
    xvinsgr2vr.d VX3, t3, 2
    xvinsgr2vr.d VX3, t4, 3
    xvstelm.d VX2, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX2, YY, 0, 3
    add.d YY, YY, INCY
    xvfmul.d VX3, VX3, VXB
    addi.d I, I, -1
    xvstelm.d VX3, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VX3, YY, 0, 3
#else
    ld.w t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t4, Y, 0 * SIZE
    add.d Y, Y, INCY
    xvinsgr2vr.w VX2, t1, 0
    xvinsgr2vr.w VX2, t2, 1
    xvinsgr2vr.w VX2, t3, 2
    xvinsgr2vr.w VX2, t4, 3
    ld.w t1, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t2, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t3, Y, 0 * SIZE
    add.d Y, Y, INCY
    ld.w t4, Y, 0 * SIZE
    xvinsgr2vr.w VX2, t1, 4
    xvinsgr2vr.w VX2, t2, 5
    xvinsgr2vr.w VX2, t3, 6
    xvinsgr2vr.w VX2, t4, 7
    add.d Y, Y, INCY
    xvfmul.s VX2, VX2, VXB
    addi.d I, I, -1
    xvstelm.w VX2, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 4
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 5
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 6
    add.d YY, YY, INCY
    xvstelm.w VX2, YY, 0, 7
#endif
    add.d YY, YY, INCY
    blt $r0, I, .L223
    move Y, YY
    b .L997
    .align 3

.L224: // ALPHA==0 BETA==0
#ifdef DOUBLE
    xvstelm.d VXZ, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.d VXZ, YY, 0, 3
#else
    xvstelm.w VXZ, YY, 0, 0
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 1
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 2
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 3
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 4
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 5
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 6
    add.d YY, YY, INCY
    xvstelm.w VXZ, YY, 0, 7
#endif
    add.d YY, YY, INCY
    addi.d I, I, -1
    blt $r0, I, .L224
    move Y, YY
    b .L997
    .align 3

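// Scalar tail: .L997 handles the remaining N % 8 elements; .L998 is also
// the entry point for the (INCX & INCY) == 0 fallback, with I preset to N.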
.L997:
    andi I, N, 7
    bge $r0, I, .L999
    .align 3

.L998:
    LD $f12, X, 0 * SIZE
    LD $f13, Y, 0 * SIZE
    addi.d I, I, -1
    MUL $f12, $f12, ALPHA
    MADD $f13, $f13, BETA, $f12     // y = alpha*x + beta*y
    ST $f13, Y, 0 * SIZE
    add.d X, X, INCX
    add.d Y, Y, INCY
    blt $r0, I, .L998
    .align 3

.L999:
    move $r4, $r12
    jirl $r0, $r1, 0x0
    .align 3

    EPILOGUE