- /*********************************************************************/
- /* Copyright 2009, 2010 The University of Texas at Austin. */
- /* All rights reserved. */
- /* */
- /* Redistribution and use in source and binary forms, with or */
- /* without modification, are permitted provided that the following */
- /* conditions are met: */
- /* */
- /* 1. Redistributions of source code must retain the above */
- /* copyright notice, this list of conditions and the following */
- /* disclaimer. */
- /* */
- /* 2. Redistributions in binary form must reproduce the above */
- /* copyright notice, this list of conditions and the following */
- /* disclaimer in the documentation and/or other materials */
- /* provided with the distribution. */
- /* */
- /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
- /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
- /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
- /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
- /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
- /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
- /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
- /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
- /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
- /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
- /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
- /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
- /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
- /* POSSIBILITY OF SUCH DAMAGE. */
- /* */
- /* The views and conclusions contained in the software and */
- /* documentation are those of the authors and should not be */
- /* interpreted as representing official policies, either expressed */
- /* or implied, of The University of Texas at Austin. */
- /*********************************************************************/
-
- #define ASSEMBLER
- #include "common.h"
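-
- // AXPY kernel (double precision, SSE2, 32-bit x86). A scalar C sketch of
- // the operation this file implements (reference only, not assembled):
- //
- //   for (i = 0; i < m; i++) y[i * incy] += alpha * x[i * incx];
- //
- // Unit-stride vectors take the packed paths below; any other increment
- // branches to the strided loop at .L40.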
-
- #define STACK 16
- #define ARGS 0
-
- #define STACK_M 4 + STACK + ARGS(%esp)
- #define STACK_ALPHA 16 + STACK + ARGS(%esp)
- #define STACK_X 24 + STACK + ARGS(%esp)
- #define STACK_INCX 28 + STACK + ARGS(%esp)
- #define STACK_Y 32 + STACK + ARGS(%esp)
- #define STACK_INCY 36 + STACK + ARGS(%esp)
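-
- // Argument offsets assume the four register pushes in the prologue
- // (STACK = 16 bytes) plus 4 bytes for the return address. ALPHA occupies
- // an 8-byte slot; the gap between STACK_M and STACK_ALPHA presumably
- // holds unused dummy arguments of the kernel calling convention.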
-
- #define M %ebx
- #define X %esi
- #define Y %edi
- #define INCX %ecx
- #define INCY %edx
- #define YY %ebp
-
- #define ALPHA %xmm7
-
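- // l1param.h appears to supply the prefetch macros (PREFETCH, PREFETCHW)
- // and tuning constants (PREFETCHSIZE, PREOFFSET) used in the loops below.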
- #include "l1param.h"
-
- PROLOGUE
- PROFCODE
-
- pushl %edi
- pushl %esi
- pushl %ebx
- pushl %ebp
-
- movl STACK_M, M
- movsd STACK_ALPHA, ALPHA
- movl STACK_X, X
- movl STACK_INCX, INCX
- movl STACK_Y, Y
- movl STACK_INCY, INCY
-
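- // Broadcast the scalar alpha into both 64-bit lanes of %xmm7 so that
- // packed multiplies (mulpd) scale two elements at a time.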
- unpcklpd ALPHA, ALPHA
-
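- // Convert the increments from element counts to byte strides
- // (SIZE is the element size in bytes, 8 for double).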
- leal (, INCX, SIZE), INCX
- leal (, INCY, SIZE), INCY
-
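- // Nothing to do for m <= 0: restore registers and return via .L47.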
- testl M, M
- jle .L47
-
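- // The packed path requires both vectors to have unit stride; otherwise
- // fall back to the general strided loop at .L40.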
- cmpl $SIZE, INCX
- jne .L40
- cmpl $SIZE, INCY
- jne .L40
-
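- // If Y starts on an odd 8-byte boundary, peel one scalar element so the
- // packed stores below land on 16-byte aligned addresses.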
- testl $SIZE, Y
- je .L10
-
- movsd (X), %xmm0
- mulsd ALPHA, %xmm0
- addsd (Y), %xmm0
- movsd %xmm0, (Y)
- addl $1 * SIZE, X
- addl $1 * SIZE, Y
- decl M
- jle .L19
- ALIGN_4
-
- .L10:
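- // Bias X and Y by +16 elements so the unrolled loops can reach their
- // operands with small negative displacements (presumably so that all
- // offsets fit in one-byte displacement encodings).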
- subl $-16 * SIZE, X
- subl $-16 * SIZE, Y
-
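- // Y is now 16-byte aligned. If X is aligned too, use the movaps loop;
- // otherwise the two pointers are mutually misaligned: go to .L20.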
- testl $SIZE, X
- jne .L20
-
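- // Main aligned loop: 16 elements (eight packed loads) per iteration.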
- movl M, %eax
- sarl $4, %eax
- jle .L13
-
- movaps -16 * SIZE(X), %xmm0
- movaps -14 * SIZE(X), %xmm1
- movaps -12 * SIZE(X), %xmm2
- movaps -10 * SIZE(X), %xmm3
-
- decl %eax
- jle .L12
- ALIGN_3
-
- .L11:
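- // Software-pipelined body: each block scales X, adds Y, stores the
- // result, and immediately reloads the register with data for the
- // following 16-element block.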
- #ifdef PREFETCHW
- PREFETCHW (PREFETCHSIZE + 0) - PREOFFSET(Y)
- #endif
-
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
- movaps %xmm0, -16 * SIZE(Y)
- movaps -8 * SIZE(X), %xmm0
-
- mulpd ALPHA, %xmm1
- addpd -14 * SIZE(Y), %xmm1
- movaps %xmm1, -14 * SIZE(Y)
- movaps -6 * SIZE(X), %xmm1
-
- #ifdef PREFETCH
- PREFETCH (PREFETCHSIZE + 0) - PREOFFSET(X)
- #endif
-
- mulpd ALPHA, %xmm2
- addpd -12 * SIZE(Y), %xmm2
- movaps %xmm2, -12 * SIZE(Y)
- movaps -4 * SIZE(X), %xmm2
-
- mulpd ALPHA, %xmm3
- addpd -10 * SIZE(Y), %xmm3
- movaps %xmm3, -10 * SIZE(Y)
- movaps -2 * SIZE(X), %xmm3
-
- #if defined(PREFETCHW) && !defined(FETCH128)
- PREFETCHW (PREFETCHSIZE + 64) - PREOFFSET(Y)
- #endif
-
- mulpd ALPHA, %xmm0
- addpd -8 * SIZE(Y), %xmm0
- movaps %xmm0, -8 * SIZE(Y)
- movaps 0 * SIZE(X), %xmm0
-
- mulpd ALPHA, %xmm1
- addpd -6 * SIZE(Y), %xmm1
- movaps %xmm1, -6 * SIZE(Y)
- movaps 2 * SIZE(X), %xmm1
-
- #if defined(PREFETCH) && !defined(FETCH128)
- PREFETCH (PREFETCHSIZE + 64) - PREOFFSET(X)
- #endif
-
- mulpd ALPHA, %xmm2
- addpd -4 * SIZE(Y), %xmm2
- movaps %xmm2, -4 * SIZE(Y)
- movaps 4 * SIZE(X), %xmm2
-
- mulpd ALPHA, %xmm3
- addpd -2 * SIZE(Y), %xmm3
- movaps %xmm3, -2 * SIZE(Y)
- movaps 6 * SIZE(X), %xmm3
-
- subl $-16 * SIZE, Y
- subl $-16 * SIZE, X
- decl %eax
- jg .L11
- ALIGN_3
-
- .L12:
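- // Epilogue of the unrolled loop: finish the final 16 elements, issuing
- // only the loads needed to complete this pass.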
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
- movaps %xmm0, -16 * SIZE(Y)
- movaps -8 * SIZE(X), %xmm0
-
- mulpd ALPHA, %xmm1
- addpd -14 * SIZE(Y), %xmm1
- movaps %xmm1, -14 * SIZE(Y)
- movaps -6 * SIZE(X), %xmm1
-
- mulpd ALPHA, %xmm2
- addpd -12 * SIZE(Y), %xmm2
- movaps %xmm2, -12 * SIZE(Y)
- movaps -4 * SIZE(X), %xmm2
-
- mulpd ALPHA, %xmm3
- addpd -10 * SIZE(Y), %xmm3
- movaps %xmm3, -10 * SIZE(Y)
- movaps -2 * SIZE(X), %xmm3
-
- mulpd ALPHA, %xmm0
- addpd -8 * SIZE(Y), %xmm0
- movaps %xmm0, -8 * SIZE(Y)
-
- mulpd ALPHA, %xmm1
- addpd -6 * SIZE(Y), %xmm1
- movaps %xmm1, -6 * SIZE(Y)
-
- mulpd ALPHA, %xmm2
- addpd -4 * SIZE(Y), %xmm2
- movaps %xmm2, -4 * SIZE(Y)
-
- mulpd ALPHA, %xmm3
- addpd -2 * SIZE(Y), %xmm3
- movaps %xmm3, -2 * SIZE(Y)
-
- subl $-16 * SIZE, Y
- subl $-16 * SIZE, X
- ALIGN_3
-
- .L13:
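- // Remainder handling: peel off blocks of 8, 4, 2, and finally 1
- // element (.L13 through .L16).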
- movl M, %eax
- andl $8, %eax
- jle .L14
- ALIGN_3
-
- movaps -16 * SIZE(X), %xmm0
- movaps -14 * SIZE(X), %xmm1
- movaps -12 * SIZE(X), %xmm2
- movaps -10 * SIZE(X), %xmm3
-
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
- mulpd ALPHA, %xmm1
- addpd -14 * SIZE(Y), %xmm1
- mulpd ALPHA, %xmm2
- addpd -12 * SIZE(Y), %xmm2
- mulpd ALPHA, %xmm3
- addpd -10 * SIZE(Y), %xmm3
-
- movaps %xmm0, -16 * SIZE(Y)
- movaps %xmm1, -14 * SIZE(Y)
- movaps %xmm2, -12 * SIZE(Y)
- movaps %xmm3, -10 * SIZE(Y)
-
- addl $8 * SIZE, X
- addl $8 * SIZE, Y
- ALIGN_3
-
- .L14:
- movl M, %eax
- andl $4, %eax
- jle .L15
- ALIGN_3
-
- movaps -16 * SIZE(X), %xmm0
- movaps -14 * SIZE(X), %xmm1
-
- mulpd ALPHA, %xmm0
- mulpd ALPHA, %xmm1
-
- addpd -16 * SIZE(Y), %xmm0
- addpd -14 * SIZE(Y), %xmm1
-
- movaps %xmm0, -16 * SIZE(Y)
- movaps %xmm1, -14 * SIZE(Y)
-
- addl $4 * SIZE, X
- addl $4 * SIZE, Y
- ALIGN_3
-
- .L15:
- movl M, %eax
- andl $2, %eax
- jle .L16
- ALIGN_3
-
- movaps -16 * SIZE(X), %xmm0
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
- movaps %xmm0, -16 * SIZE(Y)
-
- addl $2 * SIZE, X
- addl $2 * SIZE, Y
- ALIGN_3
-
- .L16:
- movl M, %eax
- andl $1, %eax
- jle .L19
- ALIGN_3
-
- movsd -16 * SIZE(X), %xmm0
- mulsd ALPHA, %xmm0
- addsd -16 * SIZE(Y), %xmm0
-
- movsd %xmm0, -16 * SIZE(Y)
- ALIGN_3
-
- .L19:
- popl %ebp
- popl %ebx
- popl %esi
- popl %edi
- ret
- ALIGN_3
-
- .L20:
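- // X and Y are mutually misaligned. With ALIGNED_ACCESS, keep the loads
- // from X aligned and stitch each output pair together with SHUFPD;
- // otherwise (#else below) use unaligned movsd/movhps pairs instead.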
- #ifdef ALIGNED_ACCESS
-
- movhps -16 * SIZE(X), %xmm0
-
- movl M, %eax
- sarl $4, %eax
- jle .L23
-
- movaps -15 * SIZE(X), %xmm1
- movaps -13 * SIZE(X), %xmm2
- movaps -11 * SIZE(X), %xmm3
-
- decl %eax
- jle .L22
- ALIGN_4
-
- .L21:
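- // SHUFPD_1 src, dst (presumably shufpd $1) forms { high(dst), low(src) },
- // recombining the pair of X elements that straddles each 16-byte
- // boundary from two aligned loads.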
- #ifdef PREFETCHW
- PREFETCHW (PREFETCHSIZE + 0) - PREOFFSET(Y)
- #endif
-
- SHUFPD_1 %xmm1, %xmm0
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
- movaps %xmm0, -16 * SIZE(Y)
- movaps -9 * SIZE(X), %xmm0
-
- SHUFPD_1 %xmm2, %xmm1
- mulpd ALPHA, %xmm1
- addpd -14 * SIZE(Y), %xmm1
- movaps %xmm1, -14 * SIZE(Y)
- movaps -7 * SIZE(X), %xmm1
-
- #ifdef PREFETCH
- PREFETCH (PREFETCHSIZE + 0) - PREOFFSET(X)
- #endif
-
- SHUFPD_1 %xmm3, %xmm2
- mulpd ALPHA, %xmm2
- addpd -12 * SIZE(Y), %xmm2
- movaps %xmm2, -12 * SIZE(Y)
- movaps -5 * SIZE(X), %xmm2
-
- SHUFPD_1 %xmm0, %xmm3
- mulpd ALPHA, %xmm3
- addpd -10 * SIZE(Y), %xmm3
- movaps %xmm3, -10 * SIZE(Y)
- movaps -3 * SIZE(X), %xmm3
-
- #if defined(PREFETCHW) && !defined(FETCH128)
- PREFETCHW (PREFETCHSIZE + 64) - PREOFFSET(Y)
- #endif
-
- SHUFPD_1 %xmm1, %xmm0
- mulpd ALPHA, %xmm0
- addpd -8 * SIZE(Y), %xmm0
- movaps %xmm0, -8 * SIZE(Y)
- movaps -1 * SIZE(X), %xmm0
-
- SHUFPD_1 %xmm2, %xmm1
- mulpd ALPHA, %xmm1
- addpd -6 * SIZE(Y), %xmm1
- movaps %xmm1, -6 * SIZE(Y)
- movaps 1 * SIZE(X), %xmm1
-
- #if defined(PREFETCH) && !defined(FETCH128)
- PREFETCH (PREFETCHSIZE + 64) - PREOFFSET(X)
- #endif
-
- SHUFPD_1 %xmm3, %xmm2
- mulpd ALPHA, %xmm2
- addpd -4 * SIZE(Y), %xmm2
- movaps %xmm2, -4 * SIZE(Y)
- movaps 3 * SIZE(X), %xmm2
-
- SHUFPD_1 %xmm0, %xmm3
- mulpd ALPHA, %xmm3
- addpd -2 * SIZE(Y), %xmm3
- movaps %xmm3, -2 * SIZE(Y)
- movaps 5 * SIZE(X), %xmm3
-
- subl $-16 * SIZE, X
- subl $-16 * SIZE, Y
- decl %eax
- jg .L21
- ALIGN_3
-
- .L22:
- SHUFPD_1 %xmm1, %xmm0
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
- movaps %xmm0, -16 * SIZE(Y)
- movaps -9 * SIZE(X), %xmm0
-
- SHUFPD_1 %xmm2, %xmm1
- mulpd ALPHA, %xmm1
- addpd -14 * SIZE(Y), %xmm1
- movaps %xmm1, -14 * SIZE(Y)
- movaps -7 * SIZE(X), %xmm1
-
- SHUFPD_1 %xmm3, %xmm2
- mulpd ALPHA, %xmm2
- addpd -12 * SIZE(Y), %xmm2
- movaps %xmm2, -12 * SIZE(Y)
- movaps -5 * SIZE(X), %xmm2
-
- SHUFPD_1 %xmm0, %xmm3
- mulpd ALPHA, %xmm3
- addpd -10 * SIZE(Y), %xmm3
- movaps %xmm3, -10 * SIZE(Y)
- movaps -3 * SIZE(X), %xmm3
-
- SHUFPD_1 %xmm1, %xmm0
- mulpd ALPHA, %xmm0
- addpd -8 * SIZE(Y), %xmm0
- movaps %xmm0, -8 * SIZE(Y)
- movaps -1 * SIZE(X), %xmm0
-
- SHUFPD_1 %xmm2, %xmm1
- mulpd ALPHA, %xmm1
- addpd -6 * SIZE(Y), %xmm1
- movaps %xmm1, -6 * SIZE(Y)
-
- SHUFPD_1 %xmm3, %xmm2
- mulpd ALPHA, %xmm2
- addpd -4 * SIZE(Y), %xmm2
- movaps %xmm2, -4 * SIZE(Y)
-
- SHUFPD_1 %xmm0, %xmm3
- mulpd ALPHA, %xmm3
- addpd -2 * SIZE(Y), %xmm3
- movaps %xmm3, -2 * SIZE(Y)
-
- subl $-16 * SIZE, X
- subl $-16 * SIZE, Y
- ALIGN_3
-
- .L23:
- movl M, %eax
- andl $8, %eax
- jle .L24
- ALIGN_3
-
- movaps -15 * SIZE(X), %xmm1
- movaps -13 * SIZE(X), %xmm2
- movaps -11 * SIZE(X), %xmm3
- movaps -9 * SIZE(X), %xmm4
-
- SHUFPD_1 %xmm1, %xmm0
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
- movaps %xmm0, -16 * SIZE(Y)
-
- SHUFPD_1 %xmm2, %xmm1
- mulpd ALPHA, %xmm1
- addpd -14 * SIZE(Y), %xmm1
- movaps %xmm1, -14 * SIZE(Y)
-
- SHUFPD_1 %xmm3, %xmm2
- mulpd ALPHA, %xmm2
- addpd -12 * SIZE(Y), %xmm2
- movaps %xmm2, -12 * SIZE(Y)
-
- SHUFPD_1 %xmm4, %xmm3
- mulpd ALPHA, %xmm3
- addpd -10 * SIZE(Y), %xmm3
- movaps %xmm3, -10 * SIZE(Y)
-
- movaps %xmm4, %xmm0
-
- addl $8 * SIZE, X
- addl $8 * SIZE, Y
- ALIGN_3
-
- .L24:
- movl M, %eax
- andl $4, %eax
- jle .L25
- ALIGN_3
-
- movaps -15 * SIZE(X), %xmm1
- movaps -13 * SIZE(X), %xmm2
-
- SHUFPD_1 %xmm1, %xmm0
- SHUFPD_1 %xmm2, %xmm1
-
- mulpd ALPHA, %xmm0
- mulpd ALPHA, %xmm1
-
- addpd -16 * SIZE(Y), %xmm0
- addpd -14 * SIZE(Y), %xmm1
-
- movaps %xmm0, -16 * SIZE(Y)
- movaps %xmm1, -14 * SIZE(Y)
- movaps %xmm2, %xmm0
-
- addl $4 * SIZE, X
- addl $4 * SIZE, Y
- ALIGN_3
-
- .L25:
- movl M, %eax
- andl $2, %eax
- jle .L26
- ALIGN_3
-
- movaps -15 * SIZE(X), %xmm1
- SHUFPD_1 %xmm1, %xmm0
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
-
- movaps %xmm0, -16 * SIZE(Y)
-
- addl $2 * SIZE, X
- addl $2 * SIZE, Y
- ALIGN_3
-
- .L26:
- movl M, %eax
- andl $1, %eax
- jle .L29
- ALIGN_3
-
- movsd -16 * SIZE(X), %xmm0
- mulsd ALPHA, %xmm0
- addsd -16 * SIZE(Y), %xmm0
-
- movsd %xmm0, -16 * SIZE(Y)
- ALIGN_3
-
- .L29:
- popl %ebp
- popl %ebx
- popl %esi
- popl %edi
- ret
- ALIGN_3
-
- #else
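- // No ALIGNED_ACCESS: load each pair of X elements with movsd/movhps
- // (two 8-byte loads), eight elements per iteration of this .L21.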
- movl M, %eax
- sarl $3, %eax
- jle .L23
-
- movsd -16 * SIZE(X), %xmm0
- movhps -15 * SIZE(X), %xmm0
- movsd -14 * SIZE(X), %xmm1
- movhps -13 * SIZE(X), %xmm1
- movsd -12 * SIZE(X), %xmm2
- movhps -11 * SIZE(X), %xmm2
- movsd -10 * SIZE(X), %xmm3
- movhps -9 * SIZE(X), %xmm3
-
- decl %eax
- jle .L22
- ALIGN_3
-
- .L21:
- #ifdef PREFETCHW
- PREFETCHW (PREFETCHSIZE + 0) - PREOFFSET(Y)
- #endif
-
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
- movaps %xmm0, -16 * SIZE(Y)
-
- movsd -8 * SIZE(X), %xmm0
- movhps -7 * SIZE(X), %xmm0
-
- mulpd ALPHA, %xmm1
- addpd -14 * SIZE(Y), %xmm1
- movaps %xmm1, -14 * SIZE(Y)
-
- movsd -6 * SIZE(X), %xmm1
- movhps -5 * SIZE(X), %xmm1
-
- #ifdef PREFETCH
- PREFETCH (PREFETCHSIZE + 0) - PREOFFSET(X)
- #endif
-
- mulpd ALPHA, %xmm2
- addpd -12 * SIZE(Y), %xmm2
- movaps %xmm2, -12 * SIZE(Y)
-
- movsd -4 * SIZE(X), %xmm2
- movhps -3 * SIZE(X), %xmm2
-
- mulpd ALPHA, %xmm3
- addpd -10 * SIZE(Y), %xmm3
- movaps %xmm3, -10 * SIZE(Y)
-
- movsd -2 * SIZE(X), %xmm3
- movhps -1 * SIZE(X), %xmm3
-
- subl $-8 * SIZE, Y
- subl $-8 * SIZE, X
- decl %eax
- jg .L21
- ALIGN_3
-
- .L22:
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
- movaps %xmm0, -16 * SIZE(Y)
-
- mulpd ALPHA, %xmm1
- addpd -14 * SIZE(Y), %xmm1
- movaps %xmm1, -14 * SIZE(Y)
-
- mulpd ALPHA, %xmm2
- addpd -12 * SIZE(Y), %xmm2
- movaps %xmm2, -12 * SIZE(Y)
-
- mulpd ALPHA, %xmm3
- addpd -10 * SIZE(Y), %xmm3
- movaps %xmm3, -10 * SIZE(Y)
-
- subl $-8 * SIZE, Y
- subl $-8 * SIZE, X
- ALIGN_3
-
- .L23:
- movl M, %eax
- andl $4, %eax
- jle .L25
- ALIGN_3
-
- movsd -16 * SIZE(X), %xmm0
- movhps -15 * SIZE(X), %xmm0
- movsd -14 * SIZE(X), %xmm1
- movhps -13 * SIZE(X), %xmm1
-
- mulpd ALPHA, %xmm0
- mulpd ALPHA, %xmm1
-
- addpd -16 * SIZE(Y), %xmm0
- addpd -14 * SIZE(Y), %xmm1
-
- movaps %xmm0, -16 * SIZE(Y)
- movaps %xmm1, -14 * SIZE(Y)
-
- addl $4 * SIZE, X
- addl $4 * SIZE, Y
- ALIGN_3
-
- .L25:
- movl M, %eax
- andl $2, %eax
- jle .L26
- ALIGN_3
-
- movsd -16 * SIZE(X), %xmm0
- movhps -15 * SIZE(X), %xmm0
- mulpd ALPHA, %xmm0
- addpd -16 * SIZE(Y), %xmm0
- movaps %xmm0, -16 * SIZE(Y)
-
- addl $2 * SIZE, X
- addl $2 * SIZE, Y
- ALIGN_3
-
- .L26:
- movl M, %eax
- andl $1, %eax
- jle .L29
- ALIGN_3
-
- movsd -16 * SIZE(X), %xmm0
- mulsd ALPHA, %xmm0
- addsd -16 * SIZE(Y), %xmm0
-
- movsd %xmm0, -16 * SIZE(Y)
- ALIGN_3
-
- .L29:
- popl %ebp
- popl %ebx
- popl %esi
- popl %edi
- ret
- ALIGN_3
- #endif
-
- .L40:
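- // General strided path: .L41 moves eight elements per pass, pairing
- // consecutive strided elements into xmm registers; .L46 handles the
- // remainder one element at a time.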
- movl Y, YY
- movl M, %eax
- // If incx == 0 || incy == 0, skip the unrolled loop and use the scalar
- // loop at .L46 for the whole vector.
- cmpl $0, INCX
- je .L46
- cmpl $0, INCY
- je .L46
-
- sarl $3, %eax
- jle .L45
- ALIGN_3
-
- .L41:
- movsd 0 * SIZE(X), %xmm0
- addl INCX, X
- movhpd 0 * SIZE(X), %xmm0
- addl INCX, X
- mulpd ALPHA, %xmm0
-
- movsd 0 * SIZE(YY), %xmm6
- addl INCY, YY
- movhpd 0 * SIZE(YY), %xmm6
- addl INCY, YY
- addpd %xmm6, %xmm0
-
- movsd 0 * SIZE(X), %xmm1
- addl INCX, X
- movhpd 0 * SIZE(X), %xmm1
- addl INCX, X
- mulpd ALPHA, %xmm1
-
- movsd 0 * SIZE(YY), %xmm6
- addl INCY, YY
- movhpd 0 * SIZE(YY), %xmm6
- addl INCY, YY
- addpd %xmm6, %xmm1
-
- movsd 0 * SIZE(X), %xmm2
- addl INCX, X
- movhpd 0 * SIZE(X), %xmm2
- addl INCX, X
- mulpd ALPHA, %xmm2
-
- movsd 0 * SIZE(YY), %xmm6
- addl INCY, YY
- movhpd 0 * SIZE(YY), %xmm6
- addl INCY, YY
- addpd %xmm6, %xmm2
-
- movsd 0 * SIZE(X), %xmm3
- addl INCX, X
- movhpd 0 * SIZE(X), %xmm3
- addl INCX, X
- mulpd ALPHA, %xmm3
-
- movsd 0 * SIZE(YY), %xmm6
- addl INCY, YY
- movhpd 0 * SIZE(YY), %xmm6
- addl INCY, YY
- addpd %xmm6, %xmm3
-
- movsd %xmm0, 0 * SIZE(Y)
- addl INCY, Y
- movhpd %xmm0, 0 * SIZE(Y)
- addl INCY, Y
- movsd %xmm1, 0 * SIZE(Y)
- addl INCY, Y
- movhpd %xmm1, 0 * SIZE(Y)
- addl INCY, Y
- movsd %xmm2, 0 * SIZE(Y)
- addl INCY, Y
- movhpd %xmm2, 0 * SIZE(Y)
- addl INCY, Y
- movsd %xmm3, 0 * SIZE(Y)
- addl INCY, Y
- movhpd %xmm3, 0 * SIZE(Y)
- addl INCY, Y
-
- decl %eax
- jg .L41
- ALIGN_3
-
- .L45:
- movl M, %eax
- andl $7, %eax
- jle .L47
- ALIGN_3
-
- .L46:
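- // Scalar loop: one element per iteration; also used for the entire
- // vector when incx or incy is zero (see the test above).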
- movsd (X), %xmm0
- addl INCX, X
- mulsd ALPHA, %xmm0
- addsd (Y), %xmm0
- movsd %xmm0, (Y)
- addl INCY, Y
- decl %eax
- jg .L46
- ALIGN_3
-
- .L47:
- popl %ebp
- popl %ebx
- popl %esi
- popl %edi
- ret
-
- EPILOGUE