- /*****************************************************************************
- Copyright (c) 2011, 2012 Lab of Parallel Software and Computational Science, ISCAS
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- 3. Neither the name of the ISCAS nor the names of its contributors may
- be used to endorse or promote products derived from this software
- without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- **********************************************************************************/
-
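- # AVX double-precision GEMM kernel: computes an m x n block of
- # C = alpha*A*B + C (the load/add of C is skipped for the TRMM variant)
- # from packed A and B panels. C is walked in 8x4 tiles using 256-bit ymm
- # arithmetic, with narrower 4x4, 2x4, 1x4, 8x2, ... tiles for the tails.
-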
- #define ASSEMBLER
- #include "common.h"
-
- #define old_bm %rdi
- #define old_bn %rsi
- #define old_bk %rdx
-
- #define bm %r13
- #define bn %r14
- #define bk %r15
-
- #define ALPHA %xmm0
- #define ba %rcx
- #define bb %r8
- #define C %r9
- #define ldc %r10
-
- #define i %r11
- #define k %rax
-
- #define ptrba %rdi
- #define ptrbb %rsi
- #define C0 %rbx
- #define C1 %rbp
-
- #define prebb %r12
-
- #ifndef WINDOWS_ABI
-
- #define STACKSIZE 128
-
- #define old_ldc 8+STACKSIZE(%rsp)
- #define old_offset 16+STACKSIZE(%rsp)
- #define MEMALPHA 48(%rsp)
- #define j 56(%rsp)
- #define OFFSET 64(%rsp)
- #define kk 72(%rsp)
- #define kkk 80(%rsp)
-
- #else
-
- #define STACKSIZE 512
-
- #define OLD_A 40 + STACKSIZE(%rsp)
- #define OLD_B 48 + STACKSIZE(%rsp)
- #define OLD_C 56 + STACKSIZE(%rsp)
- #define old_ldc 64 + STACKSIZE(%rsp)
- #define old_offset 72 + STACKSIZE(%rsp)
-
- #define MEMALPHA 224(%rsp)
- #define j 232(%rsp)
- #define OFFSET 240(%rsp)
- #define kk 248(%rsp)
- #define kkk 256(%rsp)
-
- #endif
-
- #define PREFETCH0 prefetcht0
- #define PREFETCH1 prefetcht0
- #define PREFETCH2 prefetcht2
-
- #define xvec0 %xmm0
- #define xvec1 %xmm1
- #define xvec2 %xmm2
- #define xvec3 %xmm3
- #define xvec4 %xmm4
- #define xvec5 %xmm5
- #define xvec6 %xmm6
- #define xvec7 %xmm7
- #define xvec8 %xmm8
- #define xvec9 %xmm9
- #define xvec10 %xmm10
- #define xvec11 %xmm11
- #define xvec12 %xmm12
- #define xvec13 %xmm13
- #define xvec14 %xmm14
- #define xvec15 %xmm15
-
- #define yvec0 %ymm0
- #define yvec1 %ymm1
- #define yvec2 %ymm2
- #define yvec3 %ymm3
- #define yvec4 %ymm4
- #define yvec5 %ymm5
- #define yvec6 %ymm6
- #define yvec7 %ymm7
- #define yvec8 %ymm8
- #define yvec9 %ymm9
- #define yvec10 %ymm10
- #define yvec11 %ymm11
- #define yvec12 %ymm12
- #define yvec13 %ymm13
- #define yvec14 %ymm14
- #define yvec15 %ymm15
-
- #define LEAQ leaq
- #define ADDQ addq
- #define MULQ imulq
- #define SARQ sarq
- #define SALQ salq
- #define ANDQ andq
- #define SUBQ subq
- #define DECQ decq
- #define JG jg
- #define JLE jle
- #define TEST testq
- #define OR orq
- #define JNE jne
- #define NOP
- #define XOR xorpd
- #undef MOVQ
- #define MOVQ movq
-
- #define XOR_DY vxorpd
- #define XOR_DX vxorpd
-
- #define LD_DY vmovapd
- #define LD_DX vmovapd
- #define LDL_DX vmovlpd
- #define LDL_DY vmovlpd
- #define LDH_DX vmovhpd
- #define LDH_DY vmovhpd
-
- #define ST_DY vmovapd
- #define ST_DX vmovapd
- #define STL_DX vmovlpd
- #define STL_DY vmovlpd
- #define STH_DX vmovhpd
- #define STH_DY vmovhpd
-
- #define EDUP_DY vmovddup
-
- #define ADD_DY vaddpd
- #define ADD_DX vaddpd
-
- #define ADD1_DY vaddpd
- #define ADD2_DY vaddpd
- #define ADDSUB_DY vaddsubpd
-
- #define MUL_DY vmulpd
- #define MUL_DX vmulpd
-
- #define SHUF_DY vperm2f128
- #define SHUF_DX vpshufd
-
- #define VPERMILP_DY vpermilpd
-
- #define BROAD_DY vbroadcastsd
- #define BROAD_DX vmovddup
-
- #define MOV_DY vmovapd
- #define MOV_DX vmovapd
-
- #define REVS_DY vshufpd
- #define REVS_DX vmovsd
-
- #define EXTRA_DY vextractf128
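-
- # The _DX/_DY suffixes mark whether a macro is used with 128-bit xvec (xmm)
- # or 256-bit yvec (ymm) operands; most pairs expand to the same VEX-encoded
- # AVX instruction either way.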
-
- PROLOGUE
-
- subq $STACKSIZE, %rsp;
- movq %rbx, 0(%rsp);
- movq %rbp, 8(%rsp);
- movq %r12, 16(%rsp);
- movq %r13, 24(%rsp);
- movq %r14, 32(%rsp);
- movq %r15, 40(%rsp);
-
- #ifdef WINDOWS_ABI
- movq %rdi, 48(%rsp)
- movq %rsi, 56(%rsp)
- movups %xmm6, 64(%rsp)
- movups %xmm7, 80(%rsp)
- movups %xmm8, 96(%rsp)
- movups %xmm9, 112(%rsp)
- movups %xmm10, 128(%rsp)
- movups %xmm11, 144(%rsp)
- movups %xmm12, 160(%rsp)
- movups %xmm13, 176(%rsp)
- movups %xmm14, 192(%rsp)
- movups %xmm15, 208(%rsp)
-
- movq ARG1, old_bm
- movq ARG2, old_bn
- movq ARG3, old_bk
- movq OLD_A, ba
- movq OLD_B, bb
- movq OLD_C, C
- movq old_ldc, ldc
- #ifdef TRMMKERNEL
- movq old_offset, %r11
- #endif
- movaps %xmm3, %xmm0
- #else
-
- movq old_ldc, ldc
- #ifdef TRMMKERNEL
- movq old_offset, %r11
- #endif
- #endif
-
- vzeroupper
-
- vmovlps ALPHA, MEMALPHA
- movq old_bm, bm
- movq old_bn, bn
- movq old_bk, bk
- leaq (, ldc, SIZE), ldc
- #ifdef TRMMKERNEL
- movq %r11, OFFSET
- #ifndef LEFT
- negq %r11;
- #endif
- movq %r11, kk
- #endif
-
- MOVQ bn,j;
- SARQ $2,j; # Rn = 4
- JLE .L0_loopE;
- ALIGN_5;
- .L0_bodyB:;
- #if defined(TRMMKERNEL) && defined(LEFT)
- MOVQ OFFSET, %rax;
- MOVQ %rax, kk;
- #endif
-
- MOVQ C,C0;
- LEAQ (C,ldc,2),C1;
- MOVQ bk, k;
- SALQ $5, k;
- LEAQ (bb, k, 1), prebb;
- MOVQ ba,ptrba;
- MOVQ bm,i;
- SARQ $3,i; # Rm = 8
- JLE .L1_loopE;
- ALIGN_5;
- .L1_bodyB:;
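- # Point ptrbb at the packed B panel; in the TRMM cases also skip the first
- # kk iterations of the 8-wide A panel and the 4-wide B panel.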
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #### Initialize Result Registers ####
- PREFETCH2 0*SIZE(prebb);
- XOR_DY yvec15, yvec15, yvec15;
- PREFETCH2 8*SIZE(prebb);
- XOR_DY yvec14, yvec14, yvec14;
- XOR_DY yvec13, yvec13, yvec13;
- ADDQ $16*SIZE, prebb
- XOR_DY yvec12, yvec12, yvec12;
- PREFETCH0 3*SIZE(C0)
- LD_DY 0*SIZE(ptrbb), yvec2;
- PREFETCH0 3*SIZE(C0, ldc, 1)
- XOR_DY yvec11, yvec11, yvec11;
- PREFETCH0 3*SIZE(C1)
- XOR_DY yvec10, yvec10, yvec10;
- PREFETCH0 3*SIZE(C1, ldc, 1)
- LD_DY 0*SIZE(ptrba), yvec0;
- XOR_DY yvec9, yvec9, yvec9;
- XOR_DY yvec8, yvec8, yvec8;
- VPERMILP_DY $0x05, yvec2, yvec3;
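- # Compute the inner-loop depth: bk for plain GEMM; for TRMM it is bk-kk or
- # kk plus the tile dimension, saved in kkk.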
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT) && !defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $8, %rax;
- #else
- ADDQ $4, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2,k;
- JLE .L2_loopE;
- ALIGN_5;
- .L2_bodyB:;
- # Computing kernel
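- # Each unroll step is one rank-1 update of the 8x4 tile: yvec0/yvec1 hold
- # eight elements of A, yvec2/yvec3 (plus their lane-swapped copies
- # yvec4/yvec5) hold the four B values in permuted order, and products are
- # accumulated into yvec8..yvec15.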
-
- #### Unroll times 1 ####
- LD_DY 4*SIZE(ptrba), yvec1;
- MUL_DY yvec0, yvec2, yvec6;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec0, yvec3, yvec7;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
- ADD_DY yvec15, yvec6, yvec15;
- ADD_DY yvec13, yvec7, yvec13;
-
- PREFETCH0 64*SIZE(ptrba)
- MUL_DY yvec1, yvec2, yvec6;
- LD_DY 4*SIZE(ptrbb), yvec2;
- MUL_DY yvec1, yvec3, yvec7;
- VPERMILP_DY $0x05, yvec2, yvec3;
- ADD_DY yvec14, yvec6, yvec14;
- ADD_DY yvec12, yvec7, yvec12;
-
- MUL_DY yvec0, yvec4, yvec6;
- MUL_DY yvec0, yvec5, yvec7;
- LD_DY 8*SIZE(ptrba), yvec0;
- ADD_DY yvec11, yvec6, yvec11;
- ADD_DY yvec9, yvec7, yvec9;
-
- MUL_DY yvec1, yvec4, yvec6;
- MUL_DY yvec1, yvec5, yvec7;
- ADD_DY yvec10, yvec6, yvec10;
- ADD_DY yvec8, yvec7, yvec8;
-
- #### Unroll times 2 ####
- LD_DY 12*SIZE(ptrba), yvec1;
- MUL_DY yvec0, yvec2, yvec6;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec0, yvec3, yvec7;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
- ADD_DY yvec15, yvec6, yvec15;
- ADD_DY yvec13, yvec7, yvec13;
-
- PREFETCH0 72*SIZE(ptrba)
- MUL_DY yvec1, yvec2, yvec6;
- LD_DY 8*SIZE(ptrbb), yvec2;
- MUL_DY yvec1, yvec3, yvec7;
- VPERMILP_DY $0x05, yvec2, yvec3;
- ADD_DY yvec14, yvec6, yvec14;
- ADD_DY yvec12, yvec7, yvec12;
-
- MUL_DY yvec0, yvec4, yvec6;
- MUL_DY yvec0, yvec5, yvec7;
- LD_DY 16*SIZE(ptrba), yvec0;
- ADD_DY yvec11, yvec6, yvec11;
- ADD_DY yvec9, yvec7, yvec9;
-
- MUL_DY yvec1, yvec4, yvec6;
- MUL_DY yvec1, yvec5, yvec7;
- ADD_DY yvec10, yvec6, yvec10;
- ADD_DY yvec8, yvec7, yvec8;
-
- #### Unroll times 3 ####
- LD_DY 20*SIZE(ptrba), yvec1;
- MUL_DY yvec0, yvec2, yvec6;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec0, yvec3, yvec7;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
- ADD_DY yvec15, yvec6, yvec15;
- ADD_DY yvec13, yvec7, yvec13;
-
- PREFETCH0 80*SIZE(ptrba)
- MUL_DY yvec1, yvec2, yvec6;
- LD_DY 12*SIZE(ptrbb), yvec2;
- ADDQ $16*SIZE, ptrbb;
- MUL_DY yvec1, yvec3, yvec7;
- VPERMILP_DY $0x05, yvec2, yvec3;
- ADD_DY yvec14, yvec6, yvec14;
- ADD_DY yvec12, yvec7, yvec12;
-
- MUL_DY yvec0, yvec4, yvec6;
- MUL_DY yvec0, yvec5, yvec7;
- LD_DY 24*SIZE(ptrba), yvec0;
- ADD_DY yvec11, yvec6, yvec11;
- ADD_DY yvec9, yvec7, yvec9;
-
- MUL_DY yvec1, yvec4, yvec6;
- MUL_DY yvec1, yvec5, yvec7;
- ADD_DY yvec10, yvec6, yvec10;
- ADD_DY yvec8, yvec7, yvec8;
-
- #### Unroll times 4 ####
- LD_DY 28*SIZE(ptrba), yvec1;
- MUL_DY yvec0, yvec2, yvec6;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec0, yvec3, yvec7;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
- ADDQ $32*SIZE, ptrba;
- ADD_DY yvec15, yvec6, yvec15;
- ADD_DY yvec13, yvec7, yvec13;
-
- PREFETCH0 88*SIZE(ptrba)
- MUL_DY yvec1, yvec2, yvec6;
- LD_DY 0*SIZE(ptrbb), yvec2;
- MUL_DY yvec1, yvec3, yvec7;
- VPERMILP_DY $0x05, yvec2, yvec3;
- ADD_DY yvec14, yvec6, yvec14;
- ADD_DY yvec12, yvec7, yvec12;
-
- MUL_DY yvec0, yvec4, yvec6;
- MUL_DY yvec0, yvec5, yvec7;
- LD_DY 0*SIZE(ptrba), yvec0;
- ADD_DY yvec11, yvec6, yvec11;
- ADD_DY yvec9, yvec7, yvec9;
-
- MUL_DY yvec1, yvec4, yvec6;
- MUL_DY yvec1, yvec5, yvec7;
- ADD_DY yvec10, yvec6, yvec10;
- ADD_DY yvec8, yvec7, yvec8;
- .L2_bodyE:;
- DECQ k;
- JG .L2_bodyB;
- ALIGN_5
- .L2_loopE:;
- PREFETCH2 0*SIZE(prebb);
- ADDQ $8*SIZE, prebb;
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L3_loopE;
- ALIGN_5
- .L3_bodyB:
- #### Unroll times 1 ####
- PREFETCH0 64*SIZE(ptrba)
- LD_DY 4*SIZE(ptrba), yvec1;
- MUL_DY yvec0, yvec2, yvec6;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec0, yvec3, yvec7;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
- ADD_DY yvec15, yvec6, yvec15;
- ADD_DY yvec13, yvec7, yvec13;
-
- MUL_DY yvec1, yvec2, yvec6;
- LD_DY 4*SIZE(ptrbb), yvec2;
- ADDQ $8*SIZE, ptrbb;
- MUL_DY yvec1, yvec3, yvec7;
- VPERMILP_DY $0x05, yvec2, yvec3;
- ADD_DY yvec14, yvec6, yvec14;
- ADD_DY yvec12, yvec7, yvec12;
-
- MUL_DY yvec0, yvec4, yvec6;
- MUL_DY yvec0, yvec5, yvec7;
- LD_DY 8*SIZE(ptrba), yvec0;
- ADD_DY yvec11, yvec6, yvec11;
- ADD_DY yvec9, yvec7, yvec9;
-
- MUL_DY yvec1, yvec4, yvec6;
- MUL_DY yvec1, yvec5, yvec7;
- ADD_DY yvec10, yvec6, yvec10;
- ADD_DY yvec8, yvec7, yvec8;
-
- #### Unroll times 2 ####
- PREFETCH0 72*SIZE(ptrba)
- LD_DY 12*SIZE(ptrba), yvec1;
- MUL_DY yvec0, yvec2, yvec6;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec0, yvec3, yvec7;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
- ADDQ $16*SIZE, ptrba;
- ADD_DY yvec15, yvec6, yvec15;
- ADD_DY yvec13, yvec7, yvec13;
-
- MUL_DY yvec1, yvec2, yvec6;
- LD_DY 0*SIZE(ptrbb), yvec2;
- MUL_DY yvec1, yvec3, yvec7;
- VPERMILP_DY $0x05, yvec2, yvec3;
- ADD_DY yvec14, yvec6, yvec14;
- ADD_DY yvec12, yvec7, yvec12;
-
- MUL_DY yvec0, yvec4, yvec6;
- MUL_DY yvec0, yvec5, yvec7;
- LD_DY 0*SIZE(ptrba), yvec0;
- ADD_DY yvec11, yvec6, yvec11;
- ADD_DY yvec9, yvec7, yvec9;
-
- MUL_DY yvec1, yvec4, yvec6;
- MUL_DY yvec1, yvec5, yvec7;
- ADD_DY yvec10, yvec6, yvec10;
- ADD_DY yvec8, yvec7, yvec8;
-
- .L3_loopE:
- PREFETCH2 0*SIZE(prebb);
- ADDQ $8*SIZE, prebb
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L4_loopE;
- ALIGN_5
- .L4_bodyB:;
- #### Unroll times 1 ####
- PREFETCH0 64*SIZE(ptrba)
- LD_DY 4*SIZE(ptrba), yvec1;
- MUL_DY yvec0, yvec2, yvec6;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec0, yvec3, yvec7;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
- ADDQ $8*SIZE, ptrba;
- ADD_DY yvec15, yvec6, yvec15;
- ADD_DY yvec13, yvec7, yvec13;
-
- MUL_DY yvec1, yvec2, yvec6;
- MUL_DY yvec1, yvec3, yvec7;
- ADDQ $4*SIZE, ptrbb;
- ADD_DY yvec14, yvec6, yvec14;
- ADD_DY yvec12, yvec7, yvec12;
-
- MUL_DY yvec0, yvec4, yvec6;
- MUL_DY yvec0, yvec5, yvec7;
- ADD_DY yvec11, yvec6, yvec11;
- ADD_DY yvec9, yvec7, yvec9;
-
- MUL_DY yvec1, yvec4, yvec6;
- MUL_DY yvec1, yvec5, yvec7;
- ADD_DY yvec10, yvec6, yvec10;
- ADD_DY yvec8, yvec7, yvec8;
-
- .L4_loopE:;
- #### Load Alpha ####
- BROAD_DY MEMALPHA,yvec7;
- #### Multiply Alpha ####
- MUL_DY yvec7,yvec15,yvec15;
- MUL_DY yvec7,yvec14,yvec14;
- MUL_DY yvec7,yvec13,yvec13;
- MUL_DY yvec7,yvec12,yvec12;
- MUL_DY yvec7,yvec11,yvec11;
- MUL_DY yvec7,yvec10,yvec10;
- MUL_DY yvec7,yvec9,yvec9;
- MUL_DY yvec7,yvec8,yvec8;
- #### Reverse the Results ####
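- # The accumulators hold column-interleaved results because B was consumed
- # in permuted order; vshufpd ($0x0a) blends each accumulator pair back into
- # natural order before the write-back.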
- MOV_DY yvec15,yvec7;
- REVS_DY $0x0a,yvec13,yvec15,yvec15;
- REVS_DY $0x0a,yvec7,yvec13,yvec13;
- MOV_DY yvec14,yvec7;
- REVS_DY $0x0a,yvec12,yvec14,yvec14;
- REVS_DY $0x0a,yvec7,yvec12,yvec12;
- MOV_DY yvec11,yvec7;
- REVS_DY $0x0a,yvec9,yvec11,yvec11;
- REVS_DY $0x0a,yvec7,yvec9,yvec9;
- MOV_DY yvec10,yvec7;
- REVS_DY $0x0a,yvec8,yvec10,yvec10;
- REVS_DY $0x0a,yvec7,yvec8,yvec8;
- #### Testing alignment ####
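- # Aligned 16-byte stores are safe only if both C0 and ldc (in bytes) are
- # 16-byte aligned; otherwise take the element-wise write-back path.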
- MOVQ C0, %rax;
- OR ldc, %rax;
- TEST $15, %rax;
- JNE .L4_loopEx; # Unaligned part write back
- ALIGN_5
- #### Writing Back ####
- EXTRA_DY $1,yvec15,xvec7;
- EXTRA_DY $1,yvec14,xvec6;
- EXTRA_DY $1,yvec13,xvec5;
- EXTRA_DY $1,yvec12,xvec4;
- EXTRA_DY $1,yvec11,xvec3;
- EXTRA_DY $1,yvec10,xvec2;
- EXTRA_DY $1,yvec9,xvec1;
- EXTRA_DY $1,yvec8,xvec0;
- #ifndef TRMMKERNEL
- ADD_DY 0*SIZE(C0),xvec15,xvec15;
- ADD_DY 2*SIZE(C1),xvec7,xvec7;
- ADD_DY 4*SIZE(C0),xvec14,xvec14;
- ADD_DY 6*SIZE(C1),xvec6,xvec6;
- ADD_DY 0*SIZE(C0,ldc,1),xvec13,xvec13;
- ADD_DY 2*SIZE(C1,ldc,1),xvec5,xvec5;
- ADD_DY 4*SIZE(C0,ldc,1),xvec12,xvec12;
- ADD_DY 6*SIZE(C1,ldc,1),xvec4,xvec4;
- ADD_DY 0*SIZE(C1),xvec11,xvec11;
- ADD_DY 2*SIZE(C0),xvec3,xvec3;
- ADD_DY 4*SIZE(C1),xvec10,xvec10;
- ADD_DY 6*SIZE(C0),xvec2,xvec2;
- ADD_DY 0*SIZE(C1,ldc,1),xvec9,xvec9;
- ADD_DY 2*SIZE(C0,ldc,1),xvec1,xvec1;
- ADD_DY 4*SIZE(C1,ldc,1),xvec8,xvec8;
- ADD_DY 6*SIZE(C0,ldc,1),xvec0,xvec0;
- #endif
- ST_DY xvec15, 0*SIZE(C0);
- ST_DY xvec7, 2*SIZE(C1);
- ST_DY xvec14, 4*SIZE(C0);
- ST_DY xvec6, 6*SIZE(C1);
- ST_DY xvec13, 0*SIZE(C0,ldc,1);
- ST_DY xvec5, 2*SIZE(C1,ldc,1);
- ST_DY xvec12, 4*SIZE(C0,ldc,1);
- ST_DY xvec4, 6*SIZE(C1,ldc,1);
- ST_DY xvec11, 0*SIZE(C1);
- ST_DY xvec3, 2*SIZE(C0);
- ST_DY xvec10, 4*SIZE(C1);
- ST_DY xvec2, 6*SIZE(C0);
- ST_DY xvec9, 0*SIZE(C1,ldc,1);
- ST_DY xvec1, 2*SIZE(C0,ldc,1);
- ST_DY xvec8, 4*SIZE(C1,ldc,1);
- ST_DY xvec0, 6*SIZE(C0,ldc,1);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) ||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $8, kk
- #endif
- ADDQ $8*SIZE,C0;
- ADDQ $8*SIZE,C1;
- .L1_bodyE:;
- DECQ i;
- JG .L1_bodyB;
- JMP .L1_loopE;
- ALIGN_5;
- .L4_loopEx:;
- EXTRA_DY $1, yvec15, xvec7;
- #ifndef TRMMKERNEL
- LDL_DY 0*SIZE(C0), xvec6, xvec6;
- LDH_DY 1*SIZE(C0), xvec6, xvec6;
- ADD_DY xvec6, xvec15, xvec15;
- LDL_DY 2*SIZE(C1), xvec5, xvec5;
- LDH_DY 3*SIZE(C1), xvec5, xvec5;
- ADD_DY xvec5, xvec7, xvec7;
- #endif
- STL_DY xvec15, 0*SIZE(C0);
- STH_DY xvec15, 1*SIZE(C0);
- STL_DY xvec7, 2*SIZE(C1);
- STH_DY xvec7, 3*SIZE(C1);
-
- EXTRA_DY $1, yvec14, xvec4;
- #ifndef TRMMKERNEL
- LDL_DY 4*SIZE(C0), xvec3, xvec3;
- LDH_DY 5*SIZE(C0), xvec3, xvec3;
- ADD_DY xvec3, xvec14, xvec14;
- LDL_DY 6*SIZE(C1), xvec2, xvec2;
- LDH_DY 7*SIZE(C1), xvec2, xvec2;
- ADD_DY xvec2, xvec4, xvec4;
- #endif
- STL_DY xvec14, 4*SIZE(C0);
- STH_DY xvec14, 5*SIZE(C0);
- STL_DY xvec4, 6*SIZE(C1);
- STH_DY xvec4, 7*SIZE(C1);
-
- EXTRA_DY $1, yvec13, xvec7;
- #ifndef TRMMKERNEL
- LDL_DY 0*SIZE(C0, ldc, 1), xvec6, xvec6;
- LDH_DY 1*SIZE(C0, ldc, 1), xvec6, xvec6;
- ADD_DY xvec6, xvec13, xvec13;
- LDL_DY 2*SIZE(C1, ldc, 1), xvec5, xvec5;
- LDH_DY 3*SIZE(C1, ldc, 1), xvec5, xvec5;
- ADD_DY xvec5, xvec7, xvec7;
- #endif
- STL_DY xvec13, 0*SIZE(C0, ldc, 1);
- STH_DY xvec13, 1*SIZE(C0, ldc, 1);
- STL_DY xvec7, 2*SIZE(C1, ldc, 1);
- STH_DY xvec7, 3*SIZE(C1, ldc, 1);
-
- EXTRA_DY $1, yvec12, xvec4;
- #ifndef TRMMKERNEL
- LDL_DY 4*SIZE(C0, ldc, 1), xvec3, xvec3;
- LDH_DY 5*SIZE(C0, ldc, 1), xvec3, xvec3;
- ADD_DY xvec3, xvec12, xvec12;
- LDL_DY 6*SIZE(C1, ldc, 1), xvec2, xvec2;
- LDH_DY 7*SIZE(C1, ldc, 1), xvec2, xvec2;
- ADD_DY xvec2, xvec4, xvec4;
- #endif
- STL_DY xvec12, 4*SIZE(C0, ldc, 1);
- STH_DY xvec12, 5*SIZE(C0, ldc ,1);
- STL_DY xvec4, 6*SIZE(C1, ldc, 1);
- STH_DY xvec4, 7*SIZE(C1, ldc, 1);
-
- EXTRA_DY $1, yvec11, xvec7;
- #ifndef TRMMKERNEL
- LDL_DY 0*SIZE(C1), xvec6, xvec6;
- LDH_DY 1*SIZE(C1), xvec6, xvec6;
- ADD_DY xvec6, xvec11, xvec11;
- LDL_DY 2*SIZE(C0), xvec5, xvec5;
- LDH_DY 3*SIZE(C0), xvec5, xvec5;
- ADD_DY xvec5, xvec7, xvec7;
- #endif
- STL_DY xvec11, 0*SIZE(C1);
- STH_DY xvec11, 1*SIZE(C1);
- STL_DY xvec7, 2*SIZE(C0);
- STH_DY xvec7, 3*SIZE(C0);
-
- EXTRA_DY $1, yvec10, xvec4;
- #ifndef TRMMKERNEL
- LDL_DY 4*SIZE(C1), xvec3, xvec3;
- LDH_DY 5*SIZE(C1), xvec3, xvec3;
- ADD_DY xvec3, xvec10, xvec10;
- LDL_DY 6*SIZE(C0), xvec2, xvec2;
- LDH_DY 7*SIZE(C0), xvec2, xvec2;
- ADD_DY xvec2, xvec4, xvec4;
- #endif
- STL_DY xvec10, 4*SIZE(C1);
- STH_DY xvec10, 5*SIZE(C1);
- STL_DY xvec4, 6*SIZE(C0);
- STH_DY xvec4, 7*SIZE(C0);
-
- EXTRA_DY $1, yvec9, xvec7;
- #ifndef TRMMKERNEL
- LDL_DY 0*SIZE(C1, ldc, 1), xvec6, xvec6;
- LDH_DY 1*SIZE(C1, ldc, 1), xvec6, xvec6;
- ADD_DY xvec6, xvec9, xvec9;
- LDL_DY 2*SIZE(C0, ldc, 1), xvec5, xvec5;
- LDH_DY 3*SIZE(C0, ldc ,1), xvec5, xvec5;
- ADD_DY xvec5, xvec7, xvec7;
- #endif
- STL_DY xvec9, 0*SIZE(C1, ldc, 1);
- STH_DY xvec9, 1*SIZE(C1, ldc, 1);
- STL_DY xvec7, 2*SIZE(C0, ldc, 1);
- STH_DY xvec7, 3*SIZE(C0, ldc, 1);
-
- EXTRA_DY $1, yvec8, xvec4;
- #ifndef TRMMKERNEL
- LDL_DY 4*SIZE(C1, ldc, 1), xvec3, xvec3;
- LDH_DY 5*SIZE(C1, ldc, 1), xvec3, xvec3;
- ADD_DY xvec3, xvec8, xvec8;
- LDL_DY 6*SIZE(C0, ldc, 1), xvec2, xvec2;
- LDH_DY 7*SIZE(C0, ldc, 1), xvec2, xvec2;
- ADD_DY xvec2, xvec4, xvec4;
- #endif
- STL_DY xvec8, 4*SIZE(C1, ldc, 1);
- STH_DY xvec8, 5*SIZE(C1, ldc, 1);
- STL_DY xvec4, 6*SIZE(C0, ldc, 1);
- STH_DY xvec4, 7*SIZE(C0, ldc, 1);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) ||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $8, kk
- #endif
-
- ADDQ $8*SIZE, C0;
- ADDQ $8*SIZE, C1;
- DECQ i;
- JG .L1_bodyB;
- ALIGN_5
- .L1_loopE:;
- TEST $4, bm; # Rm = 4
- JLE .L5_loopE;
- ALIGN_5
- .L5_bodyB:;
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #### Initialize Result Registers ####
- XOR_DY yvec15, yvec15, yvec15;
- XOR_DY yvec13, yvec13, yvec13;
- LD_DY 0*SIZE(ptrbb), yvec2;
- XOR_DY yvec11, yvec11, yvec11;
- XOR_DY yvec9, yvec9, yvec9;
- LD_DY 0*SIZE(ptrba), yvec0;
- VPERMILP_DY $0x05, yvec2, yvec3;
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $4, %rax;
- #else
- ADDQ $4, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L6_loopE;
- ALIGN_5;
- .L6_bodyB:;
- # Computing kernel
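- # 4x4 tile: one ymm of A per step against the permuted B quad, accumulating
- # into yvec9/yvec11/yvec13/yvec15.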
-
- #### Unroll time 1 ####
- LD_DY 4*SIZE(ptrba), yvec1;
- MUL_DY yvec0, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec0, yvec3, yvec7;
- ADD_DY yvec13, yvec7, yvec13;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
-
- LD_DY 4*SIZE(ptrbb), yvec2;
- MUL_DY yvec0, yvec4, yvec6;
- ADD_DY yvec11, yvec6, yvec11;
- VPERMILP_DY $0x05, yvec2, yvec3;
- MUL_DY yvec0, yvec5, yvec7;
- ADD_DY yvec9, yvec7, yvec9;
-
- #### Unroll time 2 ####
- LD_DY 8*SIZE(ptrba), yvec0;
- MUL_DY yvec1, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec1, yvec3, yvec7;
- ADD_DY yvec13, yvec7, yvec13;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
-
- LD_DY 8*SIZE(ptrbb), yvec2;
- MUL_DY yvec1, yvec4, yvec6;
- ADD_DY yvec11, yvec6, yvec11;
- VPERMILP_DY $0x05, yvec2, yvec3;
- MUL_DY yvec1, yvec5, yvec7;
- ADD_DY yvec9, yvec7, yvec9;
-
- #### Unroll time 3 ####
- LD_DY 12*SIZE(ptrba), yvec1;
- MUL_DY yvec0, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- ADDQ $16*SIZE, ptrba;
- MUL_DY yvec0, yvec3, yvec7;
- ADD_DY yvec13, yvec7, yvec13;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
-
- LD_DY 12*SIZE(ptrbb), yvec2;
- MUL_DY yvec0, yvec4, yvec6;
- ADD_DY yvec11, yvec6, yvec11;
- VPERMILP_DY $0x05, yvec2, yvec3;
- ADDQ $16*SIZE, ptrbb;
- MUL_DY yvec0, yvec5, yvec7;
- ADD_DY yvec9, yvec7, yvec9;
-
- #### Unroll time 4 ####
- LD_DY 0*SIZE(ptrba), yvec0;
- MUL_DY yvec1, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec1, yvec3, yvec7;
- ADD_DY yvec13, yvec7, yvec13;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
-
- LD_DY 0*SIZE(ptrbb), yvec2;
- MUL_DY yvec1, yvec4, yvec6;
- ADD_DY yvec11, yvec6, yvec11;
- VPERMILP_DY $0x05, yvec2, yvec3;
- MUL_DY yvec1, yvec5, yvec7;
- ADD_DY yvec9, yvec7, yvec9;
- DECQ k;
- JG .L6_bodyB;
- ALIGN_5
- .L6_loopE:;
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L7_loopE;
- ALIGN_5
- .L7_bodyB:;
- #### Unroll time 1 ####
- LD_DY 4*SIZE(ptrba), yvec1;
- MUL_DY yvec0, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- ADDQ $8*SIZE, ptrba;
- MUL_DY yvec0, yvec3, yvec7;
- ADD_DY yvec13, yvec7, yvec13;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
-
- LD_DY 4*SIZE(ptrbb), yvec2;
- MUL_DY yvec0, yvec4, yvec6;
- ADD_DY yvec11, yvec6, yvec11;
- VPERMILP_DY $0x05, yvec2, yvec3;
- ADDQ $8*SIZE, ptrbb;
- MUL_DY yvec0, yvec5, yvec7;
- ADD_DY yvec9, yvec7, yvec9;
-
- #### Unroll time 2 ####
- LD_DY 0*SIZE(ptrba), yvec0;
- MUL_DY yvec1, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- MUL_DY yvec1, yvec3, yvec7;
- ADD_DY yvec13, yvec7, yvec13;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
-
- LD_DY 0*SIZE(ptrbb), yvec2;
- MUL_DY yvec1, yvec4, yvec6;
- ADD_DY yvec11, yvec6, yvec11;
- VPERMILP_DY $0x05, yvec2, yvec3;
- MUL_DY yvec1, yvec5, yvec7;
- ADD_DY yvec9, yvec7, yvec9;
-
- .L7_loopE:;
- #ifndef TRMMKERNEL
- TEST $1, bk
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L8_loopE;
- ALIGN_5
- .L8_bodyB:;
- #### Unroll time 1 ####
- MUL_DY yvec0, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
- SHUF_DY $0x03, yvec2, yvec2, yvec4;
- ADDQ $4*SIZE, ptrba;
- MUL_DY yvec0, yvec3, yvec7;
- ADD_DY yvec13, yvec7, yvec13;
- SHUF_DY $0x03, yvec3, yvec3, yvec5;
-
- MUL_DY yvec0, yvec4, yvec6;
- ADD_DY yvec11, yvec6, yvec11;
- ADDQ $4*SIZE, ptrbb;
- MUL_DY yvec0, yvec5, yvec7;
- ADD_DY yvec9, yvec7, yvec9;
-
- .L8_loopE:;
- #### Load Alpha ####
- BROAD_DY MEMALPHA, yvec7;
- #### Multiply Alpha ####
- MUL_DY yvec7,yvec15,yvec15;
- MUL_DY yvec7,yvec13,yvec13;
- MUL_DY yvec7,yvec11,yvec11;
- MUL_DY yvec7,yvec9,yvec9;
- #### Reverse the Results ####
- MOV_DY yvec15, yvec7;
- REVS_DY $0x0a,yvec13,yvec15,yvec15;
- REVS_DY $0x0a,yvec7,yvec13,yvec13;
- MOV_DY yvec11,yvec7;
- REVS_DY $0x0a,yvec9,yvec11,yvec11;
- REVS_DY $0x0a,yvec7,yvec9,yvec9;
- #### Testing alignment ####
- MOVQ C0, %rax;
- OR ldc, %rax;
- TEST $15, %rax;
- JNE .L8_loopEx; # Unaligned part write back
- ALIGN_5
- #### Writing Back ####
- EXTRA_DY $1,yvec15,xvec7;
- EXTRA_DY $1,yvec13,xvec5;
- EXTRA_DY $1,yvec11,xvec3;
- EXTRA_DY $1,yvec9,xvec1;
- #ifndef TRMMKERNEL
- ADD_DX 0*SIZE(C0), xvec15, xvec15;
- ADD_DX 2*SIZE(C1), xvec7, xvec7;
- ADD_DX 0*SIZE(C0, ldc, 1), xvec13, xvec13;
- ADD_DX 2*SIZE(C1, ldc, 1), xvec5, xvec5;
- ADD_DX 0*SIZE(C1), xvec11, xvec11;
- ADD_DX 2*SIZE(C0), xvec3, xvec3;
- ADD_DX 0*SIZE(C1, ldc, 1), xvec9, xvec9;
- ADD_DX 2*SIZE(C0, ldc, 1), xvec1, xvec1;
- #endif
- ST_DX xvec15, 0*SIZE(C0);
- ST_DX xvec7, 2*SIZE(C1);
- ST_DX xvec13, 0*SIZE(C0,ldc,1);
- ST_DX xvec5, 2*SIZE(C1,ldc,1);
- ST_DX xvec11, 0*SIZE(C1);
- ST_DX xvec3, 2*SIZE(C0);
- ST_DX xvec9, 0*SIZE(C1,ldc,1);
- ST_DX xvec1, 2*SIZE(C0,ldc,1);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL)&&defined(LEFT)
- ADDQ $4, kk
- #endif
- ADDQ $4*SIZE, C0;
- ADDQ $4*SIZE, C1;
- JMP .L5_loopE;
- ALIGN_5
- .L8_loopEx:;
- EXTRA_DY $1,yvec15,xvec7;
- EXTRA_DY $1,yvec13,xvec5;
- EXTRA_DY $1,yvec11,xvec3;
- EXTRA_DY $1,yvec9,xvec1;
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C0), xvec14, xvec14;
- LDH_DX 1*SIZE(C0), xvec14, xvec14;
- LDL_DX 0*SIZE(C0, ldc, 1), xvec12, xvec12;
- LDH_DX 1*SIZE(C0, ldc, 1), xvec12, xvec12;
- LDL_DX 0*SIZE(C1), xvec10, xvec10;
- LDH_DX 1*SIZE(C1), xvec10, xvec10;
- LDL_DX 0*SIZE(C1, ldc, 1), xvec8, xvec8;
- LDH_DX 1*SIZE(C1, ldc, 1), xvec8, xvec8;
- ADD_DX xvec14, xvec15, xvec15;
- ADD_DX xvec12, xvec13, xvec13;
- ADD_DX xvec10, xvec11, xvec11;
- ADD_DX xvec8, xvec9, xvec9;
- #endif
- STL_DX xvec15, 0*SIZE(C0);
- STH_DX xvec15, 1*SIZE(C0);
- STL_DX xvec13, 0*SIZE(C0, ldc, 1);
- STH_DX xvec13, 1*SIZE(C0, ldc, 1);
- STL_DX xvec11, 0*SIZE(C1);
- STH_DX xvec11, 1*SIZE(C1);
- STL_DX xvec9, 0*SIZE(C1, ldc, 1);
- STH_DX xvec9, 1*SIZE(C1, ldc, 1);
- #ifndef TRMMKERNEL
- LDL_DX 2*SIZE(C0), xvec0, xvec0;
- LDH_DX 3*SIZE(C0), xvec0, xvec0;
- LDL_DX 2*SIZE(C0, ldc, 1), xvec2, xvec2;
- LDH_DX 3*SIZE(C0, ldc, 1), xvec2, xvec2;
- LDL_DX 2*SIZE(C1), xvec4, xvec4;
- LDH_DX 3*SIZE(C1), xvec4, xvec4;
- LDL_DX 2*SIZE(C1, ldc, 1), xvec6, xvec6;
- LDH_DX 3*SIZE(C1, ldc, 1), xvec6, xvec6;
- ADD_DX xvec0, xvec3, xvec3;
- ADD_DX xvec2, xvec1, xvec1;
- ADD_DX xvec4, xvec7, xvec7;
- ADD_DX xvec6, xvec5, xvec5;
- #endif
- STL_DX xvec3, 2*SIZE(C0);
- STH_DX xvec3, 3*SIZE(C0);
- STL_DX xvec1, 2*SIZE(C0, ldc, 1);
- STH_DX xvec1, 3*SIZE(C0, ldc, 1);
- STL_DX xvec7, 2*SIZE(C1);
- STH_DX xvec7, 3*SIZE(C1);
- STL_DX xvec5, 2*SIZE(C1, ldc, 1);
- STH_DX xvec5, 3*SIZE(C1, ldc, 1);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL)&&defined(LEFT)
- ADDQ $4, kk
- #endif
-
- ADDQ $4*SIZE, C0;
- ADDQ $4*SIZE, C1;
- .L5_loopE:;
- TEST $2, bm;
- JLE .L9_loopE;
- ALIGN_5
- .L9_bodyB:;
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb
- #endif
- #### Initialize Result Registers ####
- LD_DX 0*SIZE(ptrbb), xvec2;
- XOR_DY yvec15, yvec15, yvec15;
- LD_DX 2*SIZE(ptrbb), xvec3;
- XOR_DY yvec13, yvec13, yvec13;
- LD_DX 0*SIZE(ptrba), xvec0;
- XOR_DY yvec11, yvec11, yvec11;
- SHUF_DX $0x4e, xvec2, xvec4;
- XOR_DY yvec9, yvec9, yvec9;
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $2, %rax;
- #else
- ADDQ $4, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L10_loopE;
- ALIGN_5;
- .L10_bodyB:;
- # Computing kernel
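- # 2x4 tile in 128-bit halves: xvec0 holds two elements of A, xvec2/xvec3
- # hold the four B values in pairs, and SHUF_DX ($0x4e) swaps each pair so
- # both A elements meet all four columns.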
-
- #### Unroll time 1 ####
- LD_DX 4*SIZE(ptrbb), xvec6;
- SHUF_DX $0x4e, xvec3, xvec5;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
-
- LD_DX 6*SIZE(ptrbb), xvec7;
- MUL_DX xvec0, xvec3, xvec3;
- ADD_DX xvec3, xvec11, xvec11;
-
- LD_DX 2*SIZE(ptrba), xvec1;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec13, xvec13;
- SHUF_DX $0x4e, xvec6, xvec4;
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec9, xvec9;
-
- #### Unroll time 2 ####
- LD_DX 8*SIZE(ptrbb), xvec2;
- SHUF_DX $0x4e, xvec7, xvec5;
- MUL_DX xvec1, xvec6, xvec6;
- ADD_DX xvec6, xvec15, xvec15;
-
- LD_DX 10*SIZE(ptrbb), xvec3;
- MUL_DX xvec1, xvec7, xvec7;
- ADD_DX xvec7, xvec11, xvec11;
-
- LD_DX 4*SIZE(ptrba), xvec0;
- MUL_DX xvec1, xvec4, xvec4;
- ADD_DX xvec4, xvec13, xvec13;
- SHUF_DX $0x4e, xvec2, xvec4;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec9, xvec9;
-
- #### Unroll time 3 ####
- LD_DX 12*SIZE(ptrbb), xvec6;
- SHUF_DX $0x4e, xvec3, xvec5;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
-
- LD_DX 14*SIZE(ptrbb), xvec7;
- MUL_DX xvec0, xvec3, xvec3;
- ADD_DX xvec3, xvec11, xvec11;
- ADDQ $16*SIZE, ptrbb;
-
- LD_DX 6*SIZE(ptrba), xvec1;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec13, xvec13;
- SHUF_DX $0x4e, xvec6, xvec4;
- ADDQ $8*SIZE, ptrba;
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec9, xvec9;
-
- #### Unroll time 4 ####
- LD_DX 0*SIZE(ptrbb), xvec2;
- SHUF_DX $0x4e, xvec7, xvec5;
- MUL_DX xvec1, xvec6, xvec6;
- ADD_DX xvec6, xvec15, xvec15;
-
- LD_DX 2*SIZE(ptrbb), xvec3;
- MUL_DX xvec1, xvec7, xvec7;
- ADD_DX xvec7, xvec11, xvec11;
-
- LD_DX 0*SIZE(ptrba), xvec0;
- MUL_DX xvec1, xvec4, xvec4;
- ADD_DX xvec4, xvec13, xvec13;
- SHUF_DX $0x4e, xvec2, xvec4;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec9, xvec9;
- DECQ k;
- JG .L10_bodyB;
- ALIGN_5
- .L10_loopE:;
- #ifndef TRMMKERNEL
- TEST $2, bk
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L11_loopE;
- ALIGN_5
- .L11_bodyB:;
- #### Unroll time 1 ####
- LD_DX 4*SIZE(ptrbb), xvec6;
- SHUF_DX $0x4e, xvec3, xvec5;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
-
- LD_DX 6*SIZE(ptrbb), xvec7;
- MUL_DX xvec0, xvec3, xvec3;
- ADD_DX xvec3, xvec11, xvec11;
- ADDQ $8*SIZE, ptrbb;
-
- LD_DX 2*SIZE(ptrba), xvec1;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec13, xvec13;
- SHUF_DX $0x4e, xvec6, xvec4;
- ADDQ $4*SIZE, ptrba;
-
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec9, xvec9;
-
- #### Unroll time 2 ####
- LD_DX 0*SIZE(ptrbb), xvec2;
- SHUF_DX $0x4e, xvec7, xvec5;
- MUL_DX xvec1, xvec6, xvec6;
- ADD_DX xvec6, xvec15, xvec15;
-
- LD_DX 2*SIZE(ptrbb), xvec3;
- MUL_DX xvec1, xvec7, xvec7;
- ADD_DX xvec7, xvec11, xvec11;
-
- LD_DX 0*SIZE(ptrba), xvec0;
- MUL_DX xvec1, xvec4, xvec4;
- ADD_DX xvec4, xvec13, xvec13;
- SHUF_DX $0x4e, xvec2, xvec4;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec9, xvec9;
-
- .L11_loopE:;
- #ifndef TRMMKERNEL
- TEST $1, bk
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L12_loopE;
- ALIGN_5
- .L12_bodyB:;
- SHUF_DX $0x4e, xvec3, xvec5;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
- ADDQ $4*SIZE, ptrbb;
-
- MUL_DX xvec0, xvec3, xvec3;
- ADD_DX xvec3, xvec11, xvec11;
- ADDQ $2*SIZE, ptrba;
-
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec13, xvec13;
-
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec9, xvec9;
-
- .L12_loopE:;
- #### Load Alpha ####
- BROAD_DX MEMALPHA, xvec7;
- #### Multiply Alpha ####
- MUL_DX xvec7, xvec15, xvec15;
- MUL_DX xvec7, xvec13, xvec13;
- MUL_DX xvec7, xvec11, xvec11;
- MUL_DX xvec7, xvec9, xvec9;
- #### Reverse the Results ####
- MOV_DX xvec15, xvec6;
- REVS_DX xvec13, xvec15, xvec15;
- REVS_DX xvec6, xvec13, xvec13;
- MOV_DX xvec11, xvec6;
- REVS_DX xvec9, xvec11, xvec11;
- REVS_DX xvec6, xvec9, xvec9;
- #### Testing Alignment ####
- MOVQ C0, %rax;
- OR ldc, %rax;
- TEST $15, %rax;
- JNE .L12_loopEx;
- ALIGN_5
- #### Writing Back ####
- #ifndef TRMMKERNEL
- ADD_DX 0*SIZE(C0), xvec13, xvec13;
- ADD_DX 0*SIZE(C0, ldc, 1), xvec15, xvec15;
- ADD_DX 0*SIZE(C1), xvec9, xvec9;
- ADD_DX 0*SIZE(C1, ldc, 1), xvec11, xvec11;
- #endif
- ST_DX xvec13, 0*SIZE(C0);
- ST_DX xvec15, 0*SIZE(C0, ldc, 1);
- ST_DX xvec9, 0*SIZE(C1);
- ST_DX xvec11, 0*SIZE(C1, ldc, 1);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $2, kk
- #endif
- ADDQ $2*SIZE, C0
- ADDQ $2*SIZE, C1
- JMP .L9_loopE;
- ALIGN_5
- .L12_loopEx:
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C0), xvec14, xvec14;
- LDH_DX 1*SIZE(C0), xvec14, xvec14;
- LDL_DX 0*SIZE(C0, ldc, 1), xvec12, xvec12;
- LDH_DX 1*SIZE(C0, ldc, 1), xvec12, xvec12;
- LDL_DX 0*SIZE(C1), xvec10, xvec10;
- LDH_DX 1*SIZE(C1), xvec10, xvec10;
- LDL_DX 0*SIZE(C1, ldc, 1), xvec8, xvec8;
- LDH_DX 1*SIZE(C1, ldc, 1), xvec8, xvec8;
- ADD_DX xvec14, xvec13, xvec13;
- ADD_DX xvec12, xvec15, xvec15;
- ADD_DX xvec10, xvec9, xvec9;
- ADD_DX xvec8, xvec11, xvec11;
- #endif
- STL_DX xvec13, 0*SIZE(C0);
- STH_DX xvec13, 1*SIZE(C0);
- STL_DX xvec15, 0*SIZE(C0, ldc, 1);
- STH_DX xvec15, 1*SIZE(C0, ldc, 1);
- STL_DX xvec9, 0*SIZE(C1);
- STH_DX xvec9, 1*SIZE(C1);
- STL_DX xvec11, 0*SIZE(C1, ldc, 1);
- STH_DX xvec11, 1*SIZE(C1, ldc, 1);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $2, kk
- #endif
- ADDQ $2*SIZE, C0;
- ADDQ $2*SIZE, C1;
- .L9_loopE:;
- TEST $1, bm
- JLE .L13_loopE;
- ALIGN_5
- .L13_bodyB:;
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- ADDQ %rax, ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #### Initialize Result Registers ####
- XOR_DY yvec15, yvec15, yvec15;
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $1, %rax;
- #else
- ADDQ $4, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L14_loopE;
- ALIGN_5
- .L14_bodyB:;
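- # 1x4 tile: broadcast one element of A across a ymm register, multiply by
- # the 4-wide row of B, and accumulate into yvec15.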
- BROAD_DY 0*SIZE(ptrba), yvec0;
- LD_DY 0*SIZE(ptrbb), yvec2;
- MUL_DY yvec0, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
-
- BROAD_DY 1*SIZE(ptrba), yvec1;
- LD_DY 4*SIZE(ptrbb), yvec3;
- MUL_DY yvec1, yvec3, yvec7;
- ADD_DY yvec15, yvec7, yvec15;
-
- BROAD_DY 2*SIZE(ptrba), yvec0;
- LD_DY 8*SIZE(ptrbb), yvec2;
- MUL_DY yvec0, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
-
- BROAD_DY 3*SIZE(ptrba), yvec1;
- LD_DY 12*SIZE(ptrbb), yvec3;
- MUL_DY yvec1, yvec3, yvec7;
- ADD_DY yvec15, yvec7, yvec15;
- ADDQ $4*SIZE, ptrba;
- ADDQ $16*SIZE, ptrbb;
- DECQ k;
- JG .L14_bodyB;
- ALIGN_5
- .L14_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L15_loopE;
- ALIGN_5
- .L15_bodyB:
- BROAD_DY 0*SIZE(ptrba), yvec0;
- LD_DY 0*SIZE(ptrbb), yvec2;
- MUL_DY yvec0, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
-
- BROAD_DY 1*SIZE(ptrba), yvec1;
- LD_DY 4*SIZE(ptrbb), yvec3;
- MUL_DY yvec1, yvec3, yvec7;
- ADD_DY yvec15, yvec7, yvec15;
- ADDQ $2*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
- .L15_loopE:;
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L16_loopE;
- ALIGN_5
- .L16_bodyB:;
- BROAD_DY 0*SIZE(ptrba), yvec0;
- LD_DY 0*SIZE(ptrbb), yvec2;
- MUL_DY yvec0, yvec2, yvec6;
- ADD_DY yvec15, yvec6, yvec15;
- ADDQ $1*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
-
- .L16_loopE:
- #### Load Alpha ####
- BROAD_DY MEMALPHA, yvec7;
- #### Multiply Alpha ####
- MUL_DY yvec15, yvec7, yvec15;
- #### Writing Back ####
- EXTRA_DY $1, yvec15, xvec7;
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C0), xvec0, xvec0;
- LDH_DX 0*SIZE(C0, ldc, 1), xvec0, xvec0;
- LDL_DX 0*SIZE(C1), xvec1, xvec1;
- LDH_DX 0*SIZE(C1, ldc, 1), xvec1, xvec1;
- ADD_DX xvec0, xvec15, xvec15;
- ADD_DX xvec1, xvec7, xvec7;
- #endif
- STL_DX xvec15, 0*SIZE(C0);
- STH_DX xvec15, 0*SIZE(C0, ldc, 1);
- STL_DX xvec7, 0*SIZE(C1);
- STH_DX xvec7, 0*SIZE(C1, ldc, 1);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- ADDQ %rax, ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL)&&defined(LEFT)
- ADDQ $1, kk
- #endif
- ADDQ $1*SIZE, C0
- ADDQ $1*SIZE, C1
- .L13_loopE:;
- #if defined(TRMMKERNEL)&&!defined(LEFT)
- ADDQ $4, kk
- #endif
- MOVQ bk,k;
- SALQ $5,k;
- ADDQ k,bb;
- LEAQ (C,ldc,4),C;
- .L0_bodyE:;
- DECQ j;
- JG .L0_bodyB;
- ALIGN_5;
- .L0_loopE:;
- TEST $2, bn;
- JLE .L20_loopE;
- ALIGN_5;
- .L20_loopB:;
- #if defined(TRMMKERNEL) && defined(LEFT)
- MOVQ OFFSET, %rax;
- MOVQ %rax, kk
- #endif
- MOVQ C, C0;
- LEAQ (C, ldc, 1), C1;
- MOVQ ba, ptrba;
- MOVQ bm, i;
- SARQ $3, i; # Rm = 8
- JLE .L21_loopE;
- ALIGN_5;
- .L21_bodyB:;
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #### Initialize Result Registers ####
- XOR_DY yvec15, yvec15, yvec15;
- XOR_DY yvec14, yvec14, yvec14;
- XOR_DY yvec13, yvec13, yvec13;
- XOR_DY yvec12, yvec12, yvec12;
- XOR_DY yvec11, yvec11, yvec11;
- XOR_DY yvec10, yvec10, yvec10;
- XOR_DY yvec9, yvec9, yvec9;
- XOR_DY yvec8, yvec8, yvec8;
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT) && !defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $8, %rax;
- #else
- ADDQ $2, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L211_loopE;
- ALIGN_5;
- .L211_bodyB:
- # Computing kernel
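- # 8x2 tile: xvec0..xvec3 carry eight elements of A two at a time,
- # xvec4..xvec7 carry copies of the B pair, and the $0x4e shuffle swaps the
- # pair so the same A data covers the second column.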
- #### Unroll time 1 ####
- LD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- LD_DX 2*SIZE(ptrba), xvec1;
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- LD_DX 4*SIZE(ptrba), xvec2;
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec13, xvec13;
-
- LD_DX 6*SIZE(ptrba), xvec3;
- SHUF_DX $0x4e, xvec7, xvec4;
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec12, xvec12;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec9, xvec9;
-
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec8, xvec8;
-
- #### Unroll time 2 ####
- LD_DX 8*SIZE(ptrba), xvec0;
- LD_DX 2*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- LD_DX 10*SIZE(ptrba), xvec1;
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- LD_DX 12*SIZE(ptrba), xvec2;
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec13, xvec13;
-
- LD_DX 14*SIZE(ptrba), xvec3;
- SHUF_DX $0x4e, xvec7, xvec4;
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec12, xvec12;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec9, xvec9;
-
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec8, xvec8;
-
- #### Unroll time 3 ####
- LD_DX 16*SIZE(ptrba), xvec0;
- LD_DX 4*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- LD_DX 18*SIZE(ptrba), xvec1;
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- LD_DX 20*SIZE(ptrba), xvec2;
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec13, xvec13;
-
- LD_DX 22*SIZE(ptrba), xvec3;
- SHUF_DX $0x4e, xvec7, xvec4;
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec12, xvec12;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec9, xvec9;
-
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec8, xvec8;
-
- #### Unroll time 4 ####
- LD_DX 24*SIZE(ptrba), xvec0;
- LD_DX 6*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
- ADDQ $8*SIZE, ptrbb;
-
- LD_DX 26*SIZE(ptrba), xvec1;
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- LD_DX 28*SIZE(ptrba), xvec2;
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec13, xvec13;
-
- LD_DX 30*SIZE(ptrba), xvec3;
- SHUF_DX $0x4e, xvec7, xvec4;
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec12, xvec12;
- ADDQ $32*SIZE, ptrba;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec9, xvec9;
-
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec8, xvec8;
- DECQ k;
- JG .L211_bodyB;
- ALIGN_5
- .L211_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L212_loopE;
- ALIGN_5;
- .L212_bodyB:
- # Computing kernel
- #### Unroll time 1 ####
- LD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- LD_DX 2*SIZE(ptrba), xvec1;
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- LD_DX 4*SIZE(ptrba), xvec2;
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec13, xvec13;
-
- LD_DX 6*SIZE(ptrba), xvec3;
- SHUF_DX $0x4e, xvec7, xvec4;
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec12, xvec12;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec9, xvec9;
-
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec8, xvec8;
-
- #### Unroll time 2 ####
- LD_DX 8*SIZE(ptrba), xvec0;
- LD_DX 2*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
- ADDQ $4*SIZE, ptrbb;
-
- LD_DX 10*SIZE(ptrba), xvec1;
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- LD_DX 12*SIZE(ptrba), xvec2;
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec13, xvec13;
-
- LD_DX 14*SIZE(ptrba), xvec3;
- SHUF_DX $0x4e, xvec7, xvec4;
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec12, xvec12;
- ADDQ $16*SIZE, ptrba;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec9, xvec9;
-
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec8, xvec8;
-
- .L212_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L213_loopE;
- ALIGN_5
- .L213_bodyB:
- #### Unroll time 1 ####
- LD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
- ADDQ $2*SIZE, ptrbb;
-
- LD_DX 2*SIZE(ptrba), xvec1;
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- LD_DX 4*SIZE(ptrba), xvec2;
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec13, xvec13;
-
- LD_DX 6*SIZE(ptrba), xvec3;
- SHUF_DX $0x4e, xvec7, xvec4;
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec12, xvec12;
- ADDQ $8*SIZE, ptrba;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MOV_DX xvec5, xvec6;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- MOV_DX xvec6, xvec7;
- MUL_DX xvec2, xvec6, xvec6;
- ADD_DX xvec6, xvec9, xvec9;
-
- MUL_DX xvec3, xvec7, xvec7;
- ADD_DX xvec7, xvec8, xvec8;
-
- .L213_loopE:
- #### Multiply Alpha ####
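- # Broadcast alpha into both 64-bit lanes of xvec7 and scale every
- # accumulator, so the tile holds alpha*(A*B) before C is folded in.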
- BROAD_DX MEMALPHA, xvec7;
- MUL_DX xvec7, xvec15, xvec15;
- MUL_DX xvec7, xvec14, xvec14;
- MUL_DX xvec7, xvec13, xvec13;
- MUL_DX xvec7, xvec12, xvec12;
- MUL_DX xvec7, xvec11, xvec11;
- MUL_DX xvec7, xvec10, xvec10;
- MUL_DX xvec7, xvec9, xvec9;
- MUL_DX xvec7, xvec8, xvec8;
- #### Reverse ####
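- # The stores below send xvec11..8 to C0 and xvec15..12 to C1; REVS_DX
- # (presumably a SHUFPD-style half swap) merges the straight and swapped
- # accumulator pairs so each register ends up in that column order.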
- MOV_DX xvec15, xvec6;
- REVS_DX xvec11, xvec15, xvec15;
- REVS_DX xvec6, xvec11, xvec11;
- MOV_DX xvec14, xvec6;
- REVS_DX xvec10, xvec14, xvec14;
- REVS_DX xvec6, xvec10, xvec10;
- MOV_DX xvec13, xvec6;
- REVS_DX xvec9, xvec13, xvec13;
- REVS_DX xvec6, xvec9, xvec9;
- MOV_DX xvec12, xvec6;
- REVS_DX xvec8, xvec12, xvec12;
- REVS_DX xvec6, xvec8, xvec8;
- #### Testing Alignment ####
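- # OR the C pointer with the byte stride and test the low four bits: if
- # either is not 16-byte aligned, take the .L213_loopEx path, which uses
- # split 64-bit loads and stores instead of aligned 128-bit accesses.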
- MOVQ C0, %rax;
- OR ldc, %rax;
- TEST $15, %rax;
- JNE .L213_loopEx;
- ALIGN_5
- #### Writing Back ####
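- # Aligned path: for the plain GEMM build the scaled tile is added to the
- # values already in C (beta is presumably applied by the caller), while
- # the TRMM build simply overwrites C.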
- #ifndef TRMMKERNEL
- ADD_DX 0*SIZE(C0), xvec11, xvec11;
- ADD_DX 2*SIZE(C0), xvec10, xvec10;
- ADD_DX 4*SIZE(C0), xvec9, xvec9;
- ADD_DX 6*SIZE(C0), xvec8, xvec8;
- ADD_DX 0*SIZE(C1), xvec15, xvec15;
- ADD_DX 2*SIZE(C1), xvec14, xvec14;
- ADD_DX 4*SIZE(C1), xvec13, xvec13;
- ADD_DX 6*SIZE(C1), xvec12, xvec12;
- #endif
- ST_DX xvec11, 0*SIZE(C0);
- ST_DX xvec10, 2*SIZE(C0);
- ST_DX xvec9, 4*SIZE(C0);
- ST_DX xvec8, 6*SIZE(C0);
- ST_DX xvec15, 0*SIZE(C1);
- ST_DX xvec14, 2*SIZE(C1);
- ST_DX xvec13, 4*SIZE(C1);
- ST_DX xvec12, 6*SIZE(C1);
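- # TRMM bookkeeping: advance ptrba/ptrbb past the (bk - kkk) k iterations
- # this block skipped, so both pointers end at the panel boundary.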
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $8, kk
- #endif
- ADDQ $8*SIZE, C0;
- ADDQ $8*SIZE, C1;
- DECQ i;
- JG .L21_bodyB;
- JMP .L21_loopE;
- ALIGN_5
- .L213_loopEx:;
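- # Unaligned write-back: LDL_DX/LDH_DX and STL_DX/STH_DX move the low and
- # high 64-bit halves separately, likely movlpd/movhpd style, which works
- # for any alignment of C0/C1.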
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C0), xvec0, xvec0;
- LDH_DX 1*SIZE(C0), xvec0, xvec0;
- LDL_DX 2*SIZE(C0), xvec1, xvec1;
- LDH_DX 3*SIZE(C0), xvec1, xvec1;
- LDL_DX 4*SIZE(C0), xvec2, xvec2;
- LDH_DX 5*SIZE(C0), xvec2, xvec2;
- LDL_DX 6*SIZE(C0), xvec3, xvec3;
- LDH_DX 7*SIZE(C0), xvec3, xvec3;
- ADD_DX xvec0, xvec11, xvec11;
- ADD_DX xvec1, xvec10, xvec10;
- ADD_DX xvec2, xvec9, xvec9;
- ADD_DX xvec3, xvec8, xvec8;
- #endif
- STL_DX xvec11, 0*SIZE(C0);
- STH_DX xvec11, 1*SIZE(C0);
- STL_DX xvec10, 2*SIZE(C0);
- STH_DX xvec10, 3*SIZE(C0);
- STL_DX xvec9, 4*SIZE(C0);
- STH_DX xvec9, 5*SIZE(C0);
- STL_DX xvec8, 6*SIZE(C0);
- STH_DX xvec8, 7*SIZE(C0);
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C1), xvec4, xvec4;
- LDH_DX 1*SIZE(C1), xvec4, xvec4;
- LDL_DX 2*SIZE(C1), xvec5, xvec5;
- LDH_DX 3*SIZE(C1), xvec5, xvec5;
- LDL_DX 4*SIZE(C1), xvec6, xvec6;
- LDH_DX 5*SIZE(C1), xvec6, xvec6;
- LDL_DX 6*SIZE(C1), xvec7, xvec7;
- LDH_DX 7*SIZE(C1), xvec7, xvec7;
- ADD_DX xvec4, xvec15, xvec15;
- ADD_DX xvec5, xvec14, xvec14;
- ADD_DX xvec6, xvec13, xvec13;
- ADD_DX xvec7, xvec12, xvec12;
- #endif
- STL_DX xvec15, 0*SIZE(C1);
- STH_DX xvec15, 1*SIZE(C1);
- STL_DX xvec14, 2*SIZE(C1);
- STH_DX xvec14, 3*SIZE(C1);
- STL_DX xvec13, 4*SIZE(C1);
- STH_DX xvec13, 5*SIZE(C1);
- STL_DX xvec12, 6*SIZE(C1);
- STH_DX xvec12, 7*SIZE(C1);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $8, kk
- #endif
- ADDQ $8*SIZE, C0;
- ADDQ $8*SIZE, C1;
- DECQ i;
- JG .L21_bodyB;
- .L21_loopE:;
- TEST $4, bm; # Rm = 4
- JLE .L22_loopE;
- ALIGN_5;
- .L22_bodyB:;
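- # bm & 4: handle four remaining rows against the current two columns.
- # Same pair/SHUF scheme as the 8x2 block, but with only four
- # accumulators (xvec15/14 straight order, xvec11/10 swapped order).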
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #### Initial Results Register ####
- XOR_DY yvec15, yvec15, yvec15;
- XOR_DY yvec14, yvec14, yvec14;
- XOR_DY yvec11, yvec11, yvec11;
- XOR_DY yvec10, yvec10, yvec10;
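- # Pick the effective k count: plain GEMM runs all bk iterations; for
- # TRMM, kkk is either bk - kk or kk plus the block dimension, depending
- # on LEFT/TRANSA, so the loop covers only the triangular part.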
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT) && !defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $4, %rax;
- #else
- ADDQ $2, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L221_loopE;
- ALIGN_5
- .L221_bodyB:;
- # Computing kernel
- #### Unroll time 1 ####
- LD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- LD_DX 2*SIZE(ptrba), xvec1;
- SHUF_DX $0x4e, xvec5, xvec4;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- #### Unroll time 2 ####
- LD_DX 4*SIZE(ptrba), xvec0;
- LD_DX 2*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- LD_DX 6*SIZE(ptrba), xvec1;
- SHUF_DX $0x4e, xvec5, xvec4;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- #### Unroll time 3 ####
- LD_DX 8*SIZE(ptrba), xvec0;
- LD_DX 4*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- LD_DX 10*SIZE(ptrba), xvec1;
- SHUF_DX $0x4e, xvec5, xvec4;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- #### Unroll time 4 ####
- LD_DX 12*SIZE(ptrba), xvec0;
- LD_DX 6*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
- ADDQ $8*SIZE, ptrbb;
-
- LD_DX 14*SIZE(ptrba), xvec1;
- SHUF_DX $0x4e, xvec5, xvec4;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
- ADDQ $16*SIZE, ptrba;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
- DECQ k;
- JG .L221_bodyB;
- ALIGN_5
- .L221_loopE:;
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L222_loopE;
- ALIGN_5
- .L222_bodyB:
- #### Unroll time 1 ####
- LD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- LD_DX 2*SIZE(ptrba), xvec1;
- SHUF_DX $0x4e, xvec5, xvec4;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- #### Unroll time 2 ####
- LD_DX 4*SIZE(ptrba), xvec0;
- LD_DX 2*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
- ADDQ $4*SIZE, ptrbb;
-
- LD_DX 6*SIZE(ptrba), xvec1;
- SHUF_DX $0x4e, xvec5, xvec4;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
- ADDQ $8*SIZE, ptrba;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- .L222_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L223_loopE;
- ALIGN_5
- .L223_bodyB:
- #### Unroll time 1 ####
- LD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec4;
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
- ADDQ $2*SIZE, ptrbb;
-
- LD_DX 2*SIZE(ptrba), xvec1;
- SHUF_DX $0x4e, xvec5, xvec4;
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec14, xvec14;
- ADDQ $4*SIZE, ptrba;
-
- MOV_DX xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec11, xvec11;
-
- MUL_DX xvec1, xvec5, xvec5;
- ADD_DX xvec5, xvec10, xvec10;
-
- .L223_loopE:
- #### Multiply Alpha ####
- BROAD_DX MEMALPHA, xvec7;
- MUL_DX xvec7, xvec15, xvec15;
- MUL_DX xvec7, xvec14, xvec14;
- MUL_DX xvec7, xvec11, xvec11;
- MUL_DX xvec7, xvec10, xvec10;
- #### Reverse ####
- MOV_DX xvec15, xvec6;
- REVS_DX xvec11, xvec15, xvec15;
- REVS_DX xvec6, xvec11, xvec11;
- MOV_DX xvec14, xvec6;
- REVS_DX xvec10, xvec14, xvec14;
- REVS_DX xvec6, xvec10, xvec10;
- #### Testing Alignment ####
- MOVQ C0, %rax;
- OR ldc, %rax;
- TEST $15, %rax;
- JNE .L223_loopEx;
- ALIGN_5
- #### Writing Back ####
- #ifndef TRMMKERNEL
- ADD_DX 0*SIZE(C0), xvec11, xvec11;
- ADD_DX 2*SIZE(C0), xvec10, xvec10;
- ADD_DX 0*SIZE(C1), xvec15, xvec15;
- ADD_DX 2*SIZE(C1), xvec14, xvec14;
- #endif
- ST_DX xvec11, 0*SIZE(C0);
- ST_DX xvec10, 2*SIZE(C0);
- ST_DX xvec15, 0*SIZE(C1);
- ST_DX xvec14, 2*SIZE(C1);
- #if (defined(TRMMKERNEL)&& defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&& !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $4, kk
- #endif
- ADDQ $4*SIZE, C0;
- ADDQ $4*SIZE, C1;
- JMP .L22_loopE;
- ALIGN_5
- .L223_loopEx:;
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C0), xvec0, xvec0;
- LDH_DX 1*SIZE(C0), xvec0, xvec0;
- LDL_DX 2*SIZE(C0), xvec1, xvec1;
- LDH_DX 3*SIZE(C0), xvec1, xvec1;
- ADD_DX xvec0, xvec11, xvec11;
- ADD_DX xvec1, xvec10, xvec10;
- #endif
- STL_DX xvec11, 0*SIZE(C0);
- STH_DX xvec11, 1*SIZE(C0);
- STL_DX xvec10, 2*SIZE(C0);
- STH_DX xvec10, 3*SIZE(C0);
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C1), xvec4, xvec4;
- LDH_DX 1*SIZE(C1), xvec4, xvec4;
- LDL_DX 2*SIZE(C1), xvec5, xvec5;
- LDH_DX 3*SIZE(C1), xvec5, xvec5;
- ADD_DX xvec4, xvec15, xvec15;
- ADD_DX xvec5, xvec14, xvec14;
- #endif
- STL_DX xvec15, 0*SIZE(C1);
- STH_DX xvec15, 1*SIZE(C1);
- STL_DX xvec14, 2*SIZE(C1);
- STH_DX xvec14, 3*SIZE(C1);
- #if (defined(TRMMKERNEL)&& defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&& !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $4, kk
- #endif
- ADDQ $4*SIZE, C0;
- ADDQ $4*SIZE, C1;
- .L22_loopE:;
- TEST $2, bm; # Rm = 2
- JLE .L23_loopE;
- ALIGN_5;
- .L23_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- XOR_DY yvec15, yvec15, yvec15;
- XOR_DY yvec11, yvec11, yvec11;
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $2, %rax;
- #else
- ADDQ $2, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L231_loopE;
- ALIGN_5
- .L231_bodyB:
- # Computing kernel
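- # 2x2 kernel: xvec0 holds two A values and xvec4 a B pair; the SHUF_DX
- # $0x4e copy (xvec5) supplies the swapped pair, so xvec15 accumulates the
- # straight products and xvec11 the cross products, unrolled four deep.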
- #### Unroll time 1 ####
- LD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec4;
- SHUF_DX $0x4e, xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec11, xvec11;
- #### Unroll time 2 ####
- LD_DX 2*SIZE(ptrba), xvec0;
- LD_DX 2*SIZE(ptrbb), xvec4;
- SHUF_DX $0x4e, xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec11, xvec11;
- #### Unroll time 3 ####
- LD_DX 4*SIZE(ptrba), xvec0;
- LD_DX 4*SIZE(ptrbb), xvec4;
- SHUF_DX $0x4e, xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec11, xvec11;
- #### Unroll time 4 ####
- LD_DX 6*SIZE(ptrba), xvec0;
- LD_DX 6*SIZE(ptrbb), xvec4;
- SHUF_DX $0x4e, xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
- ADDQ $8*SIZE, ptrba;
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec11, xvec11;
- ADDQ $8*SIZE, ptrbb;
- DECQ k;
- JG .L231_bodyB;
- ALIGN_5
- .L231_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L232_loopE;
- ALIGN_5
- .L232_bodyB:
- #### Unroll time 1 ####
- LD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec4;
- SHUF_DX $0x4e, xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
-
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec11, xvec11;
- #### Unroll time 2 ####
- LD_DX 2*SIZE(ptrba), xvec0;
- LD_DX 2*SIZE(ptrbb), xvec4;
- SHUF_DX $0x4e, xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
- ADDQ $4*SIZE, ptrba;
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec11, xvec11;
- ADDQ $4*SIZE, ptrbb;
- .L232_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L233_loopE;
- ALIGN_5
- .L233_bodyB:
- #### Unroll time 1 ####
- LD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec4;
- SHUF_DX $0x4e, xvec4, xvec5;
- MUL_DX xvec0, xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
- ADDQ $2*SIZE, ptrba;
- MUL_DX xvec0, xvec5, xvec5;
- ADD_DX xvec5, xvec11, xvec11;
- ADDQ $2*SIZE, ptrbb;
- .L233_loopE:
- #### Multiply Alpha ####
- BROAD_DX MEMALPHA, xvec7;
- MUL_DX xvec7, xvec15, xvec15;
- MUL_DX xvec7, xvec11, xvec11;
- #### Reverse ####
- MOV_DX xvec15, xvec6;
- REVS_DX xvec11, xvec15, xvec15;
- REVS_DX xvec6, xvec11, xvec11;
- #### Testing Alignment ####
- MOVQ C0, %rax;
- OR ldc, %rax;
- TEST $15, %rax;
- JNE .L233_loopEx;
- ALIGN_5
- #### Writing Back ####
- #ifndef TRMMKERNEL
- ADD_DX 0*SIZE(C0), xvec11, xvec11;
- ADD_DX 0*SIZE(C1), xvec15, xvec15;
- #endif
- ST_DX xvec11, 0*SIZE(C0);
- ST_DX xvec15, 0*SIZE(C1);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $2, kk;
- #endif
- ADDQ $2*SIZE, C0;
- ADDQ $2*SIZE, C1;
- JMP .L23_loopE;
- ALIGN_5
- .L233_loopEx:;
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C0), xvec0, xvec0;
- LDH_DX 1*SIZE(C0), xvec0, xvec0;
- ADD_DX xvec0, xvec11, xvec11;
- #endif
- STL_DX xvec11, 0*SIZE(C0);
- STH_DX xvec11, 1*SIZE(C0);
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C1), xvec4, xvec4;
- LDH_DX 1*SIZE(C1), xvec4, xvec4;
- ADD_DX xvec4, xvec15, xvec15;
- #endif
- STL_DX xvec15, 0*SIZE(C1);
- STH_DX xvec15, 1*SIZE(C1);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $2, kk;
- #endif
- ADDQ $2*SIZE, C0;
- ADDQ $2*SIZE, C1;
- .L23_loopE:
- TEST $1, bm; # Rm = 1
- JLE .L24_loopE;
- ALIGN_5;
- .L24_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- ADDQ %rax, ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- XOR_DY yvec15, yvec15, yvec15;
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $1, %rax;
- #else
- ADDQ $2, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L241_loopE;
- ALIGN_5
- .L241_bodyB:
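- # 1x2 kernel: BROAD_DX duplicates one A value into both lanes and LD_DX
- # loads the b0/b1 pair, so a single xmm accumulator (xvec15) collects
- # this row's contribution to both C columns at once.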
- BROAD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec2;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
-
- BROAD_DX 1*SIZE(ptrba), xvec1;
- LD_DX 2*SIZE(ptrbb), xvec3;
- MUL_DX xvec1, xvec3, xvec3;
- ADD_DX xvec3, xvec15, xvec15;
-
- BROAD_DX 2*SIZE(ptrba), xvec0;
- LD_DX 4*SIZE(ptrbb), xvec2;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
-
- BROAD_DX 3*SIZE(ptrba), xvec1;
- LD_DX 6*SIZE(ptrbb), xvec3;
- MUL_DX xvec1, xvec3, xvec3;
- ADD_DX xvec3, xvec15, xvec15;
- ADDQ $4*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
- DECQ k;
- JG .L241_bodyB;
- ALIGN_5
- .L241_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L242_loopE;
- ALIGN_5
- .L242_bodyB:
- BROAD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec2;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
-
- BROAD_DX 1*SIZE(ptrba), xvec1;
- LD_DX 2*SIZE(ptrbb), xvec3;
- MUL_DX xvec1, xvec3, xvec3;
- ADD_DX xvec3, xvec15, xvec15;
- ADDQ $2*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
- .L242_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L243_loopE;
- ALIGN_5
- .L243_bodyB:
- BROAD_DX 0*SIZE(ptrba), xvec0;
- LD_DX 0*SIZE(ptrbb), xvec2;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
- ADDQ $1*SIZE, ptrba;
- ADDQ $2*SIZE, ptrbb;
-
- .L243_loopE:
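- # Write-back for the 1x2 result: after scaling by alpha, the low half of
- # xvec15 goes to C0 and the high half to C1 (with the existing C values
- # folded in for the non-TRMM build).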
- BROAD_DX MEMALPHA, xvec7;
- MUL_DX xvec7, xvec15, xvec15;
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C0), xvec0, xvec0;
- LDH_DX 0*SIZE(C1), xvec0, xvec0;
- ADD_DX xvec0, xvec15, xvec15;
- #endif
- STL_DX xvec15, 0*SIZE(C0);
- STH_DX xvec15, 0*SIZE(C1);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- ADDQ %rax, ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $1, kk;
- #endif
- ADDQ $1*SIZE, C0;
- ADDQ $1*SIZE, C1;
- .L24_loopE:
- #if defined(TRMMKERNEL) && !defined(LEFT)
- ADDQ $2, kk;
- #endif
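- # Advance bb past the packed two-column B panel (bk*2 doubles, hence the
- # shift by 4 with SIZE = 8 bytes) and move C forward by two columns.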
- MOVQ bk, k;
- SALQ $4, k;
- ADDQ k, bb;
- LEAQ (C, ldc, 2), C;
- .L20_loopE:;
- TEST $1, bn; # Rn = 1
- JLE .L30_loopE;
- ALIGN_5
- .L30_bodyB:
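- # Rn = 1: last single column of B. Only C0 is written; the M loop below
- # handles blocks of 8, 4, 2 and 1 rows in turn.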
- #if defined(TRMMKERNEL)&&defined(LEFT)
- MOVQ OFFSET, %rax;
- MOVQ %rax, kk;
- #endif
- MOVQ C, C0;
- MOVQ ba, ptrba;
- MOVQ bm, i;
- SARQ $3, i;
- JLE .L31_loopE;
- ALIGN_5
- .L31_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #### Initial Results Register ####
- XOR_DY yvec15, yvec15, yvec15;
- XOR_DY yvec14, yvec14, yvec14;
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $8, %rax;
- #else
- ADDQ $1, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L311_loopE;
- ALIGN_5
- .L311_bodyB:
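- # 8x1 kernel: LD_DY pulls four doubles of A per 256-bit register and
- # BROAD_DY splats one B value across a ymm, so each unroll adds eight
- # products into yvec15/yvec14.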
- #### Unroll time 1 ####
- LD_DY 0*SIZE(ptrba), yvec0;
- LD_DY 4*SIZE(ptrba), yvec1;
- BROAD_DY 0*SIZE(ptrbb), yvec2;
- MUL_DY yvec2, yvec0, yvec0;
- ADD_DY yvec0, yvec15, yvec15;
- MUL_DY yvec2, yvec1, yvec1;
- ADD_DY yvec1, yvec14, yvec14;
-
- #### Unroll time 2 ####
- LD_DY 8*SIZE(ptrba), yvec3;
- LD_DY 12*SIZE(ptrba), yvec4;
- BROAD_DY 1*SIZE(ptrbb), yvec5;
- MUL_DY yvec5, yvec3, yvec3;
- ADD_DY yvec3, yvec15, yvec15;
- MUL_DY yvec5, yvec4, yvec4
- ADD_DY yvec4, yvec14, yvec14;
-
- #### Unroll time 3 ####
- LD_DY 16*SIZE(ptrba), yvec0;
- LD_DY 20*SIZE(ptrba), yvec1;
- BROAD_DY 2*SIZE(ptrbb), yvec2;
- MUL_DY yvec2, yvec0, yvec0;
- ADD_DY yvec0, yvec15, yvec15;
- MUL_DY yvec2, yvec1, yvec1;
- ADD_DY yvec1, yvec14, yvec14;
-
- #### Unroll time 4 ####
- LD_DY 24*SIZE(ptrba), yvec3;
- LD_DY 28*SIZE(ptrba), yvec4;
- BROAD_DY 3*SIZE(ptrbb), yvec5;
- MUL_DY yvec5, yvec3, yvec3;
- ADD_DY yvec3, yvec15, yvec15;
- ADDQ $32*SIZE, ptrba;
- MUL_DY yvec5, yvec4, yvec4;
- ADD_DY yvec4, yvec14, yvec14;
- ADDQ $4*SIZE, ptrbb;
- DECQ k;
- JG .L311_bodyB;
- ALIGN_5
- .L311_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L312_loopE;
- ALIGN_5
- .L312_bodyB:
- #### Unroll time 1 ####
- LD_DY 0*SIZE(ptrba), yvec0;
- LD_DY 4*SIZE(ptrba), yvec1;
- BROAD_DY 0*SIZE(ptrbb), yvec2;
- MUL_DY yvec2, yvec0, yvec0;
- ADD_DY yvec0, yvec15, yvec15;
- MUL_DY yvec2, yvec1, yvec1;
- ADD_DY yvec1, yvec14, yvec14;
-
- #### Unroll time 2 ####
- LD_DY 8*SIZE(ptrba), yvec3;
- LD_DY 12*SIZE(ptrba), yvec4;
- BROAD_DY 1*SIZE(ptrbb), yvec5;
- MUL_DY yvec5, yvec3, yvec3;
- ADD_DY yvec3, yvec15, yvec15;
- ADDQ $16*SIZE, ptrba;
- MUL_DY yvec5, yvec4, yvec4
- ADD_DY yvec4, yvec14, yvec14;
- ADDQ $2*SIZE, ptrbb;
-
- .L312_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L313_loopE;
- ALIGN_5
- .L313_bodyB:
- #### Unroll time 1 ####
- LD_DY 0*SIZE(ptrba), yvec0;
- LD_DY 4*SIZE(ptrba), yvec1;
- BROAD_DY 0*SIZE(ptrbb), yvec2;
- MUL_DY yvec2, yvec0, yvec0;
- ADD_DY yvec0, yvec15, yvec15;
- ADDQ $8*SIZE, ptrba;
- MUL_DY yvec2, yvec1, yvec1;
- ADD_DY yvec1, yvec14, yvec14;
- ADDQ $1*SIZE, ptrbb;
-
- .L313_loopE:
- #### Multiply Alpha ####
- BROAD_DY MEMALPHA, yvec7;
- MUL_DY yvec7, yvec15, yvec15;
- MUL_DY yvec7, yvec14, yvec14;
- #### Testing Alignment ####
- MOVQ C0, %rax;
- OR ldc, %rax;
- TEST $15, %rax;
- JNE .L313_loopEx;
- ALIGN_5
- #### Writing Back ####
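- # EXTRA_DY $1 presumably extracts the upper 128-bit lane
- # (vextractf128 style) so the 256-bit accumulators can be combined with C
- # and stored with 128-bit operations.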
- EXTRA_DY $1, yvec15, xvec13;
- EXTRA_DY $1, yvec14, xvec12;
- #ifndef TRMMKERNEL
- ADD_DX 0*SIZE(C0), xvec15, xvec15;
- ADD_DX 2*SIZE(C0), xvec13, xvec13;
- ADD_DX 4*SIZE(C0), xvec14, xvec14;
- ADD_DX 6*SIZE(C0), xvec12, xvec12;
- #endif
- ST_DX xvec15, 0*SIZE(C0);
- ST_DX xvec13, 2*SIZE(C0);
- ST_DX xvec14, 4*SIZE(C0);
- ST_DX xvec12, 6*SIZE(C0);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #if defined(TRMMKERNEL)&&defined(LEFT)
- ADDQ $8, kk;
- #endif
- ADDQ $8*SIZE, C0;
- DECQ i;
- JG .L31_bodyB;
- JMP .L31_loopE;
- ALIGN_5
- .L313_loopEx:
- EXTRA_DY $1, yvec15, xvec13;
- EXTRA_DY $1, yvec14, xvec12;
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C0), xvec11, xvec11;
- LDH_DX 1*SIZE(C0), xvec11, xvec11;
- LDL_DX 2*SIZE(C0), xvec10, xvec10;
- LDH_DX 3*SIZE(C0), xvec10, xvec10;
- LDL_DX 4*SIZE(C0), xvec9, xvec9;
- LDH_DX 5*SIZE(C0), xvec9, xvec9;
- LDL_DX 6*SIZE(C0), xvec8, xvec8;
- LDH_DX 7*SIZE(C0), xvec8, xvec8;
- ADD_DX xvec11, xvec15, xvec15;
- ADD_DX xvec10, xvec13, xvec13;
- ADD_DX xvec9, xvec14, xvec14;
- ADD_DX xvec8, xvec12, xvec12;
- #endif
- STL_DX xvec15, 0*SIZE(C0);
- STH_DX xvec15, 1*SIZE(C0);
- STL_DX xvec13, 2*SIZE(C0);
- STH_DX xvec13, 3*SIZE(C0);
- STL_DX xvec14, 4*SIZE(C0);
- STH_DX xvec14, 5*SIZE(C0);
- STL_DX xvec12, 6*SIZE(C0);
- STH_DX xvec12, 7*SIZE(C0);
- #if (defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #if defined(TRMMKERNEL)&&defined(LEFT)
- ADDQ $8, kk;
- #endif
- ADDQ $8*SIZE, C0;
- DECQ i;
- JG .L31_bodyB;
- .L31_loopE:
- TEST $4, bm
- JLE .L32_loopE;
- ALIGN_5
- .L32_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #### Initial Results Register ####
- XOR_DY yvec15, yvec15, yvec15;
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $4, %rax;
- #else
- ADDQ $1, %rax;
- #endif
- MOVQ %rax, kkk
- #endif
- SARQ $2, k;
- JLE .L321_loopE;
- ALIGN_5
- .L321_bodyB:
- LD_DY 0*SIZE(ptrba), yvec0;
- BROAD_DY 0*SIZE(ptrbb), yvec1;
- MUL_DY yvec0, yvec1, yvec1;
- ADD_DY yvec1, yvec15, yvec15;
-
- LD_DY 4*SIZE(ptrba), yvec2;
- BROAD_DY 1*SIZE(ptrbb), yvec3;
- MUL_DY yvec2, yvec3, yvec3;
- ADD_DY yvec3, yvec15, yvec15;
-
- LD_DY 8*SIZE(ptrba), yvec4;
- BROAD_DY 2*SIZE(ptrbb), yvec5;
- MUL_DY yvec4, yvec5, yvec5;
- ADD_DY yvec5, yvec15, yvec15;
-
- LD_DY 12*SIZE(ptrba), yvec6;
- BROAD_DY 3*SIZE(ptrbb), yvec7;
- MUL_DY yvec6, yvec7, yvec7;
- ADD_DY yvec7, yvec15, yvec15;
- ADDQ $16*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
- DECQ k;
- JG .L321_bodyB;
- ALIGN_5
- .L321_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L322_loopE;
- ALIGN_5
- .L322_bodyB:
- LD_DY 0*SIZE(ptrba), yvec0;
- BROAD_DY 0*SIZE(ptrbb), yvec1;
- MUL_DY yvec0, yvec1, yvec1;
- ADD_DY yvec1, yvec15, yvec15;
-
- LD_DY 4*SIZE(ptrba), yvec2;
- BROAD_DY 1*SIZE(ptrbb), yvec3;
- MUL_DY yvec2, yvec3, yvec3;
- ADD_DY yvec3, yvec15, yvec15;
- ADDQ $8*SIZE, ptrba;
- ADDQ $2*SIZE, ptrbb;
-
- .L322_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L323_loopE;
- ALIGN_5
- .L323_bodyB:
- LD_DY 0*SIZE(ptrba), yvec0;
- BROAD_DY 0*SIZE(ptrbb), yvec1;
- MUL_DY yvec0, yvec1, yvec1;
- ADD_DY yvec1, yvec15, yvec15;
- ADDQ $4*SIZE, ptrba;
- ADDQ $1*SIZE, ptrbb;
-
- .L323_loopE:
- #### Multiply Alpha ####
- BROAD_DY MEMALPHA, yvec7;
- MUL_DY yvec7, yvec15, yvec15;
- #### Testing Alignment ####
- MOVQ C0, %rax;
- OR ldc, %rax;
- TEST $15, %rax;
- JNE .L323_loopEx;
- ALIGN_5
- #### Writing Back ####
- EXTRA_DY $1, yvec15, xvec14;
- #ifndef TRMMKERNEL
- ADD_DX 0*SIZE(C0), xvec15, xvec15;
- ADD_DX 2*SIZE(C0), xvec14, xvec14;
- #endif
- ST_DX xvec15, 0*SIZE(C0);
- ST_DX xvec14, 2*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $4, kk
- #endif
- ADDQ $4*SIZE, C0;
- JMP .L32_loopE;
- ALIGN_5
- .L323_loopEx:
- #### Writing Back ####
- EXTRA_DY $1, yvec15, xvec14;
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C0), xvec13, xvec13;
- LDH_DX 1*SIZE(C0), xvec13, xvec13;
- LDL_DX 2*SIZE(C0), xvec12, xvec12;
- LDH_DX 3*SIZE(C0), xvec12, xvec12;
- ADD_DX xvec13, xvec15, xvec15;
- ADD_DX xvec12, xvec14, xvec14;
- #endif
- STL_DX xvec15, 0*SIZE(C0);
- STH_DX xvec15, 1*SIZE(C0);
- STL_DX xvec14, 2*SIZE(C0);
- STH_DX xvec14, 3*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $4, kk
- #endif
- ADDQ $4*SIZE, C0;
- .L32_loopE:
- TEST $2, bm
- JLE .L33_loopE;
- ALIGN_5
- .L33_bodyB:
- #if !defined(TRMMKERNEL) || (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax
- LEAQ (, %rax, SIZE), %rax
- LEAQ (ptrba, %rax, 2), ptrba
- ADDQ %rax, ptrbb;
- #endif
- #### Initial Result ####
- XOR_DY yvec15, yvec15, yvec15;
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $2, %rax;
- #else
- ADDQ $1, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L331_loopE;
- ALIGN_5
- .L331_bodyB:
- LD_DX 0*SIZE(ptrba), xvec0;
- BROAD_DX 0*SIZE(ptrbb), xvec2;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
-
- LD_DX 2*SIZE(ptrba), xvec1;
- BROAD_DX 1*SIZE(ptrbb), xvec3;
- MUL_DX xvec1, xvec3, xvec3;
- ADD_DX xvec3, xvec15, xvec15;
-
- LD_DX 4*SIZE(ptrba), xvec4;
- BROAD_DX 2*SIZE(ptrbb), xvec5;
- MUL_DX xvec4, xvec5, xvec5;
- ADD_DX xvec5, xvec15, xvec15;
-
- LD_DX 6*SIZE(ptrba), xvec6;
- BROAD_DX 3*SIZE(ptrbb), xvec7;
- MUL_DX xvec6, xvec7, xvec7;
- ADD_DX xvec7, xvec15, xvec15;
- ADDQ $8*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
- DECQ k;
- JG .L331_bodyB;
- ALIGN_5
- .L331_loopE:
- #ifndef TRMMKERNEL
- TEST $2,bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax
- #endif
- JLE .L332_loopE;
- ALIGN_5
- .L332_bodyB:
- LD_DX 0*SIZE(ptrba), xvec0;
- BROAD_DX 0*SIZE(ptrbb), xvec2;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
-
- LD_DX 2*SIZE(ptrba), xvec1;
- BROAD_DX 1*SIZE(ptrbb), xvec3;
- MUL_DX xvec1, xvec3, xvec3;
- ADD_DX xvec3, xvec15, xvec15;
- ADDQ $4*SIZE, ptrba;
- ADDQ $2*SIZE, ptrbb;
- .L332_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L333_loopE;
- ALIGN_5
- .L333_bodyB:
- LD_DX 0*SIZE(ptrba), xvec0;
- BROAD_DX 0*SIZE(ptrbb), xvec2;
- MUL_DX xvec0, xvec2, xvec2;
- ADD_DX xvec2, xvec15, xvec15;
- ADDQ $2*SIZE, ptrba;
- ADDQ $1*SIZE, ptrbb;
- .L333_loopE:
- #### Multiply Alpha ####
- BROAD_DX MEMALPHA, xvec7;
- MUL_DX xvec7, xvec15, xvec15;
- #ifndef TRMMKERNEL
- LDL_DX 0*SIZE(C0), xvec14, xvec14;
- LDH_DX 1*SIZE(C0), xvec14, xvec14;
- ADD_DX xvec14, xvec15, xvec15;
- #endif
- STL_DX xvec15, 0*SIZE(C0);
- STH_DX xvec15, 1*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) ||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- addq $2, kk
- #endif
- ADDQ $2*SIZE, C0;
- .L33_loopE:
- TEST $1, bm
- JLE .L34_loopE;
- ALIGN_5
- .L34_bodyB:
- #if !defined(TRMMKERNEL) || (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bb, ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- LEAQ (, %rax, SIZE), %rax;
- ADDQ %rax, ptrba;
- ADDQ %rax, ptrbb;
- #endif
- XOR_DY yvec15, yvec15, yvec15;
- #ifndef TRMMKERNEL
- MOVQ bk, k;
- #elif (defined(LEFT)&& !defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $1, %rax;
- #else
- ADDQ $1, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L341_loopE;
- ALIGN_5
- .L341_bodyB:
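- # 1x1 kernel: a plain scalar dot product accumulated in xvec15 with
- # vmovsd/vmulsd/vaddsd, unrolled four times per iteration.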
- vmovsd 0*SIZE(ptrba), xvec0;
- vmovsd 0*SIZE(ptrbb), xvec1;
- vmulsd xvec0, xvec1, xvec1;
- vaddsd xvec1, xvec15, xvec15;
-
- vmovsd 1*SIZE(ptrba), xvec0;
- vmovsd 1*SIZE(ptrbb), xvec1;
- vmulsd xvec0, xvec1, xvec1;
- vaddsd xvec1, xvec15, xvec15;
-
- vmovsd 2*SIZE(ptrba), xvec0;
- vmovsd 2*SIZE(ptrbb), xvec1;
- vmulsd xvec0, xvec1, xvec1;
- vaddsd xvec1, xvec15, xvec15;
-
- vmovsd 3*SIZE(ptrba), xvec0;
- vmovsd 3*SIZE(ptrbb), xvec1;
- vmulsd xvec0, xvec1, xvec1;
- vaddsd xvec1, xvec15, xvec15;
- addq $4*SIZE, ptrba;
- addq $4*SIZE, ptrbb;
- decq k;
- JG .L341_bodyB;
- ALIGN_5
- .L341_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- MOVQ kkk, %rax;
- TEST $2, %rax;
- #endif
- JLE .L342_loopE;
- ALIGN_5
- .L342_bodyB:
- vmovsd 0*SIZE(ptrba), xvec0;
- vmovsd 0*SIZE(ptrbb), xvec1;
- vmulsd xvec0, xvec1, xvec1;
- vaddsd xvec1, xvec15, xvec15;
-
- vmovsd 1*SIZE(ptrba), xvec0;
- vmovsd 1*SIZE(ptrbb), xvec1;
- vmulsd xvec0, xvec1, xvec1;
- vaddsd xvec1, xvec15, xvec15;
- addq $2*SIZE, ptrba;
- addq $2*SIZE, ptrbb;
-
- .L342_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk
- #else
- MOVQ kkk, %rax;
- TEST $1, %rax;
- #endif
- JLE .L343_loopE;
- ALIGN_5
- .L343_bodyB:
- vmovsd 0*SIZE(ptrba), xvec0;
- vmovsd 0*SIZE(ptrbb), xvec1;
- vmulsd xvec0, xvec1, xvec1;
- vaddsd xvec1, xvec15, xvec15;
- addq $1*SIZE, ptrba;
- addq $1*SIZE, ptrbb;
-
- .L343_loopE:
- #### Writing Back ####
- vmovsd MEMALPHA, xvec7;
- vmulsd xvec7, xvec15, xvec15;
- #ifndef TRMMKERNEL
- vmovsd 0*SIZE(C0), xvec0;
- vaddsd xvec0, xvec15, xvec15;
- #endif
- vmovsd xvec15, 0*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) ||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- LEAQ (,%rax, SIZE), %rax;
- ADDQ %rax, ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- addq $1, kk
- #endif
- addq $1*SIZE, C0;
- .L34_loopE:
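- # Advance bb past the packed one-column B panel (bk doubles, shift by 3
- # with SIZE = 8 bytes) and move C forward by one column.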
- MOVQ bk, k
- SALQ $3, k;
- ADDQ k, bb;
- LEAQ (C, ldc, 1), C;
-
- .L30_loopE:
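- # Epilogue: reload the callee-saved registers spilled in the prologue,
- # issue vzeroupper so no dirty AVX upper state is left on return, restore
- # rdi/rsi and xmm6-xmm15 for the Windows ABI, then pop the frame.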
- movq 0(%rsp), %rbx;
- movq 8(%rsp), %rbp;
- movq 16(%rsp), %r12;
- movq 24(%rsp), %r13;
- movq 32(%rsp), %r14;
- movq 40(%rsp), %r15;
-
- vzeroupper
-
- #ifdef WINDOWS_ABI
- movq 48(%rsp), %rdi
- movq 56(%rsp), %rsi
- movups 64(%rsp), %xmm6
- movups 80(%rsp), %xmm7
- movups 96(%rsp), %xmm8
- movups 112(%rsp), %xmm9
- movups 128(%rsp), %xmm10
- movups 144(%rsp), %xmm11
- movups 160(%rsp), %xmm12
- movups 176(%rsp), %xmm13
- movups 192(%rsp), %xmm14
- movups 208(%rsp), %xmm15
- #endif
- addq $STACKSIZE, %rsp;
- ret
-
- EPILOGUE
|