- /*****************************************************************************
- Copyright (c) 2011, 2012 Lab of Parallel Software and Computational Science, ISCAS
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- 3. Neither the name of the ISCAS nor the names of its contributors may
- be used to endorse or promote products derived from this software
- without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- **********************************************************************************/
-
- #define ASSEMBLER
- #include "common.h"
-
- #define old_bm %rdi
- #define old_bn %rsi
- #define old_bk %rdx
-
- #define bm %r13
- #define bn %r14
- #define bk %r15
-
- #define ALPHA %xmm0
- #define ba %rcx
- #define bb %r8
- #define C %r9
- #define ldc %r10
-
- #define i %r11
- #define k %rax
-
- #define ptrba %rdi
- #define ptrbb %rsi
- #define C0 %rbx
- #define C1 %rbp
-
- #define prebb %r12
-
- #ifndef WINDOWS_ABI
-
- #define STACKSIZE 128
-
- #define old_ldc 8+STACKSIZE(%rsp)
- #define old_offset 16+STACKSIZE(%rsp)
-
- #define MEMALPHA_R 48(%rsp)
- #define MEMALPHA_I 56(%rsp)
- #define j 64(%rsp)
- #define OFFSET 72(%rsp)
- #define kk 80(%rsp)
- #define kkk 88(%rsp)
-
- #else
-
- #define STACKSIZE 512
-
- #define OLD_ALPHA_I 40 + STACKSIZE(%rsp)
- #define OLD_A 48 + STACKSIZE(%rsp)
- #define OLD_B 56 + STACKSIZE(%rsp)
- #define OLD_C 64 + STACKSIZE(%rsp)
- #define old_ldc 72 + STACKSIZE(%rsp)
- #define old_offset 80 + STACKSIZE(%rsp)
-
- #define MEMALPHA_R 224(%rsp)
- #define MEMALPHA_I 232(%rsp)
- #define j 240(%rsp)
- #define OFFSET 248(%rsp)
- #define kk 256(%rsp)
- #define kkk 264(%rsp)
-
- #endif
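- 
- # Stack layout note: offsets 0..40 hold the six callee-saved GPRs
- # (%rbx, %rbp, %r12-%r15) stored in PROLOGUE below; alpha and the loop
- # bookkeeping slots (j, OFFSET, kk, kkk) sit above them at the offsets
- # defined for each ABI here.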
-
- #define PREFETCH0 prefetcht0
- #define PREFETCH1 prefetcht0
- #define PREFETCH2 prefetcht0
- #define PRESIZE 64
-
- #define xvec0 %xmm0
- #define xvec1 %xmm1
- #define xvec2 %xmm2
- #define xvec3 %xmm3
- #define xvec4 %xmm4
- #define xvec5 %xmm5
- #define xvec6 %xmm6
- #define xvec7 %xmm7
- #define xvec8 %xmm8
- #define xvec9 %xmm9
- #define xvec10 %xmm10
- #define xvec11 %xmm11
- #define xvec12 %xmm12
- #define xvec13 %xmm13
- #define xvec14 %xmm14
- #define xvec15 %xmm15
-
- #define yvec0 %ymm0
- #define yvec1 %ymm1
- #define yvec2 %ymm2
- #define yvec3 %ymm3
- #define yvec4 %ymm4
- #define yvec5 %ymm5
- #define yvec6 %ymm6
- #define yvec7 %ymm7
- #define yvec8 %ymm8
- #define yvec9 %ymm9
- #define yvec10 %ymm10
- #define yvec11 %ymm11
- #define yvec12 %ymm12
- #define yvec13 %ymm13
- #define yvec14 %ymm14
- #define yvec15 %ymm15
-
- #define LEAQ leaq
- #define ADDQ addq
- #define MULQ imulq
- #define SARQ sarq
- #define SALQ salq
- #define ANDQ andq
- #define SUBQ subq
- #define DECQ decq
- #define JG jg
- #define JLE jle
- #define TEST testq
- #define OR orq
- #define JNE jne
- #define JMP jmp
- #define NOP
- #define XOR xorpd
- #undef MOVQ
- #define MOVQ movq
-
- #define XOR_SY vxorps
- #define XOR_SX vxorps
-
- #define LD_SY vmovaps
- #define LD_SX vmovaps
- #define LDL_SX vmovlps
- #define LDL_SY vmovlps
- #define LDH_SX vmovhps
- #define LDH_SY vmovhps
-
- #define ST_SY vmovaps
- #define ST_SX vmovaps
- #define STL_SX vmovlps
- #define STL_SY vmovlps
- #define STH_SX vmovhps
- #define STH_SY vmovhps
-
- #define EDUP_SY vmovsldup
- #define ODUP_SY vmovshdup
- #define EDUP_SX vmovsldup
- #define ODUP_SX vmovshdup
-
- #define ADD_SY vaddps
- #define ADD_SX vaddps
- #define SUB_SY vsubps
- #define SUB_SX vsubps
-
- #define ADDSUB_SY vaddsubps
- #define ADDSUB_SX vaddsubps
-
- #define MUL_SY vmulps
- #define MUL_SX vmulps
-
- #define SHUF_SY vperm2f128
- #define SHUF_SX vpshufd
-
- #define VPERMILP_SY vpermilps
- #define VPERMILP_SX vpermilps
-
- #define BROAD_SY vbroadcastss
- #define BROAD_SX vbroadcastss
-
- #define MOV_SY vmovaps
- #define MOV_SX vmovaps
-
- #define REVS_SY vshufps
- #define REVS_SX vshufps
-
- #define EXTRA_SY vextractf128
-
- #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
- #define ADD1_SY ADD_SY
- #define ADD2_SY ADDSUB_SY
- #define ADD1_SX ADD_SX
- #define ADD2_SX ADDSUB_SX
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- #define ADD1_SY SUB_SY
- #define ADD2_SY ADDSUB_SY
- #define ADD1_SX SUB_SX
- #define ADD2_SX ADDSUB_SX
- #elif defined(RN) || defined(RT) || defined(CN) || defined(CT)
- #define ADD1_SY SUB_SY
- #define ADD2_SY ADDSUB_SY
- #define ADD1_SX SUB_SX
- #define ADD2_SX ADDSUB_SX
- #else
- #define ADD1_SY ADD_SY
- #define ADD2_SY ADDSUB_SY
- #define ADD1_SX ADD_SX
- #define ADD2_SX ADDSUB_SX
- #endif
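- 
- # A sketch of the arithmetic the macro table above implies: for a
- # complex product (ar + i*ai)*(br + i*bi)
- #   = (ar*br - ai*bi) + i*(ai*br + ar*bi),
- # the kernel accumulates the A*br terms through ADD1 and folds in the
- # (swapped A)*bi terms through ADD2 (vaddsubps subtracts in even lanes
- # and adds in odd lanes, yielding the re/im pair above). Switching ADD1
- # between vaddps and vsubps per NN/NR/RN/RR-family variant supplies the
- # sign flips the conjugated cases need.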
-
- PROLOGUE
-
- subq $STACKSIZE, %rsp;
- movq %rbx, 0(%rsp);
- movq %rbp, 8(%rsp);
- movq %r12, 16(%rsp);
- movq %r13, 24(%rsp);
- movq %r14, 32(%rsp);
- movq %r15, 40(%rsp);
-
- #ifdef WINDOWS_ABI
- movq %rdi, 48(%rsp)
- movq %rsi, 56(%rsp)
- movups %xmm6, 64(%rsp)
- movups %xmm7, 80(%rsp)
- movups %xmm8, 96(%rsp)
- movups %xmm9, 112(%rsp)
- movups %xmm10, 128(%rsp)
- movups %xmm11, 144(%rsp)
- movups %xmm12, 160(%rsp)
- movups %xmm13, 176(%rsp)
- movups %xmm14, 192(%rsp)
- movups %xmm15, 208(%rsp)
-
- movq ARG1, old_bm
- movq ARG2, old_bn
- movq ARG3, old_bk
- movq OLD_A, ba
- movq OLD_B, bb
- movq OLD_C, C
- movq old_ldc, ldc
- #ifdef TRMMKERNEL
- movq old_offset, %r11
- #endif
- movaps %xmm3, %xmm0
- movsd OLD_ALPHA_I, %xmm1
- #else
- movq old_ldc, ldc
- #ifdef TRMMKERNEL
- movq old_offset, %r11;
- #endif
- #endif
-
- vzeroupper
-
- vmovlps %xmm0, MEMALPHA_R
- vmovlps %xmm1, MEMALPHA_I
- movq old_bm, bm
- movq old_bn, bn
- movq old_bk, bk
- salq $ZBASE_SHIFT, ldc
- #ifdef TRMMKERNEL
- movq %r11, OFFSET
- #ifndef LEFT
- negq %r11;
- #endif
- movq %r11, kk;
- #endif
-
- MOVQ bn,j;
- SARQ $2,j; # Rn = 4
- JLE .L0_loopE;
- ALIGN_5;
- .L0_bodyB:;
- #if defined(TRMMKERNEL) && defined(LEFT)
- MOVQ OFFSET, %rax;
- MOVQ %rax, kk;
- #endif
- MOVQ C,C0;
- LEAQ (C,ldc,2),C1;
- MOVQ bk, k;
- SALQ $5, k;
- LEAQ (bb, k, 1), prebb; # Rn=4, SIZE=4 COMPLEX=2
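- # prebb appears to point bk*32 bytes past bb, i.e. just beyond the
- # packed B panel (4 columns x 2 floats x 4-byte SIZE per k step); the
- # PREFETCH0 uses below suggest it only warms the cache for upcoming B
- # data.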
- MOVQ ba,ptrba;
- MOVQ bm,i;
- SARQ $3,i; # Rm = 8
- JLE .L1_loopE;
- ALIGN_5;
- .L1_bodyB:;
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- # Initialize result registers
- PREFETCH0 0*SIZE(prebb);
- XOR_SY yvec15, yvec15, yvec15;
- PREFETCH0 16*SIZE(prebb);
- ADDQ $32*SIZE, prebb;
- XOR_SY yvec14, yvec14, yvec14;
- PREFETCH2 3*SIZE(C0);
- XOR_SY yvec13, yvec13, yvec13;
- PREFETCH2 3*SIZE(C0, ldc, 1);
- XOR_SY yvec12, yvec12, yvec12;
- PREFETCH2 3*SIZE(C1);
- EDUP_SY 0*SIZE(ptrbb), yvec2; # Br0, Br1, Br2, Br3
- PREFETCH2 3*SIZE(C1, ldc, 1);
- XOR_SY yvec11, yvec11, yvec11;
- XOR_SY yvec10, yvec10, yvec10;
- LD_SY 0*SIZE(ptrba), yvec0; # Ar0, Ai0, Ar1, Ai1..
- XOR_SY yvec9, yvec9, yvec9;
- XOR_SY yvec8, yvec8, yvec8;
- VPERMILP_SY $0x4e, yvec2, yvec3; # Br2, Br3, Br0, Br1
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $8, %rax;
- #else
- ADDQ $4, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2,k; # Unroll 4 times
- JLE .L2_loopE;
- ALIGN_5;
- .L2_bodyB:;
- # Computing kernel
-
- ######### Unroll 1 ##################
- PREFETCH0 PRESIZE*SIZE(ptrba);
- LD_SY 8*SIZE(ptrba), yvec1; # Ar4, Ai4, Ar5, Ai5..
- MUL_SY yvec0, yvec2, yvec6;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- MUL_SY yvec0, yvec3, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD1_SY yvec6, yvec15, yvec15;
- ADD1_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- ODUP_SY 0*SIZE(ptrbb), yvec2; # Bi0, Bi1, Bi2, Bi3
- MUL_SY yvec1, yvec3, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3; # Bi2, Bi3, Bi0, Bi1
- ADD1_SY yvec6, yvec14, yvec14;
- ADD1_SY yvec7, yvec12, yvec12;
-
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- VPERMILP_SY $0xb1, yvec0, yvec0; # Ai0, Ar0, Ai1, Ar1..
- ADD1_SY yvec6, yvec11, yvec11;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- MUL_SY yvec1, yvec5, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD1_SY yvec6, yvec10, yvec10;
- ADD1_SY yvec7, yvec8, yvec8;
-
- VPERMILP_SY $0xb1, yvec1, yvec1;
- MUL_SY yvec0, yvec2, yvec6;
- MUL_SY yvec0, yvec3, yvec7;
- ADD2_SY yvec6, yvec15, yvec15;
- ADD2_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- EDUP_SY 8*SIZE(ptrbb), yvec2;
- MUL_SY yvec1, yvec3, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- ADD2_SY yvec6, yvec14, yvec14;
- ADD2_SY yvec7, yvec12, yvec12;
-
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- LD_SY 16*SIZE(ptrba), yvec0;
- ADD2_SY yvec6, yvec11, yvec11;
- ADD2_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec6, yvec10, yvec10;
- ADD2_SY yvec7, yvec8, yvec8;
-
- ######### Unroll 2 ##################
- PREFETCH0 (PRESIZE+16)*SIZE(ptrba);
- LD_SY 24*SIZE(ptrba), yvec1; # Ar4, Ai4, Ar5, Ai5..
- MUL_SY yvec0, yvec2, yvec6;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- MUL_SY yvec0, yvec3, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD1_SY yvec6, yvec15, yvec15;
- ADD1_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- ODUP_SY 8*SIZE(ptrbb), yvec2; # Bi0, Bi1, Bi2, Bi3
- MUL_SY yvec1, yvec3, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3; # Bi2, Bi3, Bi0, Bi1
- ADD1_SY yvec6, yvec14, yvec14;
- ADD1_SY yvec7, yvec12, yvec12;
-
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- VPERMILP_SY $0xb1, yvec0, yvec0; # Ai0, Ar0, Ai1, Ar1..
- ADD1_SY yvec6, yvec11, yvec11;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- MUL_SY yvec1, yvec5, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD1_SY yvec6, yvec10, yvec10;
- ADD1_SY yvec7, yvec8, yvec8;
-
- VPERMILP_SY $0xb1, yvec1, yvec1;
- MUL_SY yvec0, yvec2, yvec6;
- MUL_SY yvec0, yvec3, yvec7;
- ADD2_SY yvec6, yvec15, yvec15;
- ADD2_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- EDUP_SY 16*SIZE(ptrbb), yvec2;
- MUL_SY yvec1, yvec3, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- ADD2_SY yvec6, yvec14, yvec14;
- ADD2_SY yvec7, yvec12, yvec12;
-
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- LD_SY 32*SIZE(ptrba), yvec0;
- ADD2_SY yvec6, yvec11, yvec11;
- ADD2_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec6, yvec10, yvec10;
- ADD2_SY yvec7, yvec8, yvec8;
-
- ######### Unroll 3 ##################
- PREFETCH0 (PRESIZE+32)*SIZE(ptrba);
- LD_SY 40*SIZE(ptrba), yvec1; # Ar4, Ai4, Ar5, Ai5..
- MUL_SY yvec0, yvec2, yvec6;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- MUL_SY yvec0, yvec3, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD1_SY yvec6, yvec15, yvec15;
- ADD1_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- ODUP_SY 16*SIZE(ptrbb), yvec2; # Bi0, Bi1, Bi2, Bi3
- MUL_SY yvec1, yvec3, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3; # Bi2, Bi3, Bi0, Bi1
- ADD1_SY yvec6, yvec14, yvec14;
- ADD1_SY yvec7, yvec12, yvec12;
-
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- VPERMILP_SY $0xb1, yvec0, yvec0; # Ai0, Ar0, Ai1, Ar1..
- ADD1_SY yvec6, yvec11, yvec11;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- MUL_SY yvec1, yvec5, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD1_SY yvec6, yvec10, yvec10;
- ADD1_SY yvec7, yvec8, yvec8;
-
- VPERMILP_SY $0xb1, yvec1, yvec1;
- MUL_SY yvec0, yvec2, yvec6;
- MUL_SY yvec0, yvec3, yvec7;
- ADD2_SY yvec6, yvec15, yvec15;
- ADD2_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- EDUP_SY 24*SIZE(ptrbb), yvec2;
- MUL_SY yvec1, yvec3, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- ADD2_SY yvec6, yvec14, yvec14;
- ADD2_SY yvec7, yvec12, yvec12;
-
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- LD_SY 48*SIZE(ptrba), yvec0;
- ADD2_SY yvec6, yvec11, yvec11;
- ADD2_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec6, yvec10, yvec10;
- ADD2_SY yvec7, yvec8, yvec8;
-
- ######### Unroll 4 ##################
- PREFETCH0 (PRESIZE+48)*SIZE(ptrba);
- LD_SY 56*SIZE(ptrba), yvec1; # Ar4, Ai4, Ar5, Ai5..
- MUL_SY yvec0, yvec2, yvec6;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- MUL_SY yvec0, yvec3, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADDQ $64*SIZE, ptrba;
- ADD1_SY yvec6, yvec15, yvec15;
- ADD1_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- ODUP_SY 24*SIZE(ptrbb), yvec2; # Bi0, Bi1, Bi2, Bi3
- MUL_SY yvec1, yvec3, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3; # Bi2, Bi3, Bi0, Bi1
- ADDQ $32*SIZE, ptrbb;
- ADD1_SY yvec6, yvec14, yvec14;
- ADD1_SY yvec7, yvec12, yvec12;
-
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- VPERMILP_SY $0xb1, yvec0, yvec0; # Ai0, Ar0, Ai1, Ar1..
- ADD1_SY yvec6, yvec11, yvec11;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- MUL_SY yvec1, yvec5, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD1_SY yvec6, yvec10, yvec10;
- ADD1_SY yvec7, yvec8, yvec8;
-
- VPERMILP_SY $0xb1, yvec1, yvec1;
- MUL_SY yvec0, yvec2, yvec6;
- MUL_SY yvec0, yvec3, yvec7;
- ADD2_SY yvec6, yvec15, yvec15;
- ADD2_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- EDUP_SY 0*SIZE(ptrbb), yvec2;
- MUL_SY yvec1, yvec3, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- ADD2_SY yvec6, yvec14, yvec14;
- ADD2_SY yvec7, yvec12, yvec12;
-
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- LD_SY 0*SIZE(ptrba), yvec0;
- ADD2_SY yvec6, yvec11, yvec11;
- ADD2_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec6, yvec10, yvec10;
- ADD2_SY yvec7, yvec8, yvec8;
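- 
- # One pass of the unrolled body above covers 4 k-steps of the 8x4
- # complex tile: 64 floats of packed A (8 complex rows x 4 k-steps) and
- # 32 floats of packed B (4 complex columns x 4 k-steps), matching the
- # ADDQ $64*SIZE / $32*SIZE pointer bumps in Unroll 4.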
- .L2_bodyE:;
- DECQ k;
- JG .L2_bodyB;
- ALIGN_5
- .L2_loopE:;
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L3_loopE;
- ALIGN_5
- .L3_loopB:
- ######### Unroll 1 ##################
- PREFETCH0 PRESIZE*SIZE(ptrba)
- LD_SY 8*SIZE(ptrba), yvec1; # Ar4, Ai4, Ar5, Ai5..
- MUL_SY yvec0, yvec2, yvec6;
- MUL_SY yvec0, yvec3, yvec7;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- ADD1_SY yvec6, yvec15, yvec15;
- ADD1_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- MUL_SY yvec1, yvec3, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD1_SY yvec6, yvec14, yvec14;
- ADD1_SY yvec7, yvec12, yvec12;
-
- ODUP_SY 0*SIZE(ptrbb), yvec2; # Bi0, Bi1, Bi2, Bi3
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3; # Bi2, Bi3, Bi0, Bi1
- ADD1_SY yvec6, yvec11, yvec11;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- MUL_SY yvec1, yvec5, yvec7;
- VPERMILP_SY $0xb1, yvec0, yvec0; # Ai0, Ar0, Ai1, Ar1..
- ADD1_SY yvec6, yvec10, yvec10;
- ADD1_SY yvec7, yvec8, yvec8;
-
- VPERMILP_SY $0xb1, yvec1, yvec1;
- MUL_SY yvec0, yvec2, yvec6;
- MUL_SY yvec0, yvec3, yvec7;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- ADD2_SY yvec6, yvec15, yvec15;
- ADD2_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- MUL_SY yvec1, yvec3, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD2_SY yvec6, yvec14, yvec14;
- ADD2_SY yvec7, yvec12, yvec12;
-
- EDUP_SY 8*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- ADD2_SY yvec6, yvec11, yvec11;
- ADD2_SY yvec7, yvec9, yvec9;
-
- LD_SY 16*SIZE(ptrba), yvec0;
- MUL_SY yvec1, yvec4, yvec6;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec6, yvec10, yvec10;
- ADD2_SY yvec7, yvec8, yvec8;
-
- ######### Unroll 2 ##################
- PREFETCH0 (PRESIZE+16)*SIZE(ptrba)
- LD_SY 24*SIZE(ptrba), yvec1; # Ar4, Ai4, Ar5, Ai5..
- MUL_SY yvec0, yvec2, yvec6;
- MUL_SY yvec0, yvec3, yvec7;
- ADDQ $32*SIZE, ptrba
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- ADD1_SY yvec6, yvec15, yvec15;
- ADD1_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- MUL_SY yvec1, yvec3, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD1_SY yvec6, yvec14, yvec14;
- ADD1_SY yvec7, yvec12, yvec12;
-
- ODUP_SY 8*SIZE(ptrbb), yvec2; # Bi0, Bi1, Bi2, Bi3
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- ADDQ $16*SIZE, ptrbb;
- VPERMILP_SY $0x4e, yvec2, yvec3; # Bi2, Bi3, Bi0, Bi1
- ADD1_SY yvec6, yvec11, yvec11;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- MUL_SY yvec1, yvec5, yvec7;
- VPERMILP_SY $0xb1, yvec0, yvec0; # Ai0, Ar0, Ai1, Ar1..
- ADD1_SY yvec6, yvec10, yvec10;
- ADD1_SY yvec7, yvec8, yvec8;
-
- VPERMILP_SY $0xb1, yvec1, yvec1;
- MUL_SY yvec0, yvec2, yvec6;
- MUL_SY yvec0, yvec3, yvec7;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- ADD2_SY yvec6, yvec15, yvec15;
- ADD2_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- MUL_SY yvec1, yvec3, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD2_SY yvec6, yvec14, yvec14;
- ADD2_SY yvec7, yvec12, yvec12;
-
- EDUP_SY 0*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- ADD2_SY yvec6, yvec11, yvec11;
- ADD2_SY yvec7, yvec9, yvec9;
-
- LD_SY 0*SIZE(ptrba), yvec0;
- MUL_SY yvec1, yvec4, yvec6;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec6, yvec10, yvec10;
- ADD2_SY yvec7, yvec8, yvec8;
- .L3_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L4_loopE;
- ALIGN_5
- .L4_loopB:;
- ######### Unroll 1 ##################
- PREFETCH0 PRESIZE*SIZE(ptrba)
- LD_SY 8*SIZE(ptrba), yvec1; # Ar4, Ai4, Ar5, Ai5..
- MUL_SY yvec0, yvec2, yvec6;
- MUL_SY yvec0, yvec3, yvec7;
- ADDQ $16*SIZE, ptrba;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- ADD1_SY yvec6, yvec15, yvec15;
- ADD1_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- MUL_SY yvec1, yvec3, yvec7;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- ADD1_SY yvec6, yvec14, yvec14;
- ADD1_SY yvec7, yvec12, yvec12;
-
- ODUP_SY 0*SIZE(ptrbb), yvec2; # Bi0, Bi1, Bi2, Bi3
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- ADDQ $8*SIZE, ptrbb;
- VPERMILP_SY $0x4e, yvec2, yvec3; # Bi2, Bi3, Bi0, Bi1
- ADD1_SY yvec6, yvec11, yvec11;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- MUL_SY yvec1, yvec5, yvec7;
- VPERMILP_SY $0xb1, yvec0, yvec0; # Ai0, Ar0, Ai1, Ar1..
- ADD1_SY yvec6, yvec10, yvec10;
- ADD1_SY yvec7, yvec8, yvec8;
-
- VPERMILP_SY $0xb1, yvec1, yvec1;
- MUL_SY yvec0, yvec2, yvec6;
- MUL_SY yvec0, yvec3, yvec7;
- SHUF_SY $0x03, yvec2, yvec2, yvec4; # Br1, Br0, Br3, Br2
- ADD2_SY yvec6, yvec15, yvec15;
- ADD2_SY yvec7, yvec13, yvec13;
-
- MUL_SY yvec1, yvec2, yvec6;
- ADD2_SY yvec6, yvec14, yvec14;
- SHUF_SY $0x03, yvec3, yvec3, yvec5; # Br3, Br2, Br1, Br0
- MUL_SY yvec1, yvec3, yvec7;
- ADD2_SY yvec7, yvec12, yvec12;
-
- MUL_SY yvec0, yvec4, yvec6;
- MUL_SY yvec0, yvec5, yvec7;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- ADD2_SY yvec6, yvec11, yvec11;
- ADD2_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec4, yvec6;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec6, yvec10, yvec10;
- ADD2_SY yvec7, yvec8, yvec8;
-
- .L4_loopE:;
- #### Handle conjugation variants ####
- XOR_SY yvec7, yvec7, yvec7;
- #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
- ADDSUB_SY yvec15, yvec7, yvec15;
- ADDSUB_SY yvec14, yvec7, yvec14;
- ADDSUB_SY yvec13, yvec7, yvec13;
- ADDSUB_SY yvec12, yvec7, yvec12;
- ADDSUB_SY yvec11, yvec7, yvec11;
- ADDSUB_SY yvec10, yvec7, yvec10;
- ADDSUB_SY yvec9, yvec7, yvec9;
- ADDSUB_SY yvec8, yvec7, yvec8;
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- SUB_SY yvec15, yvec7, yvec15;
- SUB_SY yvec14, yvec7, yvec14;
- SUB_SY yvec13, yvec7, yvec13;
- SUB_SY yvec12, yvec7, yvec12;
- SUB_SY yvec11, yvec7, yvec11;
- SUB_SY yvec10, yvec7, yvec10;
- SUB_SY yvec9, yvec7, yvec9;
- SUB_SY yvec8, yvec7, yvec8;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- VPERMILP_SY $0xb1, yvec15, yvec15;
- VPERMILP_SY $0xb1, yvec14, yvec14;
- VPERMILP_SY $0xb1, yvec13, yvec13;
- VPERMILP_SY $0xb1, yvec12, yvec12;
- VPERMILP_SY $0xb1, yvec11, yvec11;
- VPERMILP_SY $0xb1, yvec10, yvec10;
- VPERMILP_SY $0xb1, yvec9, yvec9;
- VPERMILP_SY $0xb1, yvec8, yvec8;
- ADDSUB_SY yvec15, yvec7, yvec15;
- ADDSUB_SY yvec14, yvec7, yvec14;
- ADDSUB_SY yvec13, yvec7, yvec13;
- ADDSUB_SY yvec12, yvec7, yvec12;
- ADDSUB_SY yvec11, yvec7, yvec11;
- ADDSUB_SY yvec10, yvec7, yvec10;
- ADDSUB_SY yvec9, yvec7, yvec9;
- ADDSUB_SY yvec8, yvec7, yvec8;
- VPERMILP_SY $0xb1, yvec15, yvec15;
- VPERMILP_SY $0xb1, yvec14, yvec14;
- VPERMILP_SY $0xb1, yvec13, yvec13;
- VPERMILP_SY $0xb1, yvec12, yvec12;
- VPERMILP_SY $0xb1, yvec11, yvec11;
- VPERMILP_SY $0xb1, yvec10, yvec10;
- VPERMILP_SY $0xb1, yvec9, yvec9;
- VPERMILP_SY $0xb1, yvec8, yvec8;
- #endif
- ##### Load Alpha ####
- BROAD_SY MEMALPHA_R,yvec7;
- BROAD_SY MEMALPHA_I,yvec6;
- ##### Multiply Alpha ####
- VPERMILP_SY $0xb1,yvec15, yvec5;
- MUL_SY yvec15, yvec7, yvec15;
- MUL_SY yvec5, yvec6, yvec5;
- ADDSUB_SY yvec5, yvec15, yvec15;
- VPERMILP_SY $0xb1,yvec14, yvec4;
- MUL_SY yvec14, yvec7, yvec14;
- MUL_SY yvec4, yvec6, yvec4;
- ADDSUB_SY yvec4, yvec14, yvec14;
- VPERMILP_SY $0xb1,yvec13, yvec3;
- MUL_SY yvec13, yvec7, yvec13;
- MUL_SY yvec3, yvec6, yvec3;
- ADDSUB_SY yvec3, yvec13, yvec13;
- VPERMILP_SY $0xb1,yvec12, yvec2;
- MUL_SY yvec12, yvec7, yvec12;
- MUL_SY yvec2, yvec6, yvec2;
- ADDSUB_SY yvec2, yvec12, yvec12;
- VPERMILP_SY $0xb1,yvec11, yvec1;
- MUL_SY yvec11, yvec7, yvec11;
- MUL_SY yvec1, yvec6, yvec1;
- ADDSUB_SY yvec1, yvec11, yvec11;
- VPERMILP_SY $0xb1,yvec10, yvec0;
- MUL_SY yvec10, yvec7, yvec10;
- MUL_SY yvec0, yvec6, yvec0;
- ADDSUB_SY yvec0, yvec10, yvec10;
- VPERMILP_SY $0xb1,yvec9, yvec5;
- MUL_SY yvec9, yvec7, yvec9;
- MUL_SY yvec5, yvec6, yvec5;
- ADDSUB_SY yvec5, yvec9, yvec9;
- VPERMILP_SY $0xb1,yvec8, yvec4;
- MUL_SY yvec8, yvec7, yvec8;
- MUL_SY yvec4, yvec6, yvec4;
- ADDSUB_SY yvec4, yvec8, yvec8;
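- # The block above scales each complex accumulator c by
- # alpha = (alpha_r, alpha_i):
- #   re <- re*alpha_r - im*alpha_i,  im <- im*alpha_r + re*alpha_i,
- # multiplying c by the broadcast alpha_r, the re/im-swapped copy of c
- # by the broadcast alpha_i, and combining the two with vaddsubps.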
- #### Shuffle Results ####
- MOV_SY yvec15,yvec7;
- REVS_SY $0xe4,yvec13,yvec15,yvec15;
- REVS_SY $0xe4,yvec7,yvec13,yvec13;
- MOV_SY yvec14,yvec7;
- REVS_SY $0xe4,yvec12,yvec14,yvec14;
- REVS_SY $0xe4,yvec7,yvec12,yvec12;
- MOV_SY yvec11,yvec7;
- REVS_SY $0xe4,yvec9,yvec11,yvec11;
- REVS_SY $0xe4,yvec7,yvec9,yvec9;
- MOV_SY yvec10,yvec7;
- REVS_SY $0xe4,yvec8,yvec10,yvec10;
- REVS_SY $0xe4,yvec7,yvec8,yvec8;
- #### Store Back ####
- #### Testing alignment ####
- MOVQ C0, %rax;
- OR ldc, %rax;
- TEST $15, %rax;
- JNE .L4_loopEx;
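- # If C0 or ldc has any of its low four bits set, the 16-byte-aligned
- # vmovaps stores on the fast path below would fault, so the OR/TEST
- # pair routes such cases to .L4_loopEx, which stores through
- # vmovlps/vmovhps instead.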
- ALIGN_5
- EXTRA_SY $1,yvec15,xvec7;
- EXTRA_SY $1,yvec14,xvec6;
- EXTRA_SY $1,yvec13,xvec5;
- EXTRA_SY $1,yvec12,xvec4;
- EXTRA_SY $1,yvec11,xvec3;
- EXTRA_SY $1,yvec10,xvec2;
- EXTRA_SY $1,yvec9,xvec1;
- EXTRA_SY $1,yvec8,xvec0;
- #ifndef TRMMKERNEL
- ADD_SY 0*SIZE(C0),xvec15, xvec15;
- ADD_SY 4*SIZE(C1),xvec7, xvec7;
- ADD_SY 8*SIZE(C0),xvec14, xvec14;
- ADD_SY 12*SIZE(C1),xvec6, xvec6;
- ADD_SY 0*SIZE(C0,ldc,1),xvec13, xvec13;
- ADD_SY 4*SIZE(C1,ldc,1),xvec5, xvec5;
- ADD_SY 8*SIZE(C0,ldc,1),xvec12, xvec12;
- ADD_SY 12*SIZE(C1,ldc,1),xvec4, xvec4;
- ADD_SY 0*SIZE(C1),xvec11, xvec11;
- ADD_SY 4*SIZE(C0),xvec3, xvec3;
- ADD_SY 8*SIZE(C1),xvec10, xvec10;
- ADD_SY 12*SIZE(C0),xvec2, xvec2;
- ADD_SY 0*SIZE(C1,ldc,1),xvec9, xvec9;
- ADD_SY 4*SIZE(C0,ldc,1),xvec1, xvec1;
- ADD_SY 8*SIZE(C1,ldc,1),xvec8, xvec8;
- ADD_SY 12*SIZE(C0,ldc,1),xvec0, xvec0;
- #endif
- ST_SY xvec15,0*SIZE(C0);
- ST_SY xvec7,4*SIZE(C1);
- ST_SY xvec14,8*SIZE(C0);
- ST_SY xvec6,12*SIZE(C1);
- ST_SY xvec13,0*SIZE(C0,ldc,1);
- ST_SY xvec5,4*SIZE(C1,ldc,1);
- ST_SY xvec12,8*SIZE(C0,ldc,1);
- ST_SY xvec4,12*SIZE(C1,ldc,1);
- ST_SY xvec11,0*SIZE(C1);
- ST_SY xvec3,4*SIZE(C0);
- ST_SY xvec10,8*SIZE(C1);
- ST_SY xvec2,12*SIZE(C0);
- ST_SY xvec9,0*SIZE(C1,ldc,1);
- ST_SY xvec1,4*SIZE(C0,ldc,1);
- ST_SY xvec8,8*SIZE(C1,ldc,1);
- ST_SY xvec0,12*SIZE(C0,ldc,1);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $8, kk;
- #endif
- ADDQ $16*SIZE,C0;
- ADDQ $16*SIZE,C1;
- .L1_bodyE:;
- DECQ i;
- JG .L1_bodyB;
- JMP .L1_loopE;
- ALIGN_5
- .L4_loopEx:
- EXTRA_SY $1, yvec15, xvec7;
- #ifndef TRMMKERNEL
- LDL_SY 0*SIZE(C0), xvec6, xvec6;
- LDH_SY 2*SIZE(C0), xvec6, xvec6;
- ADD_SY xvec6, xvec15, xvec15;
- #endif
- STL_SY xvec15, 0*SIZE(C0);
- STH_SY xvec15, 2*SIZE(C0);
- #ifndef TRMMKERNEL
- LDL_SY 4*SIZE(C1), xvec5, xvec5;
- LDH_SY 6*SIZE(C1), xvec5, xvec5;
- ADD_SY xvec5, xvec7, xvec7;
- #endif
- STL_SY xvec7, 4*SIZE(C1);
- STH_SY xvec7, 6*SIZE(C1);
-
- EXTRA_SY $1, yvec14, xvec6;
- #ifndef TRMMKERNEL
- LDL_SY 8*SIZE(C0), xvec5, xvec5;
- LDH_SY 10*SIZE(C0), xvec5, xvec5;
- ADD_SY xvec5, xvec14, xvec14;
- #endif
- STL_SY xvec14, 8*SIZE(C0);
- STH_SY xvec14, 10*SIZE(C0);
- #ifndef TRMMKERNEL
- LDL_SY 12*SIZE(C1), xvec4, xvec4;
- LDH_SY 14*SIZE(C1), xvec4, xvec4;
- ADD_SY xvec4, xvec6, xvec6;
- #endif
- STL_SY xvec6, 12*SIZE(C1);
- STH_SY xvec6, 14*SIZE(C1);
-
- EXTRA_SY $1, yvec13, xvec5;
- #ifndef TRMMKERNEL
- LDL_SY 0*SIZE(C0, ldc, 1), xvec4, xvec4;
- LDH_SY 2*SIZE(C0, ldc, 1), xvec4, xvec4;
- ADD_SY xvec4, xvec13, xvec13;
- #endif
- STL_SY xvec13, 0*SIZE(C0, ldc, 1);
- STH_SY xvec13, 2*SIZE(C0, ldc, 1);
- #ifndef TRMMKERNEL
- LDL_SY 4*SIZE(C1, ldc, 1), xvec3, xvec3;
- LDH_SY 6*SIZE(C1, ldc, 1), xvec3, xvec3;
- ADD_SY xvec3, xvec5, xvec5;
- #endif
- STL_SY xvec5, 4*SIZE(C1, ldc, 1);
- STH_SY xvec5, 6*SIZE(C1, ldc, 1);
-
- EXTRA_SY $1, yvec12, xvec4;
- #ifndef TRMMKERNEL
- LDL_SY 8*SIZE(C0, ldc, 1), xvec3, xvec3;
- LDH_SY 10*SIZE(C0, ldc, 1), xvec3, xvec3;
- ADD_SY xvec3, xvec12, xvec12;
- #endif
- STL_SY xvec12, 8*SIZE(C0, ldc, 1);
- STH_SY xvec12, 10*SIZE(C0, ldc, 1);
- #ifndef TRMMKERNEL
- LDL_SY 12*SIZE(C1, ldc, 1), xvec2, xvec2;
- LDH_SY 14*SIZE(C1, ldc, 1), xvec2, xvec2;
- ADD_SY xvec2, xvec4, xvec4;
- #endif
- STL_SY xvec4, 12*SIZE(C1, ldc, 1);
- STH_SY xvec4, 14*SIZE(C1, ldc, 1);
-
- EXTRA_SY $1, yvec11, xvec3;
- #ifndef TRMMKERNEL
- LDL_SY 0*SIZE(C1), xvec2, xvec2;
- LDH_SY 2*SIZE(C1), xvec2, xvec2;
- ADD_SY xvec2, xvec11, xvec11;
- #endif
- STL_SY xvec11, 0*SIZE(C1);
- STH_SY xvec11, 2*SIZE(C1);
- #ifndef TRMMKERNEL
- LDL_SY 4*SIZE(C0), xvec1, xvec1;
- LDH_SY 6*SIZE(C0), xvec1, xvec1;
- ADD_SY xvec1, xvec3, xvec3;
- #endif
- STL_SY xvec3, 4*SIZE(C0);
- STH_SY xvec3, 6*SIZE(C0);
-
- EXTRA_SY $1, yvec10, xvec2;
- #ifndef TRMMKERNEL
- LDL_SY 8*SIZE(C1), xvec1, xvec1;
- LDH_SY 10*SIZE(C1), xvec1, xvec1;
- ADD_SY xvec1, xvec10, xvec10;
- #endif
- STL_SY xvec10, 8*SIZE(C1);
- STH_SY xvec10, 10*SIZE(C1);
- #ifndef TRMMKERNEL
- LDL_SY 12*SIZE(C0), xvec0, xvec0;
- LDH_SY 14*SIZE(C0), xvec0, xvec0;
- ADD_SY xvec0, xvec2, xvec2;
- #endif
- STL_SY xvec2, 12*SIZE(C0);
- STH_SY xvec2, 14*SIZE(C0);
-
- EXTRA_SY $1, yvec9, xvec1;
- #ifndef TRMMKERNEL
- LDL_SY 0*SIZE(C1, ldc, 1), xvec7, xvec7;
- LDH_SY 2*SIZE(C1, ldc, 1), xvec7, xvec7;
- ADD_SY xvec7, xvec9, xvec9;
- #endif
- STL_SY xvec9, 0*SIZE(C1, ldc, 1);
- STH_SY xvec9, 2*SIZE(C1, ldc, 1);
- #ifndef TRMMKERNEL
- LDL_SY 4*SIZE(C0, ldc, 1), xvec6, xvec6;
- LDH_SY 6*SIZE(C0, ldc, 1), xvec6, xvec6;
- ADD_SY xvec6, xvec1, xvec1;
- #endif
- STL_SY xvec1, 4*SIZE(C0, ldc, 1);
- STH_SY xvec1, 6*SIZE(C0, ldc, 1);
-
- EXTRA_SY $1, yvec8, xvec0;
- #ifndef TRMMKERNEL
- LDL_SY 8*SIZE(C1, ldc, 1), xvec6, xvec6;
- LDH_SY 10*SIZE(C1, ldc, 1), xvec6, xvec6;
- ADD_SY xvec6, xvec8, xvec8;
- #endif
- STL_SY xvec8, 8*SIZE(C1, ldc, 1);
- STH_SY xvec8, 10*SIZE(C1, ldc, 1);
- #ifndef TRMMKERNEL
- LDL_SY 12*SIZE(C0, ldc, 1), xvec5, xvec5;
- LDH_SY 14*SIZE(C0, ldc, 1), xvec5, xvec5;
- ADD_SY xvec5, xvec0, xvec0;
- #endif
- STL_SY xvec0, 12*SIZE(C0, ldc, 1);
- STH_SY xvec0, 14*SIZE(C0, ldc, 1);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $8, kk;
- #endif
- ADDQ $16*SIZE, C0;
- ADDQ $16*SIZE, C1;
- DECQ i;
- JG .L1_bodyB;
- ALIGN_5;
- .L1_loopE:;
- TEST $4, bm;
- JLE .L5_loopE;
- ALIGN_5
- .L5_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- XOR_SY yvec15, yvec15, yvec15;
- XOR_SY yvec13, yvec13, yvec13;
- XOR_SY yvec11, yvec11, yvec11;
- XOR_SY yvec9, yvec9, yvec9;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $4, %rax; # m block = 4
- #else
- ADDQ $4, %rax; # n block = 4
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L8_loopE;
- ALIGN_5
- .L8_bodyB:
- #### Unroll time 1 ####
- LD_SY 0*SIZE(ptrba), yvec0;
- VPERMILP_SY $0xb1, yvec0, yvec1;
- EDUP_SY 0*SIZE(ptrbb), yvec2;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec0, yvec3, yvec7;
- ADD1_SY yvec7, yvec13, yvec13;
-
- ODUP_SY 0*SIZE(ptrbb), yvec2;
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec0, yvec4, yvec6;
- ADD1_SY yvec6, yvec11, yvec11;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec5, yvec7;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec2, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec1, yvec3, yvec7;
- ADD2_SY yvec7, yvec13, yvec13;
-
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec1, yvec4, yvec6;
- ADD2_SY yvec6, yvec11, yvec11;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec7, yvec9, yvec9;
-
- #### Unroll time 2 ####
- LD_SY 8*SIZE(ptrba), yvec0;
- VPERMILP_SY $0xb1, yvec0, yvec1;
- EDUP_SY 8*SIZE(ptrbb), yvec2;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec0, yvec3, yvec7;
- ADD1_SY yvec7, yvec13, yvec13;
-
- ODUP_SY 8*SIZE(ptrbb), yvec2;
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec0, yvec4, yvec6;
- ADD1_SY yvec6, yvec11, yvec11;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec5, yvec7;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec2, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec1, yvec3, yvec7;
- ADD2_SY yvec7, yvec13, yvec13;
-
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec1, yvec4, yvec6;
- ADD2_SY yvec6, yvec11, yvec11;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec7, yvec9, yvec9;
-
- #### Unroll time 3 ####
- LD_SY 16*SIZE(ptrba), yvec0;
- VPERMILP_SY $0xb1, yvec0, yvec1;
- EDUP_SY 16*SIZE(ptrbb), yvec2;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec0, yvec3, yvec7;
- ADD1_SY yvec7, yvec13, yvec13;
-
- ODUP_SY 16*SIZE(ptrbb), yvec2;
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec0, yvec4, yvec6;
- ADD1_SY yvec6, yvec11, yvec11;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec5, yvec7;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec2, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec1, yvec3, yvec7;
- ADD2_SY yvec7, yvec13, yvec13;
-
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec1, yvec4, yvec6;
- ADD2_SY yvec6, yvec11, yvec11;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec7, yvec9, yvec9;
-
- #### Unroll time 4 ####
- LD_SY 24*SIZE(ptrba), yvec0;
- VPERMILP_SY $0xb1, yvec0, yvec1;
- EDUP_SY 24*SIZE(ptrbb), yvec2;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec0, yvec3, yvec7;
- ADD1_SY yvec7, yvec13, yvec13;
-
- ODUP_SY 24*SIZE(ptrbb), yvec2;
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec0, yvec4, yvec6;
- ADD1_SY yvec6, yvec11, yvec11;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec5, yvec7;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec2, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec1, yvec3, yvec7;
- ADD2_SY yvec7, yvec13, yvec13;
-
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec1, yvec4, yvec6;
- ADD2_SY yvec6, yvec11, yvec11;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec7, yvec9, yvec9;
- ADDQ $32*SIZE, ptrba;
- ADDQ $32*SIZE, ptrbb;
- DECQ k;
- JG .L8_bodyB;
- ALIGN_5
- .L8_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L9_loopE;
- ALIGN_5
- .L9_bodyB:
- #### Unroll time 1 ####
- LD_SY 0*SIZE(ptrba), yvec0;
- VPERMILP_SY $0xb1, yvec0, yvec1;
- EDUP_SY 0*SIZE(ptrbb), yvec2;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec0, yvec3, yvec7;
- ADD1_SY yvec7, yvec13, yvec13;
-
- ODUP_SY 0*SIZE(ptrbb), yvec2;
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec0, yvec4, yvec6;
- ADD1_SY yvec6, yvec11, yvec11;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec5, yvec7;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec2, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec1, yvec3, yvec7;
- ADD2_SY yvec7, yvec13, yvec13;
-
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec1, yvec4, yvec6;
- ADD2_SY yvec6, yvec11, yvec11;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec7, yvec9, yvec9;
-
- #### Unroll time 2 ####
- LD_SY 8*SIZE(ptrba), yvec0;
- VPERMILP_SY $0xb1, yvec0, yvec1;
- EDUP_SY 8*SIZE(ptrbb), yvec2;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec0, yvec3, yvec7;
- ADD1_SY yvec7, yvec13, yvec13;
-
- ODUP_SY 8*SIZE(ptrbb), yvec2;
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec0, yvec4, yvec6;
- ADD1_SY yvec6, yvec11, yvec11;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec5, yvec7;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec2, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec1, yvec3, yvec7;
- ADD2_SY yvec7, yvec13, yvec13;
-
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec1, yvec4, yvec6;
- ADD2_SY yvec6, yvec11, yvec11;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec7, yvec9, yvec9;
- ADDQ $16*SIZE, ptrba;
- ADDQ $16*SIZE, ptrbb;
-
- .L9_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L10_loopE;
- ALIGN_5
- .L10_bodyB:
- #### Unroll time 1 ####
- LD_SY 0*SIZE(ptrba), yvec0;
- VPERMILP_SY $0xb1, yvec0, yvec1;
- EDUP_SY 0*SIZE(ptrbb), yvec2;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec0, yvec3, yvec7;
- ADD1_SY yvec7, yvec13, yvec13;
-
- ODUP_SY 0*SIZE(ptrbb), yvec2;
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec0, yvec4, yvec6;
- ADD1_SY yvec6, yvec11, yvec11;
- VPERMILP_SY $0x4e, yvec2, yvec3;
- MUL_SY yvec0, yvec5, yvec7;
- ADD1_SY yvec7, yvec9, yvec9;
-
- MUL_SY yvec1, yvec2, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- SHUF_SY $0x03, yvec2, yvec2, yvec4;
- MUL_SY yvec1, yvec3, yvec7;
- ADD2_SY yvec7, yvec13, yvec13;
-
- SHUF_SY $0x03, yvec3, yvec3, yvec5;
- MUL_SY yvec1, yvec4, yvec6;
- ADD2_SY yvec6, yvec11, yvec11;
- MUL_SY yvec1, yvec5, yvec7;
- ADD2_SY yvec7, yvec9, yvec9;
- ADDQ $8*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
-
- .L10_loopE:
- #### Handle conjugation variants ####
- XOR_SY yvec7, yvec7, yvec7;
- #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
- ADDSUB_SY yvec15, yvec7, yvec15;
- ADDSUB_SY yvec13, yvec7, yvec13;
- ADDSUB_SY yvec11, yvec7, yvec11;
- ADDSUB_SY yvec9, yvec7, yvec9;
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- SUB_SY yvec15, yvec7, yvec15;
- SUB_SY yvec13, yvec7, yvec13;
- SUB_SY yvec11, yvec7, yvec11;
- SUB_SY yvec9, yvec7, yvec9;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- VPERMILP_SY $0xb1, yvec15, yvec15;
- VPERMILP_SY $0xb1, yvec13, yvec13;
- VPERMILP_SY $0xb1, yvec11, yvec11;
- VPERMILP_SY $0xb1, yvec9, yvec9;
- ADDSUB_SY yvec15, yvec7, yvec15;
- ADDSUB_SY yvec13, yvec7, yvec13;
- ADDSUB_SY yvec11, yvec7, yvec11;
- ADDSUB_SY yvec9, yvec7, yvec9;
- VPERMILP_SY $0xb1, yvec15, yvec15;
- VPERMILP_SY $0xb1, yvec13, yvec13;
- VPERMILP_SY $0xb1, yvec11, yvec11;
- VPERMILP_SY $0xb1, yvec9, yvec9;
- #endif
- ##### Load Alpha ####
- BROAD_SY MEMALPHA_R,yvec7;
- BROAD_SY MEMALPHA_I,yvec6;
- ##### Multiply Alpha ####
- VPERMILP_SY $0xb1,yvec15, yvec5;
- MUL_SY yvec15, yvec7, yvec15;
- MUL_SY yvec5, yvec6, yvec5;
- ADDSUB_SY yvec5, yvec15, yvec15;
- VPERMILP_SY $0xb1,yvec13, yvec3;
- MUL_SY yvec13, yvec7, yvec13;
- MUL_SY yvec3, yvec6, yvec3;
- ADDSUB_SY yvec3, yvec13, yvec13;
- VPERMILP_SY $0xb1,yvec11, yvec1;
- MUL_SY yvec11, yvec7, yvec11;
- MUL_SY yvec1, yvec6, yvec1;
- ADDSUB_SY yvec1, yvec11, yvec11;
- VPERMILP_SY $0xb1,yvec9, yvec5;
- MUL_SY yvec9, yvec7, yvec9;
- MUL_SY yvec5, yvec6, yvec5;
- ADDSUB_SY yvec5, yvec9, yvec9;
- #### Writing back ####
- #### Shuffle Results ####
- MOV_SY yvec15,yvec7;
- REVS_SY $0xe4,yvec13,yvec15,yvec15;
- REVS_SY $0xe4,yvec7,yvec13,yvec13;
- MOV_SY yvec11,yvec7;
- REVS_SY $0xe4,yvec9,yvec11,yvec11;
- REVS_SY $0xe4,yvec7,yvec9,yvec9;
- #### Writing back ####
- EXTRA_SY $1, yvec15, xvec7;
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec6, xvec6;
- LDH_SX 2*SIZE(C0), xvec6, xvec6;
- ADD_SX xvec6, xvec15, xvec15;
- #endif
- STL_SX xvec15, 0*SIZE(C0);
- STH_SX xvec15, 2*SIZE(C0);
- #ifndef TRMMKERNEL
- LDL_SX 4*SIZE(C1), xvec4, xvec4;
- LDH_SX 6*SIZE(C1), xvec4, xvec4;
- ADD_SX xvec4, xvec7, xvec7;
- #endif
- STL_SX xvec7, 4*SIZE(C1);
- STH_SX xvec7, 6*SIZE(C1);
-
- EXTRA_SY $1, yvec13, xvec5;
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0, ldc, 1), xvec4, xvec4;
- LDH_SX 2*SIZE(C0, ldc, 1), xvec4, xvec4;
- ADD_SX xvec4, xvec13, xvec13;
- #endif
- STL_SX xvec13, 0*SIZE(C0, ldc, 1);
- STH_SX xvec13, 2*SIZE(C0, ldc, 1);
- #ifndef TRMMKERNEL
- LDL_SX 4*SIZE(C1, ldc, 1), xvec2, xvec2;
- LDH_SX 6*SIZE(C1, ldc, 1), xvec2, xvec2;
- ADD_SX xvec2, xvec5, xvec5;
- #endif
- STL_SX xvec5, 4*SIZE(C1, ldc, 1);
- STH_SX xvec5, 6*SIZE(C1, ldc, 1);
-
- EXTRA_SY $1, yvec11, xvec3;
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C1), xvec2, xvec2;
- LDH_SX 2*SIZE(C1), xvec2, xvec2;
- ADD_SX xvec2, xvec11, xvec11;
- #endif
- STL_SX xvec11, 0*SIZE(C1);
- STH_SX xvec11, 2*SIZE(C1);
- #ifndef TRMMKERNEL
- LDL_SX 4*SIZE(C0), xvec0, xvec0;
- LDH_SX 6*SIZE(C0), xvec0, xvec0;
- ADD_SX xvec0, xvec3, xvec3;
- #endif
- STL_SX xvec3, 4*SIZE(C0);
- STH_SX xvec3, 6*SIZE(C0);
-
- EXTRA_SY $1, yvec9, xvec1;
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C1, ldc, 1), xvec0, xvec0;
- LDH_SX 2*SIZE(C1, ldc, 1), xvec0, xvec0;
- ADD_SX xvec0, xvec9, xvec9;
- #endif
- STL_SX xvec9, 0*SIZE(C1, ldc, 1);
- STH_SX xvec9, 2*SIZE(C1, ldc, 1);
- #ifndef TRMMKERNEL
- LDL_SX 4*SIZE(C0, ldc, 1), xvec6, xvec6;
- LDH_SX 6*SIZE(C0, ldc, 1), xvec6, xvec6;
- ADD_SX xvec6, xvec1, xvec1;
- #endif
- STL_SX xvec1, 4*SIZE(C0, ldc, 1);
- STH_SX xvec1, 6*SIZE(C0, ldc, 1);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
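- # TRMM bookkeeping sketch: %rax = (bk - kkk) K-steps left untouched,
- # scaled by ZBASE_SHIFT to bytes per complex; ptrba/ptrbb then skip those
- # steps times the 4-wide row/column tile.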
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $4, kk;
- #endif
-
- ADDQ $8*SIZE, C0;
- ADDQ $8*SIZE, C1;
-
- .L5_loopE:
- TEST $2, bm;
- JLE .L6_loopE;
- ALIGN_5
- .L6_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #### Initialize Result Registers ####
- XOR_SY yvec15, yvec15, yvec15;
- XOR_SY yvec14, yvec14, yvec14;
- XOR_SY yvec13, yvec13, yvec13;
- XOR_SY yvec12, yvec12, yvec12;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $2, %rax;
- #else
- ADDQ $4, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
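- # kkk is the effective K depth of this TRMM tile: bk - kk on one side of
- # the diagonal, kk plus the tile size (2 rows here, 4 columns) on the
- # other; a plain GEMM build just runs the full bk.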
- SARQ $2, k;
- JLE .L11_loopE;
- ALIGN_5
- .L11_bodyB:
- LD_SX 0*SIZE(ptrba), xvec0; # ar1, ai1, ar2, ai2
- EDUP_SX 0*SIZE(ptrbb), xvec2; # br1, br1, br2, br2
- SHUF_SX $0x4e, xvec2, xvec3; # br2, br2, br1, br1
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec14, xvec14;
-
- EDUP_SX 4*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec13, xvec13;
- MUL_SX xvec0, xvec5, xvec5;
- ADD1_SX xvec5, xvec12, xvec12;
-
- SHUF_SX $0xb1, xvec0, xvec1;
- ODUP_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0x4e, xvec2, xvec3;
- MUL_SX xvec1, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec14, xvec14;
-
- ODUP_SX 4*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec13, xvec13;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec12, xvec12;
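- # One K-step above is the usual complex multiply-accumulate; in scalar
- # form, for the non-conjugated case (ADD1/ADD2 fold the sign flips of the
- # other variants in at build time):
- #   c.r += a.r*b.r - a.i*b.i;
- #   c.i += a.i*b.r + a.r*b.i;
- # The 0x4e shuffles pair each 2-complex slice of A with the other column
- # pair of B; the write-back untangles the two pairings.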
-
- LD_SX 4*SIZE(ptrba), xvec0; # ar1, ai1, ar2, ai2
- EDUP_SX 8*SIZE(ptrbb), xvec2; # br1, br1, br2, br2
- SHUF_SX $0x4e, xvec2, xvec3; # br2, br2, br1, br1
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec14, xvec14;
-
- EDUP_SX 12*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec13, xvec13;
- MUL_SX xvec0, xvec5, xvec5;
- ADD1_SX xvec5, xvec12, xvec12;
-
- SHUF_SX $0xb1, xvec0, xvec1;
- ODUP_SX 8*SIZE(ptrbb), xvec2;
- SHUF_SX $0x4e, xvec2, xvec3;
- MUL_SX xvec1, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec14, xvec14;
-
- ODUP_SX 12*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec13, xvec13;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec12, xvec12;
-
- LD_SX 8*SIZE(ptrba), xvec0; # ar1, ai1, ar2, ai2
- EDUP_SX 16*SIZE(ptrbb), xvec2; # br1, br1, br2, br2
- SHUF_SX $0x4e, xvec2, xvec3; # br2, br2, br1, br1
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec14, xvec14;
-
- EDUP_SX 20*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec13, xvec13;
- MUL_SX xvec0, xvec5, xvec5;
- ADD1_SX xvec5, xvec12, xvec12;
-
- SHUF_SX $0xb1, xvec0, xvec1;
- ODUP_SX 16*SIZE(ptrbb), xvec2;
- SHUF_SX $0x4e, xvec2, xvec3;
- MUL_SX xvec1, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec14, xvec14;
-
- ODUP_SX 20*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec13, xvec13;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec12, xvec12;
-
- LD_SX 12*SIZE(ptrba), xvec0; # ar1, ai1, ar2, ai2
- EDUP_SX 24*SIZE(ptrbb), xvec2; # br1, br1, br2, br2
- SHUF_SX $0x4e, xvec2, xvec3; # br2, br2, br1, br1
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec14, xvec14;
-
- EDUP_SX 28*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec13, xvec13;
- MUL_SX xvec0, xvec5, xvec5;
- ADD1_SX xvec5, xvec12, xvec12;
-
- SHUF_SX $0xb1, xvec0, xvec1;
- ODUP_SX 24*SIZE(ptrbb), xvec2;
- SHUF_SX $0x4e, xvec2, xvec3;
- MUL_SX xvec1, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec14, xvec14;
-
- ODUP_SX 28*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec13, xvec13;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec12, xvec12;
- ADDQ $16*SIZE, ptrba;
- ADDQ $32*SIZE, ptrbb;
- DECQ k;
- JG .L11_bodyB;
- ALIGN_5
- .L11_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L12_loopE;
- ALIGN_5
- .L12_bodyB:
- LD_SX 0*SIZE(ptrba), xvec0; # ar1, ai1, ar2, ai2
- EDUP_SX 0*SIZE(ptrbb), xvec2; # br1, br1, br2, br2
- SHUF_SX $0x4e, xvec2, xvec3; # br2, br2, br1, br1
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec14, xvec14;
-
- EDUP_SX 4*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec13, xvec13;
- MUL_SX xvec0, xvec5, xvec5;
- ADD1_SX xvec5, xvec12, xvec12;
-
- SHUF_SX $0xb1, xvec0, xvec1;
- ODUP_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0x4e, xvec2, xvec3;
- MUL_SX xvec1, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec14, xvec14;
-
- ODUP_SX 4*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec13, xvec13;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec12, xvec12;
-
- LD_SX 4*SIZE(ptrba), xvec0; # ar1, ai1, ar2, ai2
- EDUP_SX 8*SIZE(ptrbb), xvec2; # br1, br1, br2, br2
- SHUF_SX $0x4e, xvec2, xvec3; # br2, br2, br1, br1
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec14, xvec14;
-
- EDUP_SX 12*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec13, xvec13;
- MUL_SX xvec0, xvec5, xvec5;
- ADD1_SX xvec5, xvec12, xvec12;
-
- SHUF_SX $0xb1, xvec0, xvec1;
- ODUP_SX 8*SIZE(ptrbb), xvec2;
- SHUF_SX $0x4e, xvec2, xvec3;
- MUL_SX xvec1, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec14, xvec14;
-
- ODUP_SX 12*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec13, xvec13;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec12, xvec12;
- ADDQ $8*SIZE, ptrba;
- ADDQ $16*SIZE, ptrbb;
-
- .L12_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L13_loopE;
- ALIGN_5
- .L13_bodyB:
- LD_SX 0*SIZE(ptrba), xvec0; # ar1, ai1, ar2, ai2
- EDUP_SX 0*SIZE(ptrbb), xvec2; # br1, br1, br2, br2
- SHUF_SX $0x4e, xvec2, xvec3; # br2, br2, br1, br1
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec14, xvec14;
-
- EDUP_SX 4*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec13, xvec13;
- MUL_SX xvec0, xvec5, xvec5;
- ADD1_SX xvec5, xvec12, xvec12;
-
- SHUF_SX $0xb1, xvec0, xvec1;
- ODUP_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0x4e, xvec2, xvec3;
- MUL_SX xvec1, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec14, xvec14;
-
- ODUP_SX 4*SIZE(ptrbb), xvec4;
- SHUF_SX $0x4e, xvec4, xvec5;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec13, xvec13;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec12, xvec12;
- ADDQ $4*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
-
- .L13_loopE:
- #### Handle conjugation ####
- #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec13, xvec7, xvec7;
- MOV_SX xvec7, xvec13;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec12, xvec7, xvec7;
- MOV_SX xvec7, xvec12;
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec13, xvec7, xvec7;
- MOV_SX xvec7, xvec13;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec12, xvec7, xvec7;
- MOV_SX xvec7, xvec12;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- SHUF_SX $0xb1, xvec15, xvec15;
- SHUF_SX $0xb1, xvec14, xvec14;
- SHUF_SX $0xb1, xvec13, xvec13;
- SHUF_SX $0xb1, xvec12, xvec12;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec13, xvec7, xvec7;
- MOV_SX xvec7, xvec13;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec12, xvec7, xvec7;
- MOV_SX xvec7, xvec12;
- SHUF_SX $0xb1, xvec15, xvec15;
- SHUF_SX $0xb1, xvec14, xvec14;
- SHUF_SX $0xb1, xvec13, xvec13;
- SHUF_SX $0xb1, xvec12, xvec12;
- #endif
- #### Load Alpha ####
- BROAD_SX MEMALPHA_R,xvec7;
- BROAD_SX MEMALPHA_I,xvec6;
- #### Multiply Alpha ####
- VPERMILP_SX $0xb1,xvec15, xvec5;
- MUL_SX xvec7, xvec15, xvec15;
- MUL_SX xvec6, xvec5, xvec5;
- ADDSUB_SX xvec5, xvec15, xvec15;
- VPERMILP_SX $0xb1,xvec14, xvec4;
- MUL_SX xvec7, xvec14, xvec14;
- MUL_SX xvec6, xvec4, xvec4;
- ADDSUB_SX xvec4, xvec14, xvec14;
- VPERMILP_SX $0xb1,xvec13, xvec3;
- MUL_SX xvec7, xvec13, xvec13;
- MUL_SX xvec6, xvec3, xvec3;
- ADDSUB_SX xvec3, xvec13, xvec13;
- VPERMILP_SX $0xb1,xvec12, xvec2;
- MUL_SX xvec7, xvec12, xvec12;
- MUL_SX xvec6, xvec2, xvec2;
- ADDSUB_SX xvec2, xvec12, xvec12;
- #### Writing back ####
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec0, xvec0;
- LDH_SX 2*SIZE(C0, ldc,1), xvec0, xvec0;
- LDL_SX 0*SIZE(C0, ldc,1), xvec1, xvec1;
- LDH_SX 2*SIZE(C0), xvec1, xvec1;
- LDL_SX 0*SIZE(C1), xvec2, xvec2;
- LDH_SX 2*SIZE(C1, ldc, 1), xvec2, xvec2;
- LDL_SX 0*SIZE(C1, ldc, 1), xvec3, xvec3;
- LDH_SX 2*SIZE(C1), xvec3, xvec3;
- ADD_SX xvec0, xvec15, xvec15;
- ADD_SX xvec1, xvec14, xvec14;
- ADD_SX xvec2, xvec13, xvec13;
- ADD_SX xvec3, xvec12, xvec12;
- #endif
- STL_SX xvec15, 0*SIZE(C0);
- STH_SX xvec15, 2*SIZE(C0, ldc, 1);
- STL_SX xvec14, 0*SIZE(C0, ldc, 1);
- STH_SX xvec14, 2*SIZE(C0);
- STL_SX xvec13, 0*SIZE(C1);
- STH_SX xvec13, 2*SIZE(C1, ldc, 1);
- STL_SX xvec12, 0*SIZE(C1, ldc, 1);
- STH_SX xvec12, 2*SIZE(C1);
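- # Criss-cross addressing: after the 0x4e column swizzle in the loop, the
- # low and high halves of each accumulator belong to different columns of
- # C, hence the mixed C0/C1 (and +ldc) load/store pairs above.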
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $2, kk;
- #endif
-
- ADDQ $4*SIZE, C0;
- ADDQ $4*SIZE, C1;
-
- .L6_loopE:
- TEST $1, bm;
- JLE .L7_loopE;
- ALIGN_5
- .L7_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- ADDQ %rax, ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- XOR_SY yvec15, yvec15, yvec15;
- XOR_SY yvec14, yvec14, yvec14;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $1, %rax;
- #else
- ADDQ $4, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L14_loopE;
- ALIGN_5
- .L14_bodyB:
- BROAD_SX 0*SIZE(ptrba), xvec0;
- LD_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- LD_SX 4*SIZE(ptrbb), xvec4;
- SHUF_SX $0xb1, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec14, xvec14;
-
- BROAD_SX 1*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec14, xvec14;
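- # m=1 step: broadcast the real part of the single A element against four
- # packed B values (ADD1 terms), then broadcast the imaginary part against
- # the 0xb1 re/im-swapped copy of B (ADD2 terms).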
-
- BROAD_SX 2*SIZE(ptrba), xvec0;
- LD_SX 8*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- LD_SX 12*SIZE(ptrbb), xvec4;
- SHUF_SX $0xb1, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec14, xvec14;
-
- BROAD_SX 3*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec14, xvec14;
-
- BROAD_SX 4*SIZE(ptrba), xvec0;
- LD_SX 16*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- LD_SX 20*SIZE(ptrbb), xvec4;
- SHUF_SX $0xb1, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec14, xvec14;
-
- BROAD_SX 5*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec14, xvec14;
-
- BROAD_SX 6*SIZE(ptrba), xvec0;
- LD_SX 24*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- LD_SX 28*SIZE(ptrbb), xvec4;
- SHUF_SX $0xb1, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec14, xvec14;
-
- BROAD_SX 7*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec14, xvec14;
- ADDQ $8*SIZE, ptrba;
- ADDQ $32*SIZE, ptrbb;
- DECQ k;
- JG .L14_bodyB;
- ALIGN_5
- .L14_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L15_loopE;
- ALIGN_5
- .L15_bodyB:
- BROAD_SX 0*SIZE(ptrba), xvec0;
- LD_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- LD_SX 4*SIZE(ptrbb), xvec4;
- SHUF_SX $0xb1, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec14, xvec14;
-
- BROAD_SX 1*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec14, xvec14;
-
- BROAD_SX 2*SIZE(ptrba), xvec0;
- LD_SX 8*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- LD_SX 12*SIZE(ptrbb), xvec4;
- SHUF_SX $0xb1, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec14, xvec14;
-
- BROAD_SX 3*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec14, xvec14;
- ADDQ $4*SIZE, ptrba;
- ADDQ $16*SIZE, ptrbb;
-
- .L15_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L16_loopE;
- ALIGN_5
- .L16_bodyB:
- BROAD_SX 0*SIZE(ptrba), xvec0;
- LD_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- LD_SX 4*SIZE(ptrbb), xvec4;
- SHUF_SX $0xb1, xvec4, xvec5;
- MUL_SX xvec0, xvec4, xvec4;
- ADD1_SX xvec4, xvec14, xvec14;
-
- BROAD_SX 1*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- MUL_SX xvec1, xvec5, xvec5;
- ADD2_SX xvec5, xvec14, xvec14;
- ADDQ $2*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
-
- .L16_loopE:
- #### Handle conjugation ####
- #if defined(NR) || defined(NC) || defined(TR) || defined(TC)
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- #elif defined(RN) || defined(RT) || defined(CN) || defined(CT)
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- SHUF_SX $0xb1, xvec15, xvec15;
- SHUF_SX $0xb1, xvec14, xvec14;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- SHUF_SX $0xb1, xvec15, xvec15;
- SHUF_SX $0xb1, xvec14, xvec14;
- #endif
- #### Load Alpha ####
- BROAD_SX MEMALPHA_R,xvec7;
- BROAD_SX MEMALPHA_I,xvec6;
- #### Multiply Alpha ####
- VPERMILP_SX $0xb1,xvec15, xvec5;
- MUL_SX xvec7, xvec15, xvec15;
- MUL_SX xvec6, xvec5, xvec5;
- ADDSUB_SX xvec5, xvec15, xvec15;
- VPERMILP_SX $0xb1,xvec14, xvec4;
- MUL_SX xvec7, xvec14, xvec14;
- MUL_SX xvec6, xvec4, xvec4;
- ADDSUB_SX xvec4, xvec14, xvec14;
- #### Writing back ####
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec0, xvec0;
- LDH_SX 0*SIZE(C0, ldc, 1), xvec0, xvec0;
- LDL_SX 0*SIZE(C1), xvec1, xvec1;
- LDH_SX 0*SIZE(C1, ldc, 1), xvec1, xvec1;
- ADD_SX xvec0, xvec15, xvec15;
- ADD_SX xvec1, xvec14, xvec14;
- #endif
- STL_SX xvec15, 0*SIZE(C0);
- STH_SX xvec15, 0*SIZE(C0, ldc, 1);
- STL_SX xvec14, 0*SIZE(C1);
- STH_SX xvec14, 0*SIZE(C1, ldc, 1);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- ADDQ %rax, ptrba;
- LEAQ (ptrbb, %rax, 4), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $1, kk;
- #endif
-
- ADDQ $2*SIZE, C0;
- ADDQ $2*SIZE, C1;
- .L7_loopE:
- #if defined(TRMMKERNEL) && !defined(LEFT)
- ADDQ $4, kk;
- #endif
- MOVQ bk,k;
- SALQ $5,k;
- ADDQ k,bb;
- LEAQ (C,ldc,4),C;
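- # Step past this n=4 panel: bk K-steps * 4 columns * 8 bytes per single
- # precision complex = bk << 5 bytes of packed B; C advances four columns.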
- .L0_bodyE:;
- DECQ j;
- JG .L0_bodyB;
- ALIGN_5;
- .L0_loopE:;
- TEST $2, bn;
- JLE .L20_loopE;
- ALIGN_5
- .L20_bodyB:
- #if defined(TRMMKERNEL) && defined(LEFT)
- MOVQ OFFSET, %rax;
- MOVQ %rax, kk;
- #endif
- MOVQ C, C0;
- LEAQ (C, ldc, 1), C1;
- MOVQ ba, ptrba;
- MOVQ bm, i;
- SARQ $3, i;
- JLE .L21_loopE;
- ALIGN_5
- .L21_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- XOR_SY yvec15, yvec15, yvec15;
- XOR_SY yvec14, yvec14, yvec14;
- XOR_SY yvec13, yvec13, yvec13;
- XOR_SY yvec12, yvec12, yvec12;
- XOR_SY yvec11, yvec11, yvec11;
- XOR_SY yvec10, yvec10, yvec10;
- XOR_SY yvec9, yvec9, yvec9;
- XOR_SY yvec8, yvec8, yvec8;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $8, %rax;
- #else
- ADDQ $2, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L211_loopE;
- ALIGN_5
- .L211_bodyB:
- EDUP_SX 0*SIZE(ptrbb), xvec4;
- ODUP_SX 0*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 0*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 4*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- LD_SX 8*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec13, xvec13;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec9, xvec9;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec13, xvec13;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec9, xvec9;
-
- LD_SX 12*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec12, xvec12;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec8, xvec8;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec12, xvec12;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec8, xvec8;
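- # 8x2 pattern: EDUP/ODUP duplicate the real and imaginary lanes of two B
- # columns, the 0x4e copies select the opposite column, and four 2-complex
- # slices of A stream through xvec0; xvec15-xvec12 accumulate the direct
- # column pairing and xvec11-xvec8 the swapped one.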
-
- EDUP_SX 4*SIZE(ptrbb), xvec4;
- ODUP_SX 4*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 16*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 20*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- LD_SX 24*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec13, xvec13;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec9, xvec9;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec13, xvec13;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec9, xvec9;
-
- LD_SX 28*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec12, xvec12;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec8, xvec8;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec12, xvec12;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec8, xvec8;
-
- EDUP_SX 8*SIZE(ptrbb), xvec4;
- ODUP_SX 8*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 32*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 36*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- LD_SX 40*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec13, xvec13;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec9, xvec9;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec13, xvec13;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec9, xvec9;
-
- LD_SX 44*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec12, xvec12;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec8, xvec8;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec12, xvec12;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec8, xvec8;
-
- EDUP_SX 12*SIZE(ptrbb), xvec4;
- ODUP_SX 12*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 48*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 52*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- LD_SX 56*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec13, xvec13;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec9, xvec9;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec13, xvec13;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec9, xvec9;
-
- LD_SX 60*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec12, xvec12;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec8, xvec8;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec12, xvec12;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec8, xvec8;
- ADDQ $64*SIZE, ptrba;
- ADDQ $16*SIZE, ptrbb;
- DECQ k;
- JG .L211_bodyB;
- ALIGN_5
- .L211_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L212_loopE;
- ALIGN_5
- .L212_bodyB:
- EDUP_SX 0*SIZE(ptrbb), xvec4;
- ODUP_SX 0*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 0*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 4*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- LD_SX 8*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec13, xvec13;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec9, xvec9;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec13, xvec13;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec9, xvec9;
-
- LD_SX 12*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec12, xvec12;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec8, xvec8;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec12, xvec12;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec8, xvec8;
-
- EDUP_SX 4*SIZE(ptrbb), xvec4;
- ODUP_SX 4*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 16*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 20*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- LD_SX 24*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec13, xvec13;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec9, xvec9;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec13, xvec13;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec9, xvec9;
-
- LD_SX 28*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec12, xvec12;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec8, xvec8;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec12, xvec12;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec8, xvec8;
- ADDQ $32*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
-
- .L212_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L213_loopE;
- ALIGN_5
- .L213_bodyB:
- EDUP_SX 0*SIZE(ptrbb), xvec4;
- ODUP_SX 0*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 0*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 4*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- LD_SX 8*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec13, xvec13;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec9, xvec9;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec13, xvec13;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec9, xvec9;
-
- LD_SX 12*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec12, xvec12;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec8, xvec8;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec12, xvec12;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec8, xvec8;
- ADDQ $16*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
-
- .L213_loopE:
- #### Handle conjugation ####
- #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec13, xvec7, xvec7;
- MOV_SX xvec7, xvec13;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec12, xvec7, xvec7;
- MOV_SX xvec7, xvec12;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec11, xvec7, xvec7;
- MOV_SX xvec7, xvec11;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec10, xvec7, xvec7;
- MOV_SX xvec7, xvec10;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec9, xvec7, xvec7;
- MOV_SX xvec7, xvec9;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec8, xvec7, xvec7;
- MOV_SX xvec7, xvec8;
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec13, xvec7, xvec7;
- MOV_SX xvec7, xvec13;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec12, xvec7, xvec7;
- MOV_SX xvec7, xvec12;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec11, xvec7, xvec7;
- MOV_SX xvec7, xvec11;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec10, xvec7, xvec7;
- MOV_SX xvec7, xvec10;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec9, xvec7, xvec7;
- MOV_SX xvec7, xvec9;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec8, xvec7, xvec7;
- MOV_SX xvec7, xvec8;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- SHUF_SX $0xb1, xvec15, xvec15;
- SHUF_SX $0xb1, xvec14, xvec14;
- SHUF_SX $0xb1, xvec13, xvec13;
- SHUF_SX $0xb1, xvec12, xvec12;
- SHUF_SX $0xb1, xvec11, xvec11;
- SHUF_SX $0xb1, xvec10, xvec10;
- SHUF_SX $0xb1, xvec9, xvec9;
- SHUF_SX $0xb1, xvec8, xvec8;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec13, xvec7, xvec7;
- MOV_SX xvec7, xvec13;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec12, xvec7, xvec7;
- MOV_SX xvec7, xvec12;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec11, xvec7, xvec7;
- MOV_SX xvec7, xvec11;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec10, xvec7, xvec7;
- MOV_SX xvec7, xvec10;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec9, xvec7, xvec7;
- MOV_SX xvec7, xvec9;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec8, xvec7, xvec7;
- MOV_SX xvec7, xvec8;
- SHUF_SX $0xb1, xvec15, xvec15;
- SHUF_SX $0xb1, xvec14, xvec14;
- SHUF_SX $0xb1, xvec13, xvec13;
- SHUF_SX $0xb1, xvec12, xvec12;
- SHUF_SX $0xb1, xvec11, xvec11;
- SHUF_SX $0xb1, xvec10, xvec10;
- SHUF_SX $0xb1, xvec9, xvec9;
- SHUF_SX $0xb1, xvec8, xvec8;
- #endif
- #### Multiply Alpha ####
- BROAD_SX MEMALPHA_R, xvec7;
- BROAD_SX MEMALPHA_I, xvec6;
- #### Writing back ####
- VPERMILP_SX $0xb1,xvec15, xvec5;
- MUL_SX xvec7, xvec15, xvec15;
- MUL_SX xvec6, xvec5, xvec5;
- ADDSUB_SX xvec5, xvec15, xvec15;
- VPERMILP_SX $0xb1,xvec14, xvec4;
- MUL_SX xvec7, xvec14, xvec14;
- MUL_SX xvec6, xvec4, xvec4;
- ADDSUB_SX xvec4, xvec14, xvec14;
- VPERMILP_SX $0xb1,xvec13, xvec3;
- MUL_SX xvec7, xvec13, xvec13;
- MUL_SX xvec6, xvec3, xvec3;
- ADDSUB_SX xvec3, xvec13, xvec13;
- VPERMILP_SX $0xb1,xvec12, xvec2;
- MUL_SX xvec7, xvec12, xvec12;
- MUL_SX xvec6, xvec2, xvec2;
- ADDSUB_SX xvec2, xvec12, xvec12;
- VPERMILP_SX $0xb1,xvec11, xvec1;
- MUL_SX xvec7, xvec11, xvec11;
- MUL_SX xvec6, xvec1, xvec1;
- ADDSUB_SX xvec1, xvec11, xvec11;
- VPERMILP_SX $0xb1,xvec10, xvec0;
- MUL_SX xvec7, xvec10, xvec10;
- MUL_SX xvec6, xvec0, xvec0;
- ADDSUB_SX xvec0, xvec10, xvec10;
- VPERMILP_SX $0xb1,xvec9, xvec5;
- MUL_SX xvec7, xvec9, xvec9;
- MUL_SX xvec6, xvec5, xvec5;
- ADDSUB_SX xvec5, xvec9, xvec9;
- VPERMILP_SX $0xb1,xvec8, xvec4;
- MUL_SX xvec7, xvec8, xvec8;
- MUL_SX xvec6, xvec4, xvec4;
- ADDSUB_SX xvec4, xvec8, xvec8;
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec0, xvec0;
- LDH_SX 2*SIZE(C1), xvec0, xvec0;
- LDL_SX 4*SIZE(C0), xvec1, xvec1;
- LDH_SX 6*SIZE(C1), xvec1, xvec1;
- LDL_SX 8*SIZE(C0), xvec2, xvec2;
- LDH_SX 10*SIZE(C1), xvec2, xvec2;
- LDL_SX 12*SIZE(C0), xvec3, xvec3;
- LDH_SX 14*SIZE(C1), xvec3, xvec3;
- ADD_SX xvec0, xvec15, xvec15;
- ADD_SX xvec1, xvec14, xvec14;
- ADD_SX xvec2, xvec13, xvec13;
- ADD_SX xvec3, xvec12, xvec12;
- #endif
- STL_SX xvec15, 0*SIZE(C0);
- STH_SX xvec15, 2*SIZE(C1);
- STL_SX xvec14, 4*SIZE(C0);
- STH_SX xvec14, 6*SIZE(C1);
- STL_SX xvec13, 8*SIZE(C0);
- STH_SX xvec13, 10*SIZE(C1);
- STL_SX xvec12, 12*SIZE(C0);
- STH_SX xvec12, 14*SIZE(C1);
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C1), xvec4, xvec4;
- LDH_SX 2*SIZE(C0), xvec4, xvec4;
- LDL_SX 4*SIZE(C1), xvec5, xvec5;
- LDH_SX 6*SIZE(C0), xvec5, xvec5;
- LDL_SX 8*SIZE(C1), xvec6, xvec6;
- LDH_SX 10*SIZE(C0), xvec6, xvec6;
- LDL_SX 12*SIZE(C1), xvec7, xvec7;
- LDH_SX 14*SIZE(C0), xvec7, xvec7;
- ADD_SX xvec4, xvec11, xvec11;
- ADD_SX xvec5, xvec10, xvec10;
- ADD_SX xvec6, xvec9, xvec9;
- ADD_SX xvec7, xvec8, xvec8;
- #endif
- STL_SX xvec11, 0*SIZE(C1);
- STH_SX xvec11, 2*SIZE(C0);
- STL_SX xvec10, 4*SIZE(C1);
- STH_SX xvec10, 6*SIZE(C0);
- STL_SX xvec9, 8*SIZE(C1);
- STH_SX xvec9, 10*SIZE(C0);
- STL_SX xvec8, 12*SIZE(C1);
- STH_SX xvec8, 14*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $8, kk;
- #endif
-
- ADDQ $16*SIZE, C0;
- ADDQ $16*SIZE, C1;
- DECQ i;
- JG .L21_bodyB;
- ALIGN_5
- .L21_loopE:
- TEST $4, bm;
- JLE .L22_loopE;
- ALIGN_5
- .L22_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- XOR_SY yvec15, yvec15, yvec15;
- XOR_SY yvec14, yvec14, yvec14;
- XOR_SY yvec11, yvec11, yvec11;
- XOR_SY yvec10, yvec10, yvec10;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $4, %rax;
- #else
- ADDQ $2, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
-
- SARQ $2, k;
- JLE .L221_loopE;
- ALIGN_5
- .L221_bodyB:
- EDUP_SX 0*SIZE(ptrbb), xvec4;
- ODUP_SX 0*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 0*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 4*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- #### Unroll 2 ####
- EDUP_SX 4*SIZE(ptrbb), xvec4;
- ODUP_SX 4*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 8*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 12*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- #### Unroll 3 ####
- EDUP_SX 8*SIZE(ptrbb), xvec4;
- ODUP_SX 8*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 16*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 20*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- #### Unroll 4 ####
- EDUP_SX 12*SIZE(ptrbb), xvec4;
- ODUP_SX 12*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 24*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 28*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
- ADDQ $32*SIZE, ptrba;
- ADDQ $16*SIZE, ptrbb;
- DECQ k;
- JG .L221_bodyB;
- ALIGN_5
- .L221_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L222_loopE;
- ALIGN_5
- .L222_bodyB:
- EDUP_SX 0*SIZE(ptrbb), xvec4;
- ODUP_SX 0*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 0*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 4*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
-
- #### Unroll 2 ####
- EDUP_SX 4*SIZE(ptrbb), xvec4;
- ODUP_SX 4*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 8*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 12*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
- ADDQ $16*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
-
- .L222_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L223_loopE;
- ALIGN_5
- .L223_bodyB:
- EDUP_SX 0*SIZE(ptrbb), xvec4;
- ODUP_SX 0*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 0*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- LD_SX 4*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec14, xvec14;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec10, xvec10;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec14, xvec14;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec10, xvec10;
- ADDQ $8*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
-
- .L223_loopE:
- #### Handle conjugation ####
- #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec11, xvec7, xvec7;
- MOV_SX xvec7, xvec11;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec10, xvec7, xvec7;
- MOV_SX xvec7, xvec10;
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec11, xvec7, xvec7;
- MOV_SX xvec7, xvec11;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec10, xvec7, xvec7;
- MOV_SX xvec7, xvec10;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- SHUF_SX $0xb1, xvec15, xvec15;
- SHUF_SX $0xb1, xvec14, xvec14;
- SHUF_SX $0xb1, xvec11, xvec11;
- SHUF_SX $0xb1, xvec10, xvec10;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec14, xvec7, xvec7;
- MOV_SX xvec7, xvec14;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec11, xvec7, xvec7;
- MOV_SX xvec7, xvec11;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec10, xvec7, xvec7;
- MOV_SX xvec7, xvec10;
- SHUF_SX $0xb1, xvec15, xvec15;
- SHUF_SX $0xb1, xvec14, xvec14;
- SHUF_SX $0xb1, xvec11, xvec11;
- SHUF_SX $0xb1, xvec10, xvec10;
- #endif
- #### Multiply Alpha ####
- BROAD_SX MEMALPHA_R, xvec7;
- BROAD_SX MEMALPHA_I, xvec6;
- #### Writing back ####
- VPERMILP_SX $0xb1,xvec15, xvec5;
- MUL_SX xvec7, xvec15, xvec15;
- MUL_SX xvec6, xvec5, xvec5;
- ADDSUB_SX xvec5, xvec15, xvec15;
- VPERMILP_SX $0xb1,xvec14, xvec4;
- MUL_SX xvec7, xvec14, xvec14;
- MUL_SX xvec6, xvec4, xvec4;
- ADDSUB_SX xvec4, xvec14, xvec14;
- VPERMILP_SX $0xb1,xvec11, xvec1;
- MUL_SX xvec7, xvec11, xvec11;
- MUL_SX xvec6, xvec1, xvec1;
- ADDSUB_SX xvec1, xvec11, xvec11;
- VPERMILP_SX $0xb1,xvec10, xvec0;
- MUL_SX xvec7, xvec10, xvec10;
- MUL_SX xvec6, xvec0, xvec0;
- ADDSUB_SX xvec0, xvec10, xvec10;
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec0, xvec0;
- LDH_SX 2*SIZE(C1), xvec0, xvec0;
- LDL_SX 4*SIZE(C0), xvec1, xvec1;
- LDH_SX 6*SIZE(C1), xvec1, xvec1;
- ADD_SX xvec0, xvec15, xvec15;
- ADD_SX xvec1, xvec14, xvec14;
- #endif
- STL_SX xvec15, 0*SIZE(C0);
- STH_SX xvec15, 2*SIZE(C1);
- STL_SX xvec14, 4*SIZE(C0);
- STH_SX xvec14, 6*SIZE(C1);
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C1), xvec4, xvec4;
- LDH_SX 2*SIZE(C0), xvec4, xvec4;
- LDL_SX 4*SIZE(C1), xvec5, xvec5;
- LDH_SX 6*SIZE(C0), xvec5, xvec5;
- ADD_SX xvec4, xvec11, xvec11;
- ADD_SX xvec5, xvec10, xvec10;
- #endif
- STL_SX xvec11, 0*SIZE(C1);
- STH_SX xvec11, 2*SIZE(C0);
- STL_SX xvec10, 4*SIZE(C1);
- STH_SX xvec10, 6*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $4, kk;
- #endif
-
- ADDQ $8*SIZE, C0;
- ADDQ $8*SIZE, C1;
-
- .L22_loopE:
- TEST $2, bm;
- JLE .L23_loopE;
- ALIGN_5
- .L23_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- XOR_SY yvec15, yvec15, yvec15;
- XOR_SY yvec11, yvec11, yvec11;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- ADDQ $2, %rax;
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L231_loopE;
- ALIGN_5
- .L231_bodyB:
- EDUP_SX 0*SIZE(ptrbb), xvec4;
- ODUP_SX 0*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 0*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- #### Unroll 2 ####
- EDUP_SX 4*SIZE(ptrbb), xvec4;
- ODUP_SX 4*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 4*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- #### Unroll 3 ####
- EDUP_SX 8*SIZE(ptrbb), xvec4;
- ODUP_SX 8*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 8*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- #### Unroll 4 ####
- EDUP_SX 12*SIZE(ptrbb), xvec4;
- ODUP_SX 12*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 12*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
- ADDQ $16*SIZE, ptrba;
- ADDQ $16*SIZE, ptrbb;
- DECQ k;
- JG .L231_bodyB;
- ALIGN_5
- .L231_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L232_loopE;
- ALIGN_5
- .L232_bodyB:
- EDUP_SX 0*SIZE(ptrbb), xvec4;
- ODUP_SX 0*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 0*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
-
- #### Unroll 2 ####
- EDUP_SX 4*SIZE(ptrbb), xvec4;
- ODUP_SX 4*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 4*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
- ADDQ $8*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
-
- .L232_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L233_loopE;
- ALIGN_5
- .L233_bodyB:
- EDUP_SX 0*SIZE(ptrbb), xvec4;
- ODUP_SX 0*SIZE(ptrbb), xvec5;
- SHUF_SX $0x4e, xvec4, xvec6;
- SHUF_SX $0x4e, xvec5, xvec7;
-
- LD_SX 0*SIZE(ptrba), xvec0;
- MOV_SX xvec0, xvec1;
- MUL_SX xvec4, xvec0, xvec0;
- ADD1_SX xvec0, xvec15, xvec15;
- SHUF_SX $0xb1, xvec1, xvec2;
- MUL_SX xvec6, xvec1, xvec1;
- ADD1_SX xvec1, xvec11, xvec11;
-
- MOV_SX xvec2, xvec3;
- MUL_SX xvec5, xvec2, xvec2;
- ADD2_SX xvec2, xvec15, xvec15;
- MUL_SX xvec7, xvec3, xvec3;
- ADD2_SX xvec3, xvec11, xvec11;
- ADDQ $4*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
-
- .L233_loopE:
- #### Handle conjugation ####
- #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec11, xvec7, xvec7;
- MOV_SX xvec7, xvec11;
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- SUB_SX xvec11, xvec7, xvec7;
- MOV_SX xvec7, xvec11;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- SHUF_SX $0xb1, xvec15, xvec15;
- SHUF_SX $0xb1, xvec11, xvec11;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- XOR_SY yvec7, yvec7, yvec7;
- ADDSUB_SX xvec11, xvec7, xvec7;
- MOV_SX xvec7, xvec11;
- SHUF_SX $0xb1, xvec15, xvec15;
- SHUF_SX $0xb1, xvec11, xvec11;
- #endif
- #### Multiply Alpha ####
- BROAD_SX MEMALPHA_R, xvec7;
- BROAD_SX MEMALPHA_I, xvec6;
- #### Writing back ####
- VPERMILP_SX $0xb1,xvec15, xvec5;
- MUL_SX xvec7, xvec15, xvec15;
- MUL_SX xvec6, xvec5, xvec5;
- ADDSUB_SX xvec5, xvec15, xvec15;
- VPERMILP_SX $0xb1,xvec11, xvec1;
- MUL_SX xvec7, xvec11, xvec11;
- MUL_SX xvec6, xvec1, xvec1;
- ADDSUB_SX xvec1, xvec11, xvec11;
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec0, xvec0;
- LDH_SX 2*SIZE(C1), xvec0, xvec0;
- ADD_SX xvec0, xvec15, xvec15;
- #endif
- STL_SX xvec15, 0*SIZE(C0);
- STH_SX xvec15, 2*SIZE(C1);
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C1), xvec4, xvec4;
- LDH_SX 2*SIZE(C0), xvec4, xvec4;
- ADD_SX xvec4, xvec11, xvec11;
- #endif
- STL_SX xvec11, 0*SIZE(C1);
- STH_SX xvec11, 2*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $2, kk;
- #endif
-
- ADDQ $4*SIZE, C0;
- ADDQ $4*SIZE, C1;
-
- .L23_loopE:
- TEST $1, bm;
- JLE .L24_loopE;
- ALIGN_5
- .L24_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- ADDQ %rax, ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- XOR_SY yvec15, yvec15, yvec15;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $1, %rax;
- #else
- ADDQ $2, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L241_loopE;
- ALIGN_5
- .L241_bodyB:
- BROAD_SX 0*SIZE(ptrba), xvec0;
- LD_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- BROAD_SX 1*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
-
- BROAD_SX 2*SIZE(ptrba), xvec0;
- LD_SX 4*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- BROAD_SX 3*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
-
- BROAD_SX 4*SIZE(ptrba), xvec0;
- LD_SX 8*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- BROAD_SX 5*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
-
- BROAD_SX 6*SIZE(ptrba), xvec0;
- LD_SX 12*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- BROAD_SX 7*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- ADDQ $8*SIZE, ptrba;
- ADDQ $16*SIZE, ptrbb;
- DECQ k;
- JG .L241_bodyB;
- ALIGN_5
- .L241_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L242_loopE;
- ALIGN_5
- .L242_bodyB:
- BROAD_SX 0*SIZE(ptrba), xvec0;
- LD_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- BROAD_SX 1*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
-
- BROAD_SX 2*SIZE(ptrba), xvec0;
- LD_SX 4*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- BROAD_SX 3*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- ADDQ $4*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
-
- .L242_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L243_loopE;
- ALIGN_5
- .L243_bodyB:
- BROAD_SX 0*SIZE(ptrba), xvec0;
- LD_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0xb1, xvec2, xvec3;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
-
- BROAD_SX 1*SIZE(ptrba), xvec1;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- ADDQ $2*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
-
- .L243_loopE:
- #### Handle: sign fix-up for the conjugate kernel variants ####
- XOR_SY yvec7, yvec7, yvec7;
- #if defined(NR) || defined(NC) || defined(TR) || defined(TC)
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- #elif defined(RN) || defined(RT) || defined(CN) || defined(CT)
- SUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- SHUF_SX $0xb1, xvec15, xvec15;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- SHUF_SX $0xb1, xvec15, xvec15;
- #endif
- #### Load Alpha ####
- BROAD_SX MEMALPHA_R, xvec7;
- BROAD_SX MEMALPHA_I, xvec6;
- #### Multiply Alpha ####
- VPERMILP_SX $0xb1, xvec15, xvec5;
- MUL_SX xvec7, xvec15, xvec15;
- MUL_SX xvec6, xvec5, xvec5;
- ADDSUB_SX xvec5, xvec15, xvec15;
- #### Writing back ####
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec0, xvec0;
- LDH_SX 0*SIZE(C1), xvec0, xvec0;
- ADD_SX xvec0, xvec15, xvec15;
- #endif
- STL_SX xvec15, 0*SIZE(C0);
- STH_SX xvec15, 0*SIZE(C1);
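- # TRMM: step ptrba/ptrbb past the (bk - kkk) iterations this block did not consume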
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- ADDQ %rax, ptrba;
- LEAQ (ptrbb, %rax, 2), ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $1, kk;
- #endif
- ADDQ $2*SIZE, C0;
- ADDQ $2*SIZE, C1;
- .L24_loopE:
- #if defined(TRMMKERNEL) && !defined(LEFT)
- ADDQ $2, kk;
- #endif
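- # advance bb past this 2-column panel (bk * 2 complex floats = bk*16 bytes)
- # and C by two columns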
- MOVQ bk, k;
- SALQ $4, k;
- ADDQ k, bb;
- LEAQ (C, ldc, 2), C;
- .L20_loopE:
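- # handle the last single column when bn is odd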
- TEST $1, bn;
- JLE .L30_loopE;
- ALIGN_5
- .L30_bodyB:
- #if defined(TRMMKERNEL) && defined(LEFT)
- MOVQ OFFSET, %rax;
- MOVQ %rax, kk;
- #endif
- MOVQ C, C0;
- MOVQ ba, ptrba;
- MOVQ bm, i;
- SARQ $3, i;
- JLE .L31_loopE;
- ALIGN_5
- .L31_bodyB:
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- XOR_SY yvec15, yvec15, yvec15;
- XOR_SY yvec14, yvec14, yvec14;
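- # yvec15 accumulates complex results 0-3, yvec14 results 4-7 of the 8x1 block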
- MOVQ bk, k;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $8, %rax;
- #else
- ADDQ $1, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L311_loopE;
- ALIGN_5
- .L311_bodyB:
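- # 8x1 micro-kernel, k unrolled by 4: each step loads eight complex A elements,
- # broadcasts the real then the imaginary part of one B element, and uses
- # VPERMILP $0xb1 to swap re/im in A for the imaginary-part products.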
- #### Unroll 1 ####
- LD_SY 0*SIZE(ptrba), yvec0;
- LD_SY 8*SIZE(ptrba), yvec1;
- BROAD_SY 0*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- MUL_SY yvec1, yvec2, yvec7;
- ADD1_SY yvec7, yvec14, yvec14;
-
- BROAD_SY 1*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- VPERMILP_SY $0xb1, yvec1, yvec5;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- MUL_SY yvec5, yvec3, yvec7;
- ADD2_SY yvec7, yvec14, yvec14;
-
- #### Unroll 2 ####
- LD_SY 16*SIZE(ptrba), yvec0;
- LD_SY 24*SIZE(ptrba), yvec1;
- BROAD_SY 2*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- MUL_SY yvec1, yvec2, yvec7;
- ADD1_SY yvec7, yvec14, yvec14;
-
- BROAD_SY 3*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- VPERMILP_SY $0xb1, yvec1, yvec5;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- MUL_SY yvec5, yvec3, yvec7;
- ADD2_SY yvec7, yvec14, yvec14;
-
- #### Unroll 3 ####
- LD_SY 32*SIZE(ptrba), yvec0;
- LD_SY 40*SIZE(ptrba), yvec1;
- BROAD_SY 4*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- MUL_SY yvec1, yvec2, yvec7;
- ADD1_SY yvec7, yvec14, yvec14;
-
- BROAD_SY 5*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- VPERMILP_SY $0xb1, yvec1, yvec5;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- MUL_SY yvec5, yvec3, yvec7;
- ADD2_SY yvec7, yvec14, yvec14;
-
- #### Unroll 4 ####
- LD_SY 48*SIZE(ptrba), yvec0;
- LD_SY 56*SIZE(ptrba), yvec1;
- BROAD_SY 6*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- MUL_SY yvec1, yvec2, yvec7;
- ADD1_SY yvec7, yvec14, yvec14;
-
- BROAD_SY 7*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- VPERMILP_SY $0xb1, yvec1, yvec5;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- MUL_SY yvec5, yvec3, yvec7;
- ADD2_SY yvec7, yvec14, yvec14;
- ADDQ $64*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
- DECQ k;
- JG .L311_bodyB;
- ALIGN_5
- .L311_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L312_loopE;
- ALIGN_5
- .L312_bodyB:
- #### Unroll 1 ####
- LD_SY 0*SIZE(ptrba), yvec0;
- LD_SY 8*SIZE(ptrba), yvec1;
- BROAD_SY 0*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- MUL_SY yvec1, yvec2, yvec7;
- ADD1_SY yvec7, yvec14, yvec14;
-
- BROAD_SY 1*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- VPERMILP_SY $0xb1, yvec1, yvec5;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- MUL_SY yvec5, yvec3, yvec7;
- ADD2_SY yvec7, yvec14, yvec14;
-
- #### Unroll 2 ####
- LD_SY 16*SIZE(ptrba), yvec0;
- LD_SY 24*SIZE(ptrba), yvec1;
- BROAD_SY 2*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- MUL_SY yvec1, yvec2, yvec7;
- ADD1_SY yvec7, yvec14, yvec14;
-
- BROAD_SY 3*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- VPERMILP_SY $0xb1, yvec1, yvec5;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- MUL_SY yvec5, yvec3, yvec7;
- ADD2_SY yvec7, yvec14, yvec14;
- ADDQ $32*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
-
- .L312_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L313_loopE;
- ALIGN_5
- .L313_bodyB:
- #### Unroll 1 ####
- LD_SY 0*SIZE(ptrba), yvec0;
- LD_SY 8*SIZE(ptrba), yvec1;
- BROAD_SY 0*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
- MUL_SY yvec1, yvec2, yvec7;
- ADD1_SY yvec7, yvec14, yvec14;
-
- BROAD_SY 1*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- VPERMILP_SY $0xb1, yvec1, yvec5;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- MUL_SY yvec5, yvec3, yvec7;
- ADD2_SY yvec7, yvec14, yvec14;
- ADDQ $16*SIZE, ptrba;
- ADDQ $2*SIZE, ptrbb;
-
- .L313_loopE:
- #### Handle: sign fix-up for the conjugate kernel variants ####
- XOR_SY yvec7, yvec7, yvec7;
- #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
- ADDSUB_SY yvec15, yvec7, yvec15;
- ADDSUB_SY yvec14, yvec7, yvec14;
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- SUB_SY yvec15, yvec7, yvec15;
- SUB_SY yvec14, yvec7, yvec14;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- VPERMILP_SY $0xb1, yvec15, yvec15;
- VPERMILP_SY $0xb1, yvec14, yvec14;
- ADDSUB_SY yvec15, yvec7, yvec15;
- ADDSUB_SY yvec14, yvec7, yvec14;
- VPERMILP_SY $0xb1, yvec15, yvec15;
- VPERMILP_SY $0xb1, yvec14, yvec14;
- #endif
- #### Load Alpha ####
- BROAD_SY MEMALPHA_R, yvec7;
- BROAD_SY MEMALPHA_I, yvec6;
- #### Multiply Alpha ####
- VPERMILP_SY $0xb1, yvec15, yvec5;
- MUL_SY yvec15, yvec7, yvec15;
- MUL_SY yvec5, yvec6, yvec5;
- ADDSUB_SY yvec5, yvec15, yvec15;
- VPERMILP_SY $0xb1,yvec14, yvec4;
- MUL_SY yvec14, yvec7, yvec14;
- MUL_SY yvec4, yvec6, yvec4;
- ADDSUB_SY yvec4, yvec14, yvec14;
- #### Writing back ####
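- # split each 256-bit accumulator: the upper 128 bits go to xvec7/xvec6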
- EXTRA_SY $1, yvec15, xvec7;
- EXTRA_SY $1, yvec14, xvec6;
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec0, xvec0;
- LDH_SX 2*SIZE(C0), xvec0, xvec0;
- LDL_SX 4*SIZE(C0), xvec1, xvec1;
- LDH_SX 6*SIZE(C0), xvec1, xvec1;
- LDL_SX 8*SIZE(C0), xvec2, xvec2;
- LDH_SX 10*SIZE(C0), xvec2, xvec2;
- LDL_SX 12*SIZE(C0), xvec3, xvec3;
- LDH_SX 14*SIZE(C0), xvec3, xvec3;
- ADD_SX xvec0, xvec15, xvec15;
- ADD_SX xvec1, xvec7, xvec7;
- ADD_SX xvec2, xvec14, xvec14;
- ADD_SX xvec3, xvec6, xvec6;
- #endif
- STL_SX xvec15, 0*SIZE(C0);
- STH_SX xvec15, 2*SIZE(C0);
- STL_SX xvec7, 4*SIZE(C0);
- STH_SX xvec7, 6*SIZE(C0);
- STL_SX xvec14, 8*SIZE(C0);
- STH_SX xvec14, 10*SIZE(C0);
- STL_SX xvec6, 12*SIZE(C0);
- STH_SX xvec6, 14*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 8), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $8, kk;
- #endif
- ADDQ $16*SIZE, C0;
- DECQ i;
- JG .L31_bodyB;
- ALIGN_5
- .L31_loopE:
- TEST $4, bm;
- JLE .L32_loopE;
- ALIGN_5
- .L32_bodyB:
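- # 4x1 block: a single ymm accumulator (yvec15) holds four complex results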
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- XOR_SY yvec15, yvec15, yvec15;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $4, %rax;
- #else
- ADDQ $1, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L321_loopE;
- ALIGN_5
- .L321_bodyB:
- #### Unroll 1 ####
- LD_SY 0*SIZE(ptrba), yvec0;
- BROAD_SY 0*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
-
- BROAD_SY 1*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
-
- #### Unroll 2 ####
- LD_SY 8*SIZE(ptrba), yvec0;
- BROAD_SY 2*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
-
- BROAD_SY 3*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
-
- #### Unroll 3 ####
- LD_SY 16*SIZE(ptrba), yvec0;
- BROAD_SY 4*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
-
- BROAD_SY 5*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
-
- #### Unroll 4 ####
- LD_SY 24*SIZE(ptrba), yvec0;
- BROAD_SY 6*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
-
- BROAD_SY 7*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- ADDQ $32*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
- DECQ k;
- JG .L321_bodyB;
- ALIGN_5
- .L321_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L322_loopE;
- ALIGN_5
- .L322_bodyB:
- #### Unroll 1 ####
- LD_SY 0*SIZE(ptrba), yvec0;
- BROAD_SY 0*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
-
- BROAD_SY 1*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
-
- #### Unroll 2 ####
- LD_SY 8*SIZE(ptrba), yvec0;
- BROAD_SY 2*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
-
- BROAD_SY 3*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- ADDQ $16*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
-
- .L322_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L323_loopE;
- ALIGN_5
- .L323_bodyB:
- #### Unroll 1 ####
- LD_SY 0*SIZE(ptrba), yvec0;
- BROAD_SY 0*SIZE(ptrbb), yvec2;
- MUL_SY yvec0, yvec2, yvec6;
- ADD1_SY yvec6, yvec15, yvec15;
-
- BROAD_SY 1*SIZE(ptrbb), yvec3;
- VPERMILP_SY $0xb1, yvec0, yvec4;
- MUL_SY yvec4, yvec3, yvec6;
- ADD2_SY yvec6, yvec15, yvec15;
- ADDQ $8*SIZE, ptrba;
- ADDQ $2*SIZE, ptrbb;
-
- .L323_loopE:
- #### Handle: sign fix-up for the conjugate kernel variants ####
- XOR_SY yvec7, yvec7, yvec7;
- #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
- ADDSUB_SY yvec15, yvec7, yvec15;
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- SUB_SY yvec15, yvec7, yvec15;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- VPERMILP_SY $0xb1, yvec15, yvec15;
- ADDSUB_SY yvec15, yvec7, yvec15;
- VPERMILP_SY $0xb1, yvec15, yvec15;
- #endif
- #### Load Alpha ####
- BROAD_SY MEMALPHA_R, yvec7;
- BROAD_SY MEMALPHA_I, yvec6;
- #### Multiply Alpha ####
- VPERMILP_SY $0xb1, yvec15, yvec5;
- MUL_SY yvec15, yvec7, yvec15;
- MUL_SY yvec5, yvec6, yvec5;
- ADDSUB_SY yvec5, yvec15, yvec15;
- #### Writing back ####
- EXTRA_SY $1, yvec15, xvec7;
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec0, xvec0;
- LDH_SX 2*SIZE(C0), xvec0, xvec0;
- LDL_SX 4*SIZE(C0), xvec1, xvec1;
- LDH_SX 6*SIZE(C0), xvec1, xvec1;
- ADD_SX xvec0, xvec15, xvec15;
- ADD_SX xvec1, xvec7, xvec7;
- #endif
- STL_SX xvec15, 0*SIZE(C0);
- STH_SX xvec15, 2*SIZE(C0);
- STL_SX xvec7, 4*SIZE(C0);
- STH_SX xvec7, 6*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 4), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $4, kk;
- #endif
- ADDQ $8*SIZE, C0;
-
- .L32_loopE:
- TEST $2, bm;
- JLE .L33_loopE;
- ALIGN_5
- .L33_bodyB:
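- # 2x1 block: xmm accumulator xvec15 holds two complex results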
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- XOR_SY yvec15, yvec15, yvec15;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- #ifdef LEFT
- ADDQ $2, %rax;
- #else
- ADDQ $1, %rax;
- #endif
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L331_loopE;
- ALIGN_5
- .L331_bodyB:
- #### Unroll 1 ####
- LD_SX 0*SIZE(ptrba), xvec0;
- BROAD_SX 0*SIZE(ptrbb), xvec2;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- SHUF_SX $0xb1, xvec0, xvec1;
- BROAD_SX 1*SIZE(ptrbb), xvec3;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
-
- #### Unroll 2 ####
- LD_SX 4*SIZE(ptrba), xvec0;
- BROAD_SX 2*SIZE(ptrbb), xvec2;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- SHUF_SX $0xb1, xvec0, xvec1;
- BROAD_SX 3*SIZE(ptrbb), xvec3;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
-
- #### Unroll 3 ####
- LD_SX 8*SIZE(ptrba), xvec0;
- BROAD_SX 4*SIZE(ptrbb), xvec2;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- SHUF_SX $0xb1, xvec0, xvec1;
- BROAD_SX 5*SIZE(ptrbb), xvec3;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
-
- #### Unroll 4 ####
- LD_SX 12*SIZE(ptrba), xvec0;
- BROAD_SX 6*SIZE(ptrbb), xvec2;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- SHUF_SX $0xb1, xvec0, xvec1;
- BROAD_SX 7*SIZE(ptrbb), xvec3;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- ADDQ $16*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
- DECQ k;
- JG .L331_bodyB;
- ALIGN_5
- .L331_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L332_loopE;
- ALIGN_5
- .L332_bodyB:
- #### Unroll 1 ####
- LD_SX 0*SIZE(ptrba), xvec0;
- BROAD_SX 0*SIZE(ptrbb), xvec2;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- SHUF_SX $0xb1, xvec0, xvec1;
- BROAD_SX 1*SIZE(ptrbb), xvec3;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
-
- #### Unroll 2 ####
- LD_SX 4*SIZE(ptrba), xvec0;
- BROAD_SX 2*SIZE(ptrbb), xvec2;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- SHUF_SX $0xb1, xvec0, xvec1;
- BROAD_SX 3*SIZE(ptrbb), xvec3;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- ADDQ $8*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
-
- .L332_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L333_loopE;
- ALIGN_5
- .L333_bodyB:
- #### Unroll 1 ####
- LD_SX 0*SIZE(ptrba), xvec0;
- BROAD_SX 0*SIZE(ptrbb), xvec2;
- MUL_SX xvec0, xvec2, xvec2;
- ADD1_SX xvec2, xvec15, xvec15;
- SHUF_SX $0xb1, xvec0, xvec1;
- BROAD_SX 1*SIZE(ptrbb), xvec3;
- MUL_SX xvec1, xvec3, xvec3;
- ADD2_SX xvec3, xvec15, xvec15;
- ADDQ $4*SIZE, ptrba;
- ADDQ $2*SIZE, ptrbb;
-
- .L333_loopE:
- #### Handle: sign fix-up for the conjugate kernel variants ####
- XOR_SY yvec7, yvec7, yvec7;
- #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- SUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- SHUF_SX $0xb1, xvec15, xvec15;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- SHUF_SX $0xb1, xvec15, xvec15;
- #endif
- #### Load Alpha ####
- BROAD_SX MEMALPHA_R, xvec7;
- BROAD_SX MEMALPHA_I, xvec6;
- #### Multiply Alpha ####
- VPERMILP_SX $0xb1,xvec15, xvec5;
- MUL_SX xvec7, xvec15, xvec15;
- MUL_SX xvec6, xvec5, xvec5;
- ADDSUB_SX xvec5, xvec15, xvec15;
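- #### Writing back ####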
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec0, xvec0;
- LDH_SX 2*SIZE(C0), xvec0, xvec0;
- ADD_SX xvec0, xvec15, xvec15;
- #endif
- STL_SX xvec15, 0*SIZE(C0);
- STH_SX xvec15, 2*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- LEAQ (ptrba, %rax, 2), ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $2, kk;
- #endif
- ADDQ $4*SIZE, C0;
-
- .L33_loopE:
- TEST $1, bm;
- JLE .L34_loopE;
- ALIGN_5
- .L34_bodyB:
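- # 1x1 block: each LD_SX covers two k iterations (two complex pairs), so
- # xvec15 carries two partial sums that are folded after the k loop.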
- #if !defined(TRMMKERNEL)||(defined(TRMMKERNEL)&&defined(LEFT)&&defined(TRANSA))||(defined(TRMMKERNEL)&&!defined(LEFT)&&!defined(TRANSA))
- MOVQ bb,ptrbb;
- #else
- MOVQ bb, ptrbb;
- MOVQ kk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- ADDQ %rax, ptrba;
- ADDQ %rax, ptrbb;
- #endif
- XOR_SY yvec15, yvec15, yvec15;
- #ifndef TRMMKERNEL
- MOVQ bk,k;
- #elif (defined(LEFT)&&!defined(TRANSA))||(!defined(LEFT)&&defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kk, %rax;
- MOVQ %rax, kkk;
- #else
- MOVQ kk, %rax;
- ADDQ $1, %rax;
- MOVQ %rax, kkk;
- #endif
- SARQ $2, k;
- JLE .L341_loopE;
- ALIGN_5
- .L341_bodyB:
- LD_SX 0*SIZE(ptrba), xvec0;
- LD_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0xa0, xvec2, xvec3;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec15, xvec15;
- SHUF_SX $0xb1, xvec0, xvec1;
- SHUF_SX $0xf5, xvec2, xvec4;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec15, xvec15;
-
- LD_SX 4*SIZE(ptrba), xvec0;
- LD_SX 4*SIZE(ptrbb), xvec2;
- SHUF_SX $0xa0, xvec2, xvec3;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec15, xvec15;
- SHUF_SX $0xb1, xvec0, xvec1;
- SHUF_SX $0xf5, xvec2, xvec4;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec15, xvec15;
- ADDQ $8*SIZE, ptrba;
- ADDQ $8*SIZE, ptrbb;
- DECQ k;
- JG .L341_bodyB;
- ALIGN_5
- .L341_loopE:
- #ifndef TRMMKERNEL
- TEST $2, bk;
- #else
- TEST $2, kkk;
- #endif
- JLE .L342_loopE;
- ALIGN_5
- .L342_bodyB:
- LD_SX 0*SIZE(ptrba), xvec0;
- LD_SX 0*SIZE(ptrbb), xvec2;
- SHUF_SX $0xa0, xvec2, xvec3;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec15, xvec15;
- SHUF_SX $0xb1, xvec0, xvec1;
- SHUF_SX $0xf5, xvec2, xvec4;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec15, xvec15;
- ADDQ $4*SIZE, ptrba;
- ADDQ $4*SIZE, ptrbb;
-
- .L342_loopE:
- #ifndef TRMMKERNEL
- TEST $1, bk;
- #else
- TEST $1, kkk;
- #endif
- JLE .L343_loopE;
- ALIGN_5
- .L343_bodyB:
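- # single-k tail: the registers are zeroed first so LDL_SX can load just one
- # complex pair into the low half without touching the upper half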
- XOR_SY yvec0, yvec0, yvec0;
- XOR_SY yvec2, yvec2, yvec2;
- LDL_SX 0*SIZE(ptrba), xvec0, xvec0;
- LDL_SX 0*SIZE(ptrbb), xvec2, xvec2;
- SHUF_SX $0xe0, xvec2, xvec3;
- MUL_SX xvec0, xvec3, xvec3;
- ADD1_SX xvec3, xvec15, xvec15;
- SHUF_SX $0xe1, xvec0, xvec1;
- SHUF_SX $0xe5, xvec2, xvec4;
- MUL_SX xvec1, xvec4, xvec4;
- ADD2_SX xvec4, xvec15, xvec15;
- ADDQ $2*SIZE, ptrba;
- ADDQ $2*SIZE, ptrbb;
-
- .L343_loopE:
- #### Handle: sign fix-up for the conjugate kernel variants ####
- XOR_SY yvec7, yvec7, yvec7;
- #if defined(RN) || defined(RT) || defined(CN) || defined(CT)
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- #elif defined(NR) || defined(NC) || defined(TR) || defined(TC)
- SUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- #elif defined(RR) || defined(RC) || defined(CR) || defined(CC)
- SHUF_SX $0xb1, xvec15, xvec15;
- ADDSUB_SX xvec15, xvec7, xvec7;
- MOV_SX xvec7, xvec15;
- SHUF_SX $0xb1, xvec15, xvec15;
- #endif
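- #### Load Alpha ####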
- BROAD_SX MEMALPHA_R, xvec7;
- BROAD_SX MEMALPHA_I, xvec6;
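- #### Multiply Alpha ####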
- VPERMILP_SX $0xb1, xvec15, xvec5;
- MUL_SX xvec7, xvec15, xvec15;
- MUL_SX xvec6, xvec5, xvec5;
- ADDSUB_SX xvec5, xvec15, xvec15;
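- # fold the two k-partial sums (low and high complex pair) into one result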
- SHUF_SX $0x44, xvec15, xvec14;
- SHUF_SX $0xee, xvec15, xvec13;
- ADD_SX xvec13, xvec14, xvec14;
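- #### Writing back ####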
- #ifndef TRMMKERNEL
- LDL_SX 0*SIZE(C0), xvec0, xvec0;
- ADD_SX xvec0, xvec14, xvec14;
- #endif
- STL_SX xvec14, 0*SIZE(C0);
- #if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA))||(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
- MOVQ bk, %rax;
- SUBQ kkk, %rax;
- SALQ $ZBASE_SHIFT, %rax;
- ADDQ %rax, ptrba;
- ADDQ %rax, ptrbb;
- #endif
- #if defined(TRMMKERNEL) && defined(LEFT)
- ADDQ $1, kk;
- #endif
- ADDQ $2*SIZE, C0;
-
- .L34_loopE:
- #if defined(TRMMKERNEL) && !defined(LEFT)
- ADDQ $1, kk;
- #endif
-
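- # advance bb past the single-column panel (bk complex floats = bk*8 bytes)
- # and C by one column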
- MOVQ bk, k;
- SALQ $3, k;
- ADDQ k, bb;
- ADDQ ldc, C;
- .L30_loopE:
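- # epilogue: restore callee-saved registers from the stack save area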
- movq 0(%rsp), %rbx;
- movq 8(%rsp), %rbp;
- movq 16(%rsp), %r12;
- movq 24(%rsp), %r13;
- movq 32(%rsp), %r14;
- movq 40(%rsp), %r15;
-
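- # clear upper ymm state before returning to possibly-SSE callers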
- vzeroupper
-
- #ifdef WINDOWS_ABI
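- # Windows x64 additionally treats rdi, rsi and xmm6-xmm15 as callee-saved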
- movq 48(%rsp), %rdi
- movq 56(%rsp), %rsi
- movups 64(%rsp), %xmm6
- movups 80(%rsp), %xmm7
- movups 96(%rsp), %xmm8
- movups 112(%rsp), %xmm9
- movups 128(%rsp), %xmm10
- movups 144(%rsp), %xmm11
- movups 160(%rsp), %xmm12
- movups 176(%rsp), %xmm13
- movups 192(%rsp), %xmm14
- movups 208(%rsp), %xmm15
- #endif
-
- addq $STACKSIZE, %rsp;
- ret
-
- EPILOGUE