- /***************************************************************************
- Copyright (c) 2013-2016, The OpenBLAS Project
- All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- 3. Neither the name of the OpenBLAS project nor the names of
- its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
- /**************************************************************************************
- * 2016/03/05 Werner Saar (wernsaar@googlemail.com)
- * BLASTEST : OK
- * CTEST : OK
- * TEST : OK
- * LAPACK-TEST : OK
- **************************************************************************************/
-
-
- #if defined(NN) || defined(NT) || defined(TN) || defined(TT)
-
- #define XSFADD_R1 xsadddp
- #define XSFADD_R2 xssubdp
- #define XSFADD_I1 xsadddp
- #define XSFADD_I2 xsadddp
-
- #elif defined(CN) || defined(CT) || defined(RN) || defined(RT)
-
- #define XSFADD_R1 xsadddp
- #define XSFADD_R2 xsadddp
- #define XSFADD_I1 xssubdp
- #define XSFADD_I2 xsadddp
-
- #elif defined(NC) || defined(TC) || defined(NR) || defined(TR)
-
- #define XSFADD_R1 xsadddp
- #define XSFADD_R2 xsadddp
- #define XSFADD_I1 xsadddp
- #define XSFADD_I2 xssubdp
-
- #else // CC || CR || RC || RR
-
- #define XSFADD_R1 xsadddp
- #define XSFADD_R2 xssubdp
- #define XSFADD_I1 xssubdp
- #define XSFADD_I2 xssubdp
-
- #endif
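- 
- /* Illustrative note (added commentary, not original kernel code): the
-  * add/sub choices above pick the signs used when the SAVE macros reduce
-  * the four partial products of (ar + i*ai)*(br + i*bi) and its conjugated
-  * variants. A minimal C sketch of that reduction, assuming s_r2, s_i1 and
-  * s_i2 are +1.0 or -1.0 exactly as XSFADD_R2, XSFADD_I1 and XSFADD_I2
-  * resolve to xsadddp or xssubdp:
-  *
-  *   // p_rr = ar*br, p_ii = ai*bi, p_ri = ar*bi, p_ir = ai*br
-  *   double re = p_rr + s_r2 * p_ii;         // XSFADD_R1, then XSFADD_R2
-  *   double im = s_i1 * p_ri + s_i2 * p_ir;  // XSFADD_I1, then XSFADD_I2
-  *
-  * For example, s_r2 = -1, s_i1 = +1, s_i2 = +1 yields the plain
-  * NN/NT/TN/TT case: re = ar*br - ai*bi, im = ar*bi + ai*br.
-  */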
-
- /**********************************************************************************************
- * Macros for N=2 and M=8
- **********************************************************************************************/
-
- #if defined(_AIX)
- define(`LOAD2x8_1', `
- #else
- .macro LOAD2x8_1
- #endif
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvd2x vs4, o0, AO // load real,imag from A
- lxvd2x vs5, o16, AO // load real,imag from A
- lxvd2x vs6, o32, AO // load real,imag from A
- lxvd2x vs7, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x8_I1', `
- #else
- .macro KERNEL2x8_I1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
- lxvd2x vs10, o32, AO // load real,imag from A
- lxvd2x vs11, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvd2x vs12, o0, AO // load real,imag from A
- lxvd2x vs13, o16, AO // load real,imag from A
- lxvd2x vs14, o32, AO // load real,imag from A
- lxvd2x vs15, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
- lxvdsx vs22, o16, BO // load real part from B
- lxvdsx vs23, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
- xvmuldp vs36, vs2, vs16 // real*real, imag*real
- xvmuldp vs37, vs2, vs17 // real*imag, imag*imag
- xvmuldp vs38, vs3, vs16 // real*real, imag*real
- xvmuldp vs39, vs3, vs17 // real*imag, imag*imag
- xvmuldp vs40, vs4, vs16 // real*real, imag*real
- xvmuldp vs41, vs4, vs17 // real*imag, imag*imag
- xvmuldp vs42, vs5, vs16 // real*real, imag*real
- xvmuldp vs43, vs5, vs17 // real*imag, imag*imag
- xvmuldp vs44, vs6, vs16 // real*real, imag*real
- xvmuldp vs45, vs6, vs17 // real*imag, imag*imag
- xvmuldp vs46, vs7, vs16 // real*real, imag*real
- xvmuldp vs47, vs7, vs17 // real*imag, imag*imag
-
- xvmuldp vs48, vs0, vs18 // real*real, imag*real
- xvmuldp vs49, vs0, vs19 // real*imag, imag*imag
- xvmuldp vs50, vs1, vs18 // real*real, imag*real
- xvmuldp vs51, vs1, vs19 // real*imag, imag*imag
- xvmuldp vs52, vs2, vs18 // real*real, imag*real
- xvmuldp vs53, vs2, vs19 // real*imag, imag*imag
- xvmuldp vs54, vs3, vs18 // real*real, imag*real
- xvmuldp vs55, vs3, vs19 // real*imag, imag*imag
- xvmuldp vs56, vs4, vs18 // real*real, imag*real
- xvmuldp vs57, vs4, vs19 // real*imag, imag*imag
- xvmuldp vs58, vs5, vs18 // real*real, imag*real
- xvmuldp vs59, vs5, vs19 // real*imag, imag*imag
- xvmuldp vs60, vs6, vs18 // real*real, imag*real
- xvmuldp vs61, vs6, vs19 // real*imag, imag*imag
- xvmuldp vs62, vs7, vs18 // real*real, imag*real
- xvmuldp vs63, vs7, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x8_1', `
- #else
- .macro KERNEL2x8_1
- #endif
-
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
-
- lxvdsx vs22, o16, BO // load real part from B
- lxvdsx vs23, o24, BO // load imag part from B
-
- xvmaddadp vs36, vs2, vs16 // real*real, imag*real
- xvmaddadp vs37, vs2, vs17 // real*imag, imag*imag
- xvmaddadp vs38, vs3, vs16 // real*real, imag*real
- xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
-
- xvmaddadp vs40, vs4, vs16 // real*real, imag*real
- xvmaddadp vs41, vs4, vs17 // real*imag, imag*imag
- xvmaddadp vs42, vs5, vs16 // real*real, imag*real
- xvmaddadp vs43, vs5, vs17 // real*imag, imag*imag
-
- lxvd2x vs10, o32, AO // load real,imag from A
- lxvd2x vs11, o48, AO // load real,imag from A
-
- xvmaddadp vs44, vs6, vs16 // real*real, imag*real
- xvmaddadp vs45, vs6, vs17 // real*imag, imag*imag
-
- addi AO, AO, 64
-
- xvmaddadp vs46, vs7, vs16 // real*real, imag*real
- xvmaddadp vs47, vs7, vs17 // real*imag, imag*imag
-
- xvmaddadp vs48, vs0, vs18 // real*real, imag*real
- xvmaddadp vs49, vs0, vs19 // real*imag, imag*imag
- xvmaddadp vs50, vs1, vs18 // real*real, imag*real
- xvmaddadp vs51, vs1, vs19 // real*imag, imag*imag
-
- lxvd2x vs12, o0, AO // load real,imag from A
- lxvd2x vs13, o16, AO // load real,imag from A
-
- xvmaddadp vs52, vs2, vs18 // real*real, imag*real
- xvmaddadp vs53, vs2, vs19 // real*imag, imag*imag
- xvmaddadp vs54, vs3, vs18 // real*real, imag*real
- xvmaddadp vs55, vs3, vs19 // real*imag, imag*imag
-
- lxvd2x vs14, o32, AO // load real,imag from A
- lxvd2x vs15, o48, AO // load real,imag from A
-
- xvmaddadp vs56, vs4, vs18 // real*real, imag*real
- xvmaddadp vs57, vs4, vs19 // real*imag, imag*imag
- xvmaddadp vs58, vs5, vs18 // real*real, imag*real
- xvmaddadp vs59, vs5, vs19 // real*imag, imag*imag
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
-
- xvmaddadp vs60, vs6, vs18 // real*real, imag*real
- xvmaddadp vs61, vs6, vs19 // real*imag, imag*imag
- xvmaddadp vs62, vs7, vs18 // real*real, imag*real
- xvmaddadp vs63, vs7, vs19 // real*imag, imag*imag
-
- addi AO, AO, 64
- addi BO, BO, 32
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x8_2', `
- #else
- .macro KERNEL2x8_2
- #endif
-
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- xvmaddadp vs36, vs10, vs20 // real*real, imag*real
- xvmaddadp vs37, vs10, vs21 // real*imag, imag*imag
- xvmaddadp vs38, vs11, vs20 // real*real, imag*real
- xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
-
- xvmaddadp vs40, vs12, vs20 // real*real, imag*real
- xvmaddadp vs41, vs12, vs21 // real*imag, imag*imag
- xvmaddadp vs42, vs13, vs20 // real*real, imag*real
- xvmaddadp vs43, vs13, vs21 // real*imag, imag*imag
-
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- xvmaddadp vs44, vs14, vs20 // real*real, imag*real
- xvmaddadp vs45, vs14, vs21 // real*imag, imag*imag
- xvmaddadp vs46, vs15, vs20 // real*real, imag*real
- xvmaddadp vs47, vs15, vs21 // real*imag, imag*imag
-
- addi AO, AO, 64
-
- xvmaddadp vs48, vs8, vs22 // real*real, imag*real
- xvmaddadp vs49, vs8, vs23 // real*imag, imag*imag
- xvmaddadp vs50, vs9, vs22 // real*real, imag*real
- xvmaddadp vs51, vs9, vs23 // real*imag, imag*imag
-
- lxvd2x vs4, o0, AO // load real,imag from A
- lxvd2x vs5, o16, AO // load real,imag from A
-
- xvmaddadp vs52, vs10, vs22 // real*real, imag*real
- xvmaddadp vs53, vs10, vs23 // real*imag, imag*imag
- xvmaddadp vs54, vs11, vs22 // real*real, imag*real
- xvmaddadp vs55, vs11, vs23 // real*imag, imag*imag
-
- lxvd2x vs6, o32, AO // load real,imag from A
- lxvd2x vs7, o48, AO // load real,imag from A
-
- xvmaddadp vs56, vs12, vs22 // real*real, imag*real
- xvmaddadp vs57, vs12, vs23 // real*imag, imag*imag
- xvmaddadp vs58, vs13, vs22 // real*real, imag*real
- xvmaddadp vs59, vs13, vs23 // real*imag, imag*imag
-
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- xvmaddadp vs60, vs14, vs22 // real*real, imag*real
- xvmaddadp vs61, vs14, vs23 // real*imag, imag*imag
- xvmaddadp vs62, vs15, vs22 // real*real, imag*real
- xvmaddadp vs63, vs15, vs23 // real*imag, imag*imag
-
- addi AO, AO, 64
- addi BO, BO, 32
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x8_E2', `
- #else
- .macro KERNEL2x8_E2
- #endif
-
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
- xvmaddadp vs36, vs10, vs20 // real*real, imag*real
- xvmaddadp vs37, vs10, vs21 // real*imag, imag*imag
- xvmaddadp vs38, vs11, vs20 // real*real, imag*real
- xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag
- xvmaddadp vs40, vs12, vs20 // real*real, imag*real
- xvmaddadp vs41, vs12, vs21 // real*imag, imag*imag
- xvmaddadp vs42, vs13, vs20 // real*real, imag*real
- xvmaddadp vs43, vs13, vs21 // real*imag, imag*imag
- xvmaddadp vs44, vs14, vs20 // real*real, imag*real
- xvmaddadp vs45, vs14, vs21 // real*imag, imag*imag
- xvmaddadp vs46, vs15, vs20 // real*real, imag*real
- xvmaddadp vs47, vs15, vs21 // real*imag, imag*imag
-
- xvmaddadp vs48, vs8, vs22 // real*real, imag*real
- xvmaddadp vs49, vs8, vs23 // real*imag, imag*imag
- xvmaddadp vs50, vs9, vs22 // real*real, imag*real
- xvmaddadp vs51, vs9, vs23 // real*imag, imag*imag
- xvmaddadp vs52, vs10, vs22 // real*real, imag*real
- xvmaddadp vs53, vs10, vs23 // real*imag, imag*imag
- xvmaddadp vs54, vs11, vs22 // real*real, imag*real
- xvmaddadp vs55, vs11, vs23 // real*imag, imag*imag
- xvmaddadp vs56, vs12, vs22 // real*real, imag*real
- xvmaddadp vs57, vs12, vs23 // real*imag, imag*imag
- xvmaddadp vs58, vs13, vs22 // real*real, imag*real
- xvmaddadp vs59, vs13, vs23 // real*imag, imag*imag
- xvmaddadp vs60, vs14, vs22 // real*real, imag*real
- xvmaddadp vs61, vs14, vs23 // real*imag, imag*imag
- xvmaddadp vs62, vs15, vs22 // real*real, imag*real
- xvmaddadp vs63, vs15, vs23 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
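- 
- /* Illustrative note (added commentary): LOAD2x8_1 and KERNEL2x8_I1 preload
-  * the first A/B panels and start the multiplies, KERNEL2x8_1 and
-  * KERNEL2x8_2 ping-pong between the two register sets (vs0-7/vs16-19 and
-  * vs8-15/vs20-23) so the loads for the next iteration overlap the FMAs of
-  * the current one, and KERNEL2x8_E2 drains the last preloaded set; the
-  * SUBI1/SUB1 macros below handle leftover k iterations without this
-  * pipelining. A hypothetical usage sketch of how a driver loop would
-  * presumably sequence them (not taken from this file):
-  *
-  *   LOAD2x8_1        // preload set 0 (vs0-7 / vs16-19)
-  *   KERNEL2x8_I1     // multiply set 0, preload set 1 (vs8-15 / vs20-23)
-  *   KERNEL2x8_2      // accumulate set 1, reload set 0
-  *   KERNEL2x8_1      // accumulate set 0, reload set 1
-  *   ...              // repeat the _2/_1 pair
-  *   KERNEL2x8_E2     // accumulate the final preloaded set 1
-  */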
-
- #if defined(_AIX)
- define(`KERNEL2x8_SUBI1', `
- #else
- .macro KERNEL2x8_SUBI1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvd2x vs4, o0, AO // load real,imag from A
- lxvd2x vs5, o16, AO // load real,imag from A
- lxvd2x vs6, o32, AO // load real,imag from A
- lxvd2x vs7, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
- xvmuldp vs36, vs2, vs16 // real*real, imag*real
- xvmuldp vs37, vs2, vs17 // real*imag, imag*imag
- xvmuldp vs38, vs3, vs16 // real*real, imag*real
- xvmuldp vs39, vs3, vs17 // real*imag, imag*imag
- xvmuldp vs40, vs4, vs16 // real*real, imag*real
- xvmuldp vs41, vs4, vs17 // real*imag, imag*imag
- xvmuldp vs42, vs5, vs16 // real*real, imag*real
- xvmuldp vs43, vs5, vs17 // real*imag, imag*imag
- xvmuldp vs44, vs6, vs16 // real*real, imag*real
- xvmuldp vs45, vs6, vs17 // real*imag, imag*imag
- xvmuldp vs46, vs7, vs16 // real*real, imag*real
- xvmuldp vs47, vs7, vs17 // real*imag, imag*imag
-
- xvmuldp vs48, vs0, vs18 // real*real, imag*real
- xvmuldp vs49, vs0, vs19 // real*imag, imag*imag
- xvmuldp vs50, vs1, vs18 // real*real, imag*real
- xvmuldp vs51, vs1, vs19 // real*imag, imag*imag
- xvmuldp vs52, vs2, vs18 // real*real, imag*real
- xvmuldp vs53, vs2, vs19 // real*imag, imag*imag
- xvmuldp vs54, vs3, vs18 // real*real, imag*real
- xvmuldp vs55, vs3, vs19 // real*imag, imag*imag
- xvmuldp vs56, vs4, vs18 // real*real, imag*real
- xvmuldp vs57, vs4, vs19 // real*imag, imag*imag
- xvmuldp vs58, vs5, vs18 // real*real, imag*real
- xvmuldp vs59, vs5, vs19 // real*imag, imag*imag
- xvmuldp vs60, vs6, vs18 // real*real, imag*real
- xvmuldp vs61, vs6, vs19 // real*imag, imag*imag
- xvmuldp vs62, vs7, vs18 // real*real, imag*real
- xvmuldp vs63, vs7, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x8_SUB1', `
- #else
- .macro KERNEL2x8_SUB1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvd2x vs4, o0, AO // load real,imag from A
- lxvd2x vs5, o16, AO // load real,imag from A
- lxvd2x vs6, o32, AO // load real,imag from A
- lxvd2x vs7, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
- xvmaddadp vs36, vs2, vs16 // real*real, imag*real
- xvmaddadp vs37, vs2, vs17 // real*imag, imag*imag
- xvmaddadp vs38, vs3, vs16 // real*real, imag*real
- xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag
- xvmaddadp vs40, vs4, vs16 // real*real, imag*real
- xvmaddadp vs41, vs4, vs17 // real*imag, imag*imag
- xvmaddadp vs42, vs5, vs16 // real*real, imag*real
- xvmaddadp vs43, vs5, vs17 // real*imag, imag*imag
- xvmaddadp vs44, vs6, vs16 // real*real, imag*real
- xvmaddadp vs45, vs6, vs17 // real*imag, imag*imag
- xvmaddadp vs46, vs7, vs16 // real*real, imag*real
- xvmaddadp vs47, vs7, vs17 // real*imag, imag*imag
-
- xvmaddadp vs48, vs0, vs18 // real*real, imag*real
- xvmaddadp vs49, vs0, vs19 // real*imag, imag*imag
- xvmaddadp vs50, vs1, vs18 // real*real, imag*real
- xvmaddadp vs51, vs1, vs19 // real*imag, imag*imag
- xvmaddadp vs52, vs2, vs18 // real*real, imag*real
- xvmaddadp vs53, vs2, vs19 // real*imag, imag*imag
- xvmaddadp vs54, vs3, vs18 // real*real, imag*real
- xvmaddadp vs55, vs3, vs19 // real*imag, imag*imag
- xvmaddadp vs56, vs4, vs18 // real*real, imag*real
- xvmaddadp vs57, vs4, vs19 // real*imag, imag*imag
- xvmaddadp vs58, vs5, vs18 // real*real, imag*real
- xvmaddadp vs59, vs5, vs19 // real*imag, imag*imag
- xvmaddadp vs60, vs6, vs18 // real*real, imag*real
- xvmaddadp vs61, vs6, vs19 // real*imag, imag*imag
- xvmaddadp vs62, vs7, vs18 // real*real, imag*real
- xvmaddadp vs63, vs7, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`SAVE2x8', `
- #else
- .macro SAVE2x8
- #endif
-
-
- mr T1, CO
- addi T2, T1, 64
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
- lxvd2x vs17, o16, T1
- lxvd2x vs18, o32, T1
- lxvd2x vs19, o48, T1
- lxvd2x vs20, o0, T2
- lxvd2x vs21, o16, T2
- lxvd2x vs22, o32, T2
- lxvd2x vs23, o48, T2
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs32 // realA*realB
- XSFADD_R2 vs0, vs0, vs33 // imagA*imagB
-
- XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs32 // realA*imagB
- XSFADD_I2 vs1, vs1, vs33 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs34 // realA*realB
- XSFADD_R2 vs0, vs0, vs35 // imagA*imagB
-
- XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs34 // realA*imagB
- XSFADD_I2 vs1, vs1, vs35 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs9, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs36 // realA*realB
- XSFADD_R2 vs0, vs0, vs37 // imagA*imagB
-
- XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs36 // realA*imagB
- XSFADD_I2 vs1, vs1, vs37 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs10, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs38 // realA*realB
- XSFADD_R2 vs0, vs0, vs39 // imagA*imagB
-
- XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs38 // realA*imagB
- XSFADD_I2 vs1, vs1, vs39 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs11, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs41,vs41) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs40 // realA*realB
- XSFADD_R2 vs0, vs0, vs41 // imagA*imagB
-
- XXSWAPD(vs40,vs40) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs41,vs41) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs40 // realA*imagB
- XSFADD_I2 vs1, vs1, vs41 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs12, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs43,vs43) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs42 // realA*realB
- XSFADD_R2 vs0, vs0, vs43 // imagA*imagB
-
- XXSWAPD(vs42,vs42) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs43,vs43) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs42 // realA*imagB
- XSFADD_I2 vs1, vs1, vs43 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs13, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs45,vs45) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs44 // realA*realB
- XSFADD_R2 vs0, vs0, vs45 // imagA*imagB
-
- XXSWAPD(vs44,vs44) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs45,vs45) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs44 // realA*imagB
- XSFADD_I2 vs1, vs1, vs45 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs14, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs47,vs47) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs46 // realA*realB
- XSFADD_R2 vs0, vs0, vs47 // imagA*imagB
-
- XXSWAPD(vs46,vs46) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs47,vs47) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs46 // realA*imagB
- XSFADD_I2 vs1, vs1, vs47 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs15, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
- xvadddp vs9, vs9, vs17
- xvadddp vs10, vs10, vs18
- xvadddp vs11, vs11, vs19
- xvadddp vs12, vs12, vs20
- xvadddp vs13, vs13, vs21
- xvadddp vs14, vs14, vs22
- xvadddp vs15, vs15, vs23
-
- #endif
-
- stxvd2x vs8, o0, T1
- stxvd2x vs9, o16, T1
- stxvd2x vs10, o32, T1
- stxvd2x vs11, o48, T1
- stxvd2x vs12, o0, T2
- stxvd2x vs13, o16, T2
- stxvd2x vs14, o32, T2
- stxvd2x vs15, o48, T2
-
- add T1, T1, LDC
- add T2, T2, LDC
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
- lxvd2x vs17, o16, T1
- lxvd2x vs18, o32, T1
- lxvd2x vs19, o48, T1
- lxvd2x vs20, o0, T2
- lxvd2x vs21, o16, T2
- lxvd2x vs22, o32, T2
- lxvd2x vs23, o48, T2
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs49,vs49) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs48 // realA*realB
- XSFADD_R2 vs0, vs0, vs49 // imagA*imagB
-
- XXSWAPD(vs48,vs48) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs49,vs49) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs48 // realA*imagB
- XSFADD_I2 vs1, vs1, vs49 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs51,vs51) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs50 // realA*realB
- XSFADD_R2 vs0, vs0, vs51 // imagA*imagB
-
- XXSWAPD(vs50,vs50) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs51,vs51) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs50 // realA*imagB
- XSFADD_I2 vs1, vs1, vs51 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs9, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs53,vs53) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs52 // realA*realB
- XSFADD_R2 vs0, vs0, vs53 // imagA*imagB
-
- XXSWAPD(vs52,vs52) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs53,vs53) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs52 // realA*imagB
- XSFADD_I2 vs1, vs1, vs53 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs10, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs55,vs55) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs54 // realA*realB
- XSFADD_R2 vs0, vs0, vs55 // imagA*imagB
-
- XXSWAPD(vs54,vs54) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs55,vs55) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs54 // realA*imagB
- XSFADD_I2 vs1, vs1, vs55 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs11, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs57,vs57) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs56 // realA*realB
- XSFADD_R2 vs0, vs0, vs57 // imagA*imagB
-
- XXSWAPD(vs56,vs56) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs57,vs57) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs56 // realA*imagB
- XSFADD_I2 vs1, vs1, vs57 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs12, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs59,vs59) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs58 // realA*realB
- XSFADD_R2 vs0, vs0, vs59 // imagA*imagB
-
- XXSWAPD(vs58,vs58) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs59,vs59) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs58 // realA*imagB
- XSFADD_I2 vs1, vs1, vs59 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs13, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs61,vs61) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs60 // realA*realB
- XSFADD_R2 vs0, vs0, vs61 // imagA*imagB
-
- XXSWAPD(vs60,vs60) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs61,vs61) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs60 // realA*imagB
- XSFADD_I2 vs1, vs1, vs61 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs14, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs63,vs63) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs62 // realA*realB
- XSFADD_R2 vs0, vs0, vs63 // imagA*imagB
-
- XXSWAPD(vs62,vs62) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs63,vs63) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs62 // realA*imagB
- XSFADD_I2 vs1, vs1, vs63 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs15, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
- xvadddp vs9, vs9, vs17
- xvadddp vs10, vs10, vs18
- xvadddp vs11, vs11, vs19
- xvadddp vs12, vs12, vs20
- xvadddp vs13, vs13, vs21
- xvadddp vs14, vs14, vs22
- xvadddp vs15, vs15, vs23
-
- #endif
-
- stxvd2x vs8, o0, T1
- stxvd2x vs9, o16, T1
- stxvd2x vs10, o32, T1
- stxvd2x vs11, o48, T1
- stxvd2x vs12, o0, T2
- stxvd2x vs13, o16, T2
- stxvd2x vs14, o32, T2
- stxvd2x vs15, o48, T2
-
- add T1, T1, LDC
- add T2, T2, LDC
- addi CO, CO, 128
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
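- 
- /* Illustrative note (added commentary): for each of the 16 C elements,
-  * SAVE2x8 reduces one accumulator pair to a scalar (re, im) via the
-  * XXSWAPD/XSFADD steps, scales it by the complex alpha, and, unless
-  * TRMMKERNEL is defined, adds the C value loaded earlier. A minimal C
-  * sketch of the xsmuldp/xssubdp/xsadddp scaling sequence above:
-  *
-  *   double out_re = re * alpha_r - im * alpha_i;
-  *   double out_im = re * alpha_i + im * alpha_r;
-  *   // #ifndef TRMMKERNEL: out_re += c_re; out_im += c_im;   (xvadddp)
-  */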
-
-
- /**********************************************************************************************
- * Macros for N=2 and M=4
- **********************************************************************************************/
-
- #if defined(_AIX)
- define(`LOAD2x4_1', `
- #else
- .macro LOAD2x4_1
- #endif
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x4_I1', `
- #else
- .macro KERNEL2x4_I1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
- lxvd2x vs10, o32, AO // load real,imag from A
- lxvd2x vs11, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
- lxvdsx vs22, o16, BO // load real part from B
- lxvdsx vs23, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
- xvmuldp vs36, vs2, vs16 // real*real, imag*real
- xvmuldp vs37, vs2, vs17 // real*imag, imag*imag
- xvmuldp vs38, vs3, vs16 // real*real, imag*real
- xvmuldp vs39, vs3, vs17 // real*imag, imag*imag
-
- xvmuldp vs40, vs0, vs18 // real*real, imag*real
- xvmuldp vs41, vs0, vs19 // real*imag, imag*imag
- xvmuldp vs42, vs1, vs18 // real*real, imag*real
- xvmuldp vs43, vs1, vs19 // real*imag, imag*imag
- xvmuldp vs44, vs2, vs18 // real*real, imag*real
- xvmuldp vs45, vs2, vs19 // real*imag, imag*imag
- xvmuldp vs46, vs3, vs18 // real*real, imag*real
- xvmuldp vs47, vs3, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x4_1', `
- #else
- .macro KERNEL2x4_1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
- lxvd2x vs10, o32, AO // load real,imag from A
- lxvd2x vs11, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
- lxvdsx vs22, o16, BO // load real part from B
- lxvdsx vs23, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
- xvmaddadp vs36, vs2, vs16 // real*real, imag*real
- xvmaddadp vs37, vs2, vs17 // real*imag, imag*imag
- xvmaddadp vs38, vs3, vs16 // real*real, imag*real
- xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag
-
- xvmaddadp vs40, vs0, vs18 // real*real, imag*real
- xvmaddadp vs41, vs0, vs19 // real*imag, imag*imag
- xvmaddadp vs42, vs1, vs18 // real*real, imag*real
- xvmaddadp vs43, vs1, vs19 // real*imag, imag*imag
- xvmaddadp vs44, vs2, vs18 // real*real, imag*real
- xvmaddadp vs45, vs2, vs19 // real*imag, imag*imag
- xvmaddadp vs46, vs3, vs18 // real*real, imag*real
- xvmaddadp vs47, vs3, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x4_2', `
- #else
- .macro KERNEL2x4_2
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
- xvmaddadp vs36, vs10, vs20 // real*real, imag*real
- xvmaddadp vs37, vs10, vs21 // real*imag, imag*imag
- xvmaddadp vs38, vs11, vs20 // real*real, imag*real
- xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag
-
- xvmaddadp vs40, vs8, vs22 // real*real, imag*real
- xvmaddadp vs41, vs8, vs23 // real*imag, imag*imag
- xvmaddadp vs42, vs9, vs22 // real*real, imag*real
- xvmaddadp vs43, vs9, vs23 // real*imag, imag*imag
- xvmaddadp vs44, vs10, vs22 // real*real, imag*real
- xvmaddadp vs45, vs10, vs23 // real*imag, imag*imag
- xvmaddadp vs46, vs11, vs22 // real*real, imag*real
- xvmaddadp vs47, vs11, vs23 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x4_E2', `
- #else
- .macro KERNEL2x4_E2
- #endif
-
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
- xvmaddadp vs36, vs10, vs20 // real*real, imag*real
- xvmaddadp vs37, vs10, vs21 // real*imag, imag*imag
- xvmaddadp vs38, vs11, vs20 // real*real, imag*real
- xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag
-
- xvmaddadp vs40, vs8, vs22 // real*real, imag*real
- xvmaddadp vs41, vs8, vs23 // real*imag, imag*imag
- xvmaddadp vs42, vs9, vs22 // real*real, imag*real
- xvmaddadp vs43, vs9, vs23 // real*imag, imag*imag
- xvmaddadp vs44, vs10, vs22 // real*real, imag*real
- xvmaddadp vs45, vs10, vs23 // real*imag, imag*imag
- xvmaddadp vs46, vs11, vs22 // real*real, imag*real
- xvmaddadp vs47, vs11, vs23 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x4_SUBI1', `
- #else
- .macro KERNEL2x4_SUBI1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
- xvmuldp vs36, vs2, vs16 // real*real, imag*real
- xvmuldp vs37, vs2, vs17 // real*imag, imag*imag
- xvmuldp vs38, vs3, vs16 // real*real, imag*real
- xvmuldp vs39, vs3, vs17 // real*imag, imag*imag
-
- xvmuldp vs40, vs0, vs18 // real*real, imag*real
- xvmuldp vs41, vs0, vs19 // real*imag, imag*imag
- xvmuldp vs42, vs1, vs18 // real*real, imag*real
- xvmuldp vs43, vs1, vs19 // real*imag, imag*imag
- xvmuldp vs44, vs2, vs18 // real*real, imag*real
- xvmuldp vs45, vs2, vs19 // real*imag, imag*imag
- xvmuldp vs46, vs3, vs18 // real*real, imag*real
- xvmuldp vs47, vs3, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x4_SUB1', `
- #else
- .macro KERNEL2x4_SUB1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
- xvmaddadp vs36, vs2, vs16 // real*real, imag*real
- xvmaddadp vs37, vs2, vs17 // real*imag, imag*imag
- xvmaddadp vs38, vs3, vs16 // real*real, imag*real
- xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag
-
- xvmaddadp vs40, vs0, vs18 // real*real, imag*real
- xvmaddadp vs41, vs0, vs19 // real*imag, imag*imag
- xvmaddadp vs42, vs1, vs18 // real*real, imag*real
- xvmaddadp vs43, vs1, vs19 // real*imag, imag*imag
- xvmaddadp vs44, vs2, vs18 // real*real, imag*real
- xvmaddadp vs45, vs2, vs19 // real*imag, imag*imag
- xvmaddadp vs46, vs3, vs18 // real*real, imag*real
- xvmaddadp vs47, vs3, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`SAVE2x4', `
- #else
- .macro SAVE2x4
- #endif
-
-
- mr T1, CO
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
- lxvd2x vs17, o16, T1
- lxvd2x vs18, o32, T1
- lxvd2x vs19, o48, T1
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs32 // realA*realB
- XSFADD_R2 vs0, vs0, vs33 // imagA*imagB
-
- XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs32 // realA*imagB
- XSFADD_I2 vs1, vs1, vs33 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs34 // realA*realB
- XSFADD_R2 vs0, vs0, vs35 // imagA*imagB
-
- XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs34 // realA*imagB
- XSFADD_I2 vs1, vs1, vs35 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs9, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs36 // realA*realB
- XSFADD_R2 vs0, vs0, vs37 // imagA*imagB
-
- XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs36 // realA*imagB
- XSFADD_I2 vs1, vs1, vs37 // imagA*realB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs10, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs38 // realA*realB
- XSFADD_R2 vs0, vs0, vs39 // imagA*imagB
-
- XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs38 // imagA*realB
- XSFADD_I2 vs1, vs1, vs39 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs11, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
- xvadddp vs9, vs9, vs17
- xvadddp vs10, vs10, vs18
- xvadddp vs11, vs11, vs19
-
- #endif
-
- stxvd2x vs8, o0, T1
- stxvd2x vs9, o16, T1
- stxvd2x vs10, o32, T1
- stxvd2x vs11, o48, T1
-
- add T1, T1, LDC
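- // Second of the two C vectors (T1 advanced by LDC): the same
- // reduction now runs on vs40-vs47.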
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
- lxvd2x vs17, o16, T1
- lxvd2x vs18, o32, T1
- lxvd2x vs19, o48, T1
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs41,vs41) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs40 // realA*realB
- XSFADD_R2 vs0, vs0, vs41 // imagA*imagB
-
- XXSWAPD(vs40,vs40) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs41,vs41) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs40 // imagA*realB
- XSFADD_I2 vs1, vs1, vs41 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs43,vs43) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs42 // realA*realB
- XSFADD_R2 vs0, vs0, vs43 // imagA*imagB
-
- XXSWAPD(vs42,vs42) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs43,vs43) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs42 // imagA*realB
- XSFADD_I2 vs1, vs1, vs43 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs9, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs45,vs45) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs44 // realA*realB
- XSFADD_R2 vs0, vs0, vs45 // imagA*imagB
-
- XXSWAPD(vs44,vs44) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs45,vs45) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs44 // imagA*realB
- XSFADD_I2 vs1, vs1, vs45 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs10, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs47,vs47) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs46 // realA*realB
- XSFADD_R2 vs0, vs0, vs47 // imagA*imagB
-
- XXSWAPD(vs46,vs46) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs47,vs47) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs46 // imagA*realB
- XSFADD_I2 vs1, vs1, vs47 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs11, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
- xvadddp vs9, vs9, vs17
- xvadddp vs10, vs10, vs18
- xvadddp vs11, vs11, vs19
-
- #endif
-
- stxvd2x vs8, o0, T1
- stxvd2x vs9, o16, T1
- stxvd2x vs10, o32, T1
- stxvd2x vs11, o48, T1
-
- add T1, T1, LDC
- addi CO, CO, 64
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
-
- /**********************************************************************************************
- * Macros for N=2 and M=2
- **********************************************************************************************/
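-
- // These kernels are software-pipelined over two register sets:
- // LOAD2x2_1 primes vs0-vs1/vs16-vs19, KERNEL2x2_I1 (first iteration,
- // xvmuldp) and KERNEL2x2_1 consume that set while prefetching the
- // next A/B into vs8-vs9/vs20-vs23, KERNEL2x2_2 does the converse,
- // and KERNEL2x2_E2 drains the final prefetched set without issuing
- // loads. KERNEL2x2_SUBI1/_SUB1 handle the loop remainder one
- // iteration at a time.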
-
- #if defined(_AIX)
- define(`LOAD2x2_1', `
- #else
- .macro LOAD2x2_1
- #endif
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x2_I1', `
- #else
- .macro KERNEL2x2_I1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
- lxvdsx vs22, o16, BO // load real part from B
- lxvdsx vs23, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
-
- xvmuldp vs36, vs0, vs18 // real*real, imag*real
- xvmuldp vs37, vs0, vs19 // real*imag, imag*imag
- xvmuldp vs38, vs1, vs18 // real*real, imag*real
- xvmuldp vs39, vs1, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x2_1', `
- #else
- .macro KERNEL2x2_1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
- lxvdsx vs22, o16, BO // load real part from B
- lxvdsx vs23, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
-
- xvmaddadp vs36, vs0, vs18 // real*real, imag*real
- xvmaddadp vs37, vs0, vs19 // real*imag, imag*imag
- xvmaddadp vs38, vs1, vs18 // real*real, imag*real
- xvmaddadp vs39, vs1, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x2_2', `
- #else
- .macro KERNEL2x2_2
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
-
- xvmaddadp vs36, vs8, vs22 // real*real, imag*real
- xvmaddadp vs37, vs8, vs23 // real*imag, imag*imag
- xvmaddadp vs38, vs9, vs22 // real*real, imag*real
- xvmaddadp vs39, vs9, vs23 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x2_E2', `
- #else
- .macro KERNEL2x2_E2
- #endif
-
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
-
- xvmaddadp vs36, vs8, vs22 // real*real, imag*real
- xvmaddadp vs37, vs8, vs23 // real*imag, imag*imag
- xvmaddadp vs38, vs9, vs22 // real*real, imag*real
- xvmaddadp vs39, vs9, vs23 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x2_SUBI1', `
- #else
- .macro KERNEL2x2_SUBI1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
-
- xvmuldp vs36, vs0, vs18 // real*real, imag*real
- xvmuldp vs37, vs0, vs19 // real*imag, imag*imag
- xvmuldp vs38, vs1, vs18 // real*real, imag*real
- xvmuldp vs39, vs1, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x2_SUB1', `
- #else
- .macro KERNEL2x2_SUB1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
-
- xvmaddadp vs36, vs0, vs18 // real*real, imag*real
- xvmaddadp vs37, vs0, vs19 // real*imag, imag*imag
- xvmaddadp vs38, vs1, vs18 // real*real, imag*real
- xvmaddadp vs39, vs1, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`SAVE2x2', `
- #else
- .macro SAVE2x2
- #endif
-
-
- mr T1, CO
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
- lxvd2x vs17, o16, T1
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs32 // realA*realB
- XSFADD_R2 vs0, vs0, vs33 // imagA*imagB
-
- XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs32 // imagA*realB
- XSFADD_I2 vs1, vs1, vs33 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs34 // realA*realB
- XSFADD_R2 vs0, vs0, vs35 // imagA*imagB
-
- XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs34 // imagA*realB
- XSFADD_I2 vs1, vs1, vs35 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs9, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
- xvadddp vs9, vs9, vs17
-
- #endif
-
- stxvd2x vs8, o0, T1
- stxvd2x vs9, o16, T1
-
- add T1, T1, LDC
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
- lxvd2x vs17, o16, T1
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs36 // realA*realB
- XSFADD_R2 vs0, vs0, vs37 // imagA*imagB
-
- XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs36 // imagA*realB
- XSFADD_I2 vs1, vs1, vs37 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs38 // realA*realB
- XSFADD_R2 vs0, vs0, vs39 // imagA*imagB
-
- XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs38 // imagA*realB
- XSFADD_I2 vs1, vs1, vs39 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs9, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
- xvadddp vs9, vs9, vs17
-
- #endif
-
- stxvd2x vs8, o0, T1
- stxvd2x vs9, o16, T1
-
- add T1, T1, LDC
- addi CO, CO, 32
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
-
- /**********************************************************************************************
- * Macros for N=2 and M=1
- **********************************************************************************************/
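-
- // Narrowest N=2 case: one complex A element per iteration (AO
- // advances 16 bytes) against two B values (BO advances 32), with one
- // accumulator pair per B value: vs32/vs33 and vs34/vs35.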
-
- #if defined(_AIX)
- define(`LOAD2x1_1', `
- #else
- .macro LOAD2x1_1
- #endif
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- lxvd2x vs0, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x1_I1', `
- #else
- .macro KERNEL2x1_I1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
- lxvdsx vs22, o16, BO // load real part from B
- lxvdsx vs23, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
-
- xvmuldp vs34, vs0, vs18 // real*real, imag*real
- xvmuldp vs35, vs0, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x1_1', `
- #else
- .macro KERNEL2x1_1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
- lxvdsx vs22, o16, BO // load real part from B
- lxvdsx vs23, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
-
- xvmaddadp vs34, vs0, vs18 // real*real, imag*real
- xvmaddadp vs35, vs0, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x1_2', `
- #else
- .macro KERNEL2x1_2
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
-
- xvmaddadp vs34, vs8, vs22 // real*real, imag*real
- xvmaddadp vs35, vs8, vs23 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x1_E2', `
- #else
- .macro KERNEL2x1_E2
- #endif
-
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
-
- xvmaddadp vs34, vs8, vs22 // real*real, imag*real
- xvmaddadp vs35, vs8, vs23 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x1_SUBI1', `
- #else
- .macro KERNEL2x1_SUBI1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
-
- xvmuldp vs34, vs0, vs18 // real*real, imag*real
- xvmuldp vs35, vs0, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL2x1_SUB1', `
- #else
- .macro KERNEL2x1_SUB1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
- lxvdsx vs18, o16, BO // load real part from B
- lxvdsx vs19, o24, BO // load imag part from B
-
- addi BO, BO, 32
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
-
- xvmaddadp vs34, vs0, vs18 // real*real, imag*real
- xvmaddadp vs35, vs0, vs19 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`SAVE2x1', `
- #else
- .macro SAVE2x1
- #endif
-
-
- mr T1, CO
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs32 // realA*realB
- XSFADD_R2 vs0, vs0, vs33 // imagA*imagB
-
- XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs32 // imagA*realB
- XSFADD_I2 vs1, vs1, vs33 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
-
- #endif
-
- stxvd2x vs8, o0, T1
-
- add T1, T1, LDC
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs34 // realA*realB
- XSFADD_R2 vs0, vs0, vs35 // imagA*imagB
-
- XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs34 // imagA*realB
- XSFADD_I2 vs1, vs1, vs35 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
-
- #endif
-
- stxvd2x vs8, o0, T1
-
- add T1, T1, LDC
- addi CO, CO, 16
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
-
- /**********************************************************************************************
- * Macros for N=1 and M=8
- **********************************************************************************************/
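-
- // Widest N=1 case: eight complex A elements per iteration, loaded in
- // two 64-byte groups (vs0-vs7, AO advances 128 in total) against a
- // single B value (vs16/vs17, BO advances 16), filling all sixteen
- // accumulators vs32-vs47.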
-
- #if defined(_AIX)
- define(`LOAD1x8_1', `
- #else
- .macro LOAD1x8_1
- #endif
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvd2x vs4, o0, AO // load real,imag from A
- lxvd2x vs5, o16, AO // load real,imag from A
- lxvd2x vs6, o32, AO // load real,imag from A
- lxvd2x vs7, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x8_I1', `
- #else
- .macro KERNEL1x8_I1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
- lxvd2x vs10, o32, AO // load real,imag from A
- lxvd2x vs11, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvd2x vs12, o0, AO // load real,imag from A
- lxvd2x vs13, o16, AO // load real,imag from A
- lxvd2x vs14, o32, AO // load real,imag from A
- lxvd2x vs15, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
- xvmuldp vs36, vs2, vs16 // real*real, imag*real
- xvmuldp vs37, vs2, vs17 // real*imag, imag*imag
- xvmuldp vs38, vs3, vs16 // real*real, imag*real
- xvmuldp vs39, vs3, vs17 // real*imag, imag*imag
- xvmuldp vs40, vs4, vs16 // real*real, imag*real
- xvmuldp vs41, vs4, vs17 // real*imag, imag*imag
- xvmuldp vs42, vs5, vs16 // real*real, imag*real
- xvmuldp vs43, vs5, vs17 // real*imag, imag*imag
- xvmuldp vs44, vs6, vs16 // real*real, imag*real
- xvmuldp vs45, vs6, vs17 // real*imag, imag*imag
- xvmuldp vs46, vs7, vs16 // real*real, imag*real
- xvmuldp vs47, vs7, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x8_1', `
- #else
- .macro KERNEL1x8_1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
- lxvd2x vs10, o32, AO // load real,imag from A
- lxvd2x vs11, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvd2x vs12, o0, AO // load real,imag from A
- lxvd2x vs13, o16, AO // load real,imag from A
- lxvd2x vs14, o32, AO // load real,imag from A
- lxvd2x vs15, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
- xvmaddadp vs36, vs2, vs16 // real*real, imag*real
- xvmaddadp vs37, vs2, vs17 // real*imag, imag*imag
- xvmaddadp vs38, vs3, vs16 // real*real, imag*real
- xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag
- xvmaddadp vs40, vs4, vs16 // real*real, imag*real
- xvmaddadp vs41, vs4, vs17 // real*imag, imag*imag
- xvmaddadp vs42, vs5, vs16 // real*real, imag*real
- xvmaddadp vs43, vs5, vs17 // real*imag, imag*imag
- xvmaddadp vs44, vs6, vs16 // real*real, imag*real
- xvmaddadp vs45, vs6, vs17 // real*imag, imag*imag
- xvmaddadp vs46, vs7, vs16 // real*real, imag*real
- xvmaddadp vs47, vs7, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x8_2', `
- #else
- .macro KERNEL1x8_2
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvd2x vs4, o0, AO // load real,imag from A
- lxvd2x vs5, o16, AO // load real,imag from A
- lxvd2x vs6, o32, AO // load real,imag from A
- lxvd2x vs7, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
- xvmaddadp vs36, vs10, vs20 // real*real, imag*real
- xvmaddadp vs37, vs10, vs21 // real*imag, imag*imag
- xvmaddadp vs38, vs11, vs20 // real*real, imag*real
- xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag
- xvmaddadp vs40, vs12, vs20 // real*real, imag*real
- xvmaddadp vs41, vs12, vs21 // real*imag, imag*imag
- xvmaddadp vs42, vs13, vs20 // real*real, imag*real
- xvmaddadp vs43, vs13, vs21 // real*imag, imag*imag
- xvmaddadp vs44, vs14, vs20 // real*real, imag*real
- xvmaddadp vs45, vs14, vs21 // real*imag, imag*imag
- xvmaddadp vs46, vs15, vs20 // real*real, imag*real
- xvmaddadp vs47, vs15, vs21 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x8_E2', `
- #else
- .macro KERNEL1x8_E2
- #endif
-
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
- xvmaddadp vs36, vs10, vs20 // real*real, imag*real
- xvmaddadp vs37, vs10, vs21 // real*imag, imag*imag
- xvmaddadp vs38, vs11, vs20 // real*real, imag*real
- xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag
- xvmaddadp vs40, vs12, vs20 // real*real, imag*real
- xvmaddadp vs41, vs12, vs21 // real*imag, imag*imag
- xvmaddadp vs42, vs13, vs20 // real*real, imag*real
- xvmaddadp vs43, vs13, vs21 // real*imag, imag*imag
- xvmaddadp vs44, vs14, vs20 // real*real, imag*real
- xvmaddadp vs45, vs14, vs21 // real*imag, imag*imag
- xvmaddadp vs46, vs15, vs20 // real*real, imag*real
- xvmaddadp vs47, vs15, vs21 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x8_SUBI1', `
- #else
- .macro KERNEL1x8_SUBI1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvd2x vs4, o0, AO // load real,imag from A
- lxvd2x vs5, o16, AO // load real,imag from A
- lxvd2x vs6, o32, AO // load real,imag from A
- lxvd2x vs7, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
- xvmuldp vs36, vs2, vs16 // real*real, imag*real
- xvmuldp vs37, vs2, vs17 // real*imag, imag*imag
- xvmuldp vs38, vs3, vs16 // real*real, imag*real
- xvmuldp vs39, vs3, vs17 // real*imag, imag*imag
- xvmuldp vs40, vs4, vs16 // real*real, imag*real
- xvmuldp vs41, vs4, vs17 // real*imag, imag*imag
- xvmuldp vs42, vs5, vs16 // real*real, imag*real
- xvmuldp vs43, vs5, vs17 // real*imag, imag*imag
- xvmuldp vs44, vs6, vs16 // real*real, imag*real
- xvmuldp vs45, vs6, vs17 // real*imag, imag*imag
- xvmuldp vs46, vs7, vs16 // real*real, imag*real
- xvmuldp vs47, vs7, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x8_SUB1', `
- #else
- .macro KERNEL1x8_SUB1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvd2x vs4, o0, AO // load real,imag from A
- lxvd2x vs5, o16, AO // load real,imag from A
- lxvd2x vs6, o32, AO // load real,imag from A
- lxvd2x vs7, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
- xvmaddadp vs36, vs2, vs16 // real*real, imag*real
- xvmaddadp vs37, vs2, vs17 // real*imag, imag*imag
- xvmaddadp vs38, vs3, vs16 // real*real, imag*real
- xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag
- xvmaddadp vs40, vs4, vs16 // real*real, imag*real
- xvmaddadp vs41, vs4, vs17 // real*imag, imag*imag
- xvmaddadp vs42, vs5, vs16 // real*real, imag*real
- xvmaddadp vs43, vs5, vs17 // real*imag, imag*imag
- xvmaddadp vs44, vs6, vs16 // real*real, imag*real
- xvmaddadp vs45, vs6, vs17 // real*imag, imag*imag
- xvmaddadp vs46, vs7, vs16 // real*real, imag*real
- xvmaddadp vs47, vs7, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`SAVE1x8', `
- #else
- .macro SAVE1x8
- #endif
-
-
- mr T1, CO
- addi T2, T1, 64
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
- lxvd2x vs17, o16, T1
- lxvd2x vs18, o32, T1
- lxvd2x vs19, o48, T1
- lxvd2x vs20, o0, T2
- lxvd2x vs21, o16, T2
- lxvd2x vs22, o32, T2
- lxvd2x vs23, o48, T2
-
- #endif
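-
- // The eight results span a 128-byte row of C, addressed in two
- // halves: vs8-vs11 through T1 and vs12-vs15 through T2 = T1 + 64.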
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs32 // realA*realB
- XSFADD_R2 vs0, vs0, vs33 // imagA*imagB
-
- XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs32 // imagA*realB
- XSFADD_I2 vs1, vs1, vs33 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs34 // realA*realB
- XSFADD_R2 vs0, vs0, vs35 // imagA*imagB
-
- XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs34 // imagA*realB
- XSFADD_I2 vs1, vs1, vs35 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs9, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs36 // realA*realB
- XSFADD_R2 vs0, vs0, vs37 // imagA*imagB
-
- XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs36 // imagA*realB
- XSFADD_I2 vs1, vs1, vs37 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs10, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs38 // realA*realB
- XSFADD_R2 vs0, vs0, vs39 // imagA*imagB
-
- XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs38 // imagA*realB
- XSFADD_I2 vs1, vs1, vs39 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs11, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs41,vs41) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs40 // realA*realB
- XSFADD_R2 vs0, vs0, vs41 // imagA*imagB
-
- XXSWAPD(vs40,vs40) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs41,vs41) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs40 // imagA*realB
- XSFADD_I2 vs1, vs1, vs41 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs12, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs43,vs43) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs42 // realA*realB
- XSFADD_R2 vs0, vs0, vs43 // imagA*imagB
-
- XXSWAPD(vs42,vs42) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs43,vs43) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs42 // imagA*realB
- XSFADD_I2 vs1, vs1, vs43 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs13, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs45,vs45) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs44 // realA*realB
- XSFADD_R2 vs0, vs0, vs45 // imagA*imagB
-
- XXSWAPD(vs44,vs44) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs45,vs45) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs44 // imagA*realB
- XSFADD_I2 vs1, vs1, vs45 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs14, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs47,vs47) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs46 // realA*realB
- XSFADD_R2 vs0, vs0, vs47 // imagA*imagB
-
- XXSWAPD(vs46,vs46) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs47,vs47) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs46 // imagA*realB
- XSFADD_I2 vs1, vs1, vs47 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs15, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
- xvadddp vs9, vs9, vs17
- xvadddp vs10, vs10, vs18
- xvadddp vs11, vs11, vs19
- xvadddp vs12, vs12, vs20
- xvadddp vs13, vs13, vs21
- xvadddp vs14, vs14, vs22
- xvadddp vs15, vs15, vs23
-
- #endif
-
- stxvd2x vs8, o0, T1
- stxvd2x vs9, o16, T1
- stxvd2x vs10, o32, T1
- stxvd2x vs11, o48, T1
- stxvd2x vs12, o0, T2
- stxvd2x vs13, o16, T2
- stxvd2x vs14, o32, T2
- stxvd2x vs15, o48, T2
-
- add T1, T1, LDC
- add T2, T2, LDC
- addi CO, CO, 128
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
-
- /**********************************************************************************************
- * Macros for N=1 and M=4
- **********************************************************************************************/
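-
- // Four complex A elements per iteration (vs0-vs3, AO advances 64)
- // against one B value (BO advances 16), accumulating into vs32-vs39.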
-
- #if defined(_AIX)
- define(`LOAD1x4_1', `
- #else
- .macro LOAD1x4_1
- #endif
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x4_I1', `
- #else
- .macro KERNEL1x4_I1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
- lxvd2x vs10, o32, AO // load real,imag from A
- lxvd2x vs11, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
- xvmuldp vs36, vs2, vs16 // real*real, imag*real
- xvmuldp vs37, vs2, vs17 // real*imag, imag*imag
- xvmuldp vs38, vs3, vs16 // real*real, imag*real
- xvmuldp vs39, vs3, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x4_1', `
- #else
- .macro KERNEL1x4_1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
- lxvd2x vs10, o32, AO // load real,imag from A
- lxvd2x vs11, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
- xvmaddadp vs36, vs2, vs16 // real*real, imag*real
- xvmaddadp vs37, vs2, vs17 // real*imag, imag*imag
- xvmaddadp vs38, vs3, vs16 // real*real, imag*real
- xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x4_2', `
- #else
- .macro KERNEL1x4_2
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
- xvmaddadp vs36, vs10, vs20 // real*real, imag*real
- xvmaddadp vs37, vs10, vs21 // real*imag, imag*imag
- xvmaddadp vs38, vs11, vs20 // real*real, imag*real
- xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x4_E2', `
- #else
- .macro KERNEL1x4_E2
- #endif
-
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
- xvmaddadp vs36, vs10, vs20 // real*real, imag*real
- xvmaddadp vs37, vs10, vs21 // real*imag, imag*imag
- xvmaddadp vs38, vs11, vs20 // real*real, imag*real
- xvmaddadp vs39, vs11, vs21 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x4_SUBI1', `
- #else
- .macro KERNEL1x4_SUBI1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
- xvmuldp vs36, vs2, vs16 // real*real, imag*real
- xvmuldp vs37, vs2, vs17 // real*imag, imag*imag
- xvmuldp vs38, vs3, vs16 // real*real, imag*real
- xvmuldp vs39, vs3, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x4_SUB1', `
- #else
- .macro KERNEL1x4_SUB1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
- lxvd2x vs2, o32, AO // load real,imag from A
- lxvd2x vs3, o48, AO // load real,imag from A
-
- addi AO, AO, 64
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
- xvmaddadp vs36, vs2, vs16 // real*real, imag*real
- xvmaddadp vs37, vs2, vs17 // real*imag, imag*imag
- xvmaddadp vs38, vs3, vs16 // real*real, imag*real
- xvmaddadp vs39, vs3, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`SAVE1x4', `
- #else
- .macro SAVE1x4
- #endif
-
-
- mr T1, CO
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
- lxvd2x vs17, o16, T1
- lxvd2x vs18, o32, T1
- lxvd2x vs19, o48, T1
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs32 // realA*realB
- XSFADD_R2 vs0, vs0, vs33 // imagA*imagB
-
- XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs32 // imagA*realB
- XSFADD_I2 vs1, vs1, vs33 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs34 // realA*realB
- XSFADD_R2 vs0, vs0, vs35 // imagA*imagB
-
- XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs34 // imagA*realB
- XSFADD_I2 vs1, vs1, vs35 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs9, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs37,vs37) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs36 // realA*realB
- XSFADD_R2 vs0, vs0, vs37 // imagA*imagB
-
- XXSWAPD(vs36,vs36) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs37,vs37) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs36 // imagA*realB
- XSFADD_I2 vs1, vs1, vs37 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs10, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs39,vs39) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs38 // realA*realB
- XSFADD_R2 vs0, vs0, vs39 // imagA*imagB
-
- XXSWAPD(vs38,vs38) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs39,vs39) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs38 // imagA*realB
- XSFADD_I2 vs1, vs1, vs39 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs11, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
- xvadddp vs9, vs9, vs17
- xvadddp vs10, vs10, vs18
- xvadddp vs11, vs11, vs19
-
- #endif
-
- stxvd2x vs8, o0, T1
- stxvd2x vs9, o16, T1
- stxvd2x vs10, o32, T1
- stxvd2x vs11, o48, T1
-
- add T1, T1, LDC
- addi CO, CO, 64
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
-
- /**********************************************************************************************
- * Macros for N=1 and M=2
- **********************************************************************************************/
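-
- // Two complex A elements per iteration (vs0-vs1, AO advances 32)
- // against one B value (BO advances 16), accumulating into vs32-vs35.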
-
- #if defined(_AIX)
- define(`LOAD1x2_1', `
- #else
- .macro LOAD1x2_1
- #endif
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x2_I1', `
- #else
- .macro KERNEL1x2_I1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x2_1', `
- #else
- .macro KERNEL1x2_1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
- lxvd2x vs9, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x2_2', `
- #else
- .macro KERNEL1x2_2
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x2_E2', `
- #else
- .macro KERNEL1x2_E2
- #endif
-
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
- xvmaddadp vs34, vs9, vs20 // real*real, imag*real
- xvmaddadp vs35, vs9, vs21 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x2_SUBI1', `
- #else
- .macro KERNEL1x2_SUBI1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
- xvmuldp vs34, vs1, vs16 // real*real, imag*real
- xvmuldp vs35, vs1, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x2_SUB1', `
- #else
- .macro KERNEL1x2_SUB1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
- lxvd2x vs1, o16, AO // load real,imag from A
-
- addi AO, AO, 32
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
- xvmaddadp vs34, vs1, vs16 // real*real, imag*real
- xvmaddadp vs35, vs1, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`SAVE1x2', `
- #else
- .macro SAVE1x2
- #endif
-
-
- mr T1, CO
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
- lxvd2x vs17, o16, T1
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs32 // realA*realB
- XSFADD_R2 vs0, vs0, vs33 // imagA*imagB
-
- XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs32 // imagA*realB
- XSFADD_I2 vs1, vs1, vs33 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs35,vs35) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs34 // realA*realB
- XSFADD_R2 vs0, vs0, vs35 // imagA*imagB
-
- XXSWAPD(vs34,vs34) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs35,vs35) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs34 // imagA*realB
- XSFADD_I2 vs1, vs1, vs35 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs9, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
- xvadddp vs9, vs9, vs17
-
- #endif
-
- stxvd2x vs8, o0, T1
- stxvd2x vs9, o16, T1
-
- add T1, T1, LDC
- addi CO, CO, 32
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
-
- /**********************************************************************************************
- * Macros for N=1 and M=1
- **********************************************************************************************/
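-
- // Smallest remainder tile: one complex A element against one B value,
- // with a single accumulator pair vs32/vs33.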
-
- #if defined(_AIX)
- define(`LOAD1x1_1', `
- #else
- .macro LOAD1x1_1
- #endif
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- lxvd2x vs0, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x1_I1', `
- #else
- .macro KERNEL1x1_I1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x1_1', `
- #else
- .macro KERNEL1x1_1
- #endif
-
- lxvd2x vs8, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
- lxvdsx vs20, o0, BO // load real part from B
- lxvdsx vs21, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x1_2', `
- #else
- .macro KERNEL1x1_2
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x1_E2', `
- #else
- .macro KERNEL1x1_E2
- #endif
-
-
- xvmaddadp vs32, vs8, vs20 // real*real, imag*real
- xvmaddadp vs33, vs8, vs21 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x1_SUBI1', `
- #else
- .macro KERNEL1x1_SUBI1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmuldp vs32, vs0, vs16 // real*real, imag*real
- xvmuldp vs33, vs0, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`KERNEL1x1_SUB1', `
- #else
- .macro KERNEL1x1_SUB1
- #endif
-
- lxvd2x vs0, o0, AO // load real,imag from A
-
- addi AO, AO, 16
-
- lxvdsx vs16, o0, BO // load real part from B
- lxvdsx vs17, o8, BO // load imag part from B
-
- addi BO, BO, 16
-
- xvmaddadp vs32, vs0, vs16 // real*real, imag*real
- xvmaddadp vs33, vs0, vs17 // real*imag, imag*imag
-
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
- #if defined(_AIX)
- define(`SAVE1x1', `
- #else
- .macro SAVE1x1
- #endif
-
-
- mr T1, CO
-
- #ifndef TRMMKERNEL
-
- lxvd2x vs16, o0, T1
-
- #endif
-
-
- xxlxor vs0, vs0, vs0
- xxlxor vs1, vs1, vs1
- XXSWAPD(vs33,vs33) // realA*imagB, imagA*imagB -> imagA*imagB, realA*imagB
-
- XSFADD_R1 vs0, vs0, vs32 // realA*realB
- XSFADD_R2 vs0, vs0, vs33 // imagA*imagB
-
- XXSWAPD(vs32,vs32) // realA*realB, imagA*realB -> imagA*realB, realA*realB
- XXSWAPD(vs33,vs33) // imagA*imagB, realA*imagB -> realA*imagB, imagA*imagB
-
- XSFADD_I1 vs1, vs1, vs32 // imagA*realB
- XSFADD_I2 vs1, vs1, vs33 // realA*imagB
-
- xsmuldp vs4, vs0, alpha_r // real*alpha_r
- xsmuldp vs5, vs1, alpha_i // imag*alpha_i
- xsmuldp vs6, vs0, alpha_i // real*alpha_i
- xsmuldp vs7, vs1, alpha_r // imag*alpha_r
-
- xssubdp vs2, vs4, vs5 // real*alpha_r - imag*alpha_i
- xsadddp vs3, vs6, vs7 // real*alpha_i + imag*alpha_r
- xxpermdi vs8, vs2, vs3, 0 // merge real and imag part
-
-
- #ifndef TRMMKERNEL
-
- xvadddp vs8, vs8, vs16
-
- #endif
-
- stxvd2x vs8, o0, T1
-
- add T1, T1, LDC
- addi CO, CO, 16
-
- #if defined(_AIX)
- ')
- #else
- .endm
- #endif
-
|