You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

zgejsv.c 123 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
7327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487348834893490349134923493349434953496349734983499350035013502350335043505350635073508350935103511351235133514351535163517351835193520352135223523352435253526352735283529353035313532353335343535353635373538353935403541354235433544354535463547354835493550355135523553355435553556355735583559
  1. #include <math.h>
  2. #include <stdlib.h>
  3. #include <string.h>
  4. #include <stdio.h>
  5. #include <complex.h>
  6. #ifdef complex
  7. #undef complex
  8. #endif
  9. #ifdef I
  10. #undef I
  11. #endif
  12. #if defined(_WIN64)
  13. typedef long long BLASLONG;
  14. typedef unsigned long long BLASULONG;
  15. #else
  16. typedef long BLASLONG;
  17. typedef unsigned long BLASULONG;
  18. #endif
  19. #ifdef LAPACK_ILP64
  20. typedef BLASLONG blasint;
  21. #if defined(_WIN64)
  22. #define blasabs(x) llabs(x)
  23. #else
  24. #define blasabs(x) labs(x)
  25. #endif
  26. #else
  27. typedef int blasint;
  28. #define blasabs(x) abs(x)
  29. #endif
  30. typedef blasint integer;
  31. typedef unsigned int uinteger;
  32. typedef char *address;
  33. typedef short int shortint;
  34. typedef float real;
  35. typedef double doublereal;
  36. typedef struct { real r, i; } complex;
  37. typedef struct { doublereal r, i; } doublecomplex;
  38. #ifdef _MSC_VER
  39. static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
  40. static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
  41. static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
  42. static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
  43. #else
  44. static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
  45. static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
  46. static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
  47. static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
  48. #endif
  49. #define pCf(z) (*_pCf(z))
  50. #define pCd(z) (*_pCd(z))
  51. typedef blasint logical;
  52. typedef char logical1;
  53. typedef char integer1;
  54. #define TRUE_ (1)
  55. #define FALSE_ (0)
  56. /* Extern is for use with -E */
  57. #ifndef Extern
  58. #define Extern extern
  59. #endif
  60. /* I/O stuff */
  61. typedef int flag;
  62. typedef int ftnlen;
  63. typedef int ftnint;
  64. /*external read, write*/
  65. typedef struct
  66. { flag cierr;
  67. ftnint ciunit;
  68. flag ciend;
  69. char *cifmt;
  70. ftnint cirec;
  71. } cilist;
  72. /*internal read, write*/
  73. typedef struct
  74. { flag icierr;
  75. char *iciunit;
  76. flag iciend;
  77. char *icifmt;
  78. ftnint icirlen;
  79. ftnint icirnum;
  80. } icilist;
  81. /*open*/
  82. typedef struct
  83. { flag oerr;
  84. ftnint ounit;
  85. char *ofnm;
  86. ftnlen ofnmlen;
  87. char *osta;
  88. char *oacc;
  89. char *ofm;
  90. ftnint orl;
  91. char *oblnk;
  92. } olist;
  93. /*close*/
  94. typedef struct
  95. { flag cerr;
  96. ftnint cunit;
  97. char *csta;
  98. } cllist;
  99. /*rewind, backspace, endfile*/
  100. typedef struct
  101. { flag aerr;
  102. ftnint aunit;
  103. } alist;
  104. /* inquire */
  105. typedef struct
  106. { flag inerr;
  107. ftnint inunit;
  108. char *infile;
  109. ftnlen infilen;
  110. ftnint *inex; /*parameters in standard's order*/
  111. ftnint *inopen;
  112. ftnint *innum;
  113. ftnint *innamed;
  114. char *inname;
  115. ftnlen innamlen;
  116. char *inacc;
  117. ftnlen inacclen;
  118. char *inseq;
  119. ftnlen inseqlen;
  120. char *indir;
  121. ftnlen indirlen;
  122. char *infmt;
  123. ftnlen infmtlen;
  124. char *inform;
  125. ftnint informlen;
  126. char *inunf;
  127. ftnlen inunflen;
  128. ftnint *inrecl;
  129. ftnint *innrec;
  130. char *inblank;
  131. ftnlen inblanklen;
  132. } inlist;
  133. #define VOID void
  134. union Multitype { /* for multiple entry points */
  135. integer1 g;
  136. shortint h;
  137. integer i;
  138. /* longint j; */
  139. real r;
  140. doublereal d;
  141. complex c;
  142. doublecomplex z;
  143. };
  144. typedef union Multitype Multitype;
  145. struct Vardesc { /* for Namelist */
  146. char *name;
  147. char *addr;
  148. ftnlen *dims;
  149. int type;
  150. };
  151. typedef struct Vardesc Vardesc;
  152. struct Namelist {
  153. char *name;
  154. Vardesc **vars;
  155. int nvars;
  156. };
  157. typedef struct Namelist Namelist;
  158. #define abs(x) ((x) >= 0 ? (x) : -(x))
  159. #define dabs(x) (fabs(x))
  160. #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
  161. #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
  162. #define dmin(a,b) (f2cmin(a,b))
  163. #define dmax(a,b) (f2cmax(a,b))
  164. #define bit_test(a,b) ((a) >> (b) & 1)
  165. #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
  166. #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
  167. #define abort_() { sig_die("Fortran abort routine called", 1); }
  168. #define c_abs(z) (cabsf(Cf(z)))
  169. #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
  170. #ifdef _MSC_VER
  171. #define c_div(c, a, b) {Cf(c)._Val[0] = (Cf(a)._Val[0]/Cf(b)._Val[0]); Cf(c)._Val[1]=(Cf(a)._Val[1]/Cf(b)._Val[1]);}
  172. #define z_div(c, a, b) {Cd(c)._Val[0] = (Cd(a)._Val[0]/Cd(b)._Val[0]); Cd(c)._Val[1]=(Cd(a)._Val[1]/Cd(b)._Val[1]);}
  173. #else
  174. #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
  175. #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
  176. #endif
  177. #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
  178. #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
  179. #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
  180. //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
  181. #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
  182. #define d_abs(x) (fabs(*(x)))
  183. #define d_acos(x) (acos(*(x)))
  184. #define d_asin(x) (asin(*(x)))
  185. #define d_atan(x) (atan(*(x)))
  186. #define d_atn2(x, y) (atan2(*(x),*(y)))
  187. #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
  188. #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
  189. #define d_cos(x) (cos(*(x)))
  190. #define d_cosh(x) (cosh(*(x)))
  191. #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
  192. #define d_exp(x) (exp(*(x)))
  193. #define d_imag(z) (cimag(Cd(z)))
  194. #define r_imag(z) (cimagf(Cf(z)))
  195. #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  196. #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  197. #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  198. #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  199. #define d_log(x) (log(*(x)))
  200. #define d_mod(x, y) (fmod(*(x), *(y)))
  201. #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
  202. #define d_nint(x) u_nint(*(x))
  203. #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
  204. #define d_sign(a,b) u_sign(*(a),*(b))
  205. #define r_sign(a,b) u_sign(*(a),*(b))
  206. #define d_sin(x) (sin(*(x)))
  207. #define d_sinh(x) (sinh(*(x)))
  208. #define d_sqrt(x) (sqrt(*(x)))
  209. #define d_tan(x) (tan(*(x)))
  210. #define d_tanh(x) (tanh(*(x)))
  211. #define i_abs(x) abs(*(x))
  212. #define i_dnnt(x) ((integer)u_nint(*(x)))
  213. #define i_len(s, n) (n)
  214. #define i_nint(x) ((integer)u_nint(*(x)))
  215. #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
  216. #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
  217. #define pow_si(B,E) spow_ui(*(B),*(E))
  218. #define pow_ri(B,E) spow_ui(*(B),*(E))
  219. #define pow_di(B,E) dpow_ui(*(B),*(E))
  220. #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
  221. #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
  222. #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
  223. #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
  224. #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
  225. #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
  226. #define sig_die(s, kill) { exit(1); }
  227. #define s_stop(s, n) {exit(0);}
  228. static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
  229. #define z_abs(z) (cabs(Cd(z)))
  230. #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
  231. #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
  232. #define myexit_() break;
  233. #define mycycle() continue;
  234. #define myceiling(w) {ceil(w)}
  235. #define myhuge(w) {HUGE_VAL}
  236. //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
  237. #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
  238. /* procedure parameter types for -A and -C++ */
  239. #ifdef __cplusplus
  240. typedef logical (*L_fp)(...);
  241. #else
  242. typedef logical (*L_fp)();
  243. #endif
  244. static float spow_ui(float x, integer n) {
  245. float pow=1.0; unsigned long int u;
  246. if(n != 0) {
  247. if(n < 0) n = -n, x = 1/x;
  248. for(u = n; ; ) {
  249. if(u & 01) pow *= x;
  250. if(u >>= 1) x *= x;
  251. else break;
  252. }
  253. }
  254. return pow;
  255. }
  256. static double dpow_ui(double x, integer n) {
  257. double pow=1.0; unsigned long int u;
  258. if(n != 0) {
  259. if(n < 0) n = -n, x = 1/x;
  260. for(u = n; ; ) {
  261. if(u & 01) pow *= x;
  262. if(u >>= 1) x *= x;
  263. else break;
  264. }
  265. }
  266. return pow;
  267. }
  268. #ifdef _MSC_VER
  269. static _Fcomplex cpow_ui(complex x, integer n) {
  270. complex pow={1.0,0.0}; unsigned long int u;
  271. if(n != 0) {
  272. if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
  273. for(u = n; ; ) {
  274. if(u & 01) pow.r *= x.r, pow.i *= x.i;
  275. if(u >>= 1) x.r *= x.r, x.i *= x.i;
  276. else break;
  277. }
  278. }
  279. _Fcomplex p={pow.r, pow.i};
  280. return p;
  281. }
  282. #else
  283. static _Complex float cpow_ui(_Complex float x, integer n) {
  284. _Complex float pow=1.0; unsigned long int u;
  285. if(n != 0) {
  286. if(n < 0) n = -n, x = 1/x;
  287. for(u = n; ; ) {
  288. if(u & 01) pow *= x;
  289. if(u >>= 1) x *= x;
  290. else break;
  291. }
  292. }
  293. return pow;
  294. }
  295. #endif
  296. #ifdef _MSC_VER
  297. static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
  298. _Dcomplex pow={1.0,0.0}; unsigned long int u;
  299. if(n != 0) {
  300. if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
  301. for(u = n; ; ) {
  302. if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
  303. if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
  304. else break;
  305. }
  306. }
  307. _Dcomplex p = {pow._Val[0], pow._Val[1]};
  308. return p;
  309. }
  310. #else
  311. static _Complex double zpow_ui(_Complex double x, integer n) {
  312. _Complex double pow=1.0; unsigned long int u;
  313. if(n != 0) {
  314. if(n < 0) n = -n, x = 1/x;
  315. for(u = n; ; ) {
  316. if(u & 01) pow *= x;
  317. if(u >>= 1) x *= x;
  318. else break;
  319. }
  320. }
  321. return pow;
  322. }
  323. #endif
/* Integer x**n with Fortran POW_II semantics.  Edge cases for n <= 0:
 * n == 0 or x == 1 yields 1; |x| > 1 with negative n truncates to 0;
 * x == 0 with negative n deliberately evaluates 1/0 (f2c's divide-by-zero
 * trap); x == -1 negates n and falls through to the squaring loop so the
 * sign of the result follows the parity of n. */
  324. static integer pow_ii(integer x, integer n) {
  325. integer pow; unsigned long int u;
  326. if (n <= 0) {
  327. if (n == 0 || x == 1) pow = 1;
/* |x| >= 2: integer truncation of x**n is 0; x == 0 triggers 1/0 on purpose */
  328. else if (x != -1) pow = x == 0 ? 1/x : 0;
/* x == -1: make the exponent positive and use the loop below */
  329. else n = -n;
  330. }
/* Loop runs when n was positive originally, or was negated above (x == -1);
 * the second clause only re-admits the x == -1 fall-through case. */
  331. if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
  332. u = n;
/* binary (square-and-multiply) exponentiation */
  333. for(pow = 1; ; ) {
  334. if(u & 01) pow *= x;
  335. if(u >>= 1) x *= x;
  336. else break;
  337. }
  338. }
  339. return pow;
  340. }
  341. static integer dmaxloc_(double *w, integer s, integer e, integer *n)
  342. {
  343. double m; integer i, mi;
  344. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  345. if (w[i-1]>m) mi=i ,m=w[i-1];
  346. return mi-s+1;
  347. }
  348. static integer smaxloc_(float *w, integer s, integer e, integer *n)
  349. {
  350. float m; integer i, mi;
  351. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  352. if (w[i-1]>m) mi=i ,m=w[i-1];
  353. return mi-s+1;
  354. }
  355. static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  356. integer n = *n_, incx = *incx_, incy = *incy_, i;
  357. #ifdef _MSC_VER
  358. _Fcomplex zdotc = {0.0, 0.0};
  359. if (incx == 1 && incy == 1) {
  360. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  361. zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
  362. zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
  363. }
  364. } else {
  365. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  366. zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
  367. zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
  368. }
  369. }
  370. pCf(z) = zdotc;
  371. }
  372. #else
  373. _Complex float zdotc = 0.0;
  374. if (incx == 1 && incy == 1) {
  375. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  376. zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
  377. }
  378. } else {
  379. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  380. zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
  381. }
  382. }
  383. pCf(z) = zdotc;
  384. }
  385. #endif
  386. static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  387. integer n = *n_, incx = *incx_, incy = *incy_, i;
  388. #ifdef _MSC_VER
  389. _Dcomplex zdotc = {0.0, 0.0};
  390. if (incx == 1 && incy == 1) {
  391. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  392. zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
  393. zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
  394. }
  395. } else {
  396. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  397. zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
  398. zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
  399. }
  400. }
  401. pCd(z) = zdotc;
  402. }
  403. #else
  404. _Complex double zdotc = 0.0;
  405. if (incx == 1 && incy == 1) {
  406. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  407. zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
  408. }
  409. } else {
  410. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  411. zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
  412. }
  413. }
  414. pCd(z) = zdotc;
  415. }
  416. #endif
  417. static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  418. integer n = *n_, incx = *incx_, incy = *incy_, i;
  419. #ifdef _MSC_VER
  420. _Fcomplex zdotc = {0.0, 0.0};
  421. if (incx == 1 && incy == 1) {
  422. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  423. zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
  424. zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
  425. }
  426. } else {
  427. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  428. zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
  429. zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
  430. }
  431. }
  432. pCf(z) = zdotc;
  433. }
  434. #else
  435. _Complex float zdotc = 0.0;
  436. if (incx == 1 && incy == 1) {
  437. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  438. zdotc += Cf(&x[i]) * Cf(&y[i]);
  439. }
  440. } else {
  441. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  442. zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
  443. }
  444. }
  445. pCf(z) = zdotc;
  446. }
  447. #endif
  448. static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  449. integer n = *n_, incx = *incx_, incy = *incy_, i;
  450. #ifdef _MSC_VER
  451. _Dcomplex zdotc = {0.0, 0.0};
  452. if (incx == 1 && incy == 1) {
  453. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  454. zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
  455. zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
  456. }
  457. } else {
  458. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  459. zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
  460. zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
  461. }
  462. }
  463. pCd(z) = zdotc;
  464. }
  465. #else
  466. _Complex double zdotc = 0.0;
  467. if (incx == 1 && incy == 1) {
  468. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  469. zdotc += Cd(&x[i]) * Cd(&y[i]);
  470. }
  471. } else {
  472. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  473. zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
  474. }
  475. }
  476. pCd(z) = zdotc;
  477. }
  478. #endif
  479. /* -- translated by f2c (version 20000121).
  480. You must link the resulting object file with the libraries:
  481. -lf2c -lm (in that order)
  482. */
  483. /* Table of constant values */
  484. static doublecomplex c_b1 = {0.,0.};
  485. static doublecomplex c_b2 = {1.,0.};
  486. static integer c_n1 = -1;
  487. static integer c__1 = 1;
  488. static integer c__0 = 0;
  489. static doublereal c_b141 = 1.;
  490. static logical c_false = FALSE_;
  491. /* > \brief \b ZGEJSV */
  492. /* =========== DOCUMENTATION =========== */
  493. /* Online html documentation available at */
  494. /* http://www.netlib.org/lapack/explore-html/ */
  495. /* > \htmlonly */
  496. /* > Download ZGEJSV + dependencies */
  497. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/zgejsv.
  498. f"> */
  499. /* > [TGZ]</a> */
  500. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/zgejsv.
  501. f"> */
  502. /* > [ZIP]</a> */
  503. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/zgejsv.
  504. f"> */
  505. /* > [TXT]</a> */
  506. /* > \endhtmlonly */
  507. /* Definition: */
  508. /* =========== */
  509. /* SUBROUTINE ZGEJSV( JOBA, JOBU, JOBV, JOBR, JOBT, JOBP, */
  510. /* M, N, A, LDA, SVA, U, LDU, V, LDV, */
  511. /* CWORK, LWORK, RWORK, LRWORK, IWORK, INFO ) */
  512. /* IMPLICIT NONE */
  513. /* INTEGER INFO, LDA, LDU, LDV, LWORK, M, N */
  514. /* COMPLEX*16 A( LDA, * ), U( LDU, * ), V( LDV, * ), CWORK( LWORK ) */
  515. /* DOUBLE PRECISION SVA( N ), RWORK( LRWORK ) */
  516. /* INTEGER IWORK( * ) */
  517. /* CHARACTER*1 JOBA, JOBP, JOBR, JOBT, JOBU, JOBV */
  518. /* > \par Purpose: */
  519. /* ============= */
  520. /* > */
  521. /* > \verbatim */
  522. /* > */
  523. /* > ZGEJSV computes the singular value decomposition (SVD) of a complex M-by-N */
  524. /* > matrix [A], where M >= N. The SVD of [A] is written as */
  525. /* > */
  526. /* > [A] = [U] * [SIGMA] * [V]^*, */
  527. /* > */
  528. /* > where [SIGMA] is an N-by-N (M-by-N) matrix which is zero except for its N */
  529. /* > diagonal elements, [U] is an M-by-N (or M-by-M) unitary matrix, and */
  530. /* > [V] is an N-by-N unitary matrix. The diagonal elements of [SIGMA] are */
  531. /* > the singular values of [A]. The columns of [U] and [V] are the left and */
  532. /* > the right singular vectors of [A], respectively. The matrices [U] and [V] */
  533. /* > are computed and stored in the arrays U and V, respectively. The diagonal */
  534. /* > of [SIGMA] is computed and stored in the array SVA. */
  535. /* > \endverbatim */
  536. /* > */
  537. /* > Arguments: */
  538. /* > ========== */
  539. /* > */
  540. /* > \param[in] JOBA */
  541. /* > \verbatim */
  542. /* > JOBA is CHARACTER*1 */
  543. /* > Specifies the level of accuracy: */
  544. /* > = 'C': This option works well (high relative accuracy) if A = B * D, */
  545. /* > with well-conditioned B and arbitrary diagonal matrix D. */
  546. /* > The accuracy cannot be spoiled by COLUMN scaling. The */
  547. /* > accuracy of the computed output depends on the condition of */
  548. /* > B, and the procedure aims at the best theoretical accuracy. */
  549. /* > The relative error max_{i=1:N}|d sigma_i| / sigma_i is */
  550. /* > bounded by f(M,N)*epsilon* cond(B), independent of D. */
  551. /* > The input matrix is preprocessed with the QRF with column */
  552. /* > pivoting. This initial preprocessing and preconditioning by */
  553. /* > a rank revealing QR factorization is common for all values of */
  554. /* > JOBA. Additional actions are specified as follows: */
  555. /* > = 'E': Computation as with 'C' with an additional estimate of the */
  556. /* > condition number of B. It provides a realistic error bound. */
  557. /* > = 'F': If A = D1 * C * D2 with ill-conditioned diagonal scalings */
  558. /* > D1, D2, and well-conditioned matrix C, this option gives */
  559. /* > higher accuracy than the 'C' option. If the structure of the */
  560. /* > input matrix is not known, and relative accuracy is */
  561. /* > desirable, then this option is advisable. The input matrix A */
  562. /* > is preprocessed with QR factorization with FULL (row and */
  563. /* > column) pivoting. */
  564. /* > = 'G': Computation as with 'F' with an additional estimate of the */
  565. /* > condition number of B, where A=B*D. If A has heavily weighted */
  566. /* > rows, then using this condition number gives too pessimistic */
  567. /* > error bound. */
  568. /* > = 'A': Small singular values are not well determined by the data */
  569. /* > and are considered as noisy; the matrix is treated as */
  570. /* > numerically rank deficient. The error in the computed */
  571. /* > singular values is bounded by f(m,n)*epsilon*||A||. */
  572. /* > The computed SVD A = U * S * V^* restores A up to */
  573. /* > f(m,n)*epsilon*||A||. */
  574. /* > This gives the procedure the licence to discard (set to zero) */
  575. /* > all singular values below N*epsilon*||A||. */
  576. /* > = 'R': Similar as in 'A'. Rank revealing property of the initial */
  577. /* > QR factorization is used to reveal (using triangular factor) */
  578. /* > a gap sigma_{r+1} < epsilon * sigma_r in which case the */
  579. /* > numerical RANK is declared to be r. The SVD is computed with */
  580. /* > absolute error bounds, but more accurately than with 'A'. */
  581. /* > \endverbatim */
  582. /* > */
  583. /* > \param[in] JOBU */
  584. /* > \verbatim */
  585. /* > JOBU is CHARACTER*1 */
  586. /* > Specifies whether to compute the columns of U: */
  587. /* > = 'U': N columns of U are returned in the array U. */
  588. /* > = 'F': full set of M left sing. vectors is returned in the array U. */
  589. /* > = 'W': U may be used as workspace of length M*N. See the description */
  590. /* > of U. */
  591. /* > = 'N': U is not computed. */
  592. /* > \endverbatim */
  593. /* > */
  594. /* > \param[in] JOBV */
  595. /* > \verbatim */
  596. /* > JOBV is CHARACTER*1 */
  597. /* > Specifies whether to compute the matrix V: */
  598. /* > = 'V': N columns of V are returned in the array V; Jacobi rotations */
  599. /* > are not explicitly accumulated. */
  600. /* > = 'J': N columns of V are returned in the array V, but they are */
  601. /* > computed as the product of Jacobi rotations, if JOBT = 'N'. */
  602. /* > = 'W': V may be used as workspace of length N*N. See the description */
  603. /* > of V. */
  604. /* > = 'N': V is not computed. */
  605. /* > \endverbatim */
  606. /* > */
  607. /* > \param[in] JOBR */
  608. /* > \verbatim */
  609. /* > JOBR is CHARACTER*1 */
  610. /* > Specifies the RANGE for the singular values. Issues the licence to */
  611. /* > set to zero small positive singular values if they are outside */
  612. /* > specified range. If A .NE. 0 is scaled so that the largest singular */
  613. /* > value of c*A is around SQRT(BIG), BIG=DLAMCH('O'), then JOBR issues */
  614. /* > the licence to kill columns of A whose norm in c*A is less than */
  615. /* > SQRT(SFMIN) (for JOBR = 'R'), or less than SMALL=SFMIN/EPSLN, */
  616. /* > where SFMIN=DLAMCH('S'), EPSLN=DLAMCH('E'). */
  617. /* > = 'N': Do not kill small columns of c*A. This option assumes that */
  618. /* > BLAS and QR factorizations and triangular solvers are */
  619. /* > implemented to work in that range. If the condition of A */
  620. /* > is greater than BIG, use ZGESVJ. */
  621. /* > = 'R': RESTRICTED range for sigma(c*A) is [SQRT(SFMIN), SQRT(BIG)] */
  622. /* > (roughly, as described above). This option is recommended. */
  623. /* > =========================== */
  624. /* > For computing the singular values in the FULL range [SFMIN,BIG] */
  625. /* > use ZGESVJ. */
  626. /* > \endverbatim */
  627. /* > */
  628. /* > \param[in] JOBT */
  629. /* > \verbatim */
  630. /* > JOBT is CHARACTER*1 */
  631. /* > If the matrix is square then the procedure may determine to use */
  632. /* > transposed A if A^* seems to be better with respect to convergence. */
  633. /* > If the matrix is not square, JOBT is ignored. */
  634. /* > The decision is based on two values of entropy over the adjoint */
  635. /* > orbit of A^* * A. See the descriptions of WORK(6) and WORK(7). */
  636. /* > = 'T': transpose if entropy test indicates possibly faster */
  637. /* > convergence of Jacobi process if A^* is taken as input. If A is */
  638. /* > replaced with A^*, then the row pivoting is included automatically. */
  639. /* > = 'N': do not speculate. */
  640. /* > The option 'T' can be used to compute only the singular values, or */
  641. /* > the full SVD (U, SIGMA and V). For only one set of singular vectors */
  642. /* > (U or V), the caller should provide both U and V, as one of the */
  643. /* > matrices is used as workspace if the matrix A is transposed. */
  644. /* > The implementer can easily remove this constraint and make the */
  645. /* > code more complicated. See the descriptions of U and V. */
  646. /* > In general, this option is considered experimental, and 'N'; should */
  647. /* > be preferred. This is subject to changes in the future. */
  648. /* > \endverbatim */
  649. /* > */
  650. /* > \param[in] JOBP */
  651. /* > \verbatim */
  652. /* > JOBP is CHARACTER*1 */
  653. /* > Issues the licence to introduce structured perturbations to drown */
  654. /* > denormalized numbers. This licence should be active if the */
  655. /* > denormals are poorly implemented, causing slow computation, */
  656. /* > especially in cases of fast convergence (!). For details see [1,2]. */
  657. /* > For the sake of simplicity, these perturbations are included only */
  658. /* > when the full SVD or only the singular values are requested. The */
  659. /* > implementer/user can easily add the perturbation for the cases of */
  660. /* > computing one set of singular vectors. */
  661. /* > = 'P': introduce perturbation */
  662. /* > = 'N': do not perturb */
  663. /* > \endverbatim */
  664. /* > */
  665. /* > \param[in] M */
  666. /* > \verbatim */
  667. /* > M is INTEGER */
  668. /* > The number of rows of the input matrix A. M >= 0. */
  669. /* > \endverbatim */
  670. /* > */
  671. /* > \param[in] N */
  672. /* > \verbatim */
  673. /* > N is INTEGER */
  674. /* > The number of columns of the input matrix A. M >= N >= 0. */
  675. /* > \endverbatim */
  676. /* > */
  677. /* > \param[in,out] A */
  678. /* > \verbatim */
  679. /* > A is COMPLEX*16 array, dimension (LDA,N) */
  680. /* > On entry, the M-by-N matrix A. */
  681. /* > \endverbatim */
  682. /* > */
  683. /* > \param[in] LDA */
  684. /* > \verbatim */
  685. /* > LDA is INTEGER */
  686. /* > The leading dimension of the array A. LDA >= f2cmax(1,M). */
  687. /* > \endverbatim */
  688. /* > */
  689. /* > \param[out] SVA */
  690. /* > \verbatim */
  691. /* > SVA is DOUBLE PRECISION array, dimension (N) */
  692. /* > On exit, */
  693. /* > - For WORK(1)/WORK(2) = ONE: The singular values of A. During the */
  694. /* > computation SVA contains Euclidean column norms of the */
  695. /* > iterated matrices in the array A. */
  696. /* > - For WORK(1) .NE. WORK(2): The singular values of A are */
  697. /* > (WORK(1)/WORK(2)) * SVA(1:N). This factored form is used if */
  698. /* > sigma_max(A) overflows or if small singular values have been */
  699. /* > saved from underflow by scaling the input matrix A. */
  700. /* > - If JOBR='R' then some of the singular values may be returned */
  701. /* > as exact zeros obtained by "set to zero" because they are */
  702. /* > below the numerical rank threshold or are denormalized numbers. */
  703. /* > \endverbatim */
  704. /* > */
  705. /* > \param[out] U */
  706. /* > \verbatim */
  707. /* > U is COMPLEX*16 array, dimension ( LDU, N ) */
  708. /* > If JOBU = 'U', then U contains on exit the M-by-N matrix of */
  709. /* > the left singular vectors. */
  710. /* > If JOBU = 'F', then U contains on exit the M-by-M matrix of */
  711. /* > the left singular vectors, including an ONB */
  712. /* > of the orthogonal complement of the Range(A). */
  713. /* > If JOBU = 'W' .AND. (JOBV = 'V' .AND. JOBT = 'T' .AND. M = N), */
  714. /* > then U is used as workspace if the procedure */
  715. /* > replaces A with A^*. In that case, [V] is computed */
  716. /* > in U as left singular vectors of A^* and then */
  717. /* > copied back to the V array. This 'W' option is just */
  718. /* > a reminder to the caller that in this case U is */
  719. /* > reserved as workspace of length N*N. */
  720. /* > If JOBU = 'N' U is not referenced, unless JOBT='T'. */
  721. /* > \endverbatim */
  722. /* > */
  723. /* > \param[in] LDU */
  724. /* > \verbatim */
  725. /* > LDU is INTEGER */
  726. /* > The leading dimension of the array U, LDU >= 1. */
  727. /* > IF JOBU = 'U' or 'F' or 'W', then LDU >= M. */
  728. /* > \endverbatim */
  729. /* > */
  730. /* > \param[out] V */
  731. /* > \verbatim */
  732. /* > V is COMPLEX*16 array, dimension ( LDV, N ) */
  733. /* > If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of */
  734. /* > the right singular vectors; */
  735. /* > If JOBV = 'W', AND (JOBU = 'U' AND JOBT = 'T' AND M = N), */
  736. /* > then V is used as workspace if the procedure */
  737. /* > replaces A with A^*. In that case, [U] is computed */
  738. /* > in V as right singular vectors of A^* and then */
  739. /* > copied back to the U array. This 'W' option is just */
  740. /* > a reminder to the caller that in this case V is */
  741. /* > reserved as workspace of length N*N. */
  742. /* > If JOBV = 'N' V is not referenced, unless JOBT='T'. */
  743. /* > \endverbatim */
  744. /* > */
  745. /* > \param[in] LDV */
  746. /* > \verbatim */
  747. /* > LDV is INTEGER */
  748. /* > The leading dimension of the array V, LDV >= 1. */
  749. /* > If JOBV = 'V' or 'J' or 'W', then LDV >= N. */
  750. /* > \endverbatim */
  751. /* > */
  752. /* > \param[out] CWORK */
  753. /* > \verbatim */
  754. /* > CWORK is COMPLEX*16 array, dimension (MAX(2,LWORK)) */
  755. /* > If the call to ZGEJSV is a workspace query (indicated by LWORK=-1 or */
  756. /* > LRWORK=-1), then on exit CWORK(1) contains the required length of */
  757. /* > CWORK for the job parameters used in the call. */
  758. /* > \endverbatim */
  759. /* > */
  760. /* > \param[in] LWORK */
  761. /* > \verbatim */
  762. /* > LWORK is INTEGER */
  763. /* > Length of CWORK to confirm proper allocation of workspace. */
  764. /* > LWORK depends on the job: */
  765. /* > */
  766. /* > 1. If only SIGMA is needed ( JOBU = 'N', JOBV = 'N' ) and */
  767. /* > 1.1 .. no scaled condition estimate required (JOBA.NE.'E'.AND.JOBA.NE.'G'): */
  768. /* > LWORK >= 2*N+1. This is the minimal requirement. */
  769. /* > ->> For optimal performance (blocked code) the optimal value */
  770. /* > is LWORK >= N + (N+1)*NB. Here NB is the optimal */
  771. /* > block size for ZGEQP3 and ZGEQRF. */
  772. /* > In general, optimal LWORK is computed as */
  773. /* > LWORK >= f2cmax(N+LWORK(ZGEQP3),N+LWORK(ZGEQRF), LWORK(ZGESVJ)). */
  774. /* > 1.2. .. an estimate of the scaled condition number of A is */
  775. /* > required (JOBA='E', or 'G'). In this case, LWORK the minimal */
  776. /* > requirement is LWORK >= N*N + 2*N. */
  777. /* > ->> For optimal performance (blocked code) the optimal value */
  778. /* > is LWORK >= f2cmax(N+(N+1)*NB, N*N+2*N)=N**2+2*N. */
  779. /* > In general, the optimal length LWORK is computed as */
  780. /* > LWORK >= f2cmax(N+LWORK(ZGEQP3),N+LWORK(ZGEQRF), LWORK(ZGESVJ), */
  781. /* > N*N+LWORK(ZPOCON)). */
  782. /* > 2. If SIGMA and the right singular vectors are needed (JOBV = 'V'), */
  783. /* > (JOBU = 'N') */
  784. /* > 2.1 .. no scaled condition estimate requested (JOBE = 'N'): */
  785. /* > -> the minimal requirement is LWORK >= 3*N. */
  786. /* > -> For optimal performance, */
  787. /* > LWORK >= f2cmax(N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, */
  788. /* > where NB is the optimal block size for ZGEQP3, ZGEQRF, ZGELQ, */
  789. /* > ZUNMLQ. In general, the optimal length LWORK is computed as */
  790. /* > LWORK >= f2cmax(N+LWORK(ZGEQP3), N+LWORK(ZGESVJ), */
  791. /* > N+LWORK(ZGELQF), 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMLQ)). */
  792. /* > 2.2 .. an estimate of the scaled condition number of A is */
  793. /* > required (JOBA='E', or 'G'). */
  794. /* > -> the minimal requirement is LWORK >= 3*N. */
  795. /* > -> For optimal performance, */
  796. /* > LWORK >= f2cmax(N+(N+1)*NB, 2*N,2*N+N*NB)=2*N+N*NB, */
  797. /* > where NB is the optimal block size for ZGEQP3, ZGEQRF, ZGELQ, */
  798. /* > ZUNMLQ. In general, the optimal length LWORK is computed as */
  799. /* > LWORK >= f2cmax(N+LWORK(ZGEQP3), LWORK(ZPOCON), N+LWORK(ZGESVJ), */
  800. /* > N+LWORK(ZGELQF), 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMLQ)). */
  801. /* > 3. If SIGMA and the left singular vectors are needed */
  802. /* > 3.1 .. no scaled condition estimate requested (JOBE = 'N'): */
  803. /* > -> the minimal requirement is LWORK >= 3*N. */
  804. /* > -> For optimal performance: */
  805. /* > if JOBU = 'U' :: LWORK >= f2cmax(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, */
  806. /* > where NB is the optimal block size for ZGEQP3, ZGEQRF, ZUNMQR. */
  807. /* > In general, the optimal length LWORK is computed as */
  808. /* > LWORK >= f2cmax(N+LWORK(ZGEQP3), 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMQR)). */
  809. /* > 3.2 .. an estimate of the scaled condition number of A is */
  810. /* > required (JOBA='E', or 'G'). */
  811. /* > -> the minimal requirement is LWORK >= 3*N. */
  812. /* > -> For optimal performance: */
  813. /* > if JOBU = 'U' :: LWORK >= f2cmax(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, */
  814. /* > where NB is the optimal block size for ZGEQP3, ZGEQRF, ZUNMQR. */
  815. /* > In general, the optimal length LWORK is computed as */
  816. /* > LWORK >= f2cmax(N+LWORK(ZGEQP3),N+LWORK(ZPOCON), */
  817. /* > 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMQR)). */
  818. /* > 4. If the full SVD is needed: (JOBU = 'U' or JOBU = 'F') and */
  819. /* > 4.1. if JOBV = 'V' */
  820. /* > the minimal requirement is LWORK >= 5*N+2*N*N. */
  821. /* > 4.2. if JOBV = 'J' the minimal requirement is */
  822. /* > LWORK >= 4*N+N*N. */
  823. /* > In both cases, the allocated CWORK can accommodate blocked runs */
  824. /* > of ZGEQP3, ZGEQRF, ZGELQF, ZUNMQR, ZUNMLQ. */
  825. /* > */
  826. /* > If the call to ZGEJSV is a workspace query (indicated by LWORK=-1 or */
  827. /* > LRWORK=-1), then on exit CWORK(1) contains the optimal and CWORK(2) contains the */
  828. /* > minimal length of CWORK for the job parameters used in the call. */
  829. /* > \endverbatim */
  830. /* > */
  831. /* > \param[out] RWORK */
  832. /* > \verbatim */
  833. /* > RWORK is DOUBLE PRECISION array, dimension (MAX(7,LWORK)) */
  834. /* > On exit, */
  835. /* > RWORK(1) = Determines the scaling factor SCALE = RWORK(2) / RWORK(1) */
  836. /* > such that SCALE*SVA(1:N) are the computed singular values */
  837. /* > of A. (See the description of SVA().) */
  838. /* > RWORK(2) = See the description of RWORK(1). */
  839. /* > RWORK(3) = SCONDA is an estimate for the condition number of */
  840. /* > column equilibrated A. (If JOBA = 'E' or 'G') */
  841. /* > SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1). */
  842. /* > It is computed using SPOCON. It holds */
  843. /* > N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
  844. /* > where R is the triangular factor from the QRF of A. */
  845. /* > However, if R is truncated and the numerical rank is */
  846. /* > determined to be strictly smaller than N, SCONDA is */
  847. /* > returned as -1, thus indicating that the smallest */
  848. /* > singular values might be lost. */
  849. /* > */
  850. /* > If full SVD is needed, the following two condition numbers are */
  851. /* > useful for the analysis of the algorithm. They are provided for */
  852. /* > a developer/implementer who is familiar with the details of */
  853. /* > the method. */
  854. /* > */
  855. /* > RWORK(4) = an estimate of the scaled condition number of the */
  856. /* > triangular factor in the first QR factorization. */
  857. /* > RWORK(5) = an estimate of the scaled condition number of the */
  858. /* > triangular factor in the second QR factorization. */
  859. /* > The following two parameters are computed if JOBT = 'T'. */
  860. /* > They are provided for a developer/implementer who is familiar */
  861. /* > with the details of the method. */
  862. /* > RWORK(6) = the entropy of A^* * A :: this is the Shannon entropy */
  863. /* > of diag(A^* * A) / Trace(A^* * A) taken as point in the */
  864. /* > probability simplex. */
  865. /* > RWORK(7) = the entropy of A * A^*. (See the description of RWORK(6).) */
  866. /* > If the call to ZGEJSV is a workspace query (indicated by LWORK=-1 or */
  867. /* > LRWORK=-1), then on exit RWORK(1) contains the required length of */
  868. /* > RWORK for the job parameters used in the call. */
  869. /* > \endverbatim */
  870. /* > */
  871. /* > \param[in] LRWORK */
  872. /* > \verbatim */
  873. /* > LRWORK is INTEGER */
  874. /* > Length of RWORK to confirm proper allocation of workspace. */
  875. /* > LRWORK depends on the job: */
  876. /* > */
  877. /* > 1. If only the singular values are requested i.e. if */
  878. /* > LSAME(JOBU,'N') .AND. LSAME(JOBV,'N') */
  879. /* > then: */
  880. /* > 1.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
  881. /* > then: LRWORK = f2cmax( 7, 2 * M ). */
  882. /* > 1.2. Otherwise, LRWORK = f2cmax( 7, N ). */
  883. /* > 2. If singular values with the right singular vectors are requested */
  884. /* > i.e. if */
  885. /* > (LSAME(JOBV,'V').OR.LSAME(JOBV,'J')) .AND. */
  886. /* > .NOT.(LSAME(JOBU,'U').OR.LSAME(JOBU,'F')) */
  887. /* > then: */
  888. /* > 2.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
  889. /* > then LRWORK = f2cmax( 7, 2 * M ). */
  890. /* > 2.2. Otherwise, LRWORK = f2cmax( 7, N ). */
  891. /* > 3. If singular values with the left singular vectors are requested, i.e. if */
  892. /* > (LSAME(JOBU,'U').OR.LSAME(JOBU,'F')) .AND. */
  893. /* > .NOT.(LSAME(JOBV,'V').OR.LSAME(JOBV,'J')) */
  894. /* > then: */
  895. /* > 3.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
  896. /* > then LRWORK = f2cmax( 7, 2 * M ). */
  897. /* > 3.2. Otherwise, LRWORK = f2cmax( 7, N ). */
  898. /* > 4. If singular values with both the left and the right singular vectors */
  899. /* > are requested, i.e. if */
  900. /* > (LSAME(JOBU,'U').OR.LSAME(JOBU,'F')) .AND. */
  901. /* > (LSAME(JOBV,'V').OR.LSAME(JOBV,'J')) */
  902. /* > then: */
  903. /* > 4.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
  904. /* > then LRWORK = f2cmax( 7, 2 * M ). */
  905. /* > 4.2. Otherwise, LRWORK = f2cmax( 7, N ). */
  906. /* > */
  907. /* > If, on entry, LRWORK = -1 or LWORK=-1, a workspace query is assumed and */
  908. /* > the length of RWORK is returned in RWORK(1). */
  909. /* > \endverbatim */
  910. /* > */
  911. /* > \param[out] IWORK */
  912. /* > \verbatim */
  913. /* > IWORK is INTEGER array, of dimension at least 4, that further depends */
  914. /* > on the job: */
  915. /* > */
  916. /* > 1. If only the singular values are requested then: */
  917. /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
  918. /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
  919. /* > 2. If the singular values and the right singular vectors are requested then: */
  920. /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
  921. /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
  922. /* > 3. If the singular values and the left singular vectors are requested then: */
  923. /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
  924. /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
  925. /* > 4. If the singular values with both the left and the right singular vectors */
  926. /* > are requested, then: */
  927. /* > 4.1. If LSAME(JOBV,'J') the length of IWORK is determined as follows: */
  928. /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
  929. /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
  930. /* > 4.2. If LSAME(JOBV,'V') the length of IWORK is determined as follows: */
  931. /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
  932. /* > then the length of IWORK is 2*N+M; otherwise the length of IWORK is 2*N. */
  933. /* > */
  934. /* > On exit, */
  935. /* > IWORK(1) = the numerical rank determined after the initial */
  936. /* > QR factorization with pivoting. See the descriptions */
  937. /* > of JOBA and JOBR. */
  938. /* > IWORK(2) = the number of the computed nonzero singular values */
  939. /* > IWORK(3) = if nonzero, a warning message: */
  940. /* > If IWORK(3) = 1 then some of the column norms of A */
  941. /* > were denormalized floats. The requested high accuracy */
  942. /* > is not warranted by the data. */
  943. /* > IWORK(4) = 1 or -1. If IWORK(4) = 1, then the procedure used A^* to */
  944. /* > do the job as specified by the JOB parameters. */
  945. /* > If the call to ZGEJSV is a workspace query (indicated by LWORK = -1 or */
  946. /* > LRWORK = -1), then on exit IWORK(1) contains the required length of */
  947. /* > IWORK for the job parameters used in the call. */
  948. /* > \endverbatim */
  949. /* > */
  950. /* > \param[out] INFO */
  951. /* > \verbatim */
  952. /* > INFO is INTEGER */
  953. /* > < 0: if INFO = -i, then the i-th argument had an illegal value. */
  954. /* > = 0: successful exit; */
  955. /* > > 0: ZGEJSV did not converge in the maximal allowed number */
  956. /* > of sweeps. The computed values may be inaccurate. */
  957. /* > \endverbatim */
  958. /* Authors: */
  959. /* ======== */
  960. /* > \author Univ. of Tennessee */
  961. /* > \author Univ. of California Berkeley */
  962. /* > \author Univ. of Colorado Denver */
  963. /* > \author NAG Ltd. */
  964. /* > \date June 2016 */
  965. /* > \ingroup complex16GEsing */
  966. /* > \par Further Details: */
  967. /* ===================== */
  968. /* > */
  969. /* > \verbatim */
  970. /* > */
  971. /* > ZGEJSV implements a preconditioned Jacobi SVD algorithm. It uses ZGEQP3, */
  972. /* > ZGEQRF, and ZGELQF as preprocessors and preconditioners. Optionally, an */
  973. /* > additional row pivoting can be used as a preprocessor, which in some */
  974. /* > cases results in much higher accuracy. An example is matrix A with the */
  975. /* > structure A = D1 * C * D2, where D1, D2 are arbitrarily ill-conditioned */
  976. /* > diagonal matrices and C is well-conditioned matrix. In that case, complete */
  977. /* > pivoting in the first QR factorizations provides accuracy dependent on the */
  978. /* > condition number of C, and independent of D1, D2. Such higher accuracy is */
  979. /* > not completely understood theoretically, but it works well in practice. */
  980. /* > Further, if A can be written as A = B*D, with well-conditioned B and some */
  981. /* > diagonal D, then the high accuracy is guaranteed, both theoretically and */
  982. /* > in software, independent of D. For more details see [1], [2]. */
  983. /* > The computational range for the singular values can be the full range */
  984. /* > ( UNDERFLOW,OVERFLOW ), provided that the machine arithmetic and the BLAS */
  985. /* > & LAPACK routines called by ZGEJSV are implemented to work in that range. */
  986. /* > If that is not the case, then the restriction for safe computation with */
  987. /* > the singular values in the range of normalized IEEE numbers is that the */
  988. /* > spectral condition number kappa(A)=sigma_max(A)/sigma_min(A) does not */
  989. /* > overflow. This code (ZGEJSV) is best used in this restricted range, */
  990. /* > meaning that singular values of magnitude below ||A||_2 / DLAMCH('O') are */
  991. /* > returned as zeros. See JOBR for details on this. */
  992. /* > Further, this implementation is somewhat slower than the one described */
  993. /* > in [1,2] due to replacement of some non-LAPACK components, and because */
  994. /* > the choice of some tuning parameters in the iterative part (ZGESVJ) is */
  995. /* > left to the implementer on a particular machine. */
  996. /* > The rank revealing QR factorization (in this code: ZGEQP3) should be */
  997. /* > implemented as in [3]. We have a new version of ZGEQP3 under development */
  998. /* > that is more robust than the current one in LAPACK, with a cleaner cut in */
  999. /* > rank deficient cases. It will be available in the SIGMA library [4]. */
  1000. /* > If M is much larger than N, it is obvious that the initial QRF with */
  1001. /* > column pivoting can be preprocessed by the QRF without pivoting. That */
  1002. /* > well known trick is not used in ZGEJSV because in some cases heavy row */
  1003. /* > weighting can be treated with complete pivoting. The overhead in cases */
  1004. /* > M much larger than N is then only due to pivoting, but the benefits in */
  1005. /* > terms of accuracy have prevailed. The implementer/user can incorporate */
  1006. /* > this extra QRF step easily. The implementer can also improve data movement */
  1007. /* > (matrix transpose, matrix copy, matrix transposed copy) - this */
  1008. /* > implementation of ZGEJSV uses only the simplest, naive data movement. */
  1009. /* > \endverbatim */
  1010. /* > \par Contributor: */
  1011. /* ================== */
  1012. /* > */
  1013. /* > Zlatko Drmac, Department of Mathematics, Faculty of Science, */
  1014. /* > University of Zagreb (Zagreb, Croatia); drmac@math.hr */
  1015. /* > \par References: */
  1016. /* ================ */
  1017. /* > */
  1018. /* > \verbatim */
  1019. /* > */
  1020. /* > [1] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. */
  1021. /* > SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1322-1342. */
  1022. /* > LAPACK Working note 169. */
  1023. /* > [2] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. */
  1024. /* > SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1343-1362. */
  1025. /* > LAPACK Working note 170. */
  1026. /* > [3] Z. Drmac and Z. Bujanovic: On the failure of rank-revealing QR */
  1027. /* > factorization software - a case study. */
  1028. /* > ACM Trans. Math. Softw. Vol. 35, No 2 (2008), pp. 1-28. */
  1029. /* > LAPACK Working note 176. */
  1030. /* > [4] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
  1031. /* > QSVD, (H,K)-SVD computations. */
  1032. /* > Department of Mathematics, University of Zagreb, 2008, 2016. */
  1033. /* > \endverbatim */
  1034. /* > \par Bugs, examples and comments: */
  1035. /* ================================= */
  1036. /* > */
  1037. /* > Please report all bugs and send interesting examples and/or comments to */
  1038. /* > drmac@math.hr. Thank you. */
  1039. /* > */
  1040. /* ===================================================================== */
  1041. /* Subroutine */ void zgejsv_(char *joba, char *jobu, char *jobv, char *jobr,
  1042. char *jobt, char *jobp, integer *m, integer *n, doublecomplex *a,
  1043. integer *lda, doublereal *sva, doublecomplex *u, integer *ldu,
  1044. doublecomplex *v, integer *ldv, doublecomplex *cwork, integer *lwork,
  1045. doublereal *rwork, integer *lrwork, integer *iwork, integer *info)
  1046. {
  1047. /* System generated locals */
  1048. integer a_dim1, a_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
  1049. i__3, i__4, i__5, i__6, i__7, i__8, i__9, i__10, i__11;
  1050. doublereal d__1, d__2, d__3;
  1051. doublecomplex z__1;
  1052. /* Local variables */
  1053. integer lwrk_zgesvj__;
  1054. logical defr;
  1055. doublereal aapp, aaqq;
  1056. integer lwrk_zunmlq__, lwrk_zunmqr__;
  1057. logical kill;
  1058. integer ierr, lwrk_zgeqp3n__;
  1059. doublereal temp1;
  1060. integer lwunmqrm, lwqp3, p, q;
  1061. logical jracc;
  1062. extern /* Subroutine */ void dscal_(integer *, doublereal *, doublereal *,
  1063. integer *);
  1064. integer lwrk_zgesvju__, lwrk_zgesvjv__;
  1065. extern logical lsame_(char *, char *);
  1066. integer lwrk_zunmqrm__;
  1067. doublecomplex ctemp;
  1068. doublereal entra, small;
  1069. integer iwoff;
  1070. doublereal sfmin;
  1071. logical lsvec;
  1072. doublereal epsln;
  1073. logical rsvec;
  1074. integer lwcon, lwlqf, lwqrf, n1;
  1075. extern /* Subroutine */ void zcopy_(integer *, doublecomplex *, integer *,
  1076. doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
  1077. integer *, doublecomplex *, integer *);
  1078. logical l2aber;
  1079. extern /* Subroutine */ void ztrsm_(char *, char *, char *, char *,
  1080. integer *, integer *, doublecomplex *, doublecomplex *, integer *,
  1081. doublecomplex *, integer *);
  1082. doublereal condr1, condr2, uscal1, uscal2;
  1083. logical l2kill, l2rank, l2tran, l2pert;
  1084. extern /* Subroutine */ void zgeqp3_(integer *, integer *, doublecomplex *,
  1085. integer *, integer *, doublecomplex *, doublecomplex *, integer *
  1086. , doublereal *, integer *);
  1087. extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
  1088. integer lrwqp3;
  1089. extern doublereal dlamch_(char *);
  1090. integer nr;
  1091. extern /* Subroutine */ void dlascl_(char *, integer *, integer *,
  1092. doublereal *, doublereal *, integer *, integer *, doublereal *,
  1093. integer *, integer *);
  1094. extern integer idamax_(integer *, doublereal *, integer *);
  1095. doublereal scalem, sconda;
  1096. logical goscal;
  1097. doublereal aatmin, aatmax;
  1098. extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
  1099. logical noscal;
  1100. extern /* Subroutine */ void zdscal_(integer *, doublereal *,
  1101. doublecomplex *, integer *), zlacgv_(integer *, doublecomplex *,
  1102. integer *), dlassq_(integer *, doublereal *, integer *,
  1103. doublereal *, doublereal *);
  1104. extern integer izamax_(integer *, doublecomplex *, integer *);
  1105. extern /* Subroutine */ void zgelqf_(integer *, integer *, doublecomplex *,
  1106. integer *, doublecomplex *, doublecomplex *, integer *, integer *
  1107. ), zlascl_(char *, integer *, integer *, doublereal *, doublereal
  1108. *, integer *, integer *, doublecomplex *, integer *, integer *);
  1109. doublereal entrat;
  1110. logical almort;
  1111. doublecomplex cdummy[1];
  1112. extern /* Subroutine */ void zgeqrf_(integer *, integer *, doublecomplex *,
  1113. integer *, doublecomplex *, doublecomplex *, integer *, integer *
  1114. );
  1115. doublereal maxprj;
  1116. extern /* Subroutine */ void zlacpy_(char *, integer *, integer *,
  1117. doublecomplex *, integer *, doublecomplex *, integer *),
  1118. zlaset_(char *, integer *, integer *, doublecomplex *,
  1119. doublecomplex *, doublecomplex *, integer *);
  1120. logical errest;
  1121. integer lrwcon;
  1122. extern /* Subroutine */ void zlapmr_(logical *, integer *, integer *,
  1123. doublecomplex *, integer *, integer *);
  1124. logical transp;
  1125. integer minwrk, lwsvdj;
  1126. extern /* Subroutine */ void zpocon_(char *, integer *, doublecomplex *,
  1127. integer *, doublereal *, doublereal *, doublecomplex *,
  1128. doublereal *, integer *), zgesvj_(char *, char *, char *,
  1129. integer *, integer *, doublecomplex *, integer *, doublereal *,
  1130. integer *, doublecomplex *, integer *, doublecomplex *, integer *,
  1131. doublereal *, integer *, integer *);
  1132. doublereal rdummy[1];
  1133. extern /* Subroutine */ void zlassq_(integer *, doublecomplex *, integer *,
  1134. doublereal *, doublereal *);
  1135. logical lquery;
  1136. extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *,
  1137. integer *, integer *, integer *, integer *);
  1138. logical rowpiv;
  1139. integer optwrk;
  1140. extern /* Subroutine */ void zungqr_(integer *, integer *, integer *,
  1141. doublecomplex *, integer *, doublecomplex *, doublecomplex *,
  1142. integer *, integer *), zunmlq_(char *, char *, integer *, integer
  1143. *, integer *, doublecomplex *, integer *, doublecomplex *,
  1144. doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *,
  1145. integer *, doublecomplex *, integer *, doublecomplex *,
  1146. doublecomplex *, integer *, doublecomplex *, integer *, integer *);
  1147. doublereal big, cond_ok__, xsc;
  1148. integer lwrk_zgeqp3__;
  1149. doublereal big1;
  1150. integer warning, numrank, miniwrk, minrwrk, lrwsvdj, lwunmlq, lwsvdjv,
  1151. lwunmqr, lwrk_zgelqf__, lwrk_zgeqrf__;
  1152. /* -- LAPACK computational routine (version 3.7.1) -- */
  1153. /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
  1154. /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
  1155. /* June 2017 */
  1156. /* =========================================================================== */
  1157. /* Test the input arguments */
  1158. /* Parameter adjustments */
  1159. --sva;
  1160. a_dim1 = *lda;
  1161. a_offset = 1 + a_dim1 * 1;
  1162. a -= a_offset;
  1163. u_dim1 = *ldu;
  1164. u_offset = 1 + u_dim1 * 1;
  1165. u -= u_offset;
  1166. v_dim1 = *ldv;
  1167. v_offset = 1 + v_dim1 * 1;
  1168. v -= v_offset;
  1169. --cwork;
  1170. --rwork;
  1171. --iwork;
  1172. /* Function Body */
  1173. lsvec = lsame_(jobu, "U") || lsame_(jobu, "F");
  1174. jracc = lsame_(jobv, "J");
  1175. rsvec = lsame_(jobv, "V") || jracc;
  1176. rowpiv = lsame_(joba, "F") || lsame_(joba, "G");
  1177. l2rank = lsame_(joba, "R");
  1178. l2aber = lsame_(joba, "A");
  1179. errest = lsame_(joba, "E") || lsame_(joba, "G");
  1180. l2tran = lsame_(jobt, "T") && *m == *n;
  1181. l2kill = lsame_(jobr, "R");
  1182. defr = lsame_(jobr, "N");
  1183. l2pert = lsame_(jobp, "P");
  1184. lquery = *lwork == -1 || *lrwork == -1;
  1185. if (! (rowpiv || l2rank || l2aber || errest || lsame_(joba, "C"))) {
  1186. *info = -1;
  1187. } else if (! (lsvec || lsame_(jobu, "N") || lsame_(
  1188. jobu, "W") && rsvec && l2tran)) {
  1189. *info = -2;
  1190. } else if (! (rsvec || lsame_(jobv, "N") || lsame_(
  1191. jobv, "W") && lsvec && l2tran)) {
  1192. *info = -3;
  1193. } else if (! (l2kill || defr)) {
  1194. *info = -4;
  1195. } else if (! (lsame_(jobt, "T") || lsame_(jobt,
  1196. "N"))) {
  1197. *info = -5;
  1198. } else if (! (l2pert || lsame_(jobp, "N"))) {
  1199. *info = -6;
  1200. } else if (*m < 0) {
  1201. *info = -7;
  1202. } else if (*n < 0 || *n > *m) {
  1203. *info = -8;
  1204. } else if (*lda < *m) {
  1205. *info = -10;
  1206. } else if (lsvec && *ldu < *m) {
  1207. *info = -13;
  1208. } else if (rsvec && *ldv < *n) {
  1209. *info = -15;
  1210. } else {
  1211. /* #:) */
  1212. *info = 0;
  1213. }
  1214. if (*info == 0) {
  1215. /* [[The expressions for computing the minimal and the optimal */
  1216. /* values of LCWORK, LRWORK are written with a lot of redundancy and */
  1217. /* can be simplified. However, this verbose form is useful for */
  1218. /* maintenance and modifications of the code.]] */
  1219. /* ZGEQRF of an N x N matrix, ZGELQF of an N x N matrix, */
  1220. /* ZUNMLQ for computing N x N matrix, ZUNMQR for computing N x N */
  1221. /* matrix, ZUNMQR for computing M x N matrix, respectively. */
  1222. lwqp3 = *n + 1;
  1223. lwqrf = f2cmax(1,*n);
  1224. lwlqf = f2cmax(1,*n);
  1225. lwunmlq = f2cmax(1,*n);
  1226. lwunmqr = f2cmax(1,*n);
  1227. lwunmqrm = f2cmax(1,*m);
  1228. lwcon = *n << 1;
  1229. /* without and with explicit accumulation of Jacobi rotations */
  1230. /* Computing MAX */
  1231. i__1 = *n << 1;
  1232. lwsvdj = f2cmax(i__1,1);
  1233. /* Computing MAX */
  1234. i__1 = *n << 1;
  1235. lwsvdjv = f2cmax(i__1,1);
  1236. lrwqp3 = *n << 1;
  1237. lrwcon = *n;
  1238. lrwsvdj = *n;
  1239. if (lquery) {
  1240. zgeqp3_(m, n, &a[a_offset], lda, &iwork[1], cdummy, cdummy, &c_n1,
  1241. rdummy, &ierr);
  1242. lwrk_zgeqp3__ = (integer) cdummy[0].r;
  1243. zgeqrf_(n, n, &a[a_offset], lda, cdummy, cdummy, &c_n1, &ierr);
  1244. lwrk_zgeqrf__ = (integer) cdummy[0].r;
  1245. zgelqf_(n, n, &a[a_offset], lda, cdummy, cdummy, &c_n1, &ierr);
  1246. lwrk_zgelqf__ = (integer) cdummy[0].r;
  1247. }
  1248. minwrk = 2;
  1249. optwrk = 2;
  1250. miniwrk = *n;
  1251. if (! (lsvec || rsvec)) {
  1252. /* only the singular values are requested */
  1253. if (errest) {
  1254. /* Computing MAX */
  1255. /* Computing 2nd power */
  1256. i__3 = *n;
  1257. i__1 = *n + lwqp3, i__2 = i__3 * i__3 + lwcon, i__1 = f2cmax(
  1258. i__1,i__2), i__2 = *n + lwqrf, i__1 = f2cmax(i__1,i__2);
  1259. minwrk = f2cmax(i__1,lwsvdj);
  1260. } else {
  1261. /* Computing MAX */
  1262. i__1 = *n + lwqp3, i__2 = *n + lwqrf, i__1 = f2cmax(i__1,i__2);
  1263. minwrk = f2cmax(i__1,lwsvdj);
  1264. }
  1265. if (lquery) {
  1266. zgesvj_("L", "N", "N", n, n, &a[a_offset], lda, &sva[1], n, &
  1267. v[v_offset], ldv, cdummy, &c_n1, rdummy, &c_n1, &ierr);
  1268. lwrk_zgesvj__ = (integer) cdummy[0].r;
  1269. if (errest) {
  1270. /* Computing MAX */
  1271. /* Computing 2nd power */
  1272. i__3 = *n;
  1273. i__1 = *n + lwrk_zgeqp3__, i__2 = i__3 * i__3 + lwcon,
  1274. i__1 = f2cmax(i__1,i__2), i__2 = *n + lwrk_zgeqrf__,
  1275. i__1 = f2cmax(i__1,i__2);
  1276. optwrk = f2cmax(i__1,lwrk_zgesvj__);
  1277. } else {
  1278. /* Computing MAX */
  1279. i__1 = *n + lwrk_zgeqp3__, i__2 = *n + lwrk_zgeqrf__,
  1280. i__1 = f2cmax(i__1,i__2);
  1281. optwrk = f2cmax(i__1,lwrk_zgesvj__);
  1282. }
  1283. }
  1284. if (l2tran || rowpiv) {
  1285. if (errest) {
  1286. /* Computing MAX */
  1287. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1288. f2cmax(i__1,lrwqp3), i__1 = f2cmax(i__1,lrwcon);
  1289. minrwrk = f2cmax(i__1,lrwsvdj);
  1290. } else {
  1291. /* Computing MAX */
  1292. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1293. f2cmax(i__1,lrwqp3);
  1294. minrwrk = f2cmax(i__1,lrwsvdj);
  1295. }
  1296. } else {
  1297. if (errest) {
  1298. /* Computing MAX */
  1299. i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwcon);
  1300. minrwrk = f2cmax(i__1,lrwsvdj);
  1301. } else {
  1302. /* Computing MAX */
  1303. i__1 = f2cmax(7,lrwqp3);
  1304. minrwrk = f2cmax(i__1,lrwsvdj);
  1305. }
  1306. }
  1307. if (rowpiv || l2tran) {
  1308. miniwrk += *m;
  1309. }
  1310. } else if (rsvec && ! lsvec) {
  1311. /* singular values and the right singular vectors are requested */
  1312. if (errest) {
  1313. /* Computing MAX */
  1314. i__1 = *n + lwqp3, i__1 = f2cmax(i__1,lwcon), i__1 = f2cmax(i__1,
  1315. lwsvdj), i__2 = *n + lwlqf, i__1 = f2cmax(i__1,i__2),
  1316. i__2 = (*n << 1) + lwqrf, i__1 = f2cmax(i__1,i__2), i__2
  1317. = *n + lwsvdj, i__1 = f2cmax(i__1,i__2), i__2 = *n +
  1318. lwunmlq;
  1319. minwrk = f2cmax(i__1,i__2);
  1320. } else {
  1321. /* Computing MAX */
  1322. i__1 = *n + lwqp3, i__1 = f2cmax(i__1,lwsvdj), i__2 = *n + lwlqf,
  1323. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqrf,
  1324. i__1 = f2cmax(i__1,i__2), i__2 = *n + lwsvdj, i__1 = f2cmax(
  1325. i__1,i__2), i__2 = *n + lwunmlq;
  1326. minwrk = f2cmax(i__1,i__2);
  1327. }
  1328. if (lquery) {
  1329. zgesvj_("L", "U", "N", n, n, &u[u_offset], ldu, &sva[1], n, &
  1330. a[a_offset], lda, cdummy, &c_n1, rdummy, &c_n1, &ierr);
  1331. lwrk_zgesvj__ = (integer) cdummy[0].r;
  1332. zunmlq_("L", "C", n, n, n, &a[a_offset], lda, cdummy, &v[
  1333. v_offset], ldv, cdummy, &c_n1, &ierr);
  1334. lwrk_zunmlq__ = (integer) cdummy[0].r;
  1335. if (errest) {
  1336. /* Computing MAX */
  1337. i__1 = *n + lwrk_zgeqp3__, i__1 = f2cmax(i__1,lwcon), i__1 =
  1338. f2cmax(i__1,lwrk_zgesvj__), i__2 = *n +
  1339. lwrk_zgelqf__, i__1 = f2cmax(i__1,i__2), i__2 = (*n
  1340. << 1) + lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2),
  1341. i__2 = *n + lwrk_zgesvj__, i__1 = f2cmax(i__1,i__2),
  1342. i__2 = *n + lwrk_zunmlq__;
  1343. optwrk = f2cmax(i__1,i__2);
  1344. } else {
  1345. /* Computing MAX */
  1346. i__1 = *n + lwrk_zgeqp3__, i__1 = f2cmax(i__1,lwrk_zgesvj__),
  1347. i__2 = *n + lwrk_zgelqf__, i__1 = f2cmax(i__1,i__2),
  1348. i__2 = (*n << 1) + lwrk_zgeqrf__, i__1 = f2cmax(
  1349. i__1,i__2), i__2 = *n + lwrk_zgesvj__, i__1 = f2cmax(
  1350. i__1,i__2), i__2 = *n + lwrk_zunmlq__;
  1351. optwrk = f2cmax(i__1,i__2);
  1352. }
  1353. }
  1354. if (l2tran || rowpiv) {
  1355. if (errest) {
  1356. /* Computing MAX */
  1357. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1358. f2cmax(i__1,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1359. minrwrk = f2cmax(i__1,lrwcon);
  1360. } else {
  1361. /* Computing MAX */
  1362. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1363. f2cmax(i__1,lrwqp3);
  1364. minrwrk = f2cmax(i__1,lrwsvdj);
  1365. }
  1366. } else {
  1367. if (errest) {
  1368. /* Computing MAX */
  1369. i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1370. minrwrk = f2cmax(i__1,lrwcon);
  1371. } else {
  1372. /* Computing MAX */
  1373. i__1 = f2cmax(7,lrwqp3);
  1374. minrwrk = f2cmax(i__1,lrwsvdj);
  1375. }
  1376. }
  1377. if (rowpiv || l2tran) {
  1378. miniwrk += *m;
  1379. }
  1380. } else if (lsvec && ! rsvec) {
  1381. /* singular values and the left singular vectors are requested */
  1382. if (errest) {
  1383. /* Computing MAX */
  1384. i__1 = f2cmax(lwqp3,lwcon), i__2 = *n + lwqrf, i__1 = f2cmax(i__1,
  1385. i__2), i__1 = f2cmax(i__1,lwsvdj);
  1386. minwrk = *n + f2cmax(i__1,lwunmqrm);
  1387. } else {
  1388. /* Computing MAX */
  1389. i__1 = lwqp3, i__2 = *n + lwqrf, i__1 = f2cmax(i__1,i__2), i__1 =
  1390. f2cmax(i__1,lwsvdj);
  1391. minwrk = *n + f2cmax(i__1,lwunmqrm);
  1392. }
  1393. if (lquery) {
  1394. zgesvj_("L", "U", "N", n, n, &u[u_offset], ldu, &sva[1], n, &
  1395. a[a_offset], lda, cdummy, &c_n1, rdummy, &c_n1, &ierr);
  1396. lwrk_zgesvj__ = (integer) cdummy[0].r;
  1397. zunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
  1398. u_offset], ldu, cdummy, &c_n1, &ierr);
  1399. lwrk_zunmqrm__ = (integer) cdummy[0].r;
  1400. if (errest) {
  1401. /* Computing MAX */
  1402. i__1 = f2cmax(lwrk_zgeqp3__,lwcon), i__2 = *n +
  1403. lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2), i__1 = f2cmax(
  1404. i__1,lwrk_zgesvj__);
  1405. optwrk = *n + f2cmax(i__1,lwrk_zunmqrm__);
  1406. } else {
  1407. /* Computing MAX */
  1408. i__1 = lwrk_zgeqp3__, i__2 = *n + lwrk_zgeqrf__, i__1 =
  1409. f2cmax(i__1,i__2), i__1 = f2cmax(i__1,lwrk_zgesvj__);
  1410. optwrk = *n + f2cmax(i__1,lwrk_zunmqrm__);
  1411. }
  1412. }
  1413. if (l2tran || rowpiv) {
  1414. if (errest) {
  1415. /* Computing MAX */
  1416. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1417. f2cmax(i__1,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1418. minrwrk = f2cmax(i__1,lrwcon);
  1419. } else {
  1420. /* Computing MAX */
  1421. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1422. f2cmax(i__1,lrwqp3);
  1423. minrwrk = f2cmax(i__1,lrwsvdj);
  1424. }
  1425. } else {
  1426. if (errest) {
  1427. /* Computing MAX */
  1428. i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1429. minrwrk = f2cmax(i__1,lrwcon);
  1430. } else {
  1431. /* Computing MAX */
  1432. i__1 = f2cmax(7,lrwqp3);
  1433. minrwrk = f2cmax(i__1,lrwsvdj);
  1434. }
  1435. }
  1436. if (rowpiv || l2tran) {
  1437. miniwrk += *m;
  1438. }
  1439. } else {
  1440. /* full SVD is requested */
  1441. if (! jracc) {
  1442. if (errest) {
  1443. /* Computing MAX */
  1444. /* Computing 2nd power */
  1445. i__3 = *n;
  1446. /* Computing 2nd power */
  1447. i__4 = *n;
  1448. /* Computing 2nd power */
  1449. i__5 = *n;
  1450. /* Computing 2nd power */
  1451. i__6 = *n;
  1452. /* Computing 2nd power */
  1453. i__7 = *n;
  1454. /* Computing 2nd power */
  1455. i__8 = *n;
  1456. /* Computing 2nd power */
  1457. i__9 = *n;
  1458. /* Computing 2nd power */
  1459. i__10 = *n;
  1460. /* Computing 2nd power */
  1461. i__11 = *n;
  1462. i__1 = *n + lwqp3, i__2 = *n + lwcon, i__1 = f2cmax(i__1,
  1463. i__2), i__2 = (*n << 1) + i__3 * i__3 + lwcon,
  1464. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqrf,
  1465. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqp3,
  1466. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
  1467. i__4 + *n + lwlqf, i__1 = f2cmax(i__1,i__2), i__2 = (
  1468. *n << 1) + i__5 * i__5 + *n + i__6 * i__6 + lwcon,
  1469. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__7 *
  1470. i__7 + *n + lwsvdj, i__1 = f2cmax(i__1,i__2), i__2 =
  1471. (*n << 1) + i__8 * i__8 + *n + lwsvdjv, i__1 =
  1472. f2cmax(i__1,i__2), i__2 = (*n << 1) + i__9 * i__9 + *
  1473. n + lwunmqr, i__1 = f2cmax(i__1,i__2), i__2 = (*n <<
  1474. 1) + i__10 * i__10 + *n + lwunmlq, i__1 = f2cmax(
  1475. i__1,i__2), i__2 = *n + i__11 * i__11 + lwsvdj,
  1476. i__1 = f2cmax(i__1,i__2), i__2 = *n + lwunmqrm;
  1477. minwrk = f2cmax(i__1,i__2);
  1478. } else {
  1479. /* Computing MAX */
  1480. /* Computing 2nd power */
  1481. i__3 = *n;
  1482. /* Computing 2nd power */
  1483. i__4 = *n;
  1484. /* Computing 2nd power */
  1485. i__5 = *n;
  1486. /* Computing 2nd power */
  1487. i__6 = *n;
  1488. /* Computing 2nd power */
  1489. i__7 = *n;
  1490. /* Computing 2nd power */
  1491. i__8 = *n;
  1492. /* Computing 2nd power */
  1493. i__9 = *n;
  1494. /* Computing 2nd power */
  1495. i__10 = *n;
  1496. /* Computing 2nd power */
  1497. i__11 = *n;
  1498. i__1 = *n + lwqp3, i__2 = (*n << 1) + i__3 * i__3 + lwcon,
  1499. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqrf,
  1500. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqp3,
  1501. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
  1502. i__4 + *n + lwlqf, i__1 = f2cmax(i__1,i__2), i__2 = (
  1503. *n << 1) + i__5 * i__5 + *n + i__6 * i__6 + lwcon,
  1504. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__7 *
  1505. i__7 + *n + lwsvdj, i__1 = f2cmax(i__1,i__2), i__2 =
  1506. (*n << 1) + i__8 * i__8 + *n + lwsvdjv, i__1 =
  1507. f2cmax(i__1,i__2), i__2 = (*n << 1) + i__9 * i__9 + *
  1508. n + lwunmqr, i__1 = f2cmax(i__1,i__2), i__2 = (*n <<
  1509. 1) + i__10 * i__10 + *n + lwunmlq, i__1 = f2cmax(
  1510. i__1,i__2), i__2 = *n + i__11 * i__11 + lwsvdj,
  1511. i__1 = f2cmax(i__1,i__2), i__2 = *n + lwunmqrm;
  1512. minwrk = f2cmax(i__1,i__2);
  1513. }
  1514. miniwrk += *n;
  1515. if (rowpiv || l2tran) {
  1516. miniwrk += *m;
  1517. }
  1518. } else {
  1519. if (errest) {
  1520. /* Computing MAX */
  1521. /* Computing 2nd power */
  1522. i__3 = *n;
  1523. /* Computing 2nd power */
  1524. i__4 = *n;
  1525. i__1 = *n + lwqp3, i__2 = *n + lwcon, i__1 = f2cmax(i__1,
  1526. i__2), i__2 = (*n << 1) + lwqrf, i__1 = f2cmax(i__1,
  1527. i__2), i__2 = (*n << 1) + i__3 * i__3 + lwsvdjv,
  1528. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
  1529. i__4 + *n + lwunmqr, i__1 = f2cmax(i__1,i__2), i__2 =
  1530. *n + lwunmqrm;
  1531. minwrk = f2cmax(i__1,i__2);
  1532. } else {
  1533. /* Computing MAX */
  1534. /* Computing 2nd power */
  1535. i__3 = *n;
  1536. /* Computing 2nd power */
  1537. i__4 = *n;
  1538. i__1 = *n + lwqp3, i__2 = (*n << 1) + lwqrf, i__1 = f2cmax(
  1539. i__1,i__2), i__2 = (*n << 1) + i__3 * i__3 +
  1540. lwsvdjv, i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1)
  1541. + i__4 * i__4 + *n + lwunmqr, i__1 = f2cmax(i__1,
  1542. i__2), i__2 = *n + lwunmqrm;
  1543. minwrk = f2cmax(i__1,i__2);
  1544. }
  1545. if (rowpiv || l2tran) {
  1546. miniwrk += *m;
  1547. }
  1548. }
  1549. if (lquery) {
  1550. zunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
  1551. u_offset], ldu, cdummy, &c_n1, &ierr);
  1552. lwrk_zunmqrm__ = (integer) cdummy[0].r;
  1553. zunmqr_("L", "N", n, n, n, &a[a_offset], lda, cdummy, &u[
  1554. u_offset], ldu, cdummy, &c_n1, &ierr);
  1555. lwrk_zunmqr__ = (integer) cdummy[0].r;
  1556. if (! jracc) {
  1557. zgeqp3_(n, n, &a[a_offset], lda, &iwork[1], cdummy,
  1558. cdummy, &c_n1, rdummy, &ierr);
  1559. lwrk_zgeqp3n__ = (integer) cdummy[0].r;
  1560. zgesvj_("L", "U", "N", n, n, &u[u_offset], ldu, &sva[1],
  1561. n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
  1562. c_n1, &ierr);
  1563. lwrk_zgesvj__ = (integer) cdummy[0].r;
  1564. zgesvj_("U", "U", "N", n, n, &u[u_offset], ldu, &sva[1],
  1565. n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
  1566. c_n1, &ierr);
  1567. lwrk_zgesvju__ = (integer) cdummy[0].r;
  1568. zgesvj_("L", "U", "V", n, n, &u[u_offset], ldu, &sva[1],
  1569. n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
  1570. c_n1, &ierr);
  1571. lwrk_zgesvjv__ = (integer) cdummy[0].r;
  1572. zunmlq_("L", "C", n, n, n, &a[a_offset], lda, cdummy, &v[
  1573. v_offset], ldv, cdummy, &c_n1, &ierr);
  1574. lwrk_zunmlq__ = (integer) cdummy[0].r;
  1575. if (errest) {
  1576. /* Computing MAX */
  1577. /* Computing 2nd power */
  1578. i__3 = *n;
  1579. /* Computing 2nd power */
  1580. i__4 = *n;
  1581. /* Computing 2nd power */
  1582. i__5 = *n;
  1583. /* Computing 2nd power */
  1584. i__6 = *n;
  1585. /* Computing 2nd power */
  1586. i__7 = *n;
  1587. /* Computing 2nd power */
  1588. i__8 = *n;
  1589. /* Computing 2nd power */
  1590. i__9 = *n;
  1591. /* Computing 2nd power */
  1592. i__10 = *n;
  1593. /* Computing 2nd power */
  1594. i__11 = *n;
  1595. i__1 = *n + lwrk_zgeqp3__, i__2 = *n + lwcon, i__1 =
  1596. f2cmax(i__1,i__2), i__2 = (*n << 1) + i__3 *
  1597. i__3 + lwcon, i__1 = f2cmax(i__1,i__2), i__2 = (*
  1598. n << 1) + lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2)
  1599. , i__2 = (*n << 1) + lwrk_zgeqp3n__, i__1 =
  1600. f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
  1601. i__4 + *n + lwrk_zgelqf__, i__1 = f2cmax(i__1,
  1602. i__2), i__2 = (*n << 1) + i__5 * i__5 + *n +
  1603. i__6 * i__6 + lwcon, i__1 = f2cmax(i__1,i__2),
  1604. i__2 = (*n << 1) + i__7 * i__7 + *n +
  1605. lwrk_zgesvj__, i__1 = f2cmax(i__1,i__2), i__2 = (
  1606. *n << 1) + i__8 * i__8 + *n + lwrk_zgesvjv__,
  1607. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) +
  1608. i__9 * i__9 + *n + lwrk_zunmqr__, i__1 = f2cmax(
  1609. i__1,i__2), i__2 = (*n << 1) + i__10 * i__10
  1610. + *n + lwrk_zunmlq__, i__1 = f2cmax(i__1,i__2),
  1611. i__2 = *n + i__11 * i__11 + lwrk_zgesvju__,
  1612. i__1 = f2cmax(i__1,i__2), i__2 = *n +
  1613. lwrk_zunmqrm__;
  1614. optwrk = f2cmax(i__1,i__2);
  1615. } else {
  1616. /* Computing MAX */
  1617. /* Computing 2nd power */
  1618. i__3 = *n;
  1619. /* Computing 2nd power */
  1620. i__4 = *n;
  1621. /* Computing 2nd power */
  1622. i__5 = *n;
  1623. /* Computing 2nd power */
  1624. i__6 = *n;
  1625. /* Computing 2nd power */
  1626. i__7 = *n;
  1627. /* Computing 2nd power */
  1628. i__8 = *n;
  1629. /* Computing 2nd power */
  1630. i__9 = *n;
  1631. /* Computing 2nd power */
  1632. i__10 = *n;
  1633. /* Computing 2nd power */
  1634. i__11 = *n;
  1635. i__1 = *n + lwrk_zgeqp3__, i__2 = (*n << 1) + i__3 *
  1636. i__3 + lwcon, i__1 = f2cmax(i__1,i__2), i__2 = (*
  1637. n << 1) + lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2)
  1638. , i__2 = (*n << 1) + lwrk_zgeqp3n__, i__1 =
  1639. f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
  1640. i__4 + *n + lwrk_zgelqf__, i__1 = f2cmax(i__1,
  1641. i__2), i__2 = (*n << 1) + i__5 * i__5 + *n +
  1642. i__6 * i__6 + lwcon, i__1 = f2cmax(i__1,i__2),
  1643. i__2 = (*n << 1) + i__7 * i__7 + *n +
  1644. lwrk_zgesvj__, i__1 = f2cmax(i__1,i__2), i__2 = (
  1645. *n << 1) + i__8 * i__8 + *n + lwrk_zgesvjv__,
  1646. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) +
  1647. i__9 * i__9 + *n + lwrk_zunmqr__, i__1 = f2cmax(
  1648. i__1,i__2), i__2 = (*n << 1) + i__10 * i__10
  1649. + *n + lwrk_zunmlq__, i__1 = f2cmax(i__1,i__2),
  1650. i__2 = *n + i__11 * i__11 + lwrk_zgesvju__,
  1651. i__1 = f2cmax(i__1,i__2), i__2 = *n +
  1652. lwrk_zunmqrm__;
  1653. optwrk = f2cmax(i__1,i__2);
  1654. }
  1655. } else {
  1656. zgesvj_("L", "U", "V", n, n, &u[u_offset], ldu, &sva[1],
  1657. n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
  1658. c_n1, &ierr);
  1659. lwrk_zgesvjv__ = (integer) cdummy[0].r;
  1660. zunmqr_("L", "N", n, n, n, cdummy, n, cdummy, &v[v_offset]
  1661. , ldv, cdummy, &c_n1, &ierr)
  1662. ;
  1663. lwrk_zunmqr__ = (integer) cdummy[0].r;
  1664. zunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
  1665. u_offset], ldu, cdummy, &c_n1, &ierr);
  1666. lwrk_zunmqrm__ = (integer) cdummy[0].r;
  1667. if (errest) {
  1668. /* Computing MAX */
  1669. /* Computing 2nd power */
  1670. i__3 = *n;
  1671. /* Computing 2nd power */
  1672. i__4 = *n;
  1673. /* Computing 2nd power */
  1674. i__5 = *n;
  1675. i__1 = *n + lwrk_zgeqp3__, i__2 = *n + lwcon, i__1 =
  1676. f2cmax(i__1,i__2), i__2 = (*n << 1) +
  1677. lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2), i__2 = (
  1678. *n << 1) + i__3 * i__3, i__1 = f2cmax(i__1,i__2),
  1679. i__2 = (*n << 1) + i__4 * i__4 +
  1680. lwrk_zgesvjv__, i__1 = f2cmax(i__1,i__2), i__2 =
  1681. (*n << 1) + i__5 * i__5 + *n + lwrk_zunmqr__,
  1682. i__1 = f2cmax(i__1,i__2), i__2 = *n +
  1683. lwrk_zunmqrm__;
  1684. optwrk = f2cmax(i__1,i__2);
  1685. } else {
  1686. /* Computing MAX */
  1687. /* Computing 2nd power */
  1688. i__3 = *n;
  1689. /* Computing 2nd power */
  1690. i__4 = *n;
  1691. /* Computing 2nd power */
  1692. i__5 = *n;
  1693. i__1 = *n + lwrk_zgeqp3__, i__2 = (*n << 1) +
  1694. lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2), i__2 = (
  1695. *n << 1) + i__3 * i__3, i__1 = f2cmax(i__1,i__2),
  1696. i__2 = (*n << 1) + i__4 * i__4 +
  1697. lwrk_zgesvjv__, i__1 = f2cmax(i__1,i__2), i__2 =
  1698. (*n << 1) + i__5 * i__5 + *n + lwrk_zunmqr__,
  1699. i__1 = f2cmax(i__1,i__2), i__2 = *n +
  1700. lwrk_zunmqrm__;
  1701. optwrk = f2cmax(i__1,i__2);
  1702. }
  1703. }
  1704. }
  1705. if (l2tran || rowpiv) {
  1706. /* Computing MAX */
  1707. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 = f2cmax(
  1708. i__1,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1709. minrwrk = f2cmax(i__1,lrwcon);
  1710. } else {
  1711. /* Computing MAX */
  1712. i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1713. minrwrk = f2cmax(i__1,lrwcon);
  1714. }
  1715. }
  1716. minwrk = f2cmax(2,minwrk);
  1717. optwrk = f2cmax(minwrk,optwrk);
  1718. if (*lwork < minwrk && ! lquery) {
  1719. *info = -17;
  1720. }
  1721. if (*lrwork < minrwrk && ! lquery) {
  1722. *info = -19;
  1723. }
  1724. }
  1725. if (*info != 0) {
  1726. /* #:( */
  1727. i__1 = -(*info);
  1728. xerbla_("ZGEJSV", &i__1, (ftnlen)6);
  1729. return;
  1730. } else if (lquery) {
  1731. cwork[1].r = (doublereal) optwrk, cwork[1].i = 0.;
  1732. cwork[2].r = (doublereal) minwrk, cwork[2].i = 0.;
  1733. rwork[1] = (doublereal) minrwrk;
  1734. iwork[1] = f2cmax(4,miniwrk);
  1735. return;
  1736. }
  1737. /* Quick return for void matrix (Y3K safe) */
  1738. /* #:) */
  1739. if (*m == 0 || *n == 0) {
  1740. iwork[1] = 0;
  1741. iwork[2] = 0;
  1742. iwork[3] = 0;
  1743. iwork[4] = 0;
  1744. rwork[1] = 0.;
  1745. rwork[2] = 0.;
  1746. rwork[3] = 0.;
  1747. rwork[4] = 0.;
  1748. rwork[5] = 0.;
  1749. rwork[6] = 0.;
  1750. rwork[7] = 0.;
  1751. return;
  1752. }
  1753. /* Determine whether the matrix U should be M x N or M x M */
  1754. if (lsvec) {
  1755. n1 = *n;
  1756. if (lsame_(jobu, "F")) {
  1757. n1 = *m;
  1758. }
  1759. }
  1760. /* Set numerical parameters */
  1761. /* ! NOTE: Make sure DLAMCH() does not fail on the target architecture. */
  1762. epsln = dlamch_("Epsilon");
  1763. sfmin = dlamch_("SafeMinimum");
  1764. small = sfmin / epsln;
  1765. big = dlamch_("O");
  1766. /* BIG = ONE / SFMIN */
  1767. /* Initialize SVA(1:N) = diag( ||A e_i||_2 )_1^N */
  1768. /* (!) If necessary, scale SVA() to protect the largest norm from */
  1769. /* overflow. It is possible that this scaling pushes the smallest */
  1770. /* column norm left from the underflow threshold (extreme case). */
  1771. scalem = 1. / sqrt((doublereal) (*m) * (doublereal) (*n));
  1772. noscal = TRUE_;
  1773. goscal = TRUE_;
  1774. i__1 = *n;
  1775. for (p = 1; p <= i__1; ++p) {
  1776. aapp = 0.;
  1777. aaqq = 1.;
  1778. zlassq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
  1779. if (aapp > big) {
  1780. *info = -9;
  1781. i__2 = -(*info);
  1782. xerbla_("ZGEJSV", &i__2, (ftnlen)6);
  1783. return;
  1784. }
  1785. aaqq = sqrt(aaqq);
  1786. if (aapp < big / aaqq && noscal) {
  1787. sva[p] = aapp * aaqq;
  1788. } else {
  1789. noscal = FALSE_;
  1790. sva[p] = aapp * (aaqq * scalem);
  1791. if (goscal) {
  1792. goscal = FALSE_;
  1793. i__2 = p - 1;
  1794. dscal_(&i__2, &scalem, &sva[1], &c__1);
  1795. }
  1796. }
  1797. /* L1874: */
  1798. }
  1799. if (noscal) {
  1800. scalem = 1.;
  1801. }
  1802. aapp = 0.;
  1803. aaqq = big;
  1804. i__1 = *n;
  1805. for (p = 1; p <= i__1; ++p) {
  1806. /* Computing MAX */
  1807. d__1 = aapp, d__2 = sva[p];
  1808. aapp = f2cmax(d__1,d__2);
  1809. if (sva[p] != 0.) {
  1810. /* Computing MIN */
  1811. d__1 = aaqq, d__2 = sva[p];
  1812. aaqq = f2cmin(d__1,d__2);
  1813. }
  1814. /* L4781: */
  1815. }
  1816. /* Quick return for zero M x N matrix */
  1817. /* #:) */
  1818. if (aapp == 0.) {
  1819. if (lsvec) {
  1820. zlaset_("G", m, &n1, &c_b1, &c_b2, &u[u_offset], ldu);
  1821. }
  1822. if (rsvec) {
  1823. zlaset_("G", n, n, &c_b1, &c_b2, &v[v_offset], ldv);
  1824. }
  1825. rwork[1] = 1.;
  1826. rwork[2] = 1.;
  1827. if (errest) {
  1828. rwork[3] = 1.;
  1829. }
  1830. if (lsvec && rsvec) {
  1831. rwork[4] = 1.;
  1832. rwork[5] = 1.;
  1833. }
  1834. if (l2tran) {
  1835. rwork[6] = 0.;
  1836. rwork[7] = 0.;
  1837. }
  1838. iwork[1] = 0;
  1839. iwork[2] = 0;
  1840. iwork[3] = 0;
  1841. iwork[4] = -1;
  1842. return;
  1843. }
  1844. /* Issue warning if denormalized column norms detected. Override the */
  1845. /* high relative accuracy request. Issue licence to kill nonzero columns */
  1846. /* (set them to zero) whose norm is less than sigma_max / BIG (roughly). */
  1847. /* #:( */
  1848. warning = 0;
  1849. if (aaqq <= sfmin) {
  1850. l2rank = TRUE_;
  1851. l2kill = TRUE_;
  1852. warning = 1;
  1853. }
  1854. /* Quick return for one-column matrix */
  1855. /* #:) */
  1856. if (*n == 1) {
  1857. if (lsvec) {
  1858. zlascl_("G", &c__0, &c__0, &sva[1], &scalem, m, &c__1, &a[a_dim1
  1859. + 1], lda, &ierr);
  1860. zlacpy_("A", m, &c__1, &a[a_offset], lda, &u[u_offset], ldu);
  1861. /* computing all M left singular vectors of the M x 1 matrix */
  1862. if (n1 != *n) {
  1863. i__1 = *lwork - *n;
  1864. zgeqrf_(m, n, &u[u_offset], ldu, &cwork[1], &cwork[*n + 1], &
  1865. i__1, &ierr);
  1866. i__1 = *lwork - *n;
  1867. zungqr_(m, &n1, &c__1, &u[u_offset], ldu, &cwork[1], &cwork[*
  1868. n + 1], &i__1, &ierr);
  1869. zcopy_(m, &a[a_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
  1870. }
  1871. }
  1872. if (rsvec) {
  1873. i__1 = v_dim1 + 1;
  1874. v[i__1].r = 1., v[i__1].i = 0.;
  1875. }
  1876. if (sva[1] < big * scalem) {
  1877. sva[1] /= scalem;
  1878. scalem = 1.;
  1879. }
  1880. rwork[1] = 1. / scalem;
  1881. rwork[2] = 1.;
  1882. if (sva[1] != 0.) {
  1883. iwork[1] = 1;
  1884. if (sva[1] / scalem >= sfmin) {
  1885. iwork[2] = 1;
  1886. } else {
  1887. iwork[2] = 0;
  1888. }
  1889. } else {
  1890. iwork[1] = 0;
  1891. iwork[2] = 0;
  1892. }
  1893. iwork[3] = 0;
  1894. iwork[4] = -1;
  1895. if (errest) {
  1896. rwork[3] = 1.;
  1897. }
  1898. if (lsvec && rsvec) {
  1899. rwork[4] = 1.;
  1900. rwork[5] = 1.;
  1901. }
  1902. if (l2tran) {
  1903. rwork[6] = 0.;
  1904. rwork[7] = 0.;
  1905. }
  1906. return;
  1907. }
  1908. transp = FALSE_;
  1909. aatmax = -1.;
  1910. aatmin = big;
  1911. if (rowpiv || l2tran) {
  1912. /* Compute the row norms, needed to determine row pivoting sequence */
  1913. /* (in the case of heavily row weighted A, row pivoting is strongly */
  1914. /* advised) and to collect information needed to compare the */
  1915. /* structures of A * A^* and A^* * A (in the case L2TRAN.EQ..TRUE.). */
  1916. if (l2tran) {
  1917. i__1 = *m;
  1918. for (p = 1; p <= i__1; ++p) {
  1919. xsc = 0.;
  1920. temp1 = 1.;
  1921. zlassq_(n, &a[p + a_dim1], lda, &xsc, &temp1);
  1922. /* ZLASSQ gets both the ell_2 and the ell_infinity norm */
  1923. /* in one pass through the vector */
  1924. rwork[*m + p] = xsc * scalem;
  1925. rwork[p] = xsc * (scalem * sqrt(temp1));
  1926. /* Computing MAX */
  1927. d__1 = aatmax, d__2 = rwork[p];
  1928. aatmax = f2cmax(d__1,d__2);
  1929. if (rwork[p] != 0.) {
  1930. /* Computing MIN */
  1931. d__1 = aatmin, d__2 = rwork[p];
  1932. aatmin = f2cmin(d__1,d__2);
  1933. }
  1934. /* L1950: */
  1935. }
  1936. } else {
  1937. i__1 = *m;
  1938. for (p = 1; p <= i__1; ++p) {
  1939. rwork[*m + p] = scalem * z_abs(&a[p + izamax_(n, &a[p +
  1940. a_dim1], lda) * a_dim1]);
  1941. /* Computing MAX */
  1942. d__1 = aatmax, d__2 = rwork[*m + p];
  1943. aatmax = f2cmax(d__1,d__2);
  1944. /* Computing MIN */
  1945. d__1 = aatmin, d__2 = rwork[*m + p];
  1946. aatmin = f2cmin(d__1,d__2);
  1947. /* L1904: */
  1948. }
  1949. }
  1950. }
  1951. /* For square matrix A try to determine whether A^* would be better */
  1952. /* input for the preconditioned Jacobi SVD, with faster convergence. */
  1953. /* The decision is based on an O(N) function of the vector of column */
  1954. /* and row norms of A, based on the Shannon entropy. This should give */
  1955. /* the right choice in most cases when the difference actually matters. */
  1956. /* It may fail and pick the slower converging side. */
  1957. entra = 0.;
  1958. entrat = 0.;
  1959. if (l2tran) {
  1960. xsc = 0.;
  1961. temp1 = 1.;
  1962. dlassq_(n, &sva[1], &c__1, &xsc, &temp1);
  1963. temp1 = 1. / temp1;
  1964. entra = 0.;
  1965. i__1 = *n;
  1966. for (p = 1; p <= i__1; ++p) {
  1967. /* Computing 2nd power */
  1968. d__1 = sva[p] / xsc;
  1969. big1 = d__1 * d__1 * temp1;
  1970. if (big1 != 0.) {
  1971. entra += big1 * log(big1);
  1972. }
  1973. /* L1113: */
  1974. }
  1975. entra = -entra / log((doublereal) (*n));
  1976. /* Now, SVA().^2/Trace(A^* * A) is a point in the probability simplex. */
  1977. /* It is derived from the diagonal of A^* * A. Do the same with the */
  1978. /* diagonal of A * A^*, compute the entropy of the corresponding */
  1979. /* probability distribution. Note that A * A^* and A^* * A have the */
  1980. /* same trace. */
  1981. entrat = 0.;
  1982. i__1 = *m;
  1983. for (p = 1; p <= i__1; ++p) {
  1984. /* Computing 2nd power */
  1985. d__1 = rwork[p] / xsc;
  1986. big1 = d__1 * d__1 * temp1;
  1987. if (big1 != 0.) {
  1988. entrat += big1 * log(big1);
  1989. }
  1990. /* L1114: */
  1991. }
  1992. entrat = -entrat / log((doublereal) (*m));
  1993. /* Analyze the entropies and decide A or A^*. Smaller entropy */
  1994. /* usually means better input for the algorithm. */
  1995. transp = entrat < entra;
  1996. /* If A^* is better than A, take the adjoint of A. This is allowed */
  1997. /* only for square matrices, M=N. */
  1998. if (transp) {
  1999. /* In an optimal implementation, this trivial transpose */
  2000. /* should be replaced with faster transpose. */
  2001. i__1 = *n - 1;
  2002. for (p = 1; p <= i__1; ++p) {
  2003. i__2 = p + p * a_dim1;
  2004. d_cnjg(&z__1, &a[p + p * a_dim1]);
  2005. a[i__2].r = z__1.r, a[i__2].i = z__1.i;
  2006. i__2 = *n;
  2007. for (q = p + 1; q <= i__2; ++q) {
  2008. d_cnjg(&z__1, &a[q + p * a_dim1]);
  2009. ctemp.r = z__1.r, ctemp.i = z__1.i;
  2010. i__3 = q + p * a_dim1;
  2011. d_cnjg(&z__1, &a[p + q * a_dim1]);
  2012. a[i__3].r = z__1.r, a[i__3].i = z__1.i;
  2013. i__3 = p + q * a_dim1;
  2014. a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
  2015. /* L1116: */
  2016. }
  2017. /* L1115: */
  2018. }
  2019. i__1 = *n + *n * a_dim1;
  2020. d_cnjg(&z__1, &a[*n + *n * a_dim1]);
  2021. a[i__1].r = z__1.r, a[i__1].i = z__1.i;
  2022. i__1 = *n;
  2023. for (p = 1; p <= i__1; ++p) {
  2024. rwork[*m + p] = sva[p];
  2025. sva[p] = rwork[p];
  2026. /* previously computed row 2-norms are now column 2-norms */
  2027. /* of the transposed matrix */
  2028. /* L1117: */
  2029. }
  2030. temp1 = aapp;
  2031. aapp = aatmax;
  2032. aatmax = temp1;
  2033. temp1 = aaqq;
  2034. aaqq = aatmin;
  2035. aatmin = temp1;
  2036. kill = lsvec;
  2037. lsvec = rsvec;
  2038. rsvec = kill;
  2039. if (lsvec) {
  2040. n1 = *n;
  2041. }
  2042. rowpiv = TRUE_;
  2043. }
  2044. }
  2045. /* END IF L2TRAN */
  2046. /* Scale the matrix so that its maximal singular value remains less */
  2047. /* than SQRT(BIG) -- the matrix is scaled so that its maximal column */
  2048. /* has Euclidean norm equal to SQRT(BIG/N). The only reason to keep */
  2049. /* SQRT(BIG) instead of BIG is the fact that ZGEJSV uses LAPACK and */
  2050. /* BLAS routines that, in some implementations, are not capable of */
  2051. /* working in the full interval [SFMIN,BIG] and that they may provoke */
  2052. /* overflows in the intermediate results. If the singular values spread */
  2053. /* from SFMIN to BIG, then ZGESVJ will compute them. So, in that case, */
  2054. /* one should use ZGESVJ instead of ZGEJSV. */
  2055. /* >> change in the April 2016 update: allow bigger range, i.e. the */
  2056. /* largest column is allowed up to BIG/N and ZGESVJ will do the rest. */
  2057. big1 = sqrt(big);
  2058. temp1 = sqrt(big / (doublereal) (*n));
  2059. /* TEMP1 = BIG/DBLE(N) */
  2060. dlascl_("G", &c__0, &c__0, &aapp, &temp1, n, &c__1, &sva[1], n, &ierr);
  2061. if (aaqq > aapp * sfmin) {
  2062. aaqq = aaqq / aapp * temp1;
  2063. } else {
  2064. aaqq = aaqq * temp1 / aapp;
  2065. }
  2066. temp1 *= scalem;
  2067. zlascl_("G", &c__0, &c__0, &aapp, &temp1, m, n, &a[a_offset], lda, &ierr);
  2068. /* To undo scaling at the end of this procedure, multiply the */
  2069. /* computed singular values with USCAL2 / USCAL1. */
  2070. uscal1 = temp1;
  2071. uscal2 = aapp;
  2072. if (l2kill) {
  2073. /* L2KILL enforces computation of nonzero singular values in */
  2074. /* the restricted range of condition number of the initial A, */
  2075. /* sigma_max(A) / sigma_min(A) approx. SQRT(BIG)/SQRT(SFMIN). */
  2076. xsc = sqrt(sfmin);
  2077. } else {
  2078. xsc = small;
  2079. /* Now, if the condition number of A is too big, */
  2080. /* sigma_max(A) / sigma_min(A) .GT. SQRT(BIG/N) * EPSLN / SFMIN, */
2081. /* as a precautionary measure, the full SVD is computed using ZGESVJ */
  2082. /* with accumulated Jacobi rotations. This provides numerically */
  2083. /* more robust computation, at the cost of slightly increased run */
  2084. /* time. Depending on the concrete implementation of BLAS and LAPACK */
  2085. /* (i.e. how they behave in presence of extreme ill-conditioning) the */
  2086. /* implementor may decide to remove this switch. */
  2087. if (aaqq < sqrt(sfmin) && lsvec && rsvec) {
  2088. jracc = TRUE_;
  2089. }
  2090. }
  2091. if (aaqq < xsc) {
  2092. i__1 = *n;
  2093. for (p = 1; p <= i__1; ++p) {
  2094. if (sva[p] < xsc) {
  2095. zlaset_("A", m, &c__1, &c_b1, &c_b1, &a[p * a_dim1 + 1], lda);
  2096. sva[p] = 0.;
  2097. }
  2098. /* L700: */
  2099. }
  2100. }
  2101. /* Preconditioning using QR factorization with pivoting */
  2102. if (rowpiv) {
  2103. /* Optional row permutation (Bjoerck row pivoting): */
  2104. /* A result by Cox and Higham shows that the Bjoerck's */
  2105. /* row pivoting combined with standard column pivoting */
  2106. /* has similar effect as Powell-Reid complete pivoting. */
  2107. /* The ell-infinity norms of A are made nonincreasing. */
  2108. if (lsvec && rsvec && ! jracc) {
  2109. iwoff = *n << 1;
  2110. } else {
  2111. iwoff = *n;
  2112. }
  2113. i__1 = *m - 1;
  2114. for (p = 1; p <= i__1; ++p) {
  2115. i__2 = *m - p + 1;
  2116. q = idamax_(&i__2, &rwork[*m + p], &c__1) + p - 1;
  2117. iwork[iwoff + p] = q;
  2118. if (p != q) {
  2119. temp1 = rwork[*m + p];
  2120. rwork[*m + p] = rwork[*m + q];
  2121. rwork[*m + q] = temp1;
  2122. }
  2123. /* L1952: */
  2124. }
  2125. i__1 = *m - 1;
  2126. zlaswp_(n, &a[a_offset], lda, &c__1, &i__1, &iwork[iwoff + 1], &c__1);
  2127. }
  2128. /* End of the preparation phase (scaling, optional sorting and */
  2129. /* transposing, optional flushing of small columns). */
  2130. /* Preconditioning */
  2131. /* If the full SVD is needed, the right singular vectors are computed */
  2132. /* from a matrix equation, and for that we need theoretical analysis */
  2133. /* of the Businger-Golub pivoting. So we use ZGEQP3 as the first RR QRF. */
  2134. /* In all other cases the first RR QRF can be chosen by other criteria */
  2135. /* (eg speed by replacing global with restricted window pivoting, such */
  2136. /* as in xGEQPX from TOMS # 782). Good results will be obtained using */
  2137. /* xGEQPX with properly (!) chosen numerical parameters. */
2138. /* Any improvement of ZGEQP3 improves overall performance of ZGEJSV. */
  2139. /* A * P1 = Q1 * [ R1^* 0]^*: */
  2140. i__1 = *n;
  2141. for (p = 1; p <= i__1; ++p) {
  2142. iwork[p] = 0;
  2143. /* L1963: */
  2144. }
  2145. i__1 = *lwork - *n;
  2146. zgeqp3_(m, n, &a[a_offset], lda, &iwork[1], &cwork[1], &cwork[*n + 1], &
  2147. i__1, &rwork[1], &ierr);
  2148. /* The upper triangular matrix R1 from the first QRF is inspected for */
  2149. /* rank deficiency and possibilities for deflation, or possible */
  2150. /* ill-conditioning. Depending on the user specified flag L2RANK, */
  2151. /* the procedure explores possibilities to reduce the numerical */
  2152. /* rank by inspecting the computed upper triangular factor. If */
  2153. /* L2RANK or L2ABER are up, then ZGEJSV will compute the SVD of */
  2154. /* A + dA, where ||dA|| <= f(M,N)*EPSLN. */
  2155. nr = 1;
  2156. if (l2aber) {
  2157. /* Standard absolute error bound suffices. All sigma_i with */
  2158. /* sigma_i < N*EPSLN*||A|| are flushed to zero. This is an */
  2159. /* aggressive enforcement of lower numerical rank by introducing a */
  2160. /* backward error of the order of N*EPSLN*||A||. */
  2161. temp1 = sqrt((doublereal) (*n)) * epsln;
  2162. i__1 = *n;
  2163. for (p = 2; p <= i__1; ++p) {
  2164. if (z_abs(&a[p + p * a_dim1]) >= temp1 * z_abs(&a[a_dim1 + 1])) {
  2165. ++nr;
  2166. } else {
  2167. goto L3002;
  2168. }
  2169. /* L3001: */
  2170. }
  2171. L3002:
  2172. ;
  2173. } else if (l2rank) {
  2174. /* Sudden drop on the diagonal of R1 is used as the criterion for */
  2175. /* close-to-rank-deficient. */
  2176. temp1 = sqrt(sfmin);
  2177. i__1 = *n;
  2178. for (p = 2; p <= i__1; ++p) {
  2179. if (z_abs(&a[p + p * a_dim1]) < epsln * z_abs(&a[p - 1 + (p - 1) *
  2180. a_dim1]) || z_abs(&a[p + p * a_dim1]) < small || l2kill
  2181. && z_abs(&a[p + p * a_dim1]) < temp1) {
  2182. goto L3402;
  2183. }
  2184. ++nr;
  2185. /* L3401: */
  2186. }
  2187. L3402:
  2188. ;
  2189. } else {
  2190. /* The goal is high relative accuracy. However, if the matrix */
  2191. /* has high scaled condition number the relative accuracy is in */
  2192. /* general not feasible. Later on, a condition number estimator */
  2193. /* will be deployed to estimate the scaled condition number. */
  2194. /* Here we just remove the underflowed part of the triangular */
  2195. /* factor. This prevents the situation in which the code is */
  2196. /* working hard to get the accuracy not warranted by the data. */
  2197. temp1 = sqrt(sfmin);
  2198. i__1 = *n;
  2199. for (p = 2; p <= i__1; ++p) {
  2200. if (z_abs(&a[p + p * a_dim1]) < small || l2kill && z_abs(&a[p + p
  2201. * a_dim1]) < temp1) {
  2202. goto L3302;
  2203. }
  2204. ++nr;
  2205. /* L3301: */
  2206. }
  2207. L3302:
  2208. ;
  2209. }
  2210. almort = FALSE_;
  2211. if (nr == *n) {
  2212. maxprj = 1.;
  2213. i__1 = *n;
  2214. for (p = 2; p <= i__1; ++p) {
  2215. temp1 = z_abs(&a[p + p * a_dim1]) / sva[iwork[p]];
  2216. maxprj = f2cmin(maxprj,temp1);
  2217. /* L3051: */
  2218. }
  2219. /* Computing 2nd power */
  2220. d__1 = maxprj;
  2221. if (d__1 * d__1 >= 1. - (doublereal) (*n) * epsln) {
  2222. almort = TRUE_;
  2223. }
  2224. }
  2225. sconda = -1.;
  2226. condr1 = -1.;
  2227. condr2 = -1.;
  2228. if (errest) {
  2229. if (*n == nr) {
  2230. if (rsvec) {
  2231. zlacpy_("U", n, n, &a[a_offset], lda, &v[v_offset], ldv);
  2232. i__1 = *n;
  2233. for (p = 1; p <= i__1; ++p) {
  2234. temp1 = sva[iwork[p]];
  2235. d__1 = 1. / temp1;
  2236. zdscal_(&p, &d__1, &v[p * v_dim1 + 1], &c__1);
  2237. /* L3053: */
  2238. }
  2239. if (lsvec) {
  2240. zpocon_("U", n, &v[v_offset], ldv, &c_b141, &temp1, &
  2241. cwork[*n + 1], &rwork[1], &ierr);
  2242. } else {
  2243. zpocon_("U", n, &v[v_offset], ldv, &c_b141, &temp1, &
  2244. cwork[1], &rwork[1], &ierr);
  2245. }
  2246. } else if (lsvec) {
  2247. zlacpy_("U", n, n, &a[a_offset], lda, &u[u_offset], ldu);
  2248. i__1 = *n;
  2249. for (p = 1; p <= i__1; ++p) {
  2250. temp1 = sva[iwork[p]];
  2251. d__1 = 1. / temp1;
  2252. zdscal_(&p, &d__1, &u[p * u_dim1 + 1], &c__1);
  2253. /* L3054: */
  2254. }
  2255. zpocon_("U", n, &u[u_offset], ldu, &c_b141, &temp1, &cwork[*n
  2256. + 1], &rwork[1], &ierr);
  2257. } else {
  2258. zlacpy_("U", n, n, &a[a_offset], lda, &cwork[1], n)
  2259. ;
  2260. /* [] CALL ZLACPY( 'U', N, N, A, LDA, CWORK(N+1), N ) */
  2261. /* Change: here index shifted by N to the left, CWORK(1:N) */
  2262. /* not needed for SIGMA only computation */
  2263. i__1 = *n;
  2264. for (p = 1; p <= i__1; ++p) {
  2265. temp1 = sva[iwork[p]];
  2266. /* [] CALL ZDSCAL( p, ONE/TEMP1, CWORK(N+(p-1)*N+1), 1 ) */
  2267. d__1 = 1. / temp1;
  2268. zdscal_(&p, &d__1, &cwork[(p - 1) * *n + 1], &c__1);
  2269. /* L3052: */
  2270. }
  2271. /* [] CALL ZPOCON( 'U', N, CWORK(N+1), N, ONE, TEMP1, */
  2272. /* [] $ CWORK(N+N*N+1), RWORK, IERR ) */
  2273. zpocon_("U", n, &cwork[1], n, &c_b141, &temp1, &cwork[*n * *n
  2274. + 1], &rwork[1], &ierr);
  2275. }
  2276. if (temp1 != 0.) {
  2277. sconda = 1. / sqrt(temp1);
  2278. } else {
  2279. sconda = -1.;
  2280. }
  2281. /* SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1). */
  2282. /* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
  2283. } else {
  2284. sconda = -1.;
  2285. }
  2286. }
  2287. z_div(&z__1, &a[a_dim1 + 1], &a[nr + nr * a_dim1]);
  2288. l2pert = l2pert && z_abs(&z__1) > sqrt(big1);
  2289. /* If there is no violent scaling, artificial perturbation is not needed. */
  2290. /* Phase 3: */
  2291. if (! (rsvec || lsvec)) {
  2292. /* Singular Values only */
  2293. /* Computing MIN */
  2294. i__2 = *n - 1;
  2295. i__1 = f2cmin(i__2,nr);
  2296. for (p = 1; p <= i__1; ++p) {
  2297. i__2 = *n - p;
  2298. zcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
  2299. a_dim1], &c__1);
  2300. i__2 = *n - p + 1;
  2301. zlacgv_(&i__2, &a[p + p * a_dim1], &c__1);
  2302. /* L1946: */
  2303. }
  2304. if (nr == *n) {
  2305. i__1 = *n + *n * a_dim1;
  2306. d_cnjg(&z__1, &a[*n + *n * a_dim1]);
  2307. a[i__1].r = z__1.r, a[i__1].i = z__1.i;
  2308. }
  2309. /* The following two DO-loops introduce small relative perturbation */
  2310. /* into the strict upper triangle of the lower triangular matrix. */
  2311. /* Small entries below the main diagonal are also changed. */
  2312. /* This modification is useful if the computing environment does not */
  2313. /* provide/allow FLUSH TO ZERO underflow, for it prevents many */
  2314. /* annoying denormalized numbers in case of strongly scaled matrices. */
  2315. /* The perturbation is structured so that it does not introduce any */
  2316. /* new perturbation of the singular values, and it does not destroy */
  2317. /* the job done by the preconditioner. */
  2318. /* The licence for this perturbation is in the variable L2PERT, which */
  2319. /* should be .FALSE. if FLUSH TO ZERO underflow is active. */
  2320. if (! almort) {
  2321. if (l2pert) {
  2322. /* XSC = SQRT(SMALL) */
  2323. xsc = epsln / (doublereal) (*n);
  2324. i__1 = nr;
  2325. for (q = 1; q <= i__1; ++q) {
  2326. d__1 = xsc * z_abs(&a[q + q * a_dim1]);
  2327. z__1.r = d__1, z__1.i = 0.;
  2328. ctemp.r = z__1.r, ctemp.i = z__1.i;
  2329. i__2 = *n;
  2330. for (p = 1; p <= i__2; ++p) {
  2331. if (p > q && z_abs(&a[p + q * a_dim1]) <= temp1 || p <
  2332. q) {
  2333. i__3 = p + q * a_dim1;
  2334. a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
  2335. }
  2336. /* $ A(p,q) = TEMP1 * ( A(p,q) / ABS(A(p,q)) ) */
  2337. /* L4949: */
  2338. }
  2339. /* L4947: */
  2340. }
  2341. } else {
  2342. i__1 = nr - 1;
  2343. i__2 = nr - 1;
  2344. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1]
  2345. , lda);
  2346. }
  2347. i__1 = *lwork - *n;
  2348. zgeqrf_(n, &nr, &a[a_offset], lda, &cwork[1], &cwork[*n + 1], &
  2349. i__1, &ierr);
  2350. i__1 = nr - 1;
  2351. for (p = 1; p <= i__1; ++p) {
  2352. i__2 = nr - p;
  2353. zcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
  2354. a_dim1], &c__1);
  2355. i__2 = nr - p + 1;
  2356. zlacgv_(&i__2, &a[p + p * a_dim1], &c__1);
  2357. /* L1948: */
  2358. }
  2359. }
  2360. /* Row-cyclic Jacobi SVD algorithm with column pivoting */
  2361. /* to drown denormals */
  2362. if (l2pert) {
  2363. /* XSC = SQRT(SMALL) */
  2364. xsc = epsln / (doublereal) (*n);
  2365. i__1 = nr;
  2366. for (q = 1; q <= i__1; ++q) {
  2367. d__1 = xsc * z_abs(&a[q + q * a_dim1]);
  2368. z__1.r = d__1, z__1.i = 0.;
  2369. ctemp.r = z__1.r, ctemp.i = z__1.i;
  2370. i__2 = nr;
  2371. for (p = 1; p <= i__2; ++p) {
  2372. if (p > q && z_abs(&a[p + q * a_dim1]) <= temp1 || p < q)
  2373. {
  2374. i__3 = p + q * a_dim1;
  2375. a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
  2376. }
  2377. /* $ A(p,q) = TEMP1 * ( A(p,q) / ABS(A(p,q)) ) */
  2378. /* L1949: */
  2379. }
  2380. /* L1947: */
  2381. }
  2382. } else {
  2383. i__1 = nr - 1;
  2384. i__2 = nr - 1;
  2385. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1],
  2386. lda);
  2387. }
  2388. /* triangular matrix (plus perturbation which is ignored in */
  2389. /* the part which destroys triangular form (confusing?!)) */
  2390. zgesvj_("L", "N", "N", &nr, &nr, &a[a_offset], lda, &sva[1], n, &v[
  2391. v_offset], ldv, &cwork[1], lwork, &rwork[1], lrwork, info);
  2392. scalem = rwork[1];
  2393. numrank = i_dnnt(&rwork[2]);
  2394. } else if (rsvec && ! lsvec && ! jracc || jracc && ! lsvec && nr != *n) {
  2395. /* -> Singular Values and Right Singular Vectors <- */
  2396. if (almort) {
  2397. i__1 = nr;
  2398. for (p = 1; p <= i__1; ++p) {
  2399. i__2 = *n - p + 1;
  2400. zcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
  2401. c__1);
  2402. i__2 = *n - p + 1;
  2403. zlacgv_(&i__2, &v[p + p * v_dim1], &c__1);
  2404. /* L1998: */
  2405. }
  2406. i__1 = nr - 1;
  2407. i__2 = nr - 1;
  2408. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1],
  2409. ldv);
  2410. zgesvj_("L", "U", "N", n, &nr, &v[v_offset], ldv, &sva[1], &nr, &
  2411. a[a_offset], lda, &cwork[1], lwork, &rwork[1], lrwork,
  2412. info);
  2413. scalem = rwork[1];
  2414. numrank = i_dnnt(&rwork[2]);
  2415. } else {
  2416. /* accumulated product of Jacobi rotations, three are perfect ) */
  2417. i__1 = nr - 1;
  2418. i__2 = nr - 1;
  2419. zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
  2420. i__1 = *lwork - *n;
  2421. zgelqf_(&nr, n, &a[a_offset], lda, &cwork[1], &cwork[*n + 1], &
  2422. i__1, &ierr);
  2423. zlacpy_("L", &nr, &nr, &a[a_offset], lda, &v[v_offset], ldv);
  2424. i__1 = nr - 1;
  2425. i__2 = nr - 1;
  2426. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1],
  2427. ldv);
  2428. i__1 = *lwork - (*n << 1);
  2429. zgeqrf_(&nr, &nr, &v[v_offset], ldv, &cwork[*n + 1], &cwork[(*n <<
  2430. 1) + 1], &i__1, &ierr);
  2431. i__1 = nr;
  2432. for (p = 1; p <= i__1; ++p) {
  2433. i__2 = nr - p + 1;
  2434. zcopy_(&i__2, &v[p + p * v_dim1], ldv, &v[p + p * v_dim1], &
  2435. c__1);
  2436. i__2 = nr - p + 1;
  2437. zlacgv_(&i__2, &v[p + p * v_dim1], &c__1);
  2438. /* L8998: */
  2439. }
  2440. i__1 = nr - 1;
  2441. i__2 = nr - 1;
  2442. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1],
  2443. ldv);
  2444. i__1 = *lwork - *n;
  2445. zgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[1], &nr,
  2446. &u[u_offset], ldu, &cwork[*n + 1], &i__1, &rwork[1],
  2447. lrwork, info);
  2448. scalem = rwork[1];
  2449. numrank = i_dnnt(&rwork[2]);
  2450. if (nr < *n) {
  2451. i__1 = *n - nr;
  2452. zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 + v_dim1],
  2453. ldv);
  2454. i__1 = *n - nr;
  2455. zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) * v_dim1 +
  2456. 1], ldv);
  2457. i__1 = *n - nr;
  2458. i__2 = *n - nr;
  2459. zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (nr + 1)
  2460. * v_dim1], ldv);
  2461. }
  2462. i__1 = *lwork - *n;
  2463. zunmlq_("L", "C", n, n, &nr, &a[a_offset], lda, &cwork[1], &v[
  2464. v_offset], ldv, &cwork[*n + 1], &i__1, &ierr);
  2465. }
  2466. /* DO 8991 p = 1, N */
  2467. /* CALL ZCOPY( N, V(p,1), LDV, A(IWORK(p),1), LDA ) */
  2468. /* 8991 CONTINUE */
  2469. /* CALL ZLACPY( 'All', N, N, A, LDA, V, LDV ) */
  2470. zlapmr_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
  2471. if (transp) {
  2472. zlacpy_("A", n, n, &v[v_offset], ldv, &u[u_offset], ldu);
  2473. }
  2474. } else if (jracc && ! lsvec && nr == *n) {
  2475. i__1 = *n - 1;
  2476. i__2 = *n - 1;
  2477. zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
  2478. zgesvj_("U", "N", "V", n, n, &a[a_offset], lda, &sva[1], n, &v[
  2479. v_offset], ldv, &cwork[1], lwork, &rwork[1], lrwork, info);
  2480. scalem = rwork[1];
  2481. numrank = i_dnnt(&rwork[2]);
  2482. zlapmr_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
  2483. } else if (lsvec && ! rsvec) {
  2484. /* Jacobi rotations in the Jacobi iterations. */
  2485. i__1 = nr;
  2486. for (p = 1; p <= i__1; ++p) {
  2487. i__2 = *n - p + 1;
  2488. zcopy_(&i__2, &a[p + p * a_dim1], lda, &u[p + p * u_dim1], &c__1);
  2489. i__2 = *n - p + 1;
  2490. zlacgv_(&i__2, &u[p + p * u_dim1], &c__1);
  2491. /* L1965: */
  2492. }
  2493. i__1 = nr - 1;
  2494. i__2 = nr - 1;
  2495. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1], ldu);
  2496. i__1 = *lwork - (*n << 1);
  2497. zgeqrf_(n, &nr, &u[u_offset], ldu, &cwork[*n + 1], &cwork[(*n << 1) +
  2498. 1], &i__1, &ierr);
  2499. i__1 = nr - 1;
  2500. for (p = 1; p <= i__1; ++p) {
  2501. i__2 = nr - p;
  2502. zcopy_(&i__2, &u[p + (p + 1) * u_dim1], ldu, &u[p + 1 + p *
  2503. u_dim1], &c__1);
  2504. i__2 = *n - p + 1;
  2505. zlacgv_(&i__2, &u[p + p * u_dim1], &c__1);
  2506. /* L1967: */
  2507. }
  2508. i__1 = nr - 1;
  2509. i__2 = nr - 1;
  2510. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1], ldu);
  2511. i__1 = *lwork - *n;
  2512. zgesvj_("L", "U", "N", &nr, &nr, &u[u_offset], ldu, &sva[1], &nr, &a[
  2513. a_offset], lda, &cwork[*n + 1], &i__1, &rwork[1], lrwork,
  2514. info);
  2515. scalem = rwork[1];
  2516. numrank = i_dnnt(&rwork[2]);
  2517. if (nr < *m) {
  2518. i__1 = *m - nr;
  2519. zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1], ldu);
  2520. if (nr < n1) {
  2521. i__1 = n1 - nr;
  2522. zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) * u_dim1 +
  2523. 1], ldu);
  2524. i__1 = *m - nr;
  2525. i__2 = n1 - nr;
  2526. zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (nr + 1)
  2527. * u_dim1], ldu);
  2528. }
  2529. }
  2530. i__1 = *lwork - *n;
  2531. zunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
  2532. u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
  2533. if (rowpiv) {
  2534. i__1 = *m - 1;
  2535. zlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[iwoff + 1], &
  2536. c_n1);
  2537. }
  2538. i__1 = n1;
  2539. for (p = 1; p <= i__1; ++p) {
  2540. xsc = 1. / dznrm2_(m, &u[p * u_dim1 + 1], &c__1);
  2541. zdscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  2542. /* L1974: */
  2543. }
  2544. if (transp) {
  2545. zlacpy_("A", n, n, &u[u_offset], ldu, &v[v_offset], ldv);
  2546. }
  2547. } else {
  2548. if (! jracc) {
  2549. if (! almort) {
  2550. /* Second Preconditioning Step (QRF [with pivoting]) */
  2551. /* Note that the composition of TRANSPOSE, QRF and TRANSPOSE is */
  2552. /* equivalent to an LQF CALL. Since in many libraries the QRF */
  2553. /* seems to be better optimized than the LQF, we do explicit */
  2554. /* transpose and use the QRF. This is subject to changes in an */
  2555. /* optimized implementation of ZGEJSV. */
  2556. i__1 = nr;
  2557. for (p = 1; p <= i__1; ++p) {
  2558. i__2 = *n - p + 1;
  2559. zcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1],
  2560. &c__1);
  2561. i__2 = *n - p + 1;
  2562. zlacgv_(&i__2, &v[p + p * v_dim1], &c__1);
  2563. /* L1968: */
  2564. }
  2565. /* denormals in the second QR factorization, where they are */
  2566. /* as good as zeros. This is done to avoid painfully slow */
  2567. /* computation with denormals. The relative size of the perturbation */
  2568. /* is a parameter that can be changed by the implementer. */
  2569. /* This perturbation device will be obsolete on machines with */
  2570. /* properly implemented arithmetic. */
  2571. /* To switch it off, set L2PERT=.FALSE. To remove it from the */
  2572. /* code, remove the action under L2PERT=.TRUE., leave the ELSE part. */
  2573. /* The following two loops should be blocked and fused with the */
  2574. /* transposed copy above. */
  2575. if (l2pert) {
  2576. xsc = sqrt(small);
  2577. i__1 = nr;
  2578. for (q = 1; q <= i__1; ++q) {
  2579. d__1 = xsc * z_abs(&v[q + q * v_dim1]);
  2580. z__1.r = d__1, z__1.i = 0.;
  2581. ctemp.r = z__1.r, ctemp.i = z__1.i;
  2582. i__2 = *n;
  2583. for (p = 1; p <= i__2; ++p) {
  2584. if (p > q && z_abs(&v[p + q * v_dim1]) <= temp1 ||
  2585. p < q) {
  2586. i__3 = p + q * v_dim1;
  2587. v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
  2588. }
  2589. /* $ V(p,q) = TEMP1 * ( V(p,q) / ABS(V(p,q)) ) */
  2590. if (p < q) {
  2591. i__3 = p + q * v_dim1;
  2592. i__4 = p + q * v_dim1;
  2593. z__1.r = -v[i__4].r, z__1.i = -v[i__4].i;
  2594. v[i__3].r = z__1.r, v[i__3].i = z__1.i;
  2595. }
  2596. /* L2968: */
  2597. }
  2598. /* L2969: */
  2599. }
  2600. } else {
  2601. i__1 = nr - 1;
  2602. i__2 = nr - 1;
  2603. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1)
  2604. + 1], ldv);
  2605. }
  2606. /* Estimate the row scaled condition number of R1 */
  2607. /* (If R1 is rectangular, N > NR, then the condition number */
  2608. /* of the leading NR x NR submatrix is estimated.) */
  2609. zlacpy_("L", &nr, &nr, &v[v_offset], ldv, &cwork[(*n << 1) +
  2610. 1], &nr);
  2611. i__1 = nr;
  2612. for (p = 1; p <= i__1; ++p) {
  2613. i__2 = nr - p + 1;
  2614. temp1 = dznrm2_(&i__2, &cwork[(*n << 1) + (p - 1) * nr +
  2615. p], &c__1);
  2616. i__2 = nr - p + 1;
  2617. d__1 = 1. / temp1;
  2618. zdscal_(&i__2, &d__1, &cwork[(*n << 1) + (p - 1) * nr + p]
  2619. , &c__1);
  2620. /* L3950: */
  2621. }
  2622. zpocon_("L", &nr, &cwork[(*n << 1) + 1], &nr, &c_b141, &temp1,
  2623. &cwork[(*n << 1) + nr * nr + 1], &rwork[1], &ierr);
  2624. condr1 = 1. / sqrt(temp1);
  2625. /* R1 is OK for inverse <=> CONDR1 .LT. DBLE(N) */
  2626. /* more conservative <=> CONDR1 .LT. SQRT(DBLE(N)) */
  2627. cond_ok__ = sqrt(sqrt((doublereal) nr));
  2628. /* [TP] COND_OK is a tuning parameter. */
  2629. if (condr1 < cond_ok__) {
  2630. /* implementation, this QRF should be implemented as the QRF */
  2631. /* of a lower triangular matrix. */
  2632. /* R1^* = Q2 * R2 */
  2633. i__1 = *lwork - (*n << 1);
  2634. zgeqrf_(n, &nr, &v[v_offset], ldv, &cwork[*n + 1], &cwork[
  2635. (*n << 1) + 1], &i__1, &ierr);
  2636. if (l2pert) {
  2637. xsc = sqrt(small) / epsln;
  2638. i__1 = nr;
  2639. for (p = 2; p <= i__1; ++p) {
  2640. i__2 = p - 1;
  2641. for (q = 1; q <= i__2; ++q) {
  2642. /* Computing MIN */
  2643. d__2 = z_abs(&v[p + p * v_dim1]), d__3 =
  2644. z_abs(&v[q + q * v_dim1]);
  2645. d__1 = xsc * f2cmin(d__2,d__3);
  2646. z__1.r = d__1, z__1.i = 0.;
  2647. ctemp.r = z__1.r, ctemp.i = z__1.i;
  2648. if (z_abs(&v[q + p * v_dim1]) <= temp1) {
  2649. i__3 = q + p * v_dim1;
  2650. v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
  2651. }
  2652. /* $ V(q,p) = TEMP1 * ( V(q,p) / ABS(V(q,p)) ) */
  2653. /* L3958: */
  2654. }
  2655. /* L3959: */
  2656. }
  2657. }
  2658. if (nr != *n) {
  2659. zlacpy_("A", n, &nr, &v[v_offset], ldv, &cwork[(*n <<
  2660. 1) + 1], n);
  2661. }
  2662. i__1 = nr - 1;
  2663. for (p = 1; p <= i__1; ++p) {
  2664. i__2 = nr - p;
  2665. zcopy_(&i__2, &v[p + (p + 1) * v_dim1], ldv, &v[p + 1
  2666. + p * v_dim1], &c__1);
  2667. i__2 = nr - p + 1;
  2668. zlacgv_(&i__2, &v[p + p * v_dim1], &c__1);
  2669. /* L1969: */
  2670. }
  2671. i__1 = nr + nr * v_dim1;
  2672. d_cnjg(&z__1, &v[nr + nr * v_dim1]);
  2673. v[i__1].r = z__1.r, v[i__1].i = z__1.i;
  2674. condr2 = condr1;
  2675. } else {
  2676. /* Note that windowed pivoting would be equally good */
  2677. /* numerically, and more run-time efficient. So, in */
  2678. /* an optimal implementation, the next call to ZGEQP3 */
  2679. /* should be replaced with eg. CALL ZGEQPX (ACM TOMS #782) */
  2680. /* with properly (carefully) chosen parameters. */
  2681. /* R1^* * P2 = Q2 * R2 */
  2682. i__1 = nr;
  2683. for (p = 1; p <= i__1; ++p) {
  2684. iwork[*n + p] = 0;
  2685. /* L3003: */
  2686. }
  2687. i__1 = *lwork - (*n << 1);
  2688. zgeqp3_(n, &nr, &v[v_offset], ldv, &iwork[*n + 1], &cwork[
  2689. *n + 1], &cwork[(*n << 1) + 1], &i__1, &rwork[1],
  2690. &ierr);
  2691. /* * CALL ZGEQRF( N, NR, V, LDV, CWORK(N+1), CWORK(2*N+1), */
  2692. /* * $ LWORK-2*N, IERR ) */
  2693. if (l2pert) {
  2694. xsc = sqrt(small);
  2695. i__1 = nr;
  2696. for (p = 2; p <= i__1; ++p) {
  2697. i__2 = p - 1;
  2698. for (q = 1; q <= i__2; ++q) {
  2699. /* Computing MIN */
  2700. d__2 = z_abs(&v[p + p * v_dim1]), d__3 =
  2701. z_abs(&v[q + q * v_dim1]);
  2702. d__1 = xsc * f2cmin(d__2,d__3);
  2703. z__1.r = d__1, z__1.i = 0.;
  2704. ctemp.r = z__1.r, ctemp.i = z__1.i;
  2705. if (z_abs(&v[q + p * v_dim1]) <= temp1) {
  2706. i__3 = q + p * v_dim1;
  2707. v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
  2708. }
  2709. /* $ V(q,p) = TEMP1 * ( V(q,p) / ABS(V(q,p)) ) */
  2710. /* L3968: */
  2711. }
  2712. /* L3969: */
  2713. }
  2714. }
  2715. zlacpy_("A", n, &nr, &v[v_offset], ldv, &cwork[(*n << 1)
  2716. + 1], n);
  2717. if (l2pert) {
  2718. xsc = sqrt(small);
  2719. i__1 = nr;
  2720. for (p = 2; p <= i__1; ++p) {
  2721. i__2 = p - 1;
  2722. for (q = 1; q <= i__2; ++q) {
  2723. /* Computing MIN */
  2724. d__2 = z_abs(&v[p + p * v_dim1]), d__3 =
  2725. z_abs(&v[q + q * v_dim1]);
  2726. d__1 = xsc * f2cmin(d__2,d__3);
  2727. z__1.r = d__1, z__1.i = 0.;
  2728. ctemp.r = z__1.r, ctemp.i = z__1.i;
  2729. /* V(p,q) = - TEMP1*( V(q,p) / ABS(V(q,p)) ) */
  2730. i__3 = p + q * v_dim1;
  2731. z__1.r = -ctemp.r, z__1.i = -ctemp.i;
  2732. v[i__3].r = z__1.r, v[i__3].i = z__1.i;
  2733. /* L8971: */
  2734. }
  2735. /* L8970: */
  2736. }
  2737. } else {
  2738. i__1 = nr - 1;
  2739. i__2 = nr - 1;
  2740. zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &v[v_dim1 +
  2741. 2], ldv);
  2742. }
  2743. /* Now, compute R2 = L3 * Q3, the LQ factorization. */
  2744. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2745. zgelqf_(&nr, &nr, &v[v_offset], ldv, &cwork[(*n << 1) + *
  2746. n * nr + 1], &cwork[(*n << 1) + *n * nr + nr + 1],
  2747. &i__1, &ierr);
  2748. zlacpy_("L", &nr, &nr, &v[v_offset], ldv, &cwork[(*n << 1)
  2749. + *n * nr + nr + 1], &nr);
  2750. i__1 = nr;
  2751. for (p = 1; p <= i__1; ++p) {
  2752. temp1 = dznrm2_(&p, &cwork[(*n << 1) + *n * nr + nr +
  2753. p], &nr);
  2754. d__1 = 1. / temp1;
  2755. zdscal_(&p, &d__1, &cwork[(*n << 1) + *n * nr + nr +
  2756. p], &nr);
  2757. /* L4950: */
  2758. }
  2759. zpocon_("L", &nr, &cwork[(*n << 1) + *n * nr + nr + 1], &
  2760. nr, &c_b141, &temp1, &cwork[(*n << 1) + *n * nr +
  2761. nr + nr * nr + 1], &rwork[1], &ierr);
  2762. condr2 = 1. / sqrt(temp1);
  2763. if (condr2 >= cond_ok__) {
  2764. /* (this overwrites the copy of R2, as it will not be */
2765. /* needed in this branch, but it does not overwrite the */
2766. /* Householder vectors of Q2.). */
  2767. zlacpy_("U", &nr, &nr, &v[v_offset], ldv, &cwork[(*n
  2768. << 1) + 1], n);
  2769. /* WORK(2*N+N*NR+1:2*N+N*NR+N) */
  2770. }
  2771. }
  2772. if (l2pert) {
  2773. xsc = sqrt(small);
  2774. i__1 = nr;
  2775. for (q = 2; q <= i__1; ++q) {
  2776. i__2 = q + q * v_dim1;
  2777. z__1.r = xsc * v[i__2].r, z__1.i = xsc * v[i__2].i;
  2778. ctemp.r = z__1.r, ctemp.i = z__1.i;
  2779. i__2 = q - 1;
  2780. for (p = 1; p <= i__2; ++p) {
  2781. /* V(p,q) = - TEMP1*( V(p,q) / ABS(V(p,q)) ) */
  2782. i__3 = p + q * v_dim1;
  2783. z__1.r = -ctemp.r, z__1.i = -ctemp.i;
  2784. v[i__3].r = z__1.r, v[i__3].i = z__1.i;
  2785. /* L4969: */
  2786. }
  2787. /* L4968: */
  2788. }
  2789. } else {
  2790. i__1 = nr - 1;
  2791. i__2 = nr - 1;
  2792. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1)
  2793. + 1], ldv);
  2794. }
  2795. /* Second preconditioning finished; continue with Jacobi SVD */
2796. /* The input matrix is lower triangular. */
  2797. /* Recover the right singular vectors as solution of a well */
  2798. /* conditioned triangular matrix equation. */
  2799. if (condr1 < cond_ok__) {
  2800. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2801. zgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
  2802. 1], &nr, &u[u_offset], ldu, &cwork[(*n << 1) + *n
  2803. * nr + nr + 1], &i__1, &rwork[1], lrwork, info);
  2804. scalem = rwork[1];
  2805. numrank = i_dnnt(&rwork[2]);
  2806. i__1 = nr;
  2807. for (p = 1; p <= i__1; ++p) {
  2808. zcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
  2809. + 1], &c__1);
  2810. zdscal_(&nr, &sva[p], &v[p * v_dim1 + 1], &c__1);
  2811. /* L3970: */
  2812. }
  2813. if (nr == *n) {
  2814. /* :)) .. best case, R1 is inverted. The solution of this matrix */
  2815. /* equation is Q2*V2 = the product of the Jacobi rotations */
  2816. /* used in ZGESVJ, premultiplied with the orthogonal matrix */
  2817. /* from the second QR factorization. */
  2818. ztrsm_("L", "U", "N", "N", &nr, &nr, &c_b2, &a[
  2819. a_offset], lda, &v[v_offset], ldv);
  2820. } else {
  2821. /* is inverted to get the product of the Jacobi rotations */
  2822. /* used in ZGESVJ. The Q-factor from the second QR */
  2823. /* factorization is then built in explicitly. */
  2824. ztrsm_("L", "U", "C", "N", &nr, &nr, &c_b2, &cwork[(*
  2825. n << 1) + 1], n, &v[v_offset], ldv);
  2826. if (nr < *n) {
  2827. i__1 = *n - nr;
  2828. zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1
  2829. + v_dim1], ldv);
  2830. i__1 = *n - nr;
  2831. zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1)
  2832. * v_dim1 + 1], ldv);
  2833. i__1 = *n - nr;
  2834. i__2 = *n - nr;
  2835. zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr +
  2836. 1 + (nr + 1) * v_dim1], ldv);
  2837. }
  2838. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2839. zunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n,
  2840. &cwork[*n + 1], &v[v_offset], ldv, &cwork[(*
  2841. n << 1) + *n * nr + nr + 1], &i__1, &ierr);
  2842. }
  2843. } else if (condr2 < cond_ok__) {
  2844. /* The matrix R2 is inverted. The solution of the matrix equation */
2845. /* is Q3^* * V3 = the product of the Jacobi rotations (applied to */
  2846. /* the lower triangular L3 from the LQ factorization of */
  2847. /* R2=L3*Q3), pre-multiplied with the transposed Q3. */
  2848. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2849. zgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
  2850. 1], &nr, &u[u_offset], ldu, &cwork[(*n << 1) + *n
  2851. * nr + nr + 1], &i__1, &rwork[1], lrwork, info);
  2852. scalem = rwork[1];
  2853. numrank = i_dnnt(&rwork[2]);
  2854. i__1 = nr;
  2855. for (p = 1; p <= i__1; ++p) {
  2856. zcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
  2857. + 1], &c__1);
  2858. zdscal_(&nr, &sva[p], &u[p * u_dim1 + 1], &c__1);
  2859. /* L3870: */
  2860. }
  2861. ztrsm_("L", "U", "N", "N", &nr, &nr, &c_b2, &cwork[(*n <<
  2862. 1) + 1], n, &u[u_offset], ldu);
  2863. i__1 = nr;
  2864. for (q = 1; q <= i__1; ++q) {
  2865. i__2 = nr;
  2866. for (p = 1; p <= i__2; ++p) {
  2867. i__3 = (*n << 1) + *n * nr + nr + iwork[*n + p];
  2868. i__4 = p + q * u_dim1;
  2869. cwork[i__3].r = u[i__4].r, cwork[i__3].i = u[i__4]
  2870. .i;
  2871. /* L872: */
  2872. }
  2873. i__2 = nr;
  2874. for (p = 1; p <= i__2; ++p) {
  2875. i__3 = p + q * u_dim1;
  2876. i__4 = (*n << 1) + *n * nr + nr + p;
  2877. u[i__3].r = cwork[i__4].r, u[i__3].i = cwork[i__4]
  2878. .i;
  2879. /* L874: */
  2880. }
  2881. /* L873: */
  2882. }
  2883. if (nr < *n) {
  2884. i__1 = *n - nr;
  2885. zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 +
  2886. v_dim1], ldv);
  2887. i__1 = *n - nr;
  2888. zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) *
  2889. v_dim1 + 1], ldv);
  2890. i__1 = *n - nr;
  2891. i__2 = *n - nr;
  2892. zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (
  2893. nr + 1) * v_dim1], ldv);
  2894. }
  2895. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2896. zunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n, &
  2897. cwork[*n + 1], &v[v_offset], ldv, &cwork[(*n << 1)
  2898. + *n * nr + nr + 1], &i__1, &ierr);
  2899. } else {
  2900. /* Last line of defense. */
  2901. /* #:( This is a rather pathological case: no scaled condition */
  2902. /* improvement after two pivoted QR factorizations. Other */
  2903. /* possibility is that the rank revealing QR factorization */
  2904. /* or the condition estimator has failed, or the COND_OK */
  2905. /* is set very close to ONE (which is unnecessary). Normally, */
  2906. /* this branch should never be executed, but in rare cases of */
  2907. /* failure of the RRQR or condition estimator, the last line of */
  2908. /* defense ensures that ZGEJSV completes the task. */
  2909. /* Compute the full SVD of L3 using ZGESVJ with explicit */
  2910. /* accumulation of Jacobi rotations. */
  2911. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2912. zgesvj_("L", "U", "V", &nr, &nr, &v[v_offset], ldv, &sva[
  2913. 1], &nr, &u[u_offset], ldu, &cwork[(*n << 1) + *n
  2914. * nr + nr + 1], &i__1, &rwork[1], lrwork, info);
  2915. scalem = rwork[1];
  2916. numrank = i_dnnt(&rwork[2]);
  2917. if (nr < *n) {
  2918. i__1 = *n - nr;
  2919. zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 +
  2920. v_dim1], ldv);
  2921. i__1 = *n - nr;
  2922. zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) *
  2923. v_dim1 + 1], ldv);
  2924. i__1 = *n - nr;
  2925. i__2 = *n - nr;
  2926. zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (
  2927. nr + 1) * v_dim1], ldv);
  2928. }
  2929. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2930. zunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n, &
  2931. cwork[*n + 1], &v[v_offset], ldv, &cwork[(*n << 1)
  2932. + *n * nr + nr + 1], &i__1, &ierr);
  2933. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2934. zunmlq_("L", "C", &nr, &nr, &nr, &cwork[(*n << 1) + 1], n,
  2935. &cwork[(*n << 1) + *n * nr + 1], &u[u_offset],
  2936. ldu, &cwork[(*n << 1) + *n * nr + nr + 1], &i__1,
  2937. &ierr);
  2938. i__1 = nr;
  2939. for (q = 1; q <= i__1; ++q) {
  2940. i__2 = nr;
  2941. for (p = 1; p <= i__2; ++p) {
  2942. i__3 = (*n << 1) + *n * nr + nr + iwork[*n + p];
  2943. i__4 = p + q * u_dim1;
  2944. cwork[i__3].r = u[i__4].r, cwork[i__3].i = u[i__4]
  2945. .i;
  2946. /* L772: */
  2947. }
  2948. i__2 = nr;
  2949. for (p = 1; p <= i__2; ++p) {
  2950. i__3 = p + q * u_dim1;
  2951. i__4 = (*n << 1) + *n * nr + nr + p;
  2952. u[i__3].r = cwork[i__4].r, u[i__3].i = cwork[i__4]
  2953. .i;
  2954. /* L774: */
  2955. }
  2956. /* L773: */
  2957. }
  2958. }
  2959. /* Permute the rows of V using the (column) permutation from the */
  2960. /* first QRF. Also, scale the columns to make them unit in */
  2961. /* Euclidean norm. This applies to all cases. */
  2962. temp1 = sqrt((doublereal) (*n)) * epsln;
  2963. i__1 = *n;
  2964. for (q = 1; q <= i__1; ++q) {
  2965. i__2 = *n;
  2966. for (p = 1; p <= i__2; ++p) {
  2967. i__3 = (*n << 1) + *n * nr + nr + iwork[p];
  2968. i__4 = p + q * v_dim1;
  2969. cwork[i__3].r = v[i__4].r, cwork[i__3].i = v[i__4].i;
  2970. /* L972: */
  2971. }
  2972. i__2 = *n;
  2973. for (p = 1; p <= i__2; ++p) {
  2974. i__3 = p + q * v_dim1;
  2975. i__4 = (*n << 1) + *n * nr + nr + p;
  2976. v[i__3].r = cwork[i__4].r, v[i__3].i = cwork[i__4].i;
  2977. /* L973: */
  2978. }
  2979. xsc = 1. / dznrm2_(n, &v[q * v_dim1 + 1], &c__1);
  2980. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  2981. zdscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
  2982. }
  2983. /* L1972: */
  2984. }
  2985. /* At this moment, V contains the right singular vectors of A. */
  2986. /* Next, assemble the left singular vector matrix U (M x N). */
  2987. if (nr < *m) {
  2988. i__1 = *m - nr;
  2989. zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1]
  2990. , ldu);
  2991. if (nr < n1) {
  2992. i__1 = n1 - nr;
  2993. zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) *
  2994. u_dim1 + 1], ldu);
  2995. i__1 = *m - nr;
  2996. i__2 = n1 - nr;
  2997. zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (
  2998. nr + 1) * u_dim1], ldu);
  2999. }
  3000. }
  3001. /* The Q matrix from the first QRF is built into the left singular */
  3002. /* matrix U. This applies to all cases. */
  3003. i__1 = *lwork - *n;
  3004. zunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
  3005. u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
  3006. /* The columns of U are normalized. The cost is O(M*N) flops. */
  3007. temp1 = sqrt((doublereal) (*m)) * epsln;
  3008. i__1 = nr;
  3009. for (p = 1; p <= i__1; ++p) {
  3010. xsc = 1. / dznrm2_(m, &u[p * u_dim1 + 1], &c__1);
  3011. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  3012. zdscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  3013. }
  3014. /* L1973: */
  3015. }
  3016. /* If the initial QRF is computed with row pivoting, the left */
  3017. /* singular vectors must be adjusted. */
  3018. if (rowpiv) {
  3019. i__1 = *m - 1;
  3020. zlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[
  3021. iwoff + 1], &c_n1);
  3022. }
  3023. } else {
  3024. /* the second QRF is not needed */
  3025. zlacpy_("U", n, n, &a[a_offset], lda, &cwork[*n + 1], n);
  3026. if (l2pert) {
  3027. xsc = sqrt(small);
  3028. i__1 = *n;
  3029. for (p = 2; p <= i__1; ++p) {
  3030. i__2 = *n + (p - 1) * *n + p;
  3031. z__1.r = xsc * cwork[i__2].r, z__1.i = xsc * cwork[
  3032. i__2].i;
  3033. ctemp.r = z__1.r, ctemp.i = z__1.i;
  3034. i__2 = p - 1;
  3035. for (q = 1; q <= i__2; ++q) {
  3036. /* CWORK(N+(q-1)*N+p)=-TEMP1 * ( CWORK(N+(p-1)*N+q) / */
  3037. /* $ ABS(CWORK(N+(p-1)*N+q)) ) */
  3038. i__3 = *n + (q - 1) * *n + p;
  3039. z__1.r = -ctemp.r, z__1.i = -ctemp.i;
  3040. cwork[i__3].r = z__1.r, cwork[i__3].i = z__1.i;
  3041. /* L5971: */
  3042. }
  3043. /* L5970: */
  3044. }
  3045. } else {
  3046. i__1 = *n - 1;
  3047. i__2 = *n - 1;
  3048. zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &cwork[*n + 2],
  3049. n);
  3050. }
  3051. i__1 = *lwork - *n - *n * *n;
  3052. zgesvj_("U", "U", "N", n, n, &cwork[*n + 1], n, &sva[1], n, &
  3053. u[u_offset], ldu, &cwork[*n + *n * *n + 1], &i__1, &
  3054. rwork[1], lrwork, info);
  3055. scalem = rwork[1];
  3056. numrank = i_dnnt(&rwork[2]);
  3057. i__1 = *n;
  3058. for (p = 1; p <= i__1; ++p) {
  3059. zcopy_(n, &cwork[*n + (p - 1) * *n + 1], &c__1, &u[p *
  3060. u_dim1 + 1], &c__1);
  3061. zdscal_(n, &sva[p], &cwork[*n + (p - 1) * *n + 1], &c__1);
  3062. /* L6970: */
  3063. }
  3064. ztrsm_("L", "U", "N", "N", n, n, &c_b2, &a[a_offset], lda, &
  3065. cwork[*n + 1], n);
  3066. i__1 = *n;
  3067. for (p = 1; p <= i__1; ++p) {
  3068. zcopy_(n, &cwork[*n + p], n, &v[iwork[p] + v_dim1], ldv);
  3069. /* L6972: */
  3070. }
  3071. temp1 = sqrt((doublereal) (*n)) * epsln;
  3072. i__1 = *n;
  3073. for (p = 1; p <= i__1; ++p) {
  3074. xsc = 1. / dznrm2_(n, &v[p * v_dim1 + 1], &c__1);
  3075. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  3076. zdscal_(n, &xsc, &v[p * v_dim1 + 1], &c__1);
  3077. }
  3078. /* L6971: */
  3079. }
  3080. /* Assemble the left singular vector matrix U (M x N). */
  3081. if (*n < *m) {
  3082. i__1 = *m - *n;
  3083. zlaset_("A", &i__1, n, &c_b1, &c_b1, &u[*n + 1 + u_dim1],
  3084. ldu);
  3085. if (*n < n1) {
  3086. i__1 = n1 - *n;
  3087. zlaset_("A", n, &i__1, &c_b1, &c_b1, &u[(*n + 1) *
  3088. u_dim1 + 1], ldu);
  3089. i__1 = *m - *n;
  3090. i__2 = n1 - *n;
  3091. zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[*n + 1 + (
  3092. *n + 1) * u_dim1], ldu);
  3093. }
  3094. }
  3095. i__1 = *lwork - *n;
  3096. zunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
  3097. u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
  3098. temp1 = sqrt((doublereal) (*m)) * epsln;
  3099. i__1 = n1;
  3100. for (p = 1; p <= i__1; ++p) {
  3101. xsc = 1. / dznrm2_(m, &u[p * u_dim1 + 1], &c__1);
  3102. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  3103. zdscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  3104. }
  3105. /* L6973: */
  3106. }
  3107. if (rowpiv) {
  3108. i__1 = *m - 1;
  3109. zlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[
  3110. iwoff + 1], &c_n1);
  3111. }
  3112. }
  3113. /* end of the >> almost orthogonal case << in the full SVD */
  3114. } else {
  3115. /* This branch deploys a preconditioned Jacobi SVD with explicitly */
  3116. /* accumulated rotations. It is included as optional, mainly for */
  3117. /* experimental purposes. It does perform well, and can also be used. */
  3118. /* In this implementation, this branch will be automatically activated */
  3119. /* if the condition number sigma_max(A) / sigma_min(A) is predicted */
  3120. /* to be greater than the overflow threshold. This is because the */
  3121. /* a posteriori computation of the singular vectors assumes robust */
  3122. /* implementation of BLAS and some LAPACK procedures, capable of working */
  3123. /* in presence of extreme values, e.g. when the singular values spread from */
  3124. /* the underflow to the overflow threshold. */
  3125. i__1 = nr;
  3126. for (p = 1; p <= i__1; ++p) {
  3127. i__2 = *n - p + 1;
  3128. zcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
  3129. c__1);
  3130. i__2 = *n - p + 1;
  3131. zlacgv_(&i__2, &v[p + p * v_dim1], &c__1);
  3132. /* L7968: */
  3133. }
  3134. if (l2pert) {
  3135. xsc = sqrt(small / epsln);
  3136. i__1 = nr;
  3137. for (q = 1; q <= i__1; ++q) {
  3138. d__1 = xsc * z_abs(&v[q + q * v_dim1]);
  3139. z__1.r = d__1, z__1.i = 0.;
  3140. ctemp.r = z__1.r, ctemp.i = z__1.i;
  3141. i__2 = *n;
  3142. for (p = 1; p <= i__2; ++p) {
  3143. if (p > q && z_abs(&v[p + q * v_dim1]) <= temp1 || p <
  3144. q) {
  3145. i__3 = p + q * v_dim1;
  3146. v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
  3147. }
  3148. /* $ V(p,q) = TEMP1 * ( V(p,q) / ABS(V(p,q)) ) */
  3149. if (p < q) {
  3150. i__3 = p + q * v_dim1;
  3151. i__4 = p + q * v_dim1;
  3152. z__1.r = -v[i__4].r, z__1.i = -v[i__4].i;
  3153. v[i__3].r = z__1.r, v[i__3].i = z__1.i;
  3154. }
  3155. /* L5968: */
  3156. }
  3157. /* L5969: */
  3158. }
  3159. } else {
  3160. i__1 = nr - 1;
  3161. i__2 = nr - 1;
  3162. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1]
  3163. , ldv);
  3164. }
  3165. i__1 = *lwork - (*n << 1);
  3166. zgeqrf_(n, &nr, &v[v_offset], ldv, &cwork[*n + 1], &cwork[(*n <<
  3167. 1) + 1], &i__1, &ierr);
  3168. zlacpy_("L", n, &nr, &v[v_offset], ldv, &cwork[(*n << 1) + 1], n);
  3169. i__1 = nr;
  3170. for (p = 1; p <= i__1; ++p) {
  3171. i__2 = nr - p + 1;
  3172. zcopy_(&i__2, &v[p + p * v_dim1], ldv, &u[p + p * u_dim1], &
  3173. c__1);
  3174. i__2 = nr - p + 1;
  3175. zlacgv_(&i__2, &u[p + p * u_dim1], &c__1);
  3176. /* L7969: */
  3177. }
  3178. if (l2pert) {
  3179. xsc = sqrt(small / epsln);
  3180. i__1 = nr;
  3181. for (q = 2; q <= i__1; ++q) {
  3182. i__2 = q - 1;
  3183. for (p = 1; p <= i__2; ++p) {
  3184. /* Computing MIN */
  3185. d__2 = z_abs(&u[p + p * u_dim1]), d__3 = z_abs(&u[q +
  3186. q * u_dim1]);
  3187. d__1 = xsc * f2cmin(d__2,d__3);
  3188. z__1.r = d__1, z__1.i = 0.;
  3189. ctemp.r = z__1.r, ctemp.i = z__1.i;
  3190. /* U(p,q) = - TEMP1 * ( U(q,p) / ABS(U(q,p)) ) */
  3191. i__3 = p + q * u_dim1;
  3192. z__1.r = -ctemp.r, z__1.i = -ctemp.i;
  3193. u[i__3].r = z__1.r, u[i__3].i = z__1.i;
  3194. /* L9971: */
  3195. }
  3196. /* L9970: */
  3197. }
  3198. } else {
  3199. i__1 = nr - 1;
  3200. i__2 = nr - 1;
  3201. zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1]
  3202. , ldu);
  3203. }
  3204. i__1 = *lwork - (*n << 1) - *n * nr;
  3205. zgesvj_("L", "U", "V", &nr, &nr, &u[u_offset], ldu, &sva[1], n, &
  3206. v[v_offset], ldv, &cwork[(*n << 1) + *n * nr + 1], &i__1,
  3207. &rwork[1], lrwork, info);
  3208. scalem = rwork[1];
  3209. numrank = i_dnnt(&rwork[2]);
  3210. if (nr < *n) {
  3211. i__1 = *n - nr;
  3212. zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 + v_dim1],
  3213. ldv);
  3214. i__1 = *n - nr;
  3215. zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) * v_dim1 +
  3216. 1], ldv);
  3217. i__1 = *n - nr;
  3218. i__2 = *n - nr;
  3219. zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (nr + 1)
  3220. * v_dim1], ldv);
  3221. }
  3222. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  3223. zunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n, &cwork[*n
  3224. + 1], &v[v_offset], ldv, &cwork[(*n << 1) + *n * nr + nr
  3225. + 1], &i__1, &ierr);
  3226. /* Permute the rows of V using the (column) permutation from the */
  3227. /* first QRF. Also, scale the columns to make them unit in */
  3228. /* Euclidean norm. This applies to all cases. */
  3229. temp1 = sqrt((doublereal) (*n)) * epsln;
  3230. i__1 = *n;
  3231. for (q = 1; q <= i__1; ++q) {
  3232. i__2 = *n;
  3233. for (p = 1; p <= i__2; ++p) {
  3234. i__3 = (*n << 1) + *n * nr + nr + iwork[p];
  3235. i__4 = p + q * v_dim1;
  3236. cwork[i__3].r = v[i__4].r, cwork[i__3].i = v[i__4].i;
  3237. /* L8972: */
  3238. }
  3239. i__2 = *n;
  3240. for (p = 1; p <= i__2; ++p) {
  3241. i__3 = p + q * v_dim1;
  3242. i__4 = (*n << 1) + *n * nr + nr + p;
  3243. v[i__3].r = cwork[i__4].r, v[i__3].i = cwork[i__4].i;
  3244. /* L8973: */
  3245. }
  3246. xsc = 1. / dznrm2_(n, &v[q * v_dim1 + 1], &c__1);
  3247. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  3248. zdscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
  3249. }
  3250. /* L7972: */
  3251. }
  3252. /* At this moment, V contains the right singular vectors of A. */
  3253. /* Next, assemble the left singular vector matrix U (M x N). */
  3254. if (nr < *m) {
  3255. i__1 = *m - nr;
  3256. zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1],
  3257. ldu);
  3258. if (nr < n1) {
  3259. i__1 = n1 - nr;
  3260. zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) *
  3261. u_dim1 + 1], ldu);
  3262. i__1 = *m - nr;
  3263. i__2 = n1 - nr;
  3264. zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (nr
  3265. + 1) * u_dim1], ldu);
  3266. }
  3267. }
  3268. i__1 = *lwork - *n;
  3269. zunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
  3270. u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
  3271. if (rowpiv) {
  3272. i__1 = *m - 1;
  3273. zlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[iwoff +
  3274. 1], &c_n1);
  3275. }
  3276. }
  3277. if (transp) {
  3278. i__1 = *n;
  3279. for (p = 1; p <= i__1; ++p) {
  3280. zswap_(n, &u[p * u_dim1 + 1], &c__1, &v[p * v_dim1 + 1], &
  3281. c__1);
  3282. /* L6974: */
  3283. }
  3284. }
  3285. }
  3286. /* end of the full SVD */
  3287. /* Undo scaling, if necessary (and possible) */
  3288. if (uscal2 <= big / sva[1] * uscal1) {
  3289. dlascl_("G", &c__0, &c__0, &uscal1, &uscal2, &nr, &c__1, &sva[1], n, &
  3290. ierr);
  3291. uscal1 = 1.;
  3292. uscal2 = 1.;
  3293. }
  3294. if (nr < *n) {
  3295. i__1 = *n;
  3296. for (p = nr + 1; p <= i__1; ++p) {
  3297. sva[p] = 0.;
  3298. /* L3004: */
  3299. }
  3300. }
  3301. rwork[1] = uscal2 * scalem;
  3302. rwork[2] = uscal1;
  3303. if (errest) {
  3304. rwork[3] = sconda;
  3305. }
  3306. if (lsvec && rsvec) {
  3307. rwork[4] = condr1;
  3308. rwork[5] = condr2;
  3309. }
  3310. if (l2tran) {
  3311. rwork[6] = entra;
  3312. rwork[7] = entrat;
  3313. }
  3314. iwork[1] = nr;
  3315. iwork[2] = numrank;
  3316. iwork[3] = warning;
  3317. if (transp) {
  3318. iwork[4] = 1;
  3319. } else {
  3320. iwork[4] = -1;
  3321. }
  3322. return;
  3323. } /* zgejsv_ */