You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

cgejsv.c 122 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552
  1. #include <math.h>
  2. #include <stdlib.h>
  3. #include <string.h>
  4. #include <stdio.h>
  5. #include <complex.h>
  6. #ifdef complex
  7. #undef complex
  8. #endif
  9. #ifdef I
  10. #undef I
  11. #endif
  12. #if defined(_WIN64)
  13. typedef long long BLASLONG;
  14. typedef unsigned long long BLASULONG;
  15. #else
  16. typedef long BLASLONG;
  17. typedef unsigned long BLASULONG;
  18. #endif
  19. #ifdef LAPACK_ILP64
  20. typedef BLASLONG blasint;
  21. #if defined(_WIN64)
  22. #define blasabs(x) llabs(x)
  23. #else
  24. #define blasabs(x) labs(x)
  25. #endif
  26. #else
  27. typedef int blasint;
  28. #define blasabs(x) abs(x)
  29. #endif
  30. typedef blasint integer;
  31. typedef unsigned int uinteger;
  32. typedef char *address;
  33. typedef short int shortint;
  34. typedef float real;
  35. typedef double doublereal;
  36. typedef struct { real r, i; } complex;
  37. typedef struct { doublereal r, i; } doublecomplex;
  38. #ifdef _MSC_VER
  39. static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
  40. static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
  41. static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
  42. static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
  43. #else
  44. static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
  45. static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
  46. static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
  47. static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
  48. #endif
  49. #define pCf(z) (*_pCf(z))
  50. #define pCd(z) (*_pCd(z))
  51. typedef int logical;
  52. typedef short int shortlogical;
  53. typedef char logical1;
  54. typedef char integer1;
  55. #define TRUE_ (1)
  56. #define FALSE_ (0)
  57. /* Extern is for use with -E */
  58. #ifndef Extern
  59. #define Extern extern
  60. #endif
  61. /* I/O stuff */
  62. typedef int flag;
  63. typedef int ftnlen;
  64. typedef int ftnint;
  65. /*external read, write*/
  66. typedef struct
  67. { flag cierr;
  68. ftnint ciunit;
  69. flag ciend;
  70. char *cifmt;
  71. ftnint cirec;
  72. } cilist;
  73. /*internal read, write*/
  74. typedef struct
  75. { flag icierr;
  76. char *iciunit;
  77. flag iciend;
  78. char *icifmt;
  79. ftnint icirlen;
  80. ftnint icirnum;
  81. } icilist;
  82. /*open*/
  83. typedef struct
  84. { flag oerr;
  85. ftnint ounit;
  86. char *ofnm;
  87. ftnlen ofnmlen;
  88. char *osta;
  89. char *oacc;
  90. char *ofm;
  91. ftnint orl;
  92. char *oblnk;
  93. } olist;
  94. /*close*/
  95. typedef struct
  96. { flag cerr;
  97. ftnint cunit;
  98. char *csta;
  99. } cllist;
  100. /*rewind, backspace, endfile*/
  101. typedef struct
  102. { flag aerr;
  103. ftnint aunit;
  104. } alist;
  105. /* inquire */
  106. typedef struct
  107. { flag inerr;
  108. ftnint inunit;
  109. char *infile;
  110. ftnlen infilen;
  111. ftnint *inex; /*parameters in standard's order*/
  112. ftnint *inopen;
  113. ftnint *innum;
  114. ftnint *innamed;
  115. char *inname;
  116. ftnlen innamlen;
  117. char *inacc;
  118. ftnlen inacclen;
  119. char *inseq;
  120. ftnlen inseqlen;
  121. char *indir;
  122. ftnlen indirlen;
  123. char *infmt;
  124. ftnlen infmtlen;
  125. char *inform;
  126. ftnint informlen;
  127. char *inunf;
  128. ftnlen inunflen;
  129. ftnint *inrecl;
  130. ftnint *innrec;
  131. char *inblank;
  132. ftnlen inblanklen;
  133. } inlist;
  134. #define VOID void
  135. union Multitype { /* for multiple entry points */
  136. integer1 g;
  137. shortint h;
  138. integer i;
  139. /* longint j; */
  140. real r;
  141. doublereal d;
  142. complex c;
  143. doublecomplex z;
  144. };
  145. typedef union Multitype Multitype;
  146. struct Vardesc { /* for Namelist */
  147. char *name;
  148. char *addr;
  149. ftnlen *dims;
  150. int type;
  151. };
  152. typedef struct Vardesc Vardesc;
  153. struct Namelist {
  154. char *name;
  155. Vardesc **vars;
  156. int nvars;
  157. };
  158. typedef struct Namelist Namelist;
  159. #define abs(x) ((x) >= 0 ? (x) : -(x))
  160. #define dabs(x) (fabs(x))
  161. #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
  162. #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
  163. #define dmin(a,b) (f2cmin(a,b))
  164. #define dmax(a,b) (f2cmax(a,b))
  165. #define bit_test(a,b) ((a) >> (b) & 1)
  166. #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
  167. #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
  168. #define abort_() { sig_die("Fortran abort routine called", 1); }
  169. #define c_abs(z) (cabsf(Cf(z)))
  170. #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
  171. #ifdef _MSC_VER
  172. #define c_div(c, a, b) {Cf(c)._Val[0] = (Cf(a)._Val[0]/Cf(b)._Val[0]); Cf(c)._Val[1]=(Cf(a)._Val[1]/Cf(b)._Val[1]);}
  173. #define z_div(c, a, b) {Cd(c)._Val[0] = (Cd(a)._Val[0]/Cd(b)._Val[0]); Cd(c)._Val[1]=(Cd(a)._Val[1]/df(b)._Val[1]);}
  174. #else
  175. #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
  176. #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
  177. #endif
  178. #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
  179. #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
  180. #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
  181. //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
  182. #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
  183. #define d_abs(x) (fabs(*(x)))
  184. #define d_acos(x) (acos(*(x)))
  185. #define d_asin(x) (asin(*(x)))
  186. #define d_atan(x) (atan(*(x)))
  187. #define d_atn2(x, y) (atan2(*(x),*(y)))
  188. #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
  189. #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
  190. #define d_cos(x) (cos(*(x)))
  191. #define d_cosh(x) (cosh(*(x)))
  192. #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
  193. #define d_exp(x) (exp(*(x)))
  194. #define d_imag(z) (cimag(Cd(z)))
  195. #define r_imag(z) (cimagf(Cf(z)))
  196. #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  197. #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  198. #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  199. #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  200. #define d_log(x) (log(*(x)))
  201. #define d_mod(x, y) (fmod(*(x), *(y)))
  202. #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
  203. #define d_nint(x) u_nint(*(x))
  204. #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
  205. #define d_sign(a,b) u_sign(*(a),*(b))
  206. #define r_sign(a,b) u_sign(*(a),*(b))
  207. #define d_sin(x) (sin(*(x)))
  208. #define d_sinh(x) (sinh(*(x)))
  209. #define d_sqrt(x) (sqrt(*(x)))
  210. #define d_tan(x) (tan(*(x)))
  211. #define d_tanh(x) (tanh(*(x)))
  212. #define i_abs(x) abs(*(x))
  213. #define i_dnnt(x) ((integer)u_nint(*(x)))
  214. #define i_len(s, n) (n)
  215. #define i_nint(x) ((integer)u_nint(*(x)))
  216. #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
  217. #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
  218. #define pow_si(B,E) spow_ui(*(B),*(E))
  219. #define pow_ri(B,E) spow_ui(*(B),*(E))
  220. #define pow_di(B,E) dpow_ui(*(B),*(E))
  221. #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
  222. #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
  223. #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
  224. #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
  225. #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
  226. #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
  227. #define sig_die(s, kill) { exit(1); }
  228. #define s_stop(s, n) {exit(0);}
  229. static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
  230. #define z_abs(z) (cabs(Cd(z)))
  231. #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
  232. #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
  233. #define myexit_() break;
  234. #define mycycle() continue;
  235. #define myceiling(w) {ceil(w)}
  236. #define myhuge(w) {HUGE_VAL}
  237. //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
  238. #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
  239. /* procedure parameter types for -A and -C++ */
  240. #define F2C_proc_par_types 1
  241. #ifdef __cplusplus
  242. typedef logical (*L_fp)(...);
  243. #else
  244. typedef logical (*L_fp)();
  245. #endif
  246. static float spow_ui(float x, integer n) {
  247. float pow=1.0; unsigned long int u;
  248. if(n != 0) {
  249. if(n < 0) n = -n, x = 1/x;
  250. for(u = n; ; ) {
  251. if(u & 01) pow *= x;
  252. if(u >>= 1) x *= x;
  253. else break;
  254. }
  255. }
  256. return pow;
  257. }
  258. static double dpow_ui(double x, integer n) {
  259. double pow=1.0; unsigned long int u;
  260. if(n != 0) {
  261. if(n < 0) n = -n, x = 1/x;
  262. for(u = n; ; ) {
  263. if(u & 01) pow *= x;
  264. if(u >>= 1) x *= x;
  265. else break;
  266. }
  267. }
  268. return pow;
  269. }
  270. #ifdef _MSC_VER
  271. static _Fcomplex cpow_ui(complex x, integer n) {
  272. complex pow={1.0,0.0}; unsigned long int u;
  273. if(n != 0) {
  274. if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
  275. for(u = n; ; ) {
  276. if(u & 01) pow.r *= x.r, pow.i *= x.i;
  277. if(u >>= 1) x.r *= x.r, x.i *= x.i;
  278. else break;
  279. }
  280. }
  281. _Fcomplex p={pow.r, pow.i};
  282. return p;
  283. }
  284. #else
  285. static _Complex float cpow_ui(_Complex float x, integer n) {
  286. _Complex float pow=1.0; unsigned long int u;
  287. if(n != 0) {
  288. if(n < 0) n = -n, x = 1/x;
  289. for(u = n; ; ) {
  290. if(u & 01) pow *= x;
  291. if(u >>= 1) x *= x;
  292. else break;
  293. }
  294. }
  295. return pow;
  296. }
  297. #endif
  298. #ifdef _MSC_VER
  299. static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
  300. _Dcomplex pow={1.0,0.0}; unsigned long int u;
  301. if(n != 0) {
  302. if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
  303. for(u = n; ; ) {
  304. if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
  305. if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
  306. else break;
  307. }
  308. }
  309. _Dcomplex p = {pow._Val[0], pow._Val[1]};
  310. return p;
  311. }
  312. #else
  313. static _Complex double zpow_ui(_Complex double x, integer n) {
  314. _Complex double pow=1.0; unsigned long int u;
  315. if(n != 0) {
  316. if(n < 0) n = -n, x = 1/x;
  317. for(u = n; ; ) {
  318. if(u & 01) pow *= x;
  319. if(u >>= 1) x *= x;
  320. else break;
  321. }
  322. }
  323. return pow;
  324. }
  325. #endif
/* Integer power x**n with libf2c semantics: n == 0 or x == 1 yields 1;
 * x == -1 yields (-1)**|n| (sign decided by the parity of n); any other
 * base raised to a negative power truncates to 0, and 0 raised to a
 * negative power divides by zero, exactly as libf2c's pow_ii does.
 * Positive exponents use binary (repeated-squaring) exponentiation. */
static integer pow_ii(integer x, integer n) {
	integer pow; unsigned long int u;
	if (n <= 0) {
		if (n == 0 || x == 1) pow = 1;
		/* 0**negative: `1/x` reproduces libf2c's deliberate divide-by-zero;
		   any other base with |x| >= 2 truncates to 0 */
		else if (x != -1) pow = x == 0 ? 1/x : 0;
		else n = -n; /* x == -1: fall through with |n|; parity sets the sign below */
	}
	/* Runs when n > 0, or when x == -1 with n != 0 (after the negation above). */
	if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
		u = n;
		for(pow = 1; ; ) { /* binary exponentiation */
			if(u & 01) pow *= x;
			if(u >>= 1) x *= x;
			else break;
		}
	}
	return pow;
}
  343. static integer dmaxloc_(double *w, integer s, integer e, integer *n)
  344. {
  345. double m; integer i, mi;
  346. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  347. if (w[i-1]>m) mi=i ,m=w[i-1];
  348. return mi-s+1;
  349. }
  350. static integer smaxloc_(float *w, integer s, integer e, integer *n)
  351. {
  352. float m; integer i, mi;
  353. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  354. if (w[i-1]>m) mi=i ,m=w[i-1];
  355. return mi-s+1;
  356. }
  357. static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  358. integer n = *n_, incx = *incx_, incy = *incy_, i;
  359. #ifdef _MSC_VER
  360. _Fcomplex zdotc = {0.0, 0.0};
  361. if (incx == 1 && incy == 1) {
  362. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  363. zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
  364. zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
  365. }
  366. } else {
  367. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  368. zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
  369. zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
  370. }
  371. }
  372. pCf(z) = zdotc;
  373. }
  374. #else
  375. _Complex float zdotc = 0.0;
  376. if (incx == 1 && incy == 1) {
  377. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  378. zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
  379. }
  380. } else {
  381. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  382. zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
  383. }
  384. }
  385. pCf(z) = zdotc;
  386. }
  387. #endif
  388. static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  389. integer n = *n_, incx = *incx_, incy = *incy_, i;
  390. #ifdef _MSC_VER
  391. _Dcomplex zdotc = {0.0, 0.0};
  392. if (incx == 1 && incy == 1) {
  393. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  394. zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
  395. zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
  396. }
  397. } else {
  398. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  399. zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
  400. zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
  401. }
  402. }
  403. pCd(z) = zdotc;
  404. }
  405. #else
  406. _Complex double zdotc = 0.0;
  407. if (incx == 1 && incy == 1) {
  408. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  409. zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
  410. }
  411. } else {
  412. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  413. zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
  414. }
  415. }
  416. pCd(z) = zdotc;
  417. }
  418. #endif
  419. static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  420. integer n = *n_, incx = *incx_, incy = *incy_, i;
  421. #ifdef _MSC_VER
  422. _Fcomplex zdotc = {0.0, 0.0};
  423. if (incx == 1 && incy == 1) {
  424. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  425. zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
  426. zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
  427. }
  428. } else {
  429. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  430. zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
  431. zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
  432. }
  433. }
  434. pCf(z) = zdotc;
  435. }
  436. #else
  437. _Complex float zdotc = 0.0;
  438. if (incx == 1 && incy == 1) {
  439. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  440. zdotc += Cf(&x[i]) * Cf(&y[i]);
  441. }
  442. } else {
  443. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  444. zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
  445. }
  446. }
  447. pCf(z) = zdotc;
  448. }
  449. #endif
  450. static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  451. integer n = *n_, incx = *incx_, incy = *incy_, i;
  452. #ifdef _MSC_VER
  453. _Dcomplex zdotc = {0.0, 0.0};
  454. if (incx == 1 && incy == 1) {
  455. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  456. zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
  457. zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
  458. }
  459. } else {
  460. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  461. zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
  462. zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
  463. }
  464. }
  465. pCd(z) = zdotc;
  466. }
  467. #else
  468. _Complex double zdotc = 0.0;
  469. if (incx == 1 && incy == 1) {
  470. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  471. zdotc += Cd(&x[i]) * Cd(&y[i]);
  472. }
  473. } else {
  474. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  475. zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
  476. }
  477. }
  478. pCd(z) = zdotc;
  479. }
  480. #endif
  481. /* -- translated by f2c (version 20000121).
  482. You must link the resulting object file with the libraries:
  483. -lf2c -lm (in that order)
  484. */
/* Table of constant values */
/* f2c passes every argument by reference, so the scalar literals used in
   the translated calls below are materialized as static objects. */
static complex c_b1 = {0.f,0.f};	/* complex zero */
static complex c_b2 = {1.f,0.f};	/* complex one */
static integer c_n1 = -1;
static integer c__1 = 1;
static integer c__0 = 0;
static real c_b141 = 1.f;
static logical c_false = FALSE_;
  493. /* > \brief \b CGEJSV */
  494. /* =========== DOCUMENTATION =========== */
  495. /* Online html documentation available at */
  496. /* http://www.netlib.org/lapack/explore-html/ */
  497. /* > \htmlonly */
  498. /* > Download CGEJSV + dependencies */
  499. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/cgejsv.
  500. f"> */
  501. /* > [TGZ]</a> */
  502. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/cgejsv.
  503. f"> */
  504. /* > [ZIP]</a> */
  505. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/cgejsv.
  506. f"> */
  507. /* > [TXT]</a> */
  508. /* > \endhtmlonly */
  509. /* Definition: */
  510. /* =========== */
  511. /* SUBROUTINE CGEJSV( JOBA, JOBU, JOBV, JOBR, JOBT, JOBP, */
  512. /* M, N, A, LDA, SVA, U, LDU, V, LDV, */
  513. /* CWORK, LWORK, RWORK, LRWORK, IWORK, INFO ) */
  514. /* IMPLICIT NONE */
  515. /* INTEGER INFO, LDA, LDU, LDV, LWORK, M, N */
  516. /* COMPLEX A( LDA, * ), U( LDU, * ), V( LDV, * ), CWORK( LWORK ) */
  517. /* REAL SVA( N ), RWORK( LRWORK ) */
  518. /* INTEGER IWORK( * ) */
  519. /* CHARACTER*1 JOBA, JOBP, JOBR, JOBT, JOBU, JOBV */
  520. /* > \par Purpose: */
  521. /* ============= */
  522. /* > */
  523. /* > \verbatim */
  524. /* > */
  525. /* > CGEJSV computes the singular value decomposition (SVD) of a complex M-by-N */
  526. /* > matrix [A], where M >= N. The SVD of [A] is written as */
  527. /* > */
  528. /* > [A] = [U] * [SIGMA] * [V]^*, */
  529. /* > */
  530. /* > where [SIGMA] is an N-by-N (M-by-N) matrix which is zero except for its N */
  531. /* > diagonal elements, [U] is an M-by-N (or M-by-M) unitary matrix, and */
  532. /* > [V] is an N-by-N unitary matrix. The diagonal elements of [SIGMA] are */
  533. /* > the singular values of [A]. The columns of [U] and [V] are the left and */
  534. /* > the right singular vectors of [A], respectively. The matrices [U] and [V] */
  535. /* > are computed and stored in the arrays U and V, respectively. The diagonal */
  536. /* > of [SIGMA] is computed and stored in the array SVA. */
  537. /* > \endverbatim */
  538. /* > */
  539. /* > Arguments: */
  540. /* > ========== */
  541. /* > */
  542. /* > \param[in] JOBA */
  543. /* > \verbatim */
  544. /* > JOBA is CHARACTER*1 */
  545. /* > Specifies the level of accuracy: */
  546. /* > = 'C': This option works well (high relative accuracy) if A = B * D, */
  547. /* > with well-conditioned B and arbitrary diagonal matrix D. */
  548. /* > The accuracy cannot be spoiled by COLUMN scaling. The */
  549. /* > accuracy of the computed output depends on the condition of */
  550. /* > B, and the procedure aims at the best theoretical accuracy. */
  551. /* > The relative error max_{i=1:N}|d sigma_i| / sigma_i is */
  552. /* > bounded by f(M,N)*epsilon* cond(B), independent of D. */
  553. /* > The input matrix is preprocessed with the QRF with column */
  554. /* > pivoting. This initial preprocessing and preconditioning by */
  555. /* > a rank revealing QR factorization is common for all values of */
  556. /* > JOBA. Additional actions are specified as follows: */
  557. /* > = 'E': Computation as with 'C' with an additional estimate of the */
  558. /* > condition number of B. It provides a realistic error bound. */
  559. /* > = 'F': If A = D1 * C * D2 with ill-conditioned diagonal scalings */
  560. /* > D1, D2, and well-conditioned matrix C, this option gives */
  561. /* > higher accuracy than the 'C' option. If the structure of the */
  562. /* > input matrix is not known, and relative accuracy is */
  563. /* > desirable, then this option is advisable. The input matrix A */
  564. /* > is preprocessed with QR factorization with FULL (row and */
  565. /* > column) pivoting. */
  566. /* > = 'G': Computation as with 'F' with an additional estimate of the */
  567. /* > condition number of B, where A=B*D. If A has heavily weighted */
  568. /* > rows, then using this condition number gives too pessimistic */
  569. /* > error bound. */
  570. /* > = 'A': Small singular values are not well determined by the data */
  571. /* > and are considered as noisy; the matrix is treated as */
  572. /* > numerically rank deficient. The error in the computed */
  573. /* > singular values is bounded by f(m,n)*epsilon*||A||. */
  574. /* > The computed SVD A = U * S * V^* restores A up to */
  575. /* > f(m,n)*epsilon*||A||. */
  576. /* > This gives the procedure the licence to discard (set to zero) */
  577. /* > all singular values below N*epsilon*||A||. */
  578. /* > = 'R': Similar as in 'A'. Rank revealing property of the initial */
  579. /* > QR factorization is used to reveal (using triangular factor) */
  580. /* > a gap sigma_{r+1} < epsilon * sigma_r in which case the */
  581. /* > numerical RANK is declared to be r. The SVD is computed with */
  582. /* > absolute error bounds, but more accurately than with 'A'. */
  583. /* > \endverbatim */
  584. /* > */
  585. /* > \param[in] JOBU */
  586. /* > \verbatim */
  587. /* > JOBU is CHARACTER*1 */
  588. /* > Specifies whether to compute the columns of U: */
  589. /* > = 'U': N columns of U are returned in the array U. */
  590. /* > = 'F': full set of M left sing. vectors is returned in the array U. */
  591. /* > = 'W': U may be used as workspace of length M*N. See the description */
  592. /* > of U. */
  593. /* > = 'N': U is not computed. */
  594. /* > \endverbatim */
  595. /* > */
  596. /* > \param[in] JOBV */
  597. /* > \verbatim */
  598. /* > JOBV is CHARACTER*1 */
  599. /* > Specifies whether to compute the matrix V: */
  600. /* > = 'V': N columns of V are returned in the array V; Jacobi rotations */
  601. /* > are not explicitly accumulated. */
  602. /* > = 'J': N columns of V are returned in the array V, but they are */
  603. /* > computed as the product of Jacobi rotations, if JOBT = 'N'. */
  604. /* > = 'W': V may be used as workspace of length N*N. See the description */
  605. /* > of V. */
  606. /* > = 'N': V is not computed. */
  607. /* > \endverbatim */
  608. /* > */
  609. /* > \param[in] JOBR */
  610. /* > \verbatim */
  611. /* > JOBR is CHARACTER*1 */
  612. /* > Specifies the RANGE for the singular values. Issues the licence to */
  613. /* > set to zero small positive singular values if they are outside */
  614. /* > specified range. If A .NE. 0 is scaled so that the largest singular */
  615. /* > value of c*A is around SQRT(BIG), BIG=SLAMCH('O'), then JOBR issues */
  616. /* > the licence to kill columns of A whose norm in c*A is less than */
  617. /* > SQRT(SFMIN) (for JOBR = 'R'), or less than SMALL=SFMIN/EPSLN, */
  618. /* > where SFMIN=SLAMCH('S'), EPSLN=SLAMCH('E'). */
  619. /* > = 'N': Do not kill small columns of c*A. This option assumes that */
  620. /* > BLAS and QR factorizations and triangular solvers are */
  621. /* > implemented to work in that range. If the condition of A */
  622. /* > is greater than BIG, use CGESVJ. */
  623. /* > = 'R': RESTRICTED range for sigma(c*A) is [SQRT(SFMIN), SQRT(BIG)] */
  624. /* > (roughly, as described above). This option is recommended. */
  625. /* > =========================== */
  626. /* > For computing the singular values in the FULL range [SFMIN,BIG] */
  627. /* > use CGESVJ. */
  628. /* > \endverbatim */
  629. /* > */
  630. /* > \param[in] JOBT */
  631. /* > \verbatim */
  632. /* > JOBT is CHARACTER*1 */
  633. /* > If the matrix is square then the procedure may determine to use */
  634. /* > transposed A if A^* seems to be better with respect to convergence. */
  635. /* > If the matrix is not square, JOBT is ignored. */
  636. /* > The decision is based on two values of entropy over the adjoint */
  637. /* > orbit of A^* * A. See the descriptions of WORK(6) and WORK(7). */
  638. /* > = 'T': transpose if entropy test indicates possibly faster */
  639. /* > convergence of Jacobi process if A^* is taken as input. If A is */
  640. /* > replaced with A^*, then the row pivoting is included automatically. */
  641. /* > = 'N': do not speculate. */
  642. /* > The option 'T' can be used to compute only the singular values, or */
  643. /* > the full SVD (U, SIGMA and V). For only one set of singular vectors */
  644. /* > (U or V), the caller should provide both U and V, as one of the */
  645. /* > matrices is used as workspace if the matrix A is transposed. */
  646. /* > The implementer can easily remove this constraint and make the */
  647. /* > code more complicated. See the descriptions of U and V. */
  648. /* > In general, this option is considered experimental, and 'N'; should */
  649. /* > be preferred. This is subject to changes in the future. */
  650. /* > \endverbatim */
  651. /* > */
  652. /* > \param[in] JOBP */
  653. /* > \verbatim */
  654. /* > JOBP is CHARACTER*1 */
  655. /* > Issues the licence to introduce structured perturbations to drown */
  656. /* > denormalized numbers. This licence should be active if the */
  657. /* > denormals are poorly implemented, causing slow computation, */
  658. /* > especially in cases of fast convergence (!). For details see [1,2]. */
  659. /* > For the sake of simplicity, these perturbations are included only */
  660. /* > when the full SVD or only the singular values are requested. The */
  661. /* > implementer/user can easily add the perturbation for the cases of */
  662. /* > computing one set of singular vectors. */
  663. /* > = 'P': introduce perturbation */
  664. /* > = 'N': do not perturb */
  665. /* > \endverbatim */
  666. /* > */
  667. /* > \param[in] M */
  668. /* > \verbatim */
  669. /* > M is INTEGER */
  670. /* > The number of rows of the input matrix A. M >= 0. */
  671. /* > \endverbatim */
  672. /* > */
  673. /* > \param[in] N */
  674. /* > \verbatim */
  675. /* > N is INTEGER */
  676. /* > The number of columns of the input matrix A. M >= N >= 0. */
  677. /* > \endverbatim */
  678. /* > */
  679. /* > \param[in,out] A */
  680. /* > \verbatim */
  681. /* > A is COMPLEX array, dimension (LDA,N) */
  682. /* > On entry, the M-by-N matrix A. */
  683. /* > \endverbatim */
  684. /* > */
  685. /* > \param[in] LDA */
  686. /* > \verbatim */
  687. /* > LDA is INTEGER */
  688. /* > The leading dimension of the array A. LDA >= f2cmax(1,M). */
  689. /* > \endverbatim */
  690. /* > */
  691. /* > \param[out] SVA */
  692. /* > \verbatim */
  693. /* > SVA is REAL array, dimension (N) */
  694. /* > On exit, */
  695. /* > - For WORK(1)/WORK(2) = ONE: The singular values of A. During the */
  696. /* > computation SVA contains Euclidean column norms of the */
  697. /* > iterated matrices in the array A. */
  698. /* > - For WORK(1) .NE. WORK(2): The singular values of A are */
  699. /* > (WORK(1)/WORK(2)) * SVA(1:N). This factored form is used if */
  700. /* > sigma_max(A) overflows or if small singular values have been */
  701. /* > saved from underflow by scaling the input matrix A. */
  702. /* > - If JOBR='R' then some of the singular values may be returned */
  703. /* > as exact zeros obtained by "set to zero" because they are */
  704. /* > below the numerical rank threshold or are denormalized numbers. */
  705. /* > \endverbatim */
  706. /* > */
  707. /* > \param[out] U */
  708. /* > \verbatim */
  709. /* > U is COMPLEX array, dimension ( LDU, N ) or ( LDU, M ) */
  710. /* > If JOBU = 'U', then U contains on exit the M-by-N matrix of */
  711. /* > the left singular vectors. */
  712. /* > If JOBU = 'F', then U contains on exit the M-by-M matrix of */
  713. /* > the left singular vectors, including an ONB */
  714. /* > of the orthogonal complement of the Range(A). */
  715. /* > If JOBU = 'W' .AND. (JOBV = 'V' .AND. JOBT = 'T' .AND. M = N), */
  716. /* > then U is used as workspace if the procedure */
  717. /* > replaces A with A^*. In that case, [V] is computed */
  718. /* > in U as left singular vectors of A^* and then */
  719. /* > copied back to the V array. This 'W' option is just */
  720. /* > a reminder to the caller that in this case U is */
  721. /* > reserved as workspace of length N*N. */
  722. /* > If JOBU = 'N' U is not referenced, unless JOBT='T'. */
  723. /* > \endverbatim */
  724. /* > */
  725. /* > \param[in] LDU */
  726. /* > \verbatim */
  727. /* > LDU is INTEGER */
  728. /* > The leading dimension of the array U, LDU >= 1. */
  729. /* > IF JOBU = 'U' or 'F' or 'W', then LDU >= M. */
  730. /* > \endverbatim */
  731. /* > */
  732. /* > \param[out] V */
  733. /* > \verbatim */
  734. /* > V is COMPLEX array, dimension ( LDV, N ) */
  735. /* > If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of */
  736. /* > the right singular vectors; */
  737. /* > If JOBV = 'W', AND (JOBU = 'U' AND JOBT = 'T' AND M = N), */
  738. /* > then V is used as workspace if the procedure */
  739. /* > replaces A with A^*. In that case, [U] is computed */
  740. /* > in V as right singular vectors of A^* and then */
  741. /* > copied back to the U array. This 'W' option is just */
  742. /* > a reminder to the caller that in this case V is */
  743. /* > reserved as workspace of length N*N. */
  744. /* > If JOBV = 'N' V is not referenced, unless JOBT='T'. */
  745. /* > \endverbatim */
  746. /* > */
  747. /* > \param[in] LDV */
  748. /* > \verbatim */
  749. /* > LDV is INTEGER */
  750. /* > The leading dimension of the array V, LDV >= 1. */
  751. /* > If JOBV = 'V' or 'J' or 'W', then LDV >= N. */
  752. /* > \endverbatim */
  753. /* > */
  754. /* > \param[out] CWORK */
  755. /* > \verbatim */
  756. /* > CWORK is COMPLEX array, dimension (MAX(2,LWORK)) */
  757. /* > If the call to CGEJSV is a workspace query (indicated by LWORK=-1 or */
  758. /* > LRWORK=-1), then on exit CWORK(1) contains the required length of */
  759. /* > CWORK for the job parameters used in the call. */
  760. /* > \endverbatim */
  761. /* > */
  762. /* > \param[in] LWORK */
  763. /* > \verbatim */
  764. /* > LWORK is INTEGER */
  765. /* > Length of CWORK to confirm proper allocation of workspace. */
  766. /* > LWORK depends on the job: */
  767. /* > */
  768. /* > 1. If only SIGMA is needed ( JOBU = 'N', JOBV = 'N' ) and */
  769. /* > 1.1 .. no scaled condition estimate required (JOBA.NE.'E'.AND.JOBA.NE.'G'): */
  770. /* > LWORK >= 2*N+1. This is the minimal requirement. */
  771. /* > ->> For optimal performance (blocked code) the optimal value */
  772. /* > is LWORK >= N + (N+1)*NB. Here NB is the optimal */
  773. /* > block size for CGEQP3 and CGEQRF. */
  774. /* > In general, optimal LWORK is computed as */
  775. /* > LWORK >= f2cmax(N+LWORK(CGEQP3),N+LWORK(CGEQRF), LWORK(CGESVJ)). */
  776. /* > 1.2. .. an estimate of the scaled condition number of A is */
  777. /* > required (JOBA='E', or 'G'). In this case, LWORK the minimal */
  778. /* > requirement is LWORK >= N*N + 2*N. */
  779. /* > ->> For optimal performance (blocked code) the optimal value */
  780. /* > is LWORK >= f2cmax(N+(N+1)*NB, N*N+2*N)=N**2+2*N. */
  781. /* > In general, the optimal length LWORK is computed as */
  782. /* > LWORK >= f2cmax(N+LWORK(CGEQP3),N+LWORK(CGEQRF), LWORK(CGESVJ), */
  783. /* > N*N+LWORK(CPOCON)). */
  784. /* > 2. If SIGMA and the right singular vectors are needed (JOBV = 'V'), */
  785. /* > (JOBU = 'N') */
  786. /* > 2.1 .. no scaled condition estimate requested (JOBE = 'N'): */
  787. /* > -> the minimal requirement is LWORK >= 3*N. */
  788. /* > -> For optimal performance, */
  789. /* > LWORK >= f2cmax(N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, */
  790. /* > where NB is the optimal block size for CGEQP3, CGEQRF, CGELQ, */
  791. /* > CUNMLQ. In general, the optimal length LWORK is computed as */
  792. /* > LWORK >= f2cmax(N+LWORK(CGEQP3), N+LWORK(CGESVJ), */
  793. /* > N+LWORK(CGELQF), 2*N+LWORK(CGEQRF), N+LWORK(CUNMLQ)). */
  794. /* > 2.2 .. an estimate of the scaled condition number of A is */
  795. /* > required (JOBA='E', or 'G'). */
  796. /* > -> the minimal requirement is LWORK >= 3*N. */
  797. /* > -> For optimal performance, */
  798. /* > LWORK >= f2cmax(N+(N+1)*NB, 2*N,2*N+N*NB)=2*N+N*NB, */
  799. /* > where NB is the optimal block size for CGEQP3, CGEQRF, CGELQ, */
  800. /* > CUNMLQ. In general, the optimal length LWORK is computed as */
  801. /* > LWORK >= f2cmax(N+LWORK(CGEQP3), LWORK(CPOCON), N+LWORK(CGESVJ), */
  802. /* > N+LWORK(CGELQF), 2*N+LWORK(CGEQRF), N+LWORK(CUNMLQ)). */
  803. /* > 3. If SIGMA and the left singular vectors are needed */
  804. /* > 3.1 .. no scaled condition estimate requested (JOBE = 'N'): */
  805. /* > -> the minimal requirement is LWORK >= 3*N. */
  806. /* > -> For optimal performance: */
  807. /* > if JOBU = 'U' :: LWORK >= f2cmax(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, */
  808. /* > where NB is the optimal block size for CGEQP3, CGEQRF, CUNMQR. */
  809. /* > In general, the optimal length LWORK is computed as */
  810. /* > LWORK >= f2cmax(N+LWORK(CGEQP3), 2*N+LWORK(CGEQRF), N+LWORK(CUNMQR)). */
  811. /* > 3.2 .. an estimate of the scaled condition number of A is */
  812. /* > required (JOBA='E', or 'G'). */
  813. /* > -> the minimal requirement is LWORK >= 3*N. */
  814. /* > -> For optimal performance: */
  815. /* > if JOBU = 'U' :: LWORK >= f2cmax(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, */
  816. /* > where NB is the optimal block size for CGEQP3, CGEQRF, CUNMQR. */
  817. /* > In general, the optimal length LWORK is computed as */
  818. /* > LWORK >= f2cmax(N+LWORK(CGEQP3),N+LWORK(CPOCON), */
  819. /* > 2*N+LWORK(CGEQRF), N+LWORK(CUNMQR)). */
  820. /* > */
  821. /* > 4. If the full SVD is needed: (JOBU = 'U' or JOBU = 'F') and */
  822. /* > 4.1. if JOBV = 'V' */
  823. /* > the minimal requirement is LWORK >= 5*N+2*N*N. */
  824. /* > 4.2. if JOBV = 'J' the minimal requirement is */
  825. /* > LWORK >= 4*N+N*N. */
  826. /* > In both cases, the allocated CWORK can accommodate blocked runs */
  827. /* > of CGEQP3, CGEQRF, CGELQF, CUNMQR, CUNMLQ. */
  828. /* > */
  829. /* > If the call to CGEJSV is a workspace query (indicated by LWORK=-1 or */
  830. /* > LRWORK=-1), then on exit CWORK(1) contains the optimal and CWORK(2) contains the */
  831. /* > minimal length of CWORK for the job parameters used in the call. */
  832. /* > \endverbatim */
  833. /* > */
  834. /* > \param[out] RWORK */
  835. /* > \verbatim */
  836. /* > RWORK is REAL array, dimension (MAX(7,LWORK)) */
  837. /* > On exit, */
  838. /* > RWORK(1) = Determines the scaling factor SCALE = RWORK(2) / RWORK(1) */
  839. /* > such that SCALE*SVA(1:N) are the computed singular values */
  840. /* > of A. (See the description of SVA().) */
  841. /* > RWORK(2) = See the description of RWORK(1). */
  842. /* > RWORK(3) = SCONDA is an estimate for the condition number of */
  843. /* > column equilibrated A. (If JOBA = 'E' or 'G') */
  844. /* > SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1). */
  845. /* > It is computed using SPOCON. It holds */
  846. /* > N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
  847. /* > where R is the triangular factor from the QRF of A. */
  848. /* > However, if R is truncated and the numerical rank is */
  849. /* > determined to be strictly smaller than N, SCONDA is */
  850. /* > returned as -1, thus indicating that the smallest */
  851. /* > singular values might be lost. */
  852. /* > */
  853. /* > If full SVD is needed, the following two condition numbers are */
  854. /* > useful for the analysis of the algorithm. They are provided for */
  855. /* > a developer/implementer who is familiar with the details of */
  856. /* > the method. */
  857. /* > */
  858. /* > RWORK(4) = an estimate of the scaled condition number of the */
  859. /* > triangular factor in the first QR factorization. */
  860. /* > RWORK(5) = an estimate of the scaled condition number of the */
  861. /* > triangular factor in the second QR factorization. */
  862. /* > The following two parameters are computed if JOBT = 'T'. */
  863. /* > They are provided for a developer/implementer who is familiar */
  864. /* > with the details of the method. */
  865. /* > RWORK(6) = the entropy of A^* * A :: this is the Shannon entropy */
  866. /* > of diag(A^* * A) / Trace(A^* * A) taken as point in the */
  867. /* > probability simplex. */
  868. /* > RWORK(7) = the entropy of A * A^*. (See the description of RWORK(6).) */
  869. /* > If the call to CGEJSV is a workspace query (indicated by LWORK=-1 or */
  870. /* > LRWORK=-1), then on exit RWORK(1) contains the required length of */
  871. /* > RWORK for the job parameters used in the call. */
  872. /* > \endverbatim */
  873. /* > */
  874. /* > \param[in] LRWORK */
  875. /* > \verbatim */
  876. /* > LRWORK is INTEGER */
  877. /* > Length of RWORK to confirm proper allocation of workspace. */
  878. /* > LRWORK depends on the job: */
  879. /* > */
  880. /* > 1. If only the singular values are requested i.e. if */
  881. /* > LSAME(JOBU,'N') .AND. LSAME(JOBV,'N') */
  882. /* > then: */
  883. /* > 1.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
  884. /* > then: LRWORK = f2cmax( 7, 2 * M ). */
  885. /* > 1.2. Otherwise, LRWORK = f2cmax( 7, N ). */
  886. /* > 2. If singular values with the right singular vectors are requested */
  887. /* > i.e. if */
  888. /* > (LSAME(JOBV,'V').OR.LSAME(JOBV,'J')) .AND. */
  889. /* > .NOT.(LSAME(JOBU,'U').OR.LSAME(JOBU,'F')) */
  890. /* > then: */
  891. /* > 2.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
  892. /* > then LRWORK = f2cmax( 7, 2 * M ). */
  893. /* > 2.2. Otherwise, LRWORK = f2cmax( 7, N ). */
  894. /* > 3. If singular values with the left singular vectors are requested, i.e. if */
  895. /* > (LSAME(JOBU,'U').OR.LSAME(JOBU,'F')) .AND. */
  896. /* > .NOT.(LSAME(JOBV,'V').OR.LSAME(JOBV,'J')) */
  897. /* > then: */
  898. /* > 3.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
  899. /* > then LRWORK = f2cmax( 7, 2 * M ). */
  900. /* > 3.2. Otherwise, LRWORK = f2cmax( 7, N ). */
  901. /* > 4. If singular values with both the left and the right singular vectors */
  902. /* > are requested, i.e. if */
  903. /* > (LSAME(JOBU,'U').OR.LSAME(JOBU,'F')) .AND. */
  904. /* > (LSAME(JOBV,'V').OR.LSAME(JOBV,'J')) */
  905. /* > then: */
  906. /* > 4.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
  907. /* > then LRWORK = f2cmax( 7, 2 * M ). */
  908. /* > 4.2. Otherwise, LRWORK = f2cmax( 7, N ). */
  909. /* > */
  910. /* > If, on entry, LRWORK = -1 or LWORK=-1, a workspace query is assumed and */
  911. /* > the length of RWORK is returned in RWORK(1). */
  912. /* > \endverbatim */
  913. /* > */
  914. /* > \param[out] IWORK */
  915. /* > \verbatim */
  916. /* > IWORK is INTEGER array, of dimension at least 4, that further depends */
  917. /* > on the job: */
  918. /* > */
  919. /* > 1. If only the singular values are requested then: */
  920. /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
  921. /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
  922. /* > 2. If the singular values and the right singular vectors are requested then: */
  923. /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
  924. /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
  925. /* > 3. If the singular values and the left singular vectors are requested then: */
  926. /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
  927. /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
  928. /* > 4. If the singular values with both the left and the right singular vectors */
  929. /* > are requested, then: */
  930. /* > 4.1. If LSAME(JOBV,'J') the length of IWORK is determined as follows: */
  931. /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
  932. /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
  933. /* > 4.2. If LSAME(JOBV,'V') the length of IWORK is determined as follows: */
  934. /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
  935. /* > then the length of IWORK is 2*N+M; otherwise the length of IWORK is 2*N. */
  936. /* > */
  937. /* > On exit, */
  938. /* > IWORK(1) = the numerical rank determined after the initial */
  939. /* > QR factorization with pivoting. See the descriptions */
  940. /* > of JOBA and JOBR. */
  941. /* > IWORK(2) = the number of the computed nonzero singular values */
  942. /* > IWORK(3) = if nonzero, a warning message: */
  943. /* > If IWORK(3) = 1 then some of the column norms of A */
  944. /* > were denormalized floats. The requested high accuracy */
  945. /* > is not warranted by the data. */
  946. /* > IWORK(4) = 1 or -1. If IWORK(4) = 1, then the procedure used A^* to */
  947. /* > do the job as specified by the JOB parameters. */
  948. /* > If the call to CGEJSV is a workspace query (indicated by LWORK = -1 and */
  949. /* > LRWORK = -1), then on exit IWORK(1) contains the required length of */
  950. /* > IWORK for the job parameters used in the call. */
  951. /* > \endverbatim */
  952. /* > */
  953. /* > \param[out] INFO */
  954. /* > \verbatim */
  955. /* > INFO is INTEGER */
  956. /* > < 0: if INFO = -i, then the i-th argument had an illegal value. */
  957. /* > = 0: successful exit; */
  958. /* > > 0: CGEJSV did not converge in the maximal allowed number */
  959. /* > of sweeps. The computed values may be inaccurate. */
  960. /* > \endverbatim */
  961. /* Authors: */
  962. /* ======== */
  963. /* > \author Univ. of Tennessee */
  964. /* > \author Univ. of California Berkeley */
  965. /* > \author Univ. of Colorado Denver */
  966. /* > \author NAG Ltd. */
  967. /* > \date June 2016 */
  968. /* > \ingroup complexGEsing */
  969. /* > \par Further Details: */
  970. /* ===================== */
  971. /* > */
  972. /* > \verbatim */
  973. /* > CGEJSV implements a preconditioned Jacobi SVD algorithm. It uses CGEQP3, */
  974. /* > CGEQRF, and CGELQF as preprocessors and preconditioners. Optionally, an */
  975. /* > additional row pivoting can be used as a preprocessor, which in some */
  976. /* > cases results in much higher accuracy. An example is matrix A with the */
  977. /* > structure A = D1 * C * D2, where D1, D2 are arbitrarily ill-conditioned */
  978. /* > diagonal matrices and C is well-conditioned matrix. In that case, complete */
  979. /* > pivoting in the first QR factorizations provides accuracy dependent on the */
  980. /* > condition number of C, and independent of D1, D2. Such higher accuracy is */
  981. /* > not completely understood theoretically, but it works well in practice. */
  982. /* > Further, if A can be written as A = B*D, with well-conditioned B and some */
  983. /* > diagonal D, then the high accuracy is guaranteed, both theoretically and */
  984. /* > in software, independent of D. For more details see [1], [2]. */
  985. /* > The computational range for the singular values can be the full range */
  986. /* > ( UNDERFLOW,OVERFLOW ), provided that the machine arithmetic and the BLAS */
  987. /* > & LAPACK routines called by CGEJSV are implemented to work in that range. */
  988. /* > If that is not the case, then the restriction for safe computation with */
  989. /* > the singular values in the range of normalized IEEE numbers is that the */
  990. /* > spectral condition number kappa(A)=sigma_max(A)/sigma_min(A) does not */
  991. /* > overflow. This code (CGEJSV) is best used in this restricted range, */
  992. /* > meaning that singular values of magnitude below ||A||_2 / SLAMCH('O') are */
  993. /* > returned as zeros. See JOBR for details on this. */
  994. /* > Further, this implementation is somewhat slower than the one described */
  995. /* > in [1,2] due to replacement of some non-LAPACK components, and because */
  996. /* > the choice of some tuning parameters in the iterative part (CGESVJ) is */
  997. /* > left to the implementer on a particular machine. */
  998. /* > The rank revealing QR factorization (in this code: CGEQP3) should be */
  999. /* > implemented as in [3]. We have a new version of CGEQP3 under development */
  1000. /* > that is more robust than the current one in LAPACK, with a cleaner cut in */
  1001. /* > rank deficient cases. It will be available in the SIGMA library [4]. */
  1002. /* > If M is much larger than N, it is obvious that the initial QRF with */
  1003. /* > column pivoting can be preprocessed by the QRF without pivoting. That */
  1004. /* > well known trick is not used in CGEJSV because in some cases heavy row */
  1005. /* > weighting can be treated with complete pivoting. The overhead in cases */
  1006. /* > M much larger than N is then only due to pivoting, but the benefits in */
  1007. /* > terms of accuracy have prevailed. The implementer/user can incorporate */
  1008. /* > this extra QRF step easily. The implementer can also improve data movement */
  1009. /* > (matrix transpose, matrix copy, matrix transposed copy) - this */
  1010. /* > implementation of CGEJSV uses only the simplest, naive data movement. */
  1011. /* > \endverbatim */
  1012. /* > \par Contributor: */
  1013. /* ================== */
  1014. /* > */
  1015. /* > Zlatko Drmac (Zagreb, Croatia) */
  1016. /* > \par References: */
  1017. /* ================ */
  1018. /* > */
  1019. /* > \verbatim */
  1020. /* > */
  1021. /* > [1] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. */
  1022. /* > SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1322-1342. */
  1023. /* > LAPACK Working note 169. */
  1024. /* > [2] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. */
  1025. /* > SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1343-1362. */
  1026. /* > LAPACK Working note 170. */
  1027. /* > [3] Z. Drmac and Z. Bujanovic: On the failure of rank-revealing QR */
  1028. /* > factorization software - a case study. */
  1029. /* > ACM Trans. Math. Softw. Vol. 35, No 2 (2008), pp. 1-28. */
  1030. /* > LAPACK Working note 176. */
  1031. /* > [4] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
  1032. /* > QSVD, (H,K)-SVD computations. */
  1033. /* > Department of Mathematics, University of Zagreb, 2008, 2016. */
  1034. /* > \endverbatim */
  1035. /* > \par Bugs, examples and comments: */
  1036. /* ================================= */
  1037. /* > */
  1038. /* > Please report all bugs and send interesting examples and/or comments to */
  1039. /* > drmac@math.hr. Thank you. */
  1040. /* > */
  1041. /* ===================================================================== */
  1042. /* Subroutine */ int cgejsv_(char *joba, char *jobu, char *jobv, char *jobr,
  1043. char *jobt, char *jobp, integer *m, integer *n, complex *a, integer *
  1044. lda, real *sva, complex *u, integer *ldu, complex *v, integer *ldv,
  1045. complex *cwork, integer *lwork, real *rwork, integer *lrwork, integer
  1046. *iwork, integer *info)
  1047. {
  1048. /* System generated locals */
  1049. integer a_dim1, a_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
  1050. i__3, i__4, i__5, i__6, i__7, i__8, i__9, i__10, i__11;
  1051. real r__1, r__2, r__3;
  1052. complex q__1;
  1053. /* Local variables */
  1054. integer lwrk_cunmqr__;
  1055. logical defr;
  1056. real aapp, aaqq;
  1057. logical kill;
  1058. integer ierr, lwrk_cgeqp3n__;
  1059. real temp1;
  1060. integer lwunmqrm, lwrk_cgesvju__, lwrk_cgesvjv__, lwqp3, lwrk_cunmqrm__,
  1061. p, q;
  1062. logical jracc;
  1063. extern logical lsame_(char *, char *);
  1064. extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
  1065. complex ctemp;
  1066. real entra, small;
  1067. integer iwoff;
  1068. real sfmin;
  1069. logical lsvec;
  1070. extern /* Subroutine */ int ccopy_(integer *, complex *, integer *,
  1071. complex *, integer *), cswap_(integer *, complex *, integer *,
  1072. complex *, integer *);
  1073. real epsln;
  1074. logical rsvec;
  1075. integer lwcon, lwlqf;
  1076. extern /* Subroutine */ int ctrsm_(char *, char *, char *, char *,
  1077. integer *, integer *, complex *, complex *, integer *, complex *,
  1078. integer *);
  1079. integer lwqrf, n1;
  1080. logical l2aber;
  1081. extern /* Subroutine */ int cgeqp3_(integer *, integer *, complex *,
  1082. integer *, integer *, complex *, complex *, integer *, real *,
  1083. integer *);
  1084. real condr1, condr2, uscal1, uscal2;
  1085. logical l2kill, l2rank, l2tran;
  1086. extern real scnrm2_(integer *, complex *, integer *);
  1087. logical l2pert;
  1088. integer lrwqp3;
  1089. extern /* Subroutine */ int clacgv_(integer *, complex *, integer *);
  1090. integer nr;
  1091. extern /* Subroutine */ int cgelqf_(integer *, integer *, complex *,
  1092. integer *, complex *, complex *, integer *, integer *);
  1093. extern integer icamax_(integer *, complex *, integer *);
  1094. extern /* Subroutine */ int clascl_(char *, integer *, integer *, real *,
  1095. real *, integer *, integer *, complex *, integer *, integer *);
  1096. real scalem, sconda;
  1097. logical goscal;
  1098. real aatmin;
  1099. extern real slamch_(char *);
  1100. real aatmax;
  1101. extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
  1102. integer *, complex *, complex *, integer *, integer *), clacpy_(
  1103. char *, integer *, integer *, complex *, integer *, complex *,
  1104. integer *), clapmr_(logical *, integer *, integer *,
  1105. complex *, integer *, integer *);
  1106. logical noscal;
  1107. extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
  1108. *, complex *, complex *, integer *);
  1109. extern integer isamax_(integer *, real *, integer *);
  1110. extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
  1111. real *, integer *, integer *, real *, integer *, integer *), cpocon_(char *, integer *, complex *, integer *, real *,
  1112. real *, complex *, real *, integer *), csscal_(integer *,
  1113. real *, complex *, integer *), classq_(integer *, complex *,
  1114. integer *, real *, real *), xerbla_(char *, integer *, ftnlen),
  1115. cgesvj_(char *, char *, char *, integer *, integer *, complex *,
  1116. integer *, real *, integer *, complex *, integer *, complex *,
  1117. integer *, real *, integer *, integer *),
  1118. claswp_(integer *, complex *, integer *, integer *, integer *,
  1119. integer *, integer *);
  1120. real entrat;
  1121. logical almort;
  1122. complex cdummy[1];
  1123. extern /* Subroutine */ int cungqr_(integer *, integer *, integer *,
  1124. complex *, integer *, complex *, complex *, integer *, integer *);
  1125. real maxprj;
  1126. extern /* Subroutine */ int cunmlq_(char *, char *, integer *, integer *,
  1127. integer *, complex *, integer *, complex *, complex *, integer *,
  1128. complex *, integer *, integer *);
  1129. logical errest;
  1130. integer lrwcon;
  1131. extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
  1132. real *);
  1133. logical transp;
  1134. integer minwrk, lwsvdj;
  1135. extern /* Subroutine */ int cunmqr_(char *, char *, integer *, integer *,
  1136. integer *, complex *, integer *, complex *, complex *, integer *,
  1137. complex *, integer *, integer *);
  1138. real rdummy[1];
  1139. logical lquery, rowpiv;
  1140. integer optwrk;
  1141. real big;
  1142. integer lwrk_cgeqp3__;
  1143. real cond_ok__, xsc, big1;
  1144. integer warning, numrank, lwrk_cgelqf__, miniwrk, lwrk_cgeqrf__, minrwrk,
  1145. lrwsvdj, lwunmlq, lwsvdjv, lwrk_cgesvj__, lwunmqr, lwrk_cunmlq__;
  1146. /* -- LAPACK computational routine (version 3.7.1) -- */
  1147. /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
  1148. /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
  1149. /* June 2017 */
  1150. /* =========================================================================== */
  1151. /* Test the input arguments */
  1152. /* Parameter adjustments */
  1153. --sva;
  1154. a_dim1 = *lda;
  1155. a_offset = 1 + a_dim1 * 1;
  1156. a -= a_offset;
  1157. u_dim1 = *ldu;
  1158. u_offset = 1 + u_dim1 * 1;
  1159. u -= u_offset;
  1160. v_dim1 = *ldv;
  1161. v_offset = 1 + v_dim1 * 1;
  1162. v -= v_offset;
  1163. --cwork;
  1164. --rwork;
  1165. --iwork;
  1166. /* Function Body */
  1167. lsvec = lsame_(jobu, "U") || lsame_(jobu, "F");
  1168. jracc = lsame_(jobv, "J");
  1169. rsvec = lsame_(jobv, "V") || jracc;
  1170. rowpiv = lsame_(joba, "F") || lsame_(joba, "G");
  1171. l2rank = lsame_(joba, "R");
  1172. l2aber = lsame_(joba, "A");
  1173. errest = lsame_(joba, "E") || lsame_(joba, "G");
  1174. l2tran = lsame_(jobt, "T") && *m == *n;
  1175. l2kill = lsame_(jobr, "R");
  1176. defr = lsame_(jobr, "N");
  1177. l2pert = lsame_(jobp, "P");
  1178. lquery = *lwork == -1 || *lrwork == -1;
  1179. if (! (rowpiv || l2rank || l2aber || errest || lsame_(joba, "C"))) {
  1180. *info = -1;
  1181. } else if (! (lsvec || lsame_(jobu, "N") || lsame_(
  1182. jobu, "W") && rsvec && l2tran)) {
  1183. *info = -2;
  1184. } else if (! (rsvec || lsame_(jobv, "N") || lsame_(
  1185. jobv, "W") && lsvec && l2tran)) {
  1186. *info = -3;
  1187. } else if (! (l2kill || defr)) {
  1188. *info = -4;
  1189. } else if (! (lsame_(jobt, "T") || lsame_(jobt,
  1190. "N"))) {
  1191. *info = -5;
  1192. } else if (! (l2pert || lsame_(jobp, "N"))) {
  1193. *info = -6;
  1194. } else if (*m < 0) {
  1195. *info = -7;
  1196. } else if (*n < 0 || *n > *m) {
  1197. *info = -8;
  1198. } else if (*lda < *m) {
  1199. *info = -10;
  1200. } else if (lsvec && *ldu < *m) {
  1201. *info = -13;
  1202. } else if (rsvec && *ldv < *n) {
  1203. *info = -15;
  1204. } else {
  1205. /* #:) */
  1206. *info = 0;
  1207. }
  1208. if (*info == 0) {
  1209. /* [[The expressions for computing the minimal and the optimal */
  1210. /* values of LCWORK, LRWORK are written with a lot of redundancy and */
  1211. /* can be simplified. However, this verbose form is useful for */
  1212. /* maintenance and modifications of the code.]] */
  1213. /* CGEQRF of an N x N matrix, CGELQF of an N x N matrix, */
  1214. /* CUNMLQ for computing N x N matrix, CUNMQR for computing N x N */
  1215. /* matrix, CUNMQR for computing M x N matrix, respectively. */
  1216. lwqp3 = *n + 1;
  1217. lwqrf = f2cmax(1,*n);
  1218. lwlqf = f2cmax(1,*n);
  1219. lwunmlq = f2cmax(1,*n);
  1220. lwunmqr = f2cmax(1,*n);
  1221. lwunmqrm = f2cmax(1,*m);
  1222. lwcon = *n << 1;
  1223. /* without and with explicit accumulation of Jacobi rotations */
  1224. /* Computing MAX */
  1225. i__1 = *n << 1;
  1226. lwsvdj = f2cmax(i__1,1);
  1227. /* Computing MAX */
  1228. i__1 = *n << 1;
  1229. lwsvdjv = f2cmax(i__1,1);
  1230. lrwqp3 = *n << 1;
  1231. lrwcon = *n;
  1232. lrwsvdj = *n;
  1233. if (lquery) {
  1234. cgeqp3_(m, n, &a[a_offset], lda, &iwork[1], cdummy, cdummy, &c_n1,
  1235. rdummy, &ierr);
  1236. lwrk_cgeqp3__ = cdummy[0].r;
  1237. cgeqrf_(n, n, &a[a_offset], lda, cdummy, cdummy, &c_n1, &ierr);
  1238. lwrk_cgeqrf__ = cdummy[0].r;
  1239. cgelqf_(n, n, &a[a_offset], lda, cdummy, cdummy, &c_n1, &ierr);
  1240. lwrk_cgelqf__ = cdummy[0].r;
  1241. }
  1242. minwrk = 2;
  1243. optwrk = 2;
  1244. miniwrk = *n;
  1245. if (! (lsvec || rsvec)) {
  1246. /* only the singular values are requested */
  1247. if (errest) {
  1248. /* Computing MAX */
  1249. /* Computing 2nd power */
  1250. i__3 = *n;
  1251. i__1 = *n + lwqp3, i__2 = i__3 * i__3 + lwcon, i__1 = f2cmax(
  1252. i__1,i__2), i__2 = *n + lwqrf, i__1 = f2cmax(i__1,i__2);
  1253. minwrk = f2cmax(i__1,lwsvdj);
  1254. } else {
  1255. /* Computing MAX */
  1256. i__1 = *n + lwqp3, i__2 = *n + lwqrf, i__1 = f2cmax(i__1,i__2);
  1257. minwrk = f2cmax(i__1,lwsvdj);
  1258. }
  1259. if (lquery) {
  1260. cgesvj_("L", "N", "N", n, n, &a[a_offset], lda, &sva[1], n, &
  1261. v[v_offset], ldv, cdummy, &c_n1, rdummy, &c_n1, &ierr);
  1262. lwrk_cgesvj__ = cdummy[0].r;
  1263. if (errest) {
  1264. /* Computing MAX */
  1265. /* Computing 2nd power */
  1266. i__3 = *n;
  1267. i__1 = *n + lwrk_cgeqp3__, i__2 = i__3 * i__3 + lwcon,
  1268. i__1 = f2cmax(i__1,i__2), i__2 = *n + lwrk_cgeqrf__,
  1269. i__1 = f2cmax(i__1,i__2);
  1270. optwrk = f2cmax(i__1,lwrk_cgesvj__);
  1271. } else {
  1272. /* Computing MAX */
  1273. i__1 = *n + lwrk_cgeqp3__, i__2 = *n + lwrk_cgeqrf__,
  1274. i__1 = f2cmax(i__1,i__2);
  1275. optwrk = f2cmax(i__1,lwrk_cgesvj__);
  1276. }
  1277. }
  1278. if (l2tran || rowpiv) {
  1279. if (errest) {
  1280. /* Computing MAX */
  1281. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1282. f2cmax(i__1,lrwqp3), i__1 = f2cmax(i__1,lrwcon);
  1283. minrwrk = f2cmax(i__1,lrwsvdj);
  1284. } else {
  1285. /* Computing MAX */
  1286. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1287. f2cmax(i__1,lrwqp3);
  1288. minrwrk = f2cmax(i__1,lrwsvdj);
  1289. }
  1290. } else {
  1291. if (errest) {
  1292. /* Computing MAX */
  1293. i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwcon);
  1294. minrwrk = f2cmax(i__1,lrwsvdj);
  1295. } else {
  1296. /* Computing MAX */
  1297. i__1 = f2cmax(7,lrwqp3);
  1298. minrwrk = f2cmax(i__1,lrwsvdj);
  1299. }
  1300. }
  1301. if (rowpiv || l2tran) {
  1302. miniwrk += *m;
  1303. }
  1304. } else if (rsvec && ! lsvec) {
  1305. /* singular values and the right singular vectors are requested */
  1306. if (errest) {
  1307. /* Computing MAX */
  1308. i__1 = *n + lwqp3, i__1 = f2cmax(i__1,lwcon), i__1 = f2cmax(i__1,
  1309. lwsvdj), i__2 = *n + lwlqf, i__1 = f2cmax(i__1,i__2),
  1310. i__2 = (*n << 1) + lwqrf, i__1 = f2cmax(i__1,i__2), i__2
  1311. = *n + lwsvdj, i__1 = f2cmax(i__1,i__2), i__2 = *n +
  1312. lwunmlq;
  1313. minwrk = f2cmax(i__1,i__2);
  1314. } else {
  1315. /* Computing MAX */
  1316. i__1 = *n + lwqp3, i__1 = f2cmax(i__1,lwsvdj), i__2 = *n + lwlqf,
  1317. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqrf,
  1318. i__1 = f2cmax(i__1,i__2), i__2 = *n + lwsvdj, i__1 = f2cmax(
  1319. i__1,i__2), i__2 = *n + lwunmlq;
  1320. minwrk = f2cmax(i__1,i__2);
  1321. }
  1322. if (lquery) {
  1323. cgesvj_("L", "U", "N", n, n, &u[u_offset], ldu, &sva[1], n, &
  1324. a[a_offset], lda, cdummy, &c_n1, rdummy, &c_n1, &ierr);
  1325. lwrk_cgesvj__ = cdummy[0].r;
  1326. cunmlq_("L", "C", n, n, n, &a[a_offset], lda, cdummy, &v[
  1327. v_offset], ldv, cdummy, &c_n1, &ierr);
  1328. lwrk_cunmlq__ = cdummy[0].r;
  1329. if (errest) {
  1330. /* Computing MAX */
  1331. i__1 = *n + lwrk_cgeqp3__, i__1 = f2cmax(i__1,lwcon), i__1 =
  1332. f2cmax(i__1,lwrk_cgesvj__), i__2 = *n +
  1333. lwrk_cgelqf__, i__1 = f2cmax(i__1,i__2), i__2 = (*n
  1334. << 1) + lwrk_cgeqrf__, i__1 = f2cmax(i__1,i__2),
  1335. i__2 = *n + lwrk_cgesvj__, i__1 = f2cmax(i__1,i__2),
  1336. i__2 = *n + lwrk_cunmlq__;
  1337. optwrk = f2cmax(i__1,i__2);
  1338. } else {
  1339. /* Computing MAX */
  1340. i__1 = *n + lwrk_cgeqp3__, i__1 = f2cmax(i__1,lwrk_cgesvj__),
  1341. i__2 = *n + lwrk_cgelqf__, i__1 = f2cmax(i__1,i__2),
  1342. i__2 = (*n << 1) + lwrk_cgeqrf__, i__1 = f2cmax(
  1343. i__1,i__2), i__2 = *n + lwrk_cgesvj__, i__1 = f2cmax(
  1344. i__1,i__2), i__2 = *n + lwrk_cunmlq__;
  1345. optwrk = f2cmax(i__1,i__2);
  1346. }
  1347. }
  1348. if (l2tran || rowpiv) {
  1349. if (errest) {
  1350. /* Computing MAX */
  1351. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1352. f2cmax(i__1,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1353. minrwrk = f2cmax(i__1,lrwcon);
  1354. } else {
  1355. /* Computing MAX */
  1356. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1357. f2cmax(i__1,lrwqp3);
  1358. minrwrk = f2cmax(i__1,lrwsvdj);
  1359. }
  1360. } else {
  1361. if (errest) {
  1362. /* Computing MAX */
  1363. i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1364. minrwrk = f2cmax(i__1,lrwcon);
  1365. } else {
  1366. /* Computing MAX */
  1367. i__1 = f2cmax(7,lrwqp3);
  1368. minrwrk = f2cmax(i__1,lrwsvdj);
  1369. }
  1370. }
  1371. if (rowpiv || l2tran) {
  1372. miniwrk += *m;
  1373. }
  1374. } else if (lsvec && ! rsvec) {
  1375. /* singular values and the left singular vectors are requested */
  1376. if (errest) {
  1377. /* Computing MAX */
  1378. i__1 = f2cmax(lwqp3,lwcon), i__2 = *n + lwqrf, i__1 = f2cmax(i__1,
  1379. i__2), i__1 = f2cmax(i__1,lwsvdj);
  1380. minwrk = *n + f2cmax(i__1,lwunmqrm);
  1381. } else {
  1382. /* Computing MAX */
  1383. i__1 = lwqp3, i__2 = *n + lwqrf, i__1 = f2cmax(i__1,i__2), i__1 =
  1384. f2cmax(i__1,lwsvdj);
  1385. minwrk = *n + f2cmax(i__1,lwunmqrm);
  1386. }
  1387. if (lquery) {
  1388. cgesvj_("L", "U", "N", n, n, &u[u_offset], ldu, &sva[1], n, &
  1389. a[a_offset], lda, cdummy, &c_n1, rdummy, &c_n1, &ierr);
  1390. lwrk_cgesvj__ = cdummy[0].r;
  1391. cunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
  1392. u_offset], ldu, cdummy, &c_n1, &ierr);
  1393. lwrk_cunmqrm__ = cdummy[0].r;
  1394. if (errest) {
  1395. /* Computing MAX */
  1396. i__1 = f2cmax(lwrk_cgeqp3__,lwcon), i__2 = *n +
  1397. lwrk_cgeqrf__, i__1 = f2cmax(i__1,i__2), i__1 = f2cmax(
  1398. i__1,lwrk_cgesvj__);
  1399. optwrk = *n + f2cmax(i__1,lwrk_cunmqrm__);
  1400. } else {
  1401. /* Computing MAX */
  1402. i__1 = lwrk_cgeqp3__, i__2 = *n + lwrk_cgeqrf__, i__1 =
  1403. f2cmax(i__1,i__2), i__1 = f2cmax(i__1,lwrk_cgesvj__);
  1404. optwrk = *n + f2cmax(i__1,lwrk_cunmqrm__);
  1405. }
  1406. }
  1407. if (l2tran || rowpiv) {
  1408. if (errest) {
  1409. /* Computing MAX */
  1410. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1411. f2cmax(i__1,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1412. minrwrk = f2cmax(i__1,lrwcon);
  1413. } else {
  1414. /* Computing MAX */
  1415. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
  1416. f2cmax(i__1,lrwqp3);
  1417. minrwrk = f2cmax(i__1,lrwsvdj);
  1418. }
  1419. } else {
  1420. if (errest) {
  1421. /* Computing MAX */
  1422. i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1423. minrwrk = f2cmax(i__1,lrwcon);
  1424. } else {
  1425. /* Computing MAX */
  1426. i__1 = f2cmax(7,lrwqp3);
  1427. minrwrk = f2cmax(i__1,lrwsvdj);
  1428. }
  1429. }
  1430. if (rowpiv || l2tran) {
  1431. miniwrk += *m;
  1432. }
  1433. } else {
  1434. /* full SVD is requested */
  1435. if (! jracc) {
  1436. if (errest) {
  1437. /* Computing MAX */
  1438. /* Computing 2nd power */
  1439. i__3 = *n;
  1440. /* Computing 2nd power */
  1441. i__4 = *n;
  1442. /* Computing 2nd power */
  1443. i__5 = *n;
  1444. /* Computing 2nd power */
  1445. i__6 = *n;
  1446. /* Computing 2nd power */
  1447. i__7 = *n;
  1448. /* Computing 2nd power */
  1449. i__8 = *n;
  1450. /* Computing 2nd power */
  1451. i__9 = *n;
  1452. /* Computing 2nd power */
  1453. i__10 = *n;
  1454. /* Computing 2nd power */
  1455. i__11 = *n;
  1456. i__1 = *n + lwqp3, i__2 = *n + lwcon, i__1 = f2cmax(i__1,
  1457. i__2), i__2 = (*n << 1) + i__3 * i__3 + lwcon,
  1458. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqrf,
  1459. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqp3,
  1460. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
  1461. i__4 + *n + lwlqf, i__1 = f2cmax(i__1,i__2), i__2 = (
  1462. *n << 1) + i__5 * i__5 + *n + i__6 * i__6 + lwcon,
  1463. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__7 *
  1464. i__7 + *n + lwsvdj, i__1 = f2cmax(i__1,i__2), i__2 =
  1465. (*n << 1) + i__8 * i__8 + *n + lwsvdjv, i__1 =
  1466. f2cmax(i__1,i__2), i__2 = (*n << 1) + i__9 * i__9 + *
  1467. n + lwunmqr, i__1 = f2cmax(i__1,i__2), i__2 = (*n <<
  1468. 1) + i__10 * i__10 + *n + lwunmlq, i__1 = f2cmax(
  1469. i__1,i__2), i__2 = *n + i__11 * i__11 + lwsvdj,
  1470. i__1 = f2cmax(i__1,i__2), i__2 = *n + lwunmqrm;
  1471. minwrk = f2cmax(i__1,i__2);
  1472. } else {
  1473. /* Computing MAX */
  1474. /* Computing 2nd power */
  1475. i__3 = *n;
  1476. /* Computing 2nd power */
  1477. i__4 = *n;
  1478. /* Computing 2nd power */
  1479. i__5 = *n;
  1480. /* Computing 2nd power */
  1481. i__6 = *n;
  1482. /* Computing 2nd power */
  1483. i__7 = *n;
  1484. /* Computing 2nd power */
  1485. i__8 = *n;
  1486. /* Computing 2nd power */
  1487. i__9 = *n;
  1488. /* Computing 2nd power */
  1489. i__10 = *n;
  1490. /* Computing 2nd power */
  1491. i__11 = *n;
  1492. i__1 = *n + lwqp3, i__2 = (*n << 1) + i__3 * i__3 + lwcon,
  1493. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqrf,
  1494. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqp3,
  1495. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
  1496. i__4 + *n + lwlqf, i__1 = f2cmax(i__1,i__2), i__2 = (
  1497. *n << 1) + i__5 * i__5 + *n + i__6 * i__6 + lwcon,
  1498. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__7 *
  1499. i__7 + *n + lwsvdj, i__1 = f2cmax(i__1,i__2), i__2 =
  1500. (*n << 1) + i__8 * i__8 + *n + lwsvdjv, i__1 =
  1501. f2cmax(i__1,i__2), i__2 = (*n << 1) + i__9 * i__9 + *
  1502. n + lwunmqr, i__1 = f2cmax(i__1,i__2), i__2 = (*n <<
  1503. 1) + i__10 * i__10 + *n + lwunmlq, i__1 = f2cmax(
  1504. i__1,i__2), i__2 = *n + i__11 * i__11 + lwsvdj,
  1505. i__1 = f2cmax(i__1,i__2), i__2 = *n + lwunmqrm;
  1506. minwrk = f2cmax(i__1,i__2);
  1507. }
  1508. miniwrk += *n;
  1509. if (rowpiv || l2tran) {
  1510. miniwrk += *m;
  1511. }
  1512. } else {
  1513. if (errest) {
  1514. /* Computing MAX */
  1515. /* Computing 2nd power */
  1516. i__3 = *n;
  1517. /* Computing 2nd power */
  1518. i__4 = *n;
  1519. i__1 = *n + lwqp3, i__2 = *n + lwcon, i__1 = f2cmax(i__1,
  1520. i__2), i__2 = (*n << 1) + lwqrf, i__1 = f2cmax(i__1,
  1521. i__2), i__2 = (*n << 1) + i__3 * i__3 + lwsvdjv,
  1522. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
  1523. i__4 + *n + lwunmqr, i__1 = f2cmax(i__1,i__2), i__2 =
  1524. *n + lwunmqrm;
  1525. minwrk = f2cmax(i__1,i__2);
  1526. } else {
  1527. /* Computing MAX */
  1528. /* Computing 2nd power */
  1529. i__3 = *n;
  1530. /* Computing 2nd power */
  1531. i__4 = *n;
  1532. i__1 = *n + lwqp3, i__2 = (*n << 1) + lwqrf, i__1 = f2cmax(
  1533. i__1,i__2), i__2 = (*n << 1) + i__3 * i__3 +
  1534. lwsvdjv, i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1)
  1535. + i__4 * i__4 + *n + lwunmqr, i__1 = f2cmax(i__1,
  1536. i__2), i__2 = *n + lwunmqrm;
  1537. minwrk = f2cmax(i__1,i__2);
  1538. }
  1539. if (rowpiv || l2tran) {
  1540. miniwrk += *m;
  1541. }
  1542. }
  1543. if (lquery) {
  1544. cunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
  1545. u_offset], ldu, cdummy, &c_n1, &ierr);
  1546. lwrk_cunmqrm__ = cdummy[0].r;
  1547. cunmqr_("L", "N", n, n, n, &a[a_offset], lda, cdummy, &u[
  1548. u_offset], ldu, cdummy, &c_n1, &ierr);
  1549. lwrk_cunmqr__ = cdummy[0].r;
  1550. if (! jracc) {
  1551. cgeqp3_(n, n, &a[a_offset], lda, &iwork[1], cdummy,
  1552. cdummy, &c_n1, rdummy, &ierr);
  1553. lwrk_cgeqp3n__ = cdummy[0].r;
  1554. cgesvj_("L", "U", "N", n, n, &u[u_offset], ldu, &sva[1],
  1555. n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
  1556. c_n1, &ierr);
  1557. lwrk_cgesvj__ = cdummy[0].r;
  1558. cgesvj_("U", "U", "N", n, n, &u[u_offset], ldu, &sva[1],
  1559. n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
  1560. c_n1, &ierr);
  1561. lwrk_cgesvju__ = cdummy[0].r;
  1562. cgesvj_("L", "U", "V", n, n, &u[u_offset], ldu, &sva[1],
  1563. n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
  1564. c_n1, &ierr);
  1565. lwrk_cgesvjv__ = cdummy[0].r;
  1566. cunmlq_("L", "C", n, n, n, &a[a_offset], lda, cdummy, &v[
  1567. v_offset], ldv, cdummy, &c_n1, &ierr);
  1568. lwrk_cunmlq__ = cdummy[0].r;
  1569. if (errest) {
  1570. /* Computing MAX */
  1571. /* Computing 2nd power */
  1572. i__3 = *n;
  1573. /* Computing 2nd power */
  1574. i__4 = *n;
  1575. /* Computing 2nd power */
  1576. i__5 = *n;
  1577. /* Computing 2nd power */
  1578. i__6 = *n;
  1579. /* Computing 2nd power */
  1580. i__7 = *n;
  1581. /* Computing 2nd power */
  1582. i__8 = *n;
  1583. /* Computing 2nd power */
  1584. i__9 = *n;
  1585. /* Computing 2nd power */
  1586. i__10 = *n;
  1587. /* Computing 2nd power */
  1588. i__11 = *n;
  1589. i__1 = *n + lwrk_cgeqp3__, i__2 = *n + lwcon, i__1 =
  1590. f2cmax(i__1,i__2), i__2 = (*n << 1) + i__3 *
  1591. i__3 + lwcon, i__1 = f2cmax(i__1,i__2), i__2 = (*
  1592. n << 1) + lwrk_cgeqrf__, i__1 = f2cmax(i__1,i__2)
  1593. , i__2 = (*n << 1) + lwrk_cgeqp3n__, i__1 =
  1594. f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
  1595. i__4 + *n + lwrk_cgelqf__, i__1 = f2cmax(i__1,
  1596. i__2), i__2 = (*n << 1) + i__5 * i__5 + *n +
  1597. i__6 * i__6 + lwcon, i__1 = f2cmax(i__1,i__2),
  1598. i__2 = (*n << 1) + i__7 * i__7 + *n +
  1599. lwrk_cgesvj__, i__1 = f2cmax(i__1,i__2), i__2 = (
  1600. *n << 1) + i__8 * i__8 + *n + lwrk_cgesvjv__,
  1601. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) +
  1602. i__9 * i__9 + *n + lwrk_cunmqr__, i__1 = f2cmax(
  1603. i__1,i__2), i__2 = (*n << 1) + i__10 * i__10
  1604. + *n + lwrk_cunmlq__, i__1 = f2cmax(i__1,i__2),
  1605. i__2 = *n + i__11 * i__11 + lwrk_cgesvju__,
  1606. i__1 = f2cmax(i__1,i__2), i__2 = *n +
  1607. lwrk_cunmqrm__;
  1608. optwrk = f2cmax(i__1,i__2);
  1609. } else {
  1610. /* Computing MAX */
  1611. /* Computing 2nd power */
  1612. i__3 = *n;
  1613. /* Computing 2nd power */
  1614. i__4 = *n;
  1615. /* Computing 2nd power */
  1616. i__5 = *n;
  1617. /* Computing 2nd power */
  1618. i__6 = *n;
  1619. /* Computing 2nd power */
  1620. i__7 = *n;
  1621. /* Computing 2nd power */
  1622. i__8 = *n;
  1623. /* Computing 2nd power */
  1624. i__9 = *n;
  1625. /* Computing 2nd power */
  1626. i__10 = *n;
  1627. /* Computing 2nd power */
  1628. i__11 = *n;
  1629. i__1 = *n + lwrk_cgeqp3__, i__2 = (*n << 1) + i__3 *
  1630. i__3 + lwcon, i__1 = f2cmax(i__1,i__2), i__2 = (*
  1631. n << 1) + lwrk_cgeqrf__, i__1 = f2cmax(i__1,i__2)
  1632. , i__2 = (*n << 1) + lwrk_cgeqp3n__, i__1 =
  1633. f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
  1634. i__4 + *n + lwrk_cgelqf__, i__1 = f2cmax(i__1,
  1635. i__2), i__2 = (*n << 1) + i__5 * i__5 + *n +
  1636. i__6 * i__6 + lwcon, i__1 = f2cmax(i__1,i__2),
  1637. i__2 = (*n << 1) + i__7 * i__7 + *n +
  1638. lwrk_cgesvj__, i__1 = f2cmax(i__1,i__2), i__2 = (
  1639. *n << 1) + i__8 * i__8 + *n + lwrk_cgesvjv__,
  1640. i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) +
  1641. i__9 * i__9 + *n + lwrk_cunmqr__, i__1 = f2cmax(
  1642. i__1,i__2), i__2 = (*n << 1) + i__10 * i__10
  1643. + *n + lwrk_cunmlq__, i__1 = f2cmax(i__1,i__2),
  1644. i__2 = *n + i__11 * i__11 + lwrk_cgesvju__,
  1645. i__1 = f2cmax(i__1,i__2), i__2 = *n +
  1646. lwrk_cunmqrm__;
  1647. optwrk = f2cmax(i__1,i__2);
  1648. }
  1649. } else {
  1650. cgesvj_("L", "U", "V", n, n, &u[u_offset], ldu, &sva[1],
  1651. n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
  1652. c_n1, &ierr);
  1653. lwrk_cgesvjv__ = cdummy[0].r;
  1654. cunmqr_("L", "N", n, n, n, cdummy, n, cdummy, &v[v_offset]
  1655. , ldv, cdummy, &c_n1, &ierr)
  1656. ;
  1657. lwrk_cunmqr__ = cdummy[0].r;
  1658. cunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
  1659. u_offset], ldu, cdummy, &c_n1, &ierr);
  1660. lwrk_cunmqrm__ = cdummy[0].r;
  1661. if (errest) {
  1662. /* Computing MAX */
  1663. /* Computing 2nd power */
  1664. i__3 = *n;
  1665. /* Computing 2nd power */
  1666. i__4 = *n;
  1667. /* Computing 2nd power */
  1668. i__5 = *n;
  1669. i__1 = *n + lwrk_cgeqp3__, i__2 = *n + lwcon, i__1 =
  1670. f2cmax(i__1,i__2), i__2 = (*n << 1) +
  1671. lwrk_cgeqrf__, i__1 = f2cmax(i__1,i__2), i__2 = (
  1672. *n << 1) + i__3 * i__3, i__1 = f2cmax(i__1,i__2),
  1673. i__2 = (*n << 1) + i__4 * i__4 +
  1674. lwrk_cgesvjv__, i__1 = f2cmax(i__1,i__2), i__2 =
  1675. (*n << 1) + i__5 * i__5 + *n + lwrk_cunmqr__,
  1676. i__1 = f2cmax(i__1,i__2), i__2 = *n +
  1677. lwrk_cunmqrm__;
  1678. optwrk = f2cmax(i__1,i__2);
  1679. } else {
  1680. /* Computing MAX */
  1681. /* Computing 2nd power */
  1682. i__3 = *n;
  1683. /* Computing 2nd power */
  1684. i__4 = *n;
  1685. /* Computing 2nd power */
  1686. i__5 = *n;
  1687. i__1 = *n + lwrk_cgeqp3__, i__2 = (*n << 1) +
  1688. lwrk_cgeqrf__, i__1 = f2cmax(i__1,i__2), i__2 = (
  1689. *n << 1) + i__3 * i__3, i__1 = f2cmax(i__1,i__2),
  1690. i__2 = (*n << 1) + i__4 * i__4 +
  1691. lwrk_cgesvjv__, i__1 = f2cmax(i__1,i__2), i__2 =
  1692. (*n << 1) + i__5 * i__5 + *n + lwrk_cunmqr__,
  1693. i__1 = f2cmax(i__1,i__2), i__2 = *n +
  1694. lwrk_cunmqrm__;
  1695. optwrk = f2cmax(i__1,i__2);
  1696. }
  1697. }
  1698. }
  1699. if (l2tran || rowpiv) {
  1700. /* Computing MAX */
  1701. i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 = f2cmax(
  1702. i__1,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1703. minrwrk = f2cmax(i__1,lrwcon);
  1704. } else {
  1705. /* Computing MAX */
  1706. i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
  1707. minrwrk = f2cmax(i__1,lrwcon);
  1708. }
  1709. }
  1710. minwrk = f2cmax(2,minwrk);
  1711. optwrk = f2cmax(optwrk,minwrk);
  1712. if (*lwork < minwrk && ! lquery) {
  1713. *info = -17;
  1714. }
  1715. if (*lrwork < minrwrk && ! lquery) {
  1716. *info = -19;
  1717. }
  1718. }
  1719. if (*info != 0) {
  1720. /* #:( */
  1721. i__1 = -(*info);
  1722. xerbla_("CGEJSV", &i__1, (ftnlen)6);
  1723. return 0;
  1724. } else if (lquery) {
  1725. cwork[1].r = (real) optwrk, cwork[1].i = 0.f;
  1726. cwork[2].r = (real) minwrk, cwork[2].i = 0.f;
  1727. rwork[1] = (real) minrwrk;
  1728. iwork[1] = f2cmax(4,miniwrk);
  1729. return 0;
  1730. }
  1731. /* Quick return for void matrix (Y3K safe) */
  1732. /* #:) */
  1733. if (*m == 0 || *n == 0) {
  1734. iwork[1] = 0;
  1735. iwork[2] = 0;
  1736. iwork[3] = 0;
  1737. iwork[4] = 0;
  1738. rwork[1] = 0.f;
  1739. rwork[2] = 0.f;
  1740. rwork[3] = 0.f;
  1741. rwork[4] = 0.f;
  1742. rwork[5] = 0.f;
  1743. rwork[6] = 0.f;
  1744. rwork[7] = 0.f;
  1745. return 0;
  1746. }
  1747. /* Determine whether the matrix U should be M x N or M x M */
  1748. if (lsvec) {
  1749. n1 = *n;
  1750. if (lsame_(jobu, "F")) {
  1751. n1 = *m;
  1752. }
  1753. }
  1754. /* Set numerical parameters */
  1755. /* ! NOTE: Make sure SLAMCH() does not fail on the target architecture. */
  1756. epsln = slamch_("Epsilon");
  1757. sfmin = slamch_("SafeMinimum");
  1758. small = sfmin / epsln;
  1759. big = slamch_("O");
  1760. /* BIG = ONE / SFMIN */
  1761. /* Initialize SVA(1:N) = diag( ||A e_i||_2 )_1^N */
  1762. /* (!) If necessary, scale SVA() to protect the largest norm from */
  1763. /* overflow. It is possible that this scaling pushes the smallest */
  1764. /* column norm left from the underflow threshold (extreme case). */
  1765. scalem = 1.f / sqrt((real) (*m) * (real) (*n));
  1766. noscal = TRUE_;
  1767. goscal = TRUE_;
  1768. i__1 = *n;
  1769. for (p = 1; p <= i__1; ++p) {
  1770. aapp = 0.f;
  1771. aaqq = 1.f;
  1772. classq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
  1773. if (aapp > big) {
  1774. *info = -9;
  1775. i__2 = -(*info);
  1776. xerbla_("CGEJSV", &i__2, (ftnlen)6);
  1777. return 0;
  1778. }
  1779. aaqq = sqrt(aaqq);
  1780. if (aapp < big / aaqq && noscal) {
  1781. sva[p] = aapp * aaqq;
  1782. } else {
  1783. noscal = FALSE_;
  1784. sva[p] = aapp * (aaqq * scalem);
  1785. if (goscal) {
  1786. goscal = FALSE_;
  1787. i__2 = p - 1;
  1788. sscal_(&i__2, &scalem, &sva[1], &c__1);
  1789. }
  1790. }
  1791. /* L1874: */
  1792. }
  1793. if (noscal) {
  1794. scalem = 1.f;
  1795. }
  1796. aapp = 0.f;
  1797. aaqq = big;
  1798. i__1 = *n;
  1799. for (p = 1; p <= i__1; ++p) {
  1800. /* Computing MAX */
  1801. r__1 = aapp, r__2 = sva[p];
  1802. aapp = f2cmax(r__1,r__2);
  1803. if (sva[p] != 0.f) {
  1804. /* Computing MIN */
  1805. r__1 = aaqq, r__2 = sva[p];
  1806. aaqq = f2cmin(r__1,r__2);
  1807. }
  1808. /* L4781: */
  1809. }
  1810. /* Quick return for zero M x N matrix */
  1811. /* #:) */
  1812. if (aapp == 0.f) {
  1813. if (lsvec) {
  1814. claset_("G", m, &n1, &c_b1, &c_b2, &u[u_offset], ldu);
  1815. }
  1816. if (rsvec) {
  1817. claset_("G", n, n, &c_b1, &c_b2, &v[v_offset], ldv);
  1818. }
  1819. rwork[1] = 1.f;
  1820. rwork[2] = 1.f;
  1821. if (errest) {
  1822. rwork[3] = 1.f;
  1823. }
  1824. if (lsvec && rsvec) {
  1825. rwork[4] = 1.f;
  1826. rwork[5] = 1.f;
  1827. }
  1828. if (l2tran) {
  1829. rwork[6] = 0.f;
  1830. rwork[7] = 0.f;
  1831. }
  1832. iwork[1] = 0;
  1833. iwork[2] = 0;
  1834. iwork[3] = 0;
  1835. iwork[4] = -1;
  1836. return 0;
  1837. }
  1838. /* Issue warning if denormalized column norms detected. Override the */
  1839. /* high relative accuracy request. Issue licence to kill nonzero columns */
  1840. /* (set them to zero) whose norm is less than sigma_max / BIG (roughly). */
  1841. /* #:( */
  1842. warning = 0;
  1843. if (aaqq <= sfmin) {
  1844. l2rank = TRUE_;
  1845. l2kill = TRUE_;
  1846. warning = 1;
  1847. }
  1848. /* Quick return for one-column matrix */
  1849. /* #:) */
  1850. if (*n == 1) {
  1851. if (lsvec) {
  1852. clascl_("G", &c__0, &c__0, &sva[1], &scalem, m, &c__1, &a[a_dim1
  1853. + 1], lda, &ierr);
  1854. clacpy_("A", m, &c__1, &a[a_offset], lda, &u[u_offset], ldu);
  1855. /* computing all M left singular vectors of the M x 1 matrix */
  1856. if (n1 != *n) {
  1857. i__1 = *lwork - *n;
  1858. cgeqrf_(m, n, &u[u_offset], ldu, &cwork[1], &cwork[*n + 1], &
  1859. i__1, &ierr);
  1860. i__1 = *lwork - *n;
  1861. cungqr_(m, &n1, &c__1, &u[u_offset], ldu, &cwork[1], &cwork[*
  1862. n + 1], &i__1, &ierr);
  1863. ccopy_(m, &a[a_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
  1864. }
  1865. }
  1866. if (rsvec) {
  1867. i__1 = v_dim1 + 1;
  1868. v[i__1].r = 1.f, v[i__1].i = 0.f;
  1869. }
  1870. if (sva[1] < big * scalem) {
  1871. sva[1] /= scalem;
  1872. scalem = 1.f;
  1873. }
  1874. rwork[1] = 1.f / scalem;
  1875. rwork[2] = 1.f;
  1876. if (sva[1] != 0.f) {
  1877. iwork[1] = 1;
  1878. if (sva[1] / scalem >= sfmin) {
  1879. iwork[2] = 1;
  1880. } else {
  1881. iwork[2] = 0;
  1882. }
  1883. } else {
  1884. iwork[1] = 0;
  1885. iwork[2] = 0;
  1886. }
  1887. iwork[3] = 0;
  1888. iwork[4] = -1;
  1889. if (errest) {
  1890. rwork[3] = 1.f;
  1891. }
  1892. if (lsvec && rsvec) {
  1893. rwork[4] = 1.f;
  1894. rwork[5] = 1.f;
  1895. }
  1896. if (l2tran) {
  1897. rwork[6] = 0.f;
  1898. rwork[7] = 0.f;
  1899. }
  1900. return 0;
  1901. }
  1902. transp = FALSE_;
  1903. aatmax = -1.f;
  1904. aatmin = big;
  1905. if (rowpiv || l2tran) {
  1906. /* Compute the row norms, needed to determine row pivoting sequence */
  1907. /* (in the case of heavily row weighted A, row pivoting is strongly */
  1908. /* advised) and to collect information needed to compare the */
  1909. /* structures of A * A^* and A^* * A (in the case L2TRAN.EQ..TRUE.). */
  1910. if (l2tran) {
  1911. i__1 = *m;
  1912. for (p = 1; p <= i__1; ++p) {
  1913. xsc = 0.f;
  1914. temp1 = 1.f;
  1915. classq_(n, &a[p + a_dim1], lda, &xsc, &temp1);
  1916. /* CLASSQ gets both the ell_2 and the ell_infinity norm */
  1917. /* in one pass through the vector */
  1918. rwork[*m + p] = xsc * scalem;
  1919. rwork[p] = xsc * (scalem * sqrt(temp1));
  1920. /* Computing MAX */
  1921. r__1 = aatmax, r__2 = rwork[p];
  1922. aatmax = f2cmax(r__1,r__2);
  1923. if (rwork[p] != 0.f) {
  1924. /* Computing MIN */
  1925. r__1 = aatmin, r__2 = rwork[p];
  1926. aatmin = f2cmin(r__1,r__2);
  1927. }
  1928. /* L1950: */
  1929. }
  1930. } else {
  1931. i__1 = *m;
  1932. for (p = 1; p <= i__1; ++p) {
  1933. rwork[*m + p] = scalem * c_abs(&a[p + icamax_(n, &a[p +
  1934. a_dim1], lda) * a_dim1]);
  1935. /* Computing MAX */
  1936. r__1 = aatmax, r__2 = rwork[*m + p];
  1937. aatmax = f2cmax(r__1,r__2);
  1938. /* Computing MIN */
  1939. r__1 = aatmin, r__2 = rwork[*m + p];
  1940. aatmin = f2cmin(r__1,r__2);
  1941. /* L1904: */
  1942. }
  1943. }
  1944. }
  1945. /* For square matrix A try to determine whether A^* would be better */
  1946. /* input for the preconditioned Jacobi SVD, with faster convergence. */
  1947. /* The decision is based on an O(N) function of the vector of column */
  1948. /* and row norms of A, based on the Shannon entropy. This should give */
  1949. /* the right choice in most cases when the difference actually matters. */
  1950. /* It may fail and pick the slower converging side. */
  1951. entra = 0.f;
  1952. entrat = 0.f;
  1953. if (l2tran) {
  1954. xsc = 0.f;
  1955. temp1 = 1.f;
  1956. slassq_(n, &sva[1], &c__1, &xsc, &temp1);
  1957. temp1 = 1.f / temp1;
  1958. entra = 0.f;
  1959. i__1 = *n;
  1960. for (p = 1; p <= i__1; ++p) {
  1961. /* Computing 2nd power */
  1962. r__1 = sva[p] / xsc;
  1963. big1 = r__1 * r__1 * temp1;
  1964. if (big1 != 0.f) {
  1965. entra += big1 * log(big1);
  1966. }
  1967. /* L1113: */
  1968. }
  1969. entra = -entra / log((real) (*n));
  1970. /* Now, SVA().^2/Trace(A^* * A) is a point in the probability simplex. */
  1971. /* It is derived from the diagonal of A^* * A. Do the same with the */
  1972. /* diagonal of A * A^*, compute the entropy of the corresponding */
  1973. /* probability distribution. Note that A * A^* and A^* * A have the */
  1974. /* same trace. */
  1975. entrat = 0.f;
  1976. i__1 = *m;
  1977. for (p = 1; p <= i__1; ++p) {
  1978. /* Computing 2nd power */
  1979. r__1 = rwork[p] / xsc;
  1980. big1 = r__1 * r__1 * temp1;
  1981. if (big1 != 0.f) {
  1982. entrat += big1 * log(big1);
  1983. }
  1984. /* L1114: */
  1985. }
  1986. entrat = -entrat / log((real) (*m));
  1987. /* Analyze the entropies and decide A or A^*. Smaller entropy */
  1988. /* usually means better input for the algorithm. */
  1989. transp = entrat < entra;
  1990. /* If A^* is better than A, take the adjoint of A. This is allowed */
  1991. /* only for square matrices, M=N. */
  1992. if (transp) {
  1993. /* In an optimal implementation, this trivial transpose */
  1994. /* should be replaced with faster transpose. */
  1995. i__1 = *n - 1;
  1996. for (p = 1; p <= i__1; ++p) {
  1997. i__2 = p + p * a_dim1;
  1998. r_cnjg(&q__1, &a[p + p * a_dim1]);
  1999. a[i__2].r = q__1.r, a[i__2].i = q__1.i;
  2000. i__2 = *n;
  2001. for (q = p + 1; q <= i__2; ++q) {
  2002. r_cnjg(&q__1, &a[q + p * a_dim1]);
  2003. ctemp.r = q__1.r, ctemp.i = q__1.i;
  2004. i__3 = q + p * a_dim1;
  2005. r_cnjg(&q__1, &a[p + q * a_dim1]);
  2006. a[i__3].r = q__1.r, a[i__3].i = q__1.i;
  2007. i__3 = p + q * a_dim1;
  2008. a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
  2009. /* L1116: */
  2010. }
  2011. /* L1115: */
  2012. }
  2013. i__1 = *n + *n * a_dim1;
  2014. r_cnjg(&q__1, &a[*n + *n * a_dim1]);
  2015. a[i__1].r = q__1.r, a[i__1].i = q__1.i;
  2016. i__1 = *n;
  2017. for (p = 1; p <= i__1; ++p) {
  2018. rwork[*m + p] = sva[p];
  2019. sva[p] = rwork[p];
  2020. /* previously computed row 2-norms are now column 2-norms */
  2021. /* of the transposed matrix */
  2022. /* L1117: */
  2023. }
  2024. temp1 = aapp;
  2025. aapp = aatmax;
  2026. aatmax = temp1;
  2027. temp1 = aaqq;
  2028. aaqq = aatmin;
  2029. aatmin = temp1;
  2030. kill = lsvec;
  2031. lsvec = rsvec;
  2032. rsvec = kill;
  2033. if (lsvec) {
  2034. n1 = *n;
  2035. }
  2036. rowpiv = TRUE_;
  2037. }
  2038. }
  2039. /* END IF L2TRAN */
  2040. /* Scale the matrix so that its maximal singular value remains less */
  2041. /* than SQRT(BIG) -- the matrix is scaled so that its maximal column */
  2042. /* has Euclidean norm equal to SQRT(BIG/N). The only reason to keep */
  2043. /* SQRT(BIG) instead of BIG is the fact that CGEJSV uses LAPACK and */
  2044. /* BLAS routines that, in some implementations, are not capable of */
  2045. /* working in the full interval [SFMIN,BIG] and that they may provoke */
  2046. /* overflows in the intermediate results. If the singular values spread */
  2047. /* from SFMIN to BIG, then CGESVJ will compute them. So, in that case, */
  2048. /* one should use CGESVJ instead of CGEJSV. */
  2049. big1 = sqrt(big);
  2050. temp1 = sqrt(big / (real) (*n));
  2051. /* >> for future updates: allow bigger range, i.e. the largest column */
  2052. /* will be allowed up to BIG/N and CGESVJ will do the rest. However, for */
  2053. /* this all other (LAPACK) components must allow such a range. */
  2054. /* TEMP1 = BIG/REAL(N) */
  2055. /* TEMP1 = BIG * EPSLN this should 'almost' work with current LAPACK components */
  2056. slascl_("G", &c__0, &c__0, &aapp, &temp1, n, &c__1, &sva[1], n, &ierr);
  2057. if (aaqq > aapp * sfmin) {
  2058. aaqq = aaqq / aapp * temp1;
  2059. } else {
  2060. aaqq = aaqq * temp1 / aapp;
  2061. }
  2062. temp1 *= scalem;
  2063. clascl_("G", &c__0, &c__0, &aapp, &temp1, m, n, &a[a_offset], lda, &ierr);
  2064. /* To undo scaling at the end of this procedure, multiply the */
  2065. /* computed singular values with USCAL2 / USCAL1. */
  2066. uscal1 = temp1;
  2067. uscal2 = aapp;
  2068. if (l2kill) {
  2069. /* L2KILL enforces computation of nonzero singular values in */
  2070. /* the restricted range of condition number of the initial A, */
  2071. /* sigma_max(A) / sigma_min(A) approx. SQRT(BIG)/SQRT(SFMIN). */
  2072. xsc = sqrt(sfmin);
  2073. } else {
  2074. xsc = small;
  2075. /* Now, if the condition number of A is too big, */
  2076. /* sigma_max(A) / sigma_min(A) .GT. SQRT(BIG/N) * EPSLN / SFMIN, */
  2077. /* as a precaution measure, the full SVD is computed using CGESVJ */
  2078. /* with accumulated Jacobi rotations. This provides numerically */
  2079. /* more robust computation, at the cost of slightly increased run */
  2080. /* time. Depending on the concrete implementation of BLAS and LAPACK */
  2081. /* (i.e. how they behave in presence of extreme ill-conditioning) the */
  2082. /* implementor may decide to remove this switch. */
  2083. if (aaqq < sqrt(sfmin) && lsvec && rsvec) {
  2084. jracc = TRUE_;
  2085. }
  2086. }
  2087. if (aaqq < xsc) {
  2088. i__1 = *n;
  2089. for (p = 1; p <= i__1; ++p) {
  2090. if (sva[p] < xsc) {
  2091. claset_("A", m, &c__1, &c_b1, &c_b1, &a[p * a_dim1 + 1], lda);
  2092. sva[p] = 0.f;
  2093. }
  2094. /* L700: */
  2095. }
  2096. }
  2097. /* Preconditioning using QR factorization with pivoting */
  2098. if (rowpiv) {
  2099. /* Optional row permutation (Bjoerck row pivoting): */
  2100. /* A result by Cox and Higham shows that the Bjoerck's */
  2101. /* row pivoting combined with standard column pivoting */
  2102. /* has similar effect as Powell-Reid complete pivoting. */
  2103. /* The ell-infinity norms of A are made nonincreasing. */
  2104. if (lsvec && rsvec && ! jracc) {
  2105. iwoff = *n << 1;
  2106. } else {
  2107. iwoff = *n;
  2108. }
  2109. i__1 = *m - 1;
  2110. for (p = 1; p <= i__1; ++p) {
  2111. i__2 = *m - p + 1;
  2112. q = isamax_(&i__2, &rwork[*m + p], &c__1) + p - 1;
  2113. iwork[iwoff + p] = q;
  2114. if (p != q) {
  2115. temp1 = rwork[*m + p];
  2116. rwork[*m + p] = rwork[*m + q];
  2117. rwork[*m + q] = temp1;
  2118. }
  2119. /* L1952: */
  2120. }
  2121. i__1 = *m - 1;
  2122. claswp_(n, &a[a_offset], lda, &c__1, &i__1, &iwork[iwoff + 1], &c__1);
  2123. }
  2124. /* End of the preparation phase (scaling, optional sorting and */
  2125. /* transposing, optional flushing of small columns). */
  2126. /* Preconditioning */
  2127. /* If the full SVD is needed, the right singular vectors are computed */
  2128. /* from a matrix equation, and for that we need theoretical analysis */
  2129. /* of the Businger-Golub pivoting. So we use CGEQP3 as the first RR QRF. */
  2130. /* In all other cases the first RR QRF can be chosen by other criteria */
  2131. /* (eg speed by replacing global with restricted window pivoting, such */
  2132. /* as in xGEQPX from TOMS # 782). Good results will be obtained using */
  2133. /* xGEQPX with properly (!) chosen numerical parameters. */
  2134. /* Any improvement of CGEQP3 improves overal performance of CGEJSV. */
  2135. /* A * P1 = Q1 * [ R1^* 0]^*: */
  2136. i__1 = *n;
  2137. for (p = 1; p <= i__1; ++p) {
  2138. iwork[p] = 0;
  2139. /* L1963: */
  2140. }
  2141. i__1 = *lwork - *n;
  2142. cgeqp3_(m, n, &a[a_offset], lda, &iwork[1], &cwork[1], &cwork[*n + 1], &
  2143. i__1, &rwork[1], &ierr);
  2144. /* The upper triangular matrix R1 from the first QRF is inspected for */
  2145. /* rank deficiency and possibilities for deflation, or possible */
  2146. /* ill-conditioning. Depending on the user specified flag L2RANK, */
  2147. /* the procedure explores possibilities to reduce the numerical */
  2148. /* rank by inspecting the computed upper triangular factor. If */
  2149. /* L2RANK or L2ABER are up, then CGEJSV will compute the SVD of */
  2150. /* A + dA, where ||dA|| <= f(M,N)*EPSLN. */
  2151. nr = 1;
  2152. if (l2aber) {
  2153. /* Standard absolute error bound suffices. All sigma_i with */
  2154. /* sigma_i < N*EPSLN*||A|| are flushed to zero. This is an */
  2155. /* aggressive enforcement of lower numerical rank by introducing a */
  2156. /* backward error of the order of N*EPSLN*||A||. */
  2157. temp1 = sqrt((real) (*n)) * epsln;
  2158. i__1 = *n;
  2159. for (p = 2; p <= i__1; ++p) {
  2160. if (c_abs(&a[p + p * a_dim1]) >= temp1 * c_abs(&a[a_dim1 + 1])) {
  2161. ++nr;
  2162. } else {
  2163. goto L3002;
  2164. }
  2165. /* L3001: */
  2166. }
  2167. L3002:
  2168. ;
  2169. } else if (l2rank) {
  2170. /* Sudden drop on the diagonal of R1 is used as the criterion for */
  2171. /* close-to-rank-deficient. */
  2172. temp1 = sqrt(sfmin);
  2173. i__1 = *n;
  2174. for (p = 2; p <= i__1; ++p) {
  2175. if (c_abs(&a[p + p * a_dim1]) < epsln * c_abs(&a[p - 1 + (p - 1) *
  2176. a_dim1]) || c_abs(&a[p + p * a_dim1]) < small || l2kill
  2177. && c_abs(&a[p + p * a_dim1]) < temp1) {
  2178. goto L3402;
  2179. }
  2180. ++nr;
  2181. /* L3401: */
  2182. }
  2183. L3402:
  2184. ;
  2185. } else {
  2186. /* The goal is high relative accuracy. However, if the matrix */
  2187. /* has high scaled condition number the relative accuracy is in */
  2188. /* general not feasible. Later on, a condition number estimator */
  2189. /* will be deployed to estimate the scaled condition number. */
  2190. /* Here we just remove the underflowed part of the triangular */
  2191. /* factor. This prevents the situation in which the code is */
  2192. /* working hard to get the accuracy not warranted by the data. */
  2193. temp1 = sqrt(sfmin);
  2194. i__1 = *n;
  2195. for (p = 2; p <= i__1; ++p) {
  2196. if (c_abs(&a[p + p * a_dim1]) < small || l2kill && c_abs(&a[p + p
  2197. * a_dim1]) < temp1) {
  2198. goto L3302;
  2199. }
  2200. ++nr;
  2201. /* L3301: */
  2202. }
  2203. L3302:
  2204. ;
  2205. }
  2206. almort = FALSE_;
  2207. if (nr == *n) {
  2208. maxprj = 1.f;
  2209. i__1 = *n;
  2210. for (p = 2; p <= i__1; ++p) {
  2211. temp1 = c_abs(&a[p + p * a_dim1]) / sva[iwork[p]];
  2212. maxprj = f2cmin(maxprj,temp1);
  2213. /* L3051: */
  2214. }
  2215. /* Computing 2nd power */
  2216. r__1 = maxprj;
  2217. if (r__1 * r__1 >= 1.f - (real) (*n) * epsln) {
  2218. almort = TRUE_;
  2219. }
  2220. }
  2221. sconda = -1.f;
  2222. condr1 = -1.f;
  2223. condr2 = -1.f;
  2224. if (errest) {
  2225. if (*n == nr) {
  2226. if (rsvec) {
  2227. clacpy_("U", n, n, &a[a_offset], lda, &v[v_offset], ldv);
  2228. i__1 = *n;
  2229. for (p = 1; p <= i__1; ++p) {
  2230. temp1 = sva[iwork[p]];
  2231. r__1 = 1.f / temp1;
  2232. csscal_(&p, &r__1, &v[p * v_dim1 + 1], &c__1);
  2233. /* L3053: */
  2234. }
  2235. if (lsvec) {
  2236. cpocon_("U", n, &v[v_offset], ldv, &c_b141, &temp1, &
  2237. cwork[*n + 1], &rwork[1], &ierr);
  2238. } else {
  2239. cpocon_("U", n, &v[v_offset], ldv, &c_b141, &temp1, &
  2240. cwork[1], &rwork[1], &ierr);
  2241. }
  2242. } else if (lsvec) {
  2243. clacpy_("U", n, n, &a[a_offset], lda, &u[u_offset], ldu);
  2244. i__1 = *n;
  2245. for (p = 1; p <= i__1; ++p) {
  2246. temp1 = sva[iwork[p]];
  2247. r__1 = 1.f / temp1;
  2248. csscal_(&p, &r__1, &u[p * u_dim1 + 1], &c__1);
  2249. /* L3054: */
  2250. }
  2251. cpocon_("U", n, &u[u_offset], ldu, &c_b141, &temp1, &cwork[*n
  2252. + 1], &rwork[1], &ierr);
  2253. } else {
  2254. clacpy_("U", n, n, &a[a_offset], lda, &cwork[1], n)
  2255. ;
  2256. /* [] CALL CLACPY( 'U', N, N, A, LDA, CWORK(N+1), N ) */
  2257. /* Change: here index shifted by N to the left, CWORK(1:N) */
  2258. /* not needed for SIGMA only computation */
  2259. i__1 = *n;
  2260. for (p = 1; p <= i__1; ++p) {
  2261. temp1 = sva[iwork[p]];
  2262. /* [] CALL CSSCAL( p, ONE/TEMP1, CWORK(N+(p-1)*N+1), 1 ) */
  2263. r__1 = 1.f / temp1;
  2264. csscal_(&p, &r__1, &cwork[(p - 1) * *n + 1], &c__1);
  2265. /* L3052: */
  2266. }
  2267. /* [] CALL CPOCON( 'U', N, CWORK(N+1), N, ONE, TEMP1, */
  2268. /* [] $ CWORK(N+N*N+1), RWORK, IERR ) */
  2269. cpocon_("U", n, &cwork[1], n, &c_b141, &temp1, &cwork[*n * *n
  2270. + 1], &rwork[1], &ierr);
  2271. }
  2272. if (temp1 != 0.f) {
  2273. sconda = 1.f / sqrt(temp1);
  2274. } else {
  2275. sconda = -1.f;
  2276. }
  2277. /* SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1). */
  2278. /* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
  2279. } else {
  2280. sconda = -1.f;
  2281. }
  2282. }
  2283. c_div(&q__1, &a[a_dim1 + 1], &a[nr + nr * a_dim1]);
  2284. l2pert = l2pert && c_abs(&q__1) > sqrt(big1);
  2285. /* If there is no violent scaling, artificial perturbation is not needed. */
  2286. /* Phase 3: */
  2287. if (! (rsvec || lsvec)) {
  2288. /* Singular Values only */
  2289. /* Computing MIN */
  2290. i__2 = *n - 1;
  2291. i__1 = f2cmin(i__2,nr);
  2292. for (p = 1; p <= i__1; ++p) {
  2293. i__2 = *n - p;
  2294. ccopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
  2295. a_dim1], &c__1);
  2296. i__2 = *n - p + 1;
  2297. clacgv_(&i__2, &a[p + p * a_dim1], &c__1);
  2298. /* L1946: */
  2299. }
  2300. if (nr == *n) {
  2301. i__1 = *n + *n * a_dim1;
  2302. r_cnjg(&q__1, &a[*n + *n * a_dim1]);
  2303. a[i__1].r = q__1.r, a[i__1].i = q__1.i;
  2304. }
  2305. /* The following two DO-loops introduce small relative perturbation */
  2306. /* into the strict upper triangle of the lower triangular matrix. */
  2307. /* Small entries below the main diagonal are also changed. */
  2308. /* This modification is useful if the computing environment does not */
  2309. /* provide/allow FLUSH TO ZERO underflow, for it prevents many */
  2310. /* annoying denormalized numbers in case of strongly scaled matrices. */
  2311. /* The perturbation is structured so that it does not introduce any */
  2312. /* new perturbation of the singular values, and it does not destroy */
  2313. /* the job done by the preconditioner. */
  2314. /* The licence for this perturbation is in the variable L2PERT, which */
  2315. /* should be .FALSE. if FLUSH TO ZERO underflow is active. */
  2316. if (! almort) {
  2317. if (l2pert) {
  2318. /* XSC = SQRT(SMALL) */
  2319. xsc = epsln / (real) (*n);
  2320. i__1 = nr;
  2321. for (q = 1; q <= i__1; ++q) {
  2322. r__1 = xsc * c_abs(&a[q + q * a_dim1]);
  2323. q__1.r = r__1, q__1.i = 0.f;
  2324. ctemp.r = q__1.r, ctemp.i = q__1.i;
  2325. i__2 = *n;
  2326. for (p = 1; p <= i__2; ++p) {
  2327. if (p > q && c_abs(&a[p + q * a_dim1]) <= temp1 || p <
  2328. q) {
  2329. i__3 = p + q * a_dim1;
  2330. a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
  2331. }
  2332. /* $ A(p,q) = TEMP1 * ( A(p,q) / ABS(A(p,q)) ) */
  2333. /* L4949: */
  2334. }
  2335. /* L4947: */
  2336. }
  2337. } else {
  2338. i__1 = nr - 1;
  2339. i__2 = nr - 1;
  2340. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1]
  2341. , lda);
  2342. }
  2343. i__1 = *lwork - *n;
  2344. cgeqrf_(n, &nr, &a[a_offset], lda, &cwork[1], &cwork[*n + 1], &
  2345. i__1, &ierr);
  2346. i__1 = nr - 1;
  2347. for (p = 1; p <= i__1; ++p) {
  2348. i__2 = nr - p;
  2349. ccopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
  2350. a_dim1], &c__1);
  2351. i__2 = nr - p + 1;
  2352. clacgv_(&i__2, &a[p + p * a_dim1], &c__1);
  2353. /* L1948: */
  2354. }
  2355. }
  2356. /* Row-cyclic Jacobi SVD algorithm with column pivoting */
  2357. /* to drown denormals */
  2358. if (l2pert) {
  2359. /* XSC = SQRT(SMALL) */
  2360. xsc = epsln / (real) (*n);
  2361. i__1 = nr;
  2362. for (q = 1; q <= i__1; ++q) {
  2363. r__1 = xsc * c_abs(&a[q + q * a_dim1]);
  2364. q__1.r = r__1, q__1.i = 0.f;
  2365. ctemp.r = q__1.r, ctemp.i = q__1.i;
  2366. i__2 = nr;
  2367. for (p = 1; p <= i__2; ++p) {
  2368. if (p > q && c_abs(&a[p + q * a_dim1]) <= temp1 || p < q)
  2369. {
  2370. i__3 = p + q * a_dim1;
  2371. a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
  2372. }
  2373. /* $ A(p,q) = TEMP1 * ( A(p,q) / ABS(A(p,q)) ) */
  2374. /* L1949: */
  2375. }
  2376. /* L1947: */
  2377. }
  2378. } else {
  2379. i__1 = nr - 1;
  2380. i__2 = nr - 1;
  2381. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1],
  2382. lda);
  2383. }
  2384. /* triangular matrix (plus perturbation which is ignored in */
  2385. /* the part which destroys triangular form (confusing?!)) */
  2386. cgesvj_("L", "N", "N", &nr, &nr, &a[a_offset], lda, &sva[1], n, &v[
  2387. v_offset], ldv, &cwork[1], lwork, &rwork[1], lrwork, info);
  2388. scalem = rwork[1];
  2389. numrank = i_nint(&rwork[2]);
  2390. } else if (rsvec && ! lsvec && ! jracc || jracc && ! lsvec && nr != *n) {
  2391. /* -> Singular Values and Right Singular Vectors <- */
  2392. if (almort) {
  2393. i__1 = nr;
  2394. for (p = 1; p <= i__1; ++p) {
  2395. i__2 = *n - p + 1;
  2396. ccopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
  2397. c__1);
  2398. i__2 = *n - p + 1;
  2399. clacgv_(&i__2, &v[p + p * v_dim1], &c__1);
  2400. /* L1998: */
  2401. }
  2402. i__1 = nr - 1;
  2403. i__2 = nr - 1;
  2404. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1],
  2405. ldv);
  2406. cgesvj_("L", "U", "N", n, &nr, &v[v_offset], ldv, &sva[1], &nr, &
  2407. a[a_offset], lda, &cwork[1], lwork, &rwork[1], lrwork,
  2408. info);
  2409. scalem = rwork[1];
  2410. numrank = i_nint(&rwork[2]);
  2411. } else {
  2412. /* accumulated product of Jacobi rotations, three are perfect ) */
  2413. i__1 = nr - 1;
  2414. i__2 = nr - 1;
  2415. claset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
  2416. i__1 = *lwork - *n;
  2417. cgelqf_(&nr, n, &a[a_offset], lda, &cwork[1], &cwork[*n + 1], &
  2418. i__1, &ierr);
  2419. clacpy_("L", &nr, &nr, &a[a_offset], lda, &v[v_offset], ldv);
  2420. i__1 = nr - 1;
  2421. i__2 = nr - 1;
  2422. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1],
  2423. ldv);
  2424. i__1 = *lwork - (*n << 1);
  2425. cgeqrf_(&nr, &nr, &v[v_offset], ldv, &cwork[*n + 1], &cwork[(*n <<
  2426. 1) + 1], &i__1, &ierr);
  2427. i__1 = nr;
  2428. for (p = 1; p <= i__1; ++p) {
  2429. i__2 = nr - p + 1;
  2430. ccopy_(&i__2, &v[p + p * v_dim1], ldv, &v[p + p * v_dim1], &
  2431. c__1);
  2432. i__2 = nr - p + 1;
  2433. clacgv_(&i__2, &v[p + p * v_dim1], &c__1);
  2434. /* L8998: */
  2435. }
  2436. i__1 = nr - 1;
  2437. i__2 = nr - 1;
  2438. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1],
  2439. ldv);
  2440. i__1 = *lwork - *n;
  2441. cgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[1], &nr,
  2442. &u[u_offset], ldu, &cwork[*n + 1], &i__1, &rwork[1],
  2443. lrwork, info);
  2444. scalem = rwork[1];
  2445. numrank = i_nint(&rwork[2]);
  2446. if (nr < *n) {
  2447. i__1 = *n - nr;
  2448. claset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 + v_dim1],
  2449. ldv);
  2450. i__1 = *n - nr;
  2451. claset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) * v_dim1 +
  2452. 1], ldv);
  2453. i__1 = *n - nr;
  2454. i__2 = *n - nr;
  2455. claset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (nr + 1)
  2456. * v_dim1], ldv);
  2457. }
  2458. i__1 = *lwork - *n;
  2459. cunmlq_("L", "C", n, n, &nr, &a[a_offset], lda, &cwork[1], &v[
  2460. v_offset], ldv, &cwork[*n + 1], &i__1, &ierr);
  2461. }
  2462. /* DO 8991 p = 1, N */
  2463. /* CALL CCOPY( N, V(p,1), LDV, A(IWORK(p),1), LDA ) */
  2464. /* 8991 CONTINUE */
  2465. /* CALL CLACPY( 'All', N, N, A, LDA, V, LDV ) */
  2466. clapmr_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
  2467. if (transp) {
  2468. clacpy_("A", n, n, &v[v_offset], ldv, &u[u_offset], ldu);
  2469. }
  2470. } else if (jracc && ! lsvec && nr == *n) {
  2471. i__1 = *n - 1;
  2472. i__2 = *n - 1;
  2473. claset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
  2474. cgesvj_("U", "N", "V", n, n, &a[a_offset], lda, &sva[1], n, &v[
  2475. v_offset], ldv, &cwork[1], lwork, &rwork[1], lrwork, info);
  2476. scalem = rwork[1];
  2477. numrank = i_nint(&rwork[2]);
  2478. clapmr_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
  2479. } else if (lsvec && ! rsvec) {
  2480. /* Jacobi rotations in the Jacobi iterations. */
  2481. i__1 = nr;
  2482. for (p = 1; p <= i__1; ++p) {
  2483. i__2 = *n - p + 1;
  2484. ccopy_(&i__2, &a[p + p * a_dim1], lda, &u[p + p * u_dim1], &c__1);
  2485. i__2 = *n - p + 1;
  2486. clacgv_(&i__2, &u[p + p * u_dim1], &c__1);
  2487. /* L1965: */
  2488. }
  2489. i__1 = nr - 1;
  2490. i__2 = nr - 1;
  2491. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1], ldu);
  2492. i__1 = *lwork - (*n << 1);
  2493. cgeqrf_(n, &nr, &u[u_offset], ldu, &cwork[*n + 1], &cwork[(*n << 1) +
  2494. 1], &i__1, &ierr);
  2495. i__1 = nr - 1;
  2496. for (p = 1; p <= i__1; ++p) {
  2497. i__2 = nr - p;
  2498. ccopy_(&i__2, &u[p + (p + 1) * u_dim1], ldu, &u[p + 1 + p *
  2499. u_dim1], &c__1);
  2500. i__2 = *n - p + 1;
  2501. clacgv_(&i__2, &u[p + p * u_dim1], &c__1);
  2502. /* L1967: */
  2503. }
  2504. i__1 = nr - 1;
  2505. i__2 = nr - 1;
  2506. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1], ldu);
  2507. i__1 = *lwork - *n;
  2508. cgesvj_("L", "U", "N", &nr, &nr, &u[u_offset], ldu, &sva[1], &nr, &a[
  2509. a_offset], lda, &cwork[*n + 1], &i__1, &rwork[1], lrwork,
  2510. info);
  2511. scalem = rwork[1];
  2512. numrank = i_nint(&rwork[2]);
  2513. if (nr < *m) {
  2514. i__1 = *m - nr;
  2515. claset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1], ldu);
  2516. if (nr < n1) {
  2517. i__1 = n1 - nr;
  2518. claset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) * u_dim1 +
  2519. 1], ldu);
  2520. i__1 = *m - nr;
  2521. i__2 = n1 - nr;
  2522. claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (nr + 1)
  2523. * u_dim1], ldu);
  2524. }
  2525. }
  2526. i__1 = *lwork - *n;
  2527. cunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
  2528. u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
  2529. if (rowpiv) {
  2530. i__1 = *m - 1;
  2531. claswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[iwoff + 1], &
  2532. c_n1);
  2533. }
  2534. i__1 = n1;
  2535. for (p = 1; p <= i__1; ++p) {
  2536. xsc = 1.f / scnrm2_(m, &u[p * u_dim1 + 1], &c__1);
  2537. csscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  2538. /* L1974: */
  2539. }
  2540. if (transp) {
  2541. clacpy_("A", n, n, &u[u_offset], ldu, &v[v_offset], ldv);
  2542. }
  2543. } else {
  2544. if (! jracc) {
  2545. if (! almort) {
  2546. /* Second Preconditioning Step (QRF [with pivoting]) */
  2547. /* Note that the composition of TRANSPOSE, QRF and TRANSPOSE is */
  2548. /* equivalent to an LQF CALL. Since in many libraries the QRF */
  2549. /* seems to be better optimized than the LQF, we do explicit */
  2550. /* transpose and use the QRF. This is subject to changes in an */
  2551. /* optimized implementation of CGEJSV. */
  2552. i__1 = nr;
  2553. for (p = 1; p <= i__1; ++p) {
  2554. i__2 = *n - p + 1;
  2555. ccopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1],
  2556. &c__1);
  2557. i__2 = *n - p + 1;
  2558. clacgv_(&i__2, &v[p + p * v_dim1], &c__1);
  2559. /* L1968: */
  2560. }
  2561. /* denormals in the second QR factorization, where they are */
  2562. /* as good as zeros. This is done to avoid painfully slow */
  2563. /* computation with denormals. The relative size of the perturbation */
  2564. /* is a parameter that can be changed by the implementer. */
  2565. /* This perturbation device will be obsolete on machines with */
  2566. /* properly implemented arithmetic. */
  2567. /* To switch it off, set L2PERT=.FALSE. To remove it from the */
  2568. /* code, remove the action under L2PERT=.TRUE., leave the ELSE part. */
  2569. /* The following two loops should be blocked and fused with the */
  2570. /* transposed copy above. */
  2571. if (l2pert) {
  2572. xsc = sqrt(small);
  2573. i__1 = nr;
  2574. for (q = 1; q <= i__1; ++q) {
  2575. r__1 = xsc * c_abs(&v[q + q * v_dim1]);
  2576. q__1.r = r__1, q__1.i = 0.f;
  2577. ctemp.r = q__1.r, ctemp.i = q__1.i;
  2578. i__2 = *n;
  2579. for (p = 1; p <= i__2; ++p) {
  2580. if (p > q && c_abs(&v[p + q * v_dim1]) <= temp1 ||
  2581. p < q) {
  2582. i__3 = p + q * v_dim1;
  2583. v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
  2584. }
  2585. /* $ V(p,q) = TEMP1 * ( V(p,q) / ABS(V(p,q)) ) */
  2586. if (p < q) {
  2587. i__3 = p + q * v_dim1;
  2588. i__4 = p + q * v_dim1;
  2589. q__1.r = -v[i__4].r, q__1.i = -v[i__4].i;
  2590. v[i__3].r = q__1.r, v[i__3].i = q__1.i;
  2591. }
  2592. /* L2968: */
  2593. }
  2594. /* L2969: */
  2595. }
  2596. } else {
  2597. i__1 = nr - 1;
  2598. i__2 = nr - 1;
  2599. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1)
  2600. + 1], ldv);
  2601. }
  2602. /* Estimate the row scaled condition number of R1 */
  2603. /* (If R1 is rectangular, N > NR, then the condition number */
  2604. /* of the leading NR x NR submatrix is estimated.) */
  2605. clacpy_("L", &nr, &nr, &v[v_offset], ldv, &cwork[(*n << 1) +
  2606. 1], &nr);
  2607. i__1 = nr;
  2608. for (p = 1; p <= i__1; ++p) {
  2609. i__2 = nr - p + 1;
  2610. temp1 = scnrm2_(&i__2, &cwork[(*n << 1) + (p - 1) * nr +
  2611. p], &c__1);
  2612. i__2 = nr - p + 1;
  2613. r__1 = 1.f / temp1;
  2614. csscal_(&i__2, &r__1, &cwork[(*n << 1) + (p - 1) * nr + p]
  2615. , &c__1);
  2616. /* L3950: */
  2617. }
  2618. cpocon_("L", &nr, &cwork[(*n << 1) + 1], &nr, &c_b141, &temp1,
  2619. &cwork[(*n << 1) + nr * nr + 1], &rwork[1], &ierr);
  2620. condr1 = 1.f / sqrt(temp1);
  2621. /* R1 is OK for inverse <=> CONDR1 .LT. REAL(N) */
  2622. /* more conservative <=> CONDR1 .LT. SQRT(REAL(N)) */
  2623. cond_ok__ = sqrt(sqrt((real) nr));
  2624. /* [TP] COND_OK is a tuning parameter. */
  2625. if (condr1 < cond_ok__) {
  2626. /* implementation, this QRF should be implemented as the QRF */
  2627. /* of a lower triangular matrix. */
  2628. /* R1^* = Q2 * R2 */
  2629. i__1 = *lwork - (*n << 1);
  2630. cgeqrf_(n, &nr, &v[v_offset], ldv, &cwork[*n + 1], &cwork[
  2631. (*n << 1) + 1], &i__1, &ierr);
  2632. if (l2pert) {
  2633. xsc = sqrt(small) / epsln;
  2634. i__1 = nr;
  2635. for (p = 2; p <= i__1; ++p) {
  2636. i__2 = p - 1;
  2637. for (q = 1; q <= i__2; ++q) {
  2638. /* Computing MIN */
  2639. r__2 = c_abs(&v[p + p * v_dim1]), r__3 =
  2640. c_abs(&v[q + q * v_dim1]);
  2641. r__1 = xsc * f2cmin(r__2,r__3);
  2642. q__1.r = r__1, q__1.i = 0.f;
  2643. ctemp.r = q__1.r, ctemp.i = q__1.i;
  2644. if (c_abs(&v[q + p * v_dim1]) <= temp1) {
  2645. i__3 = q + p * v_dim1;
  2646. v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
  2647. }
  2648. /* $ V(q,p) = TEMP1 * ( V(q,p) / ABS(V(q,p)) ) */
  2649. /* L3958: */
  2650. }
  2651. /* L3959: */
  2652. }
  2653. }
  2654. if (nr != *n) {
  2655. clacpy_("A", n, &nr, &v[v_offset], ldv, &cwork[(*n <<
  2656. 1) + 1], n);
  2657. }
  2658. i__1 = nr - 1;
  2659. for (p = 1; p <= i__1; ++p) {
  2660. i__2 = nr - p;
  2661. ccopy_(&i__2, &v[p + (p + 1) * v_dim1], ldv, &v[p + 1
  2662. + p * v_dim1], &c__1);
  2663. i__2 = nr - p + 1;
  2664. clacgv_(&i__2, &v[p + p * v_dim1], &c__1);
  2665. /* L1969: */
  2666. }
  2667. i__1 = nr + nr * v_dim1;
  2668. r_cnjg(&q__1, &v[nr + nr * v_dim1]);
  2669. v[i__1].r = q__1.r, v[i__1].i = q__1.i;
  2670. condr2 = condr1;
  2671. } else {
  2672. /* Note that windowed pivoting would be equally good */
  2673. /* numerically, and more run-time efficient. So, in */
  2674. /* an optimal implementation, the next call to CGEQP3 */
  2675. /* should be replaced with eg. CALL CGEQPX (ACM TOMS #782) */
  2676. /* with properly (carefully) chosen parameters. */
  2677. /* R1^* * P2 = Q2 * R2 */
  2678. i__1 = nr;
  2679. for (p = 1; p <= i__1; ++p) {
  2680. iwork[*n + p] = 0;
  2681. /* L3003: */
  2682. }
  2683. i__1 = *lwork - (*n << 1);
  2684. cgeqp3_(n, &nr, &v[v_offset], ldv, &iwork[*n + 1], &cwork[
  2685. *n + 1], &cwork[(*n << 1) + 1], &i__1, &rwork[1],
  2686. &ierr);
  2687. /* * CALL CGEQRF( N, NR, V, LDV, CWORK(N+1), CWORK(2*N+1), */
  2688. /* * $ LWORK-2*N, IERR ) */
  2689. if (l2pert) {
  2690. xsc = sqrt(small);
  2691. i__1 = nr;
  2692. for (p = 2; p <= i__1; ++p) {
  2693. i__2 = p - 1;
  2694. for (q = 1; q <= i__2; ++q) {
  2695. /* Computing MIN */
  2696. r__2 = c_abs(&v[p + p * v_dim1]), r__3 =
  2697. c_abs(&v[q + q * v_dim1]);
  2698. r__1 = xsc * f2cmin(r__2,r__3);
  2699. q__1.r = r__1, q__1.i = 0.f;
  2700. ctemp.r = q__1.r, ctemp.i = q__1.i;
  2701. if (c_abs(&v[q + p * v_dim1]) <= temp1) {
  2702. i__3 = q + p * v_dim1;
  2703. v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
  2704. }
  2705. /* $ V(q,p) = TEMP1 * ( V(q,p) / ABS(V(q,p)) ) */
  2706. /* L3968: */
  2707. }
  2708. /* L3969: */
  2709. }
  2710. }
  2711. clacpy_("A", n, &nr, &v[v_offset], ldv, &cwork[(*n << 1)
  2712. + 1], n);
  2713. if (l2pert) {
  2714. xsc = sqrt(small);
  2715. i__1 = nr;
  2716. for (p = 2; p <= i__1; ++p) {
  2717. i__2 = p - 1;
  2718. for (q = 1; q <= i__2; ++q) {
  2719. /* Computing MIN */
  2720. r__2 = c_abs(&v[p + p * v_dim1]), r__3 =
  2721. c_abs(&v[q + q * v_dim1]);
  2722. r__1 = xsc * f2cmin(r__2,r__3);
  2723. q__1.r = r__1, q__1.i = 0.f;
  2724. ctemp.r = q__1.r, ctemp.i = q__1.i;
  2725. /* V(p,q) = - TEMP1*( V(q,p) / ABS(V(q,p)) ) */
  2726. i__3 = p + q * v_dim1;
  2727. q__1.r = -ctemp.r, q__1.i = -ctemp.i;
  2728. v[i__3].r = q__1.r, v[i__3].i = q__1.i;
  2729. /* L8971: */
  2730. }
  2731. /* L8970: */
  2732. }
  2733. } else {
  2734. i__1 = nr - 1;
  2735. i__2 = nr - 1;
  2736. claset_("L", &i__1, &i__2, &c_b1, &c_b1, &v[v_dim1 +
  2737. 2], ldv);
  2738. }
  2739. /* Now, compute R2 = L3 * Q3, the LQ factorization. */
  2740. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2741. cgelqf_(&nr, &nr, &v[v_offset], ldv, &cwork[(*n << 1) + *
  2742. n * nr + 1], &cwork[(*n << 1) + *n * nr + nr + 1],
  2743. &i__1, &ierr);
  2744. clacpy_("L", &nr, &nr, &v[v_offset], ldv, &cwork[(*n << 1)
  2745. + *n * nr + nr + 1], &nr);
  2746. i__1 = nr;
  2747. for (p = 1; p <= i__1; ++p) {
  2748. temp1 = scnrm2_(&p, &cwork[(*n << 1) + *n * nr + nr +
  2749. p], &nr);
  2750. r__1 = 1.f / temp1;
  2751. csscal_(&p, &r__1, &cwork[(*n << 1) + *n * nr + nr +
  2752. p], &nr);
  2753. /* L4950: */
  2754. }
  2755. cpocon_("L", &nr, &cwork[(*n << 1) + *n * nr + nr + 1], &
  2756. nr, &c_b141, &temp1, &cwork[(*n << 1) + *n * nr +
  2757. nr + nr * nr + 1], &rwork[1], &ierr);
  2758. condr2 = 1.f / sqrt(temp1);
  2759. if (condr2 >= cond_ok__) {
  2760. /* (this overwrites the copy of R2, as it will not be */
  2761. /* needed in this branch, but it does not overwritte the */
  2762. /* Huseholder vectors of Q2.). */
  2763. clacpy_("U", &nr, &nr, &v[v_offset], ldv, &cwork[(*n
  2764. << 1) + 1], n);
  2765. /* WORK(2*N+N*NR+1:2*N+N*NR+N) */
  2766. }
  2767. }
  2768. if (l2pert) {
  2769. xsc = sqrt(small);
  2770. i__1 = nr;
  2771. for (q = 2; q <= i__1; ++q) {
  2772. i__2 = q + q * v_dim1;
  2773. q__1.r = xsc * v[i__2].r, q__1.i = xsc * v[i__2].i;
  2774. ctemp.r = q__1.r, ctemp.i = q__1.i;
  2775. i__2 = q - 1;
  2776. for (p = 1; p <= i__2; ++p) {
  2777. /* V(p,q) = - TEMP1*( V(p,q) / ABS(V(p,q)) ) */
  2778. i__3 = p + q * v_dim1;
  2779. q__1.r = -ctemp.r, q__1.i = -ctemp.i;
  2780. v[i__3].r = q__1.r, v[i__3].i = q__1.i;
  2781. /* L4969: */
  2782. }
  2783. /* L4968: */
  2784. }
  2785. } else {
  2786. i__1 = nr - 1;
  2787. i__2 = nr - 1;
  2788. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1)
  2789. + 1], ldv);
  2790. }
  2791. /* Second preconditioning finished; continue with Jacobi SVD */
  2792. /* The input matrix is lower trinagular. */
  2793. /* Recover the right singular vectors as solution of a well */
  2794. /* conditioned triangular matrix equation. */
  2795. if (condr1 < cond_ok__) {
  2796. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2797. cgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
  2798. 1], &nr, &u[u_offset], ldu, &cwork[(*n << 1) + *n
  2799. * nr + nr + 1], &i__1, &rwork[1], lrwork, info);
  2800. scalem = rwork[1];
  2801. numrank = i_nint(&rwork[2]);
  2802. i__1 = nr;
  2803. for (p = 1; p <= i__1; ++p) {
  2804. ccopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
  2805. + 1], &c__1);
  2806. csscal_(&nr, &sva[p], &v[p * v_dim1 + 1], &c__1);
  2807. /* L3970: */
  2808. }
  2809. if (nr == *n) {
  2810. /* :)) .. best case, R1 is inverted. The solution of this matrix */
  2811. /* equation is Q2*V2 = the product of the Jacobi rotations */
  2812. /* used in CGESVJ, premultiplied with the orthogonal matrix */
  2813. /* from the second QR factorization. */
  2814. ctrsm_("L", "U", "N", "N", &nr, &nr, &c_b2, &a[
  2815. a_offset], lda, &v[v_offset], ldv);
  2816. } else {
  2817. /* is inverted to get the product of the Jacobi rotations */
  2818. /* used in CGESVJ. The Q-factor from the second QR */
  2819. /* factorization is then built in explicitly. */
  2820. ctrsm_("L", "U", "C", "N", &nr, &nr, &c_b2, &cwork[(*
  2821. n << 1) + 1], n, &v[v_offset], ldv);
  2822. if (nr < *n) {
  2823. i__1 = *n - nr;
  2824. claset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1
  2825. + v_dim1], ldv);
  2826. i__1 = *n - nr;
  2827. claset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1)
  2828. * v_dim1 + 1], ldv);
  2829. i__1 = *n - nr;
  2830. i__2 = *n - nr;
  2831. claset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr +
  2832. 1 + (nr + 1) * v_dim1], ldv);
  2833. }
  2834. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2835. cunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n,
  2836. &cwork[*n + 1], &v[v_offset], ldv, &cwork[(*
  2837. n << 1) + *n * nr + nr + 1], &i__1, &ierr);
  2838. }
  2839. } else if (condr2 < cond_ok__) {
  2840. /* The matrix R2 is inverted. The solution of the matrix equation */
/* is Q3^* * V3 = the product of the Jacobi rotations (applied to */
  2842. /* the lower triangular L3 from the LQ factorization of */
  2843. /* R2=L3*Q3), pre-multiplied with the transposed Q3. */
  2844. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2845. cgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
  2846. 1], &nr, &u[u_offset], ldu, &cwork[(*n << 1) + *n
  2847. * nr + nr + 1], &i__1, &rwork[1], lrwork, info);
  2848. scalem = rwork[1];
  2849. numrank = i_nint(&rwork[2]);
  2850. i__1 = nr;
  2851. for (p = 1; p <= i__1; ++p) {
  2852. ccopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
  2853. + 1], &c__1);
  2854. csscal_(&nr, &sva[p], &u[p * u_dim1 + 1], &c__1);
  2855. /* L3870: */
  2856. }
  2857. ctrsm_("L", "U", "N", "N", &nr, &nr, &c_b2, &cwork[(*n <<
  2858. 1) + 1], n, &u[u_offset], ldu);
  2859. i__1 = nr;
  2860. for (q = 1; q <= i__1; ++q) {
  2861. i__2 = nr;
  2862. for (p = 1; p <= i__2; ++p) {
  2863. i__3 = (*n << 1) + *n * nr + nr + iwork[*n + p];
  2864. i__4 = p + q * u_dim1;
  2865. cwork[i__3].r = u[i__4].r, cwork[i__3].i = u[i__4]
  2866. .i;
  2867. /* L872: */
  2868. }
  2869. i__2 = nr;
  2870. for (p = 1; p <= i__2; ++p) {
  2871. i__3 = p + q * u_dim1;
  2872. i__4 = (*n << 1) + *n * nr + nr + p;
  2873. u[i__3].r = cwork[i__4].r, u[i__3].i = cwork[i__4]
  2874. .i;
  2875. /* L874: */
  2876. }
  2877. /* L873: */
  2878. }
  2879. if (nr < *n) {
  2880. i__1 = *n - nr;
  2881. claset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 +
  2882. v_dim1], ldv);
  2883. i__1 = *n - nr;
  2884. claset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) *
  2885. v_dim1 + 1], ldv);
  2886. i__1 = *n - nr;
  2887. i__2 = *n - nr;
  2888. claset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (
  2889. nr + 1) * v_dim1], ldv);
  2890. }
  2891. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2892. cunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n, &
  2893. cwork[*n + 1], &v[v_offset], ldv, &cwork[(*n << 1)
  2894. + *n * nr + nr + 1], &i__1, &ierr);
  2895. } else {
  2896. /* Last line of defense. */
  2897. /* #:( This is a rather pathological case: no scaled condition */
  2898. /* improvement after two pivoted QR factorizations. Other */
  2899. /* possibility is that the rank revealing QR factorization */
  2900. /* or the condition estimator has failed, or the COND_OK */
  2901. /* is set very close to ONE (which is unnecessary). Normally, */
  2902. /* this branch should never be executed, but in rare cases of */
  2903. /* failure of the RRQR or condition estimator, the last line of */
  2904. /* defense ensures that CGEJSV completes the task. */
  2905. /* Compute the full SVD of L3 using CGESVJ with explicit */
  2906. /* accumulation of Jacobi rotations. */
  2907. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2908. cgesvj_("L", "U", "V", &nr, &nr, &v[v_offset], ldv, &sva[
  2909. 1], &nr, &u[u_offset], ldu, &cwork[(*n << 1) + *n
  2910. * nr + nr + 1], &i__1, &rwork[1], lrwork, info);
  2911. scalem = rwork[1];
  2912. numrank = i_nint(&rwork[2]);
  2913. if (nr < *n) {
  2914. i__1 = *n - nr;
  2915. claset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 +
  2916. v_dim1], ldv);
  2917. i__1 = *n - nr;
  2918. claset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) *
  2919. v_dim1 + 1], ldv);
  2920. i__1 = *n - nr;
  2921. i__2 = *n - nr;
  2922. claset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (
  2923. nr + 1) * v_dim1], ldv);
  2924. }
  2925. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2926. cunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n, &
  2927. cwork[*n + 1], &v[v_offset], ldv, &cwork[(*n << 1)
  2928. + *n * nr + nr + 1], &i__1, &ierr);
  2929. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2930. cunmlq_("L", "C", &nr, &nr, &nr, &cwork[(*n << 1) + 1], n,
  2931. &cwork[(*n << 1) + *n * nr + 1], &u[u_offset],
  2932. ldu, &cwork[(*n << 1) + *n * nr + nr + 1], &i__1,
  2933. &ierr);
  2934. i__1 = nr;
  2935. for (q = 1; q <= i__1; ++q) {
  2936. i__2 = nr;
  2937. for (p = 1; p <= i__2; ++p) {
  2938. i__3 = (*n << 1) + *n * nr + nr + iwork[*n + p];
  2939. i__4 = p + q * u_dim1;
  2940. cwork[i__3].r = u[i__4].r, cwork[i__3].i = u[i__4]
  2941. .i;
  2942. /* L772: */
  2943. }
  2944. i__2 = nr;
  2945. for (p = 1; p <= i__2; ++p) {
  2946. i__3 = p + q * u_dim1;
  2947. i__4 = (*n << 1) + *n * nr + nr + p;
  2948. u[i__3].r = cwork[i__4].r, u[i__3].i = cwork[i__4]
  2949. .i;
  2950. /* L774: */
  2951. }
  2952. /* L773: */
  2953. }
  2954. }
  2955. /* Permute the rows of V using the (column) permutation from the */
  2956. /* first QRF. Also, scale the columns to make them unit in */
  2957. /* Euclidean norm. This applies to all cases. */
  2958. temp1 = sqrt((real) (*n)) * epsln;
  2959. i__1 = *n;
  2960. for (q = 1; q <= i__1; ++q) {
  2961. i__2 = *n;
  2962. for (p = 1; p <= i__2; ++p) {
  2963. i__3 = (*n << 1) + *n * nr + nr + iwork[p];
  2964. i__4 = p + q * v_dim1;
  2965. cwork[i__3].r = v[i__4].r, cwork[i__3].i = v[i__4].i;
  2966. /* L972: */
  2967. }
  2968. i__2 = *n;
  2969. for (p = 1; p <= i__2; ++p) {
  2970. i__3 = p + q * v_dim1;
  2971. i__4 = (*n << 1) + *n * nr + nr + p;
  2972. v[i__3].r = cwork[i__4].r, v[i__3].i = cwork[i__4].i;
  2973. /* L973: */
  2974. }
  2975. xsc = 1.f / scnrm2_(n, &v[q * v_dim1 + 1], &c__1);
  2976. if (xsc < 1.f - temp1 || xsc > temp1 + 1.f) {
  2977. csscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
  2978. }
  2979. /* L1972: */
  2980. }
  2981. /* At this moment, V contains the right singular vectors of A. */
  2982. /* Next, assemble the left singular vector matrix U (M x N). */
  2983. if (nr < *m) {
  2984. i__1 = *m - nr;
  2985. claset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1]
  2986. , ldu);
  2987. if (nr < n1) {
  2988. i__1 = n1 - nr;
  2989. claset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) *
  2990. u_dim1 + 1], ldu);
  2991. i__1 = *m - nr;
  2992. i__2 = n1 - nr;
  2993. claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (
  2994. nr + 1) * u_dim1], ldu);
  2995. }
  2996. }
  2997. /* The Q matrix from the first QRF is built into the left singular */
  2998. /* matrix U. This applies to all cases. */
  2999. i__1 = *lwork - *n;
  3000. cunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
  3001. u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
  3002. /* The columns of U are normalized. The cost is O(M*N) flops. */
  3003. temp1 = sqrt((real) (*m)) * epsln;
  3004. i__1 = nr;
  3005. for (p = 1; p <= i__1; ++p) {
  3006. xsc = 1.f / scnrm2_(m, &u[p * u_dim1 + 1], &c__1);
  3007. if (xsc < 1.f - temp1 || xsc > temp1 + 1.f) {
  3008. csscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  3009. }
  3010. /* L1973: */
  3011. }
  3012. /* If the initial QRF is computed with row pivoting, the left */
  3013. /* singular vectors must be adjusted. */
  3014. if (rowpiv) {
  3015. i__1 = *m - 1;
  3016. claswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[
  3017. iwoff + 1], &c_n1);
  3018. }
  3019. } else {
  3020. /* the second QRF is not needed */
  3021. clacpy_("U", n, n, &a[a_offset], lda, &cwork[*n + 1], n);
  3022. if (l2pert) {
  3023. xsc = sqrt(small);
  3024. i__1 = *n;
  3025. for (p = 2; p <= i__1; ++p) {
  3026. i__2 = *n + (p - 1) * *n + p;
  3027. q__1.r = xsc * cwork[i__2].r, q__1.i = xsc * cwork[
  3028. i__2].i;
  3029. ctemp.r = q__1.r, ctemp.i = q__1.i;
  3030. i__2 = p - 1;
  3031. for (q = 1; q <= i__2; ++q) {
  3032. /* CWORK(N+(q-1)*N+p)=-TEMP1 * ( CWORK(N+(p-1)*N+q) / */
  3033. /* $ ABS(CWORK(N+(p-1)*N+q)) ) */
  3034. i__3 = *n + (q - 1) * *n + p;
  3035. q__1.r = -ctemp.r, q__1.i = -ctemp.i;
  3036. cwork[i__3].r = q__1.r, cwork[i__3].i = q__1.i;
  3037. /* L5971: */
  3038. }
  3039. /* L5970: */
  3040. }
  3041. } else {
  3042. i__1 = *n - 1;
  3043. i__2 = *n - 1;
  3044. claset_("L", &i__1, &i__2, &c_b1, &c_b1, &cwork[*n + 2],
  3045. n);
  3046. }
  3047. i__1 = *lwork - *n - *n * *n;
  3048. cgesvj_("U", "U", "N", n, n, &cwork[*n + 1], n, &sva[1], n, &
  3049. u[u_offset], ldu, &cwork[*n + *n * *n + 1], &i__1, &
  3050. rwork[1], lrwork, info);
  3051. scalem = rwork[1];
  3052. numrank = i_nint(&rwork[2]);
  3053. i__1 = *n;
  3054. for (p = 1; p <= i__1; ++p) {
  3055. ccopy_(n, &cwork[*n + (p - 1) * *n + 1], &c__1, &u[p *
  3056. u_dim1 + 1], &c__1);
  3057. csscal_(n, &sva[p], &cwork[*n + (p - 1) * *n + 1], &c__1);
  3058. /* L6970: */
  3059. }
  3060. ctrsm_("L", "U", "N", "N", n, n, &c_b2, &a[a_offset], lda, &
  3061. cwork[*n + 1], n);
  3062. i__1 = *n;
  3063. for (p = 1; p <= i__1; ++p) {
  3064. ccopy_(n, &cwork[*n + p], n, &v[iwork[p] + v_dim1], ldv);
  3065. /* L6972: */
  3066. }
  3067. temp1 = sqrt((real) (*n)) * epsln;
  3068. i__1 = *n;
  3069. for (p = 1; p <= i__1; ++p) {
  3070. xsc = 1.f / scnrm2_(n, &v[p * v_dim1 + 1], &c__1);
  3071. if (xsc < 1.f - temp1 || xsc > temp1 + 1.f) {
  3072. csscal_(n, &xsc, &v[p * v_dim1 + 1], &c__1);
  3073. }
  3074. /* L6971: */
  3075. }
  3076. /* Assemble the left singular vector matrix U (M x N). */
  3077. if (*n < *m) {
  3078. i__1 = *m - *n;
  3079. claset_("A", &i__1, n, &c_b1, &c_b1, &u[*n + 1 + u_dim1],
  3080. ldu);
  3081. if (*n < n1) {
  3082. i__1 = n1 - *n;
  3083. claset_("A", n, &i__1, &c_b1, &c_b1, &u[(*n + 1) *
  3084. u_dim1 + 1], ldu);
  3085. i__1 = *m - *n;
  3086. i__2 = n1 - *n;
  3087. claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[*n + 1 + (
  3088. *n + 1) * u_dim1], ldu);
  3089. }
  3090. }
  3091. i__1 = *lwork - *n;
  3092. cunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
  3093. u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
  3094. temp1 = sqrt((real) (*m)) * epsln;
  3095. i__1 = n1;
  3096. for (p = 1; p <= i__1; ++p) {
  3097. xsc = 1.f / scnrm2_(m, &u[p * u_dim1 + 1], &c__1);
  3098. if (xsc < 1.f - temp1 || xsc > temp1 + 1.f) {
  3099. csscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  3100. }
  3101. /* L6973: */
  3102. }
  3103. if (rowpiv) {
  3104. i__1 = *m - 1;
  3105. claswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[
  3106. iwoff + 1], &c_n1);
  3107. }
  3108. }
  3109. /* end of the >> almost orthogonal case << in the full SVD */
  3110. } else {
  3111. /* This branch deploys a preconditioned Jacobi SVD with explicitly */
  3112. /* accumulated rotations. It is included as optional, mainly for */
  3113. /* experimental purposes. It does perform well, and can also be used. */
  3114. /* In this implementation, this branch will be automatically activated */
  3115. /* if the condition number sigma_max(A) / sigma_min(A) is predicted */
  3116. /* to be greater than the overflow threshold. This is because the */
  3117. /* a posteriori computation of the singular vectors assumes robust */
  3118. /* implementation of BLAS and some LAPACK procedures, capable of working */
  3119. /* in presence of extreme values, e.g. when the singular values spread from */
  3120. /* the underflow to the overflow threshold. */
  3121. i__1 = nr;
  3122. for (p = 1; p <= i__1; ++p) {
  3123. i__2 = *n - p + 1;
  3124. ccopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
  3125. c__1);
  3126. i__2 = *n - p + 1;
  3127. clacgv_(&i__2, &v[p + p * v_dim1], &c__1);
  3128. /* L7968: */
  3129. }
  3130. if (l2pert) {
  3131. xsc = sqrt(small / epsln);
  3132. i__1 = nr;
  3133. for (q = 1; q <= i__1; ++q) {
  3134. r__1 = xsc * c_abs(&v[q + q * v_dim1]);
  3135. q__1.r = r__1, q__1.i = 0.f;
  3136. ctemp.r = q__1.r, ctemp.i = q__1.i;
  3137. i__2 = *n;
  3138. for (p = 1; p <= i__2; ++p) {
  3139. if (p > q && c_abs(&v[p + q * v_dim1]) <= temp1 || p <
  3140. q) {
  3141. i__3 = p + q * v_dim1;
  3142. v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
  3143. }
  3144. /* $ V(p,q) = TEMP1 * ( V(p,q) / ABS(V(p,q)) ) */
  3145. if (p < q) {
  3146. i__3 = p + q * v_dim1;
  3147. i__4 = p + q * v_dim1;
  3148. q__1.r = -v[i__4].r, q__1.i = -v[i__4].i;
  3149. v[i__3].r = q__1.r, v[i__3].i = q__1.i;
  3150. }
  3151. /* L5968: */
  3152. }
  3153. /* L5969: */
  3154. }
  3155. } else {
  3156. i__1 = nr - 1;
  3157. i__2 = nr - 1;
  3158. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1]
  3159. , ldv);
  3160. }
  3161. i__1 = *lwork - (*n << 1);
  3162. cgeqrf_(n, &nr, &v[v_offset], ldv, &cwork[*n + 1], &cwork[(*n <<
  3163. 1) + 1], &i__1, &ierr);
  3164. clacpy_("L", n, &nr, &v[v_offset], ldv, &cwork[(*n << 1) + 1], n);
  3165. i__1 = nr;
  3166. for (p = 1; p <= i__1; ++p) {
  3167. i__2 = nr - p + 1;
  3168. ccopy_(&i__2, &v[p + p * v_dim1], ldv, &u[p + p * u_dim1], &
  3169. c__1);
  3170. i__2 = nr - p + 1;
  3171. clacgv_(&i__2, &u[p + p * u_dim1], &c__1);
  3172. /* L7969: */
  3173. }
  3174. if (l2pert) {
  3175. xsc = sqrt(small / epsln);
  3176. i__1 = nr;
  3177. for (q = 2; q <= i__1; ++q) {
  3178. i__2 = q - 1;
  3179. for (p = 1; p <= i__2; ++p) {
  3180. /* Computing MIN */
  3181. r__2 = c_abs(&u[p + p * u_dim1]), r__3 = c_abs(&u[q +
  3182. q * u_dim1]);
  3183. r__1 = xsc * f2cmin(r__2,r__3);
  3184. q__1.r = r__1, q__1.i = 0.f;
  3185. ctemp.r = q__1.r, ctemp.i = q__1.i;
  3186. /* U(p,q) = - TEMP1 * ( U(q,p) / ABS(U(q,p)) ) */
  3187. i__3 = p + q * u_dim1;
  3188. q__1.r = -ctemp.r, q__1.i = -ctemp.i;
  3189. u[i__3].r = q__1.r, u[i__3].i = q__1.i;
  3190. /* L9971: */
  3191. }
  3192. /* L9970: */
  3193. }
  3194. } else {
  3195. i__1 = nr - 1;
  3196. i__2 = nr - 1;
  3197. claset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1]
  3198. , ldu);
  3199. }
  3200. i__1 = *lwork - (*n << 1) - *n * nr;
  3201. cgesvj_("L", "U", "V", &nr, &nr, &u[u_offset], ldu, &sva[1], n, &
  3202. v[v_offset], ldv, &cwork[(*n << 1) + *n * nr + 1], &i__1,
  3203. &rwork[1], lrwork, info);
  3204. scalem = rwork[1];
  3205. numrank = i_nint(&rwork[2]);
  3206. if (nr < *n) {
  3207. i__1 = *n - nr;
  3208. claset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 + v_dim1],
  3209. ldv);
  3210. i__1 = *n - nr;
  3211. claset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) * v_dim1 +
  3212. 1], ldv);
  3213. i__1 = *n - nr;
  3214. i__2 = *n - nr;
  3215. claset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (nr + 1)
  3216. * v_dim1], ldv);
  3217. }
  3218. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  3219. cunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n, &cwork[*n
  3220. + 1], &v[v_offset], ldv, &cwork[(*n << 1) + *n * nr + nr
  3221. + 1], &i__1, &ierr);
  3222. /* Permute the rows of V using the (column) permutation from the */
  3223. /* first QRF. Also, scale the columns to make them unit in */
  3224. /* Euclidean norm. This applies to all cases. */
  3225. temp1 = sqrt((real) (*n)) * epsln;
  3226. i__1 = *n;
  3227. for (q = 1; q <= i__1; ++q) {
  3228. i__2 = *n;
  3229. for (p = 1; p <= i__2; ++p) {
  3230. i__3 = (*n << 1) + *n * nr + nr + iwork[p];
  3231. i__4 = p + q * v_dim1;
  3232. cwork[i__3].r = v[i__4].r, cwork[i__3].i = v[i__4].i;
  3233. /* L8972: */
  3234. }
  3235. i__2 = *n;
  3236. for (p = 1; p <= i__2; ++p) {
  3237. i__3 = p + q * v_dim1;
  3238. i__4 = (*n << 1) + *n * nr + nr + p;
  3239. v[i__3].r = cwork[i__4].r, v[i__3].i = cwork[i__4].i;
  3240. /* L8973: */
  3241. }
  3242. xsc = 1.f / scnrm2_(n, &v[q * v_dim1 + 1], &c__1);
  3243. if (xsc < 1.f - temp1 || xsc > temp1 + 1.f) {
  3244. csscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
  3245. }
  3246. /* L7972: */
  3247. }
  3248. /* At this moment, V contains the right singular vectors of A. */
  3249. /* Next, assemble the left singular vector matrix U (M x N). */
  3250. if (nr < *m) {
  3251. i__1 = *m - nr;
  3252. claset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1],
  3253. ldu);
  3254. if (nr < n1) {
  3255. i__1 = n1 - nr;
  3256. claset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) *
  3257. u_dim1 + 1], ldu);
  3258. i__1 = *m - nr;
  3259. i__2 = n1 - nr;
  3260. claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (nr
  3261. + 1) * u_dim1], ldu);
  3262. }
  3263. }
  3264. i__1 = *lwork - *n;
  3265. cunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
  3266. u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
  3267. if (rowpiv) {
  3268. i__1 = *m - 1;
  3269. claswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[iwoff +
  3270. 1], &c_n1);
  3271. }
  3272. }
  3273. if (transp) {
  3274. i__1 = *n;
  3275. for (p = 1; p <= i__1; ++p) {
  3276. cswap_(n, &u[p * u_dim1 + 1], &c__1, &v[p * v_dim1 + 1], &
  3277. c__1);
  3278. /* L6974: */
  3279. }
  3280. }
  3281. }
  3282. /* end of the full SVD */
  3283. /* Undo scaling, if necessary (and possible) */
  3284. if (uscal2 <= big / sva[1] * uscal1) {
  3285. slascl_("G", &c__0, &c__0, &uscal1, &uscal2, &nr, &c__1, &sva[1], n, &
  3286. ierr);
  3287. uscal1 = 1.f;
  3288. uscal2 = 1.f;
  3289. }
  3290. if (nr < *n) {
  3291. i__1 = *n;
  3292. for (p = nr + 1; p <= i__1; ++p) {
  3293. sva[p] = 0.f;
  3294. /* L3004: */
  3295. }
  3296. }
  3297. rwork[1] = uscal2 * scalem;
  3298. rwork[2] = uscal1;
  3299. if (errest) {
  3300. rwork[3] = sconda;
  3301. }
  3302. if (lsvec && rsvec) {
  3303. rwork[4] = condr1;
  3304. rwork[5] = condr2;
  3305. }
  3306. if (l2tran) {
  3307. rwork[6] = entra;
  3308. rwork[7] = entrat;
  3309. }
  3310. iwork[1] = nr;
  3311. iwork[2] = numrank;
  3312. iwork[3] = warning;
  3313. if (transp) {
  3314. iwork[4] = 1;
  3315. } else {
  3316. iwork[4] = -1;
  3317. }
  3318. return 0;
  3319. } /* cgejsv_ */