dgejsv.c

  1. #include <math.h>
  2. #include <stdlib.h>
  3. #include <string.h>
  4. #include <stdio.h>
  5. #include <complex.h>
  6. #ifdef complex
  7. #undef complex
  8. #endif
  9. #ifdef I
  10. #undef I
  11. #endif
  12. #if defined(_WIN64)
  13. typedef long long BLASLONG;
  14. typedef unsigned long long BLASULONG;
  15. #else
  16. typedef long BLASLONG;
  17. typedef unsigned long BLASULONG;
  18. #endif
  19. #ifdef LAPACK_ILP64
  20. typedef BLASLONG blasint;
  21. #if defined(_WIN64)
  22. #define blasabs(x) llabs(x)
  23. #else
  24. #define blasabs(x) labs(x)
  25. #endif
  26. #else
  27. typedef int blasint;
  28. #define blasabs(x) abs(x)
  29. #endif
  30. typedef blasint integer;
  31. typedef unsigned int uinteger;
  32. typedef char *address;
  33. typedef short int shortint;
  34. typedef float real;
  35. typedef double doublereal;
  36. typedef struct { real r, i; } complex;
  37. typedef struct { doublereal r, i; } doublecomplex;
  38. #ifdef _MSC_VER
  39. static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
  40. static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
  41. static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
  42. static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
  43. #else
  44. static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
  45. static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
  46. static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
  47. static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
  48. #endif
  49. #define pCf(z) (*_pCf(z))
  50. #define pCd(z) (*_pCd(z))
  51. typedef blasint logical;
  52. typedef char logical1;
  53. typedef char integer1;
  54. #define TRUE_ (1)
  55. #define FALSE_ (0)
  56. /* Extern is for use with -E */
  57. #ifndef Extern
  58. #define Extern extern
  59. #endif
  60. /* I/O stuff */
  61. typedef int flag;
  62. typedef int ftnlen;
  63. typedef int ftnint;
  64. /*external read, write*/
  65. typedef struct
  66. { flag cierr;
  67. ftnint ciunit;
  68. flag ciend;
  69. char *cifmt;
  70. ftnint cirec;
  71. } cilist;
  72. /*internal read, write*/
  73. typedef struct
  74. { flag icierr;
  75. char *iciunit;
  76. flag iciend;
  77. char *icifmt;
  78. ftnint icirlen;
  79. ftnint icirnum;
  80. } icilist;
  81. /*open*/
  82. typedef struct
  83. { flag oerr;
  84. ftnint ounit;
  85. char *ofnm;
  86. ftnlen ofnmlen;
  87. char *osta;
  88. char *oacc;
  89. char *ofm;
  90. ftnint orl;
  91. char *oblnk;
  92. } olist;
  93. /*close*/
  94. typedef struct
  95. { flag cerr;
  96. ftnint cunit;
  97. char *csta;
  98. } cllist;
  99. /*rewind, backspace, endfile*/
  100. typedef struct
  101. { flag aerr;
  102. ftnint aunit;
  103. } alist;
  104. /* inquire */
  105. typedef struct
  106. { flag inerr;
  107. ftnint inunit;
  108. char *infile;
  109. ftnlen infilen;
  110. ftnint *inex; /*parameters in standard's order*/
  111. ftnint *inopen;
  112. ftnint *innum;
  113. ftnint *innamed;
  114. char *inname;
  115. ftnlen innamlen;
  116. char *inacc;
  117. ftnlen inacclen;
  118. char *inseq;
  119. ftnlen inseqlen;
  120. char *indir;
  121. ftnlen indirlen;
  122. char *infmt;
  123. ftnlen infmtlen;
  124. char *inform;
  125. ftnint informlen;
  126. char *inunf;
  127. ftnlen inunflen;
  128. ftnint *inrecl;
  129. ftnint *innrec;
  130. char *inblank;
  131. ftnlen inblanklen;
  132. } inlist;
  133. #define VOID void
  134. union Multitype { /* for multiple entry points */
  135. integer1 g;
  136. shortint h;
  137. integer i;
  138. /* longint j; */
  139. real r;
  140. doublereal d;
  141. complex c;
  142. doublecomplex z;
  143. };
  144. typedef union Multitype Multitype;
  145. struct Vardesc { /* for Namelist */
  146. char *name;
  147. char *addr;
  148. ftnlen *dims;
  149. int type;
  150. };
  151. typedef struct Vardesc Vardesc;
  152. struct Namelist {
  153. char *name;
  154. Vardesc **vars;
  155. int nvars;
  156. };
  157. typedef struct Namelist Namelist;
  158. #define abs(x) ((x) >= 0 ? (x) : -(x))
  159. #define dabs(x) (fabs(x))
  160. #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
  161. #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
  162. #define dmin(a,b) (f2cmin(a,b))
  163. #define dmax(a,b) (f2cmax(a,b))
  164. #define bit_test(a,b) ((a) >> (b) & 1)
  165. #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
  166. #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
  167. #define abort_() { sig_die("Fortran abort routine called", 1); }
  168. #define c_abs(z) (cabsf(Cf(z)))
  169. #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
  170. #ifdef _MSC_VER
  171. #define c_div(c, a, b) {Cf(c)._Val[0] = (Cf(a)._Val[0]/Cf(b)._Val[0]); Cf(c)._Val[1]=(Cf(a)._Val[1]/Cf(b)._Val[1]);}
  172. #define z_div(c, a, b) {Cd(c)._Val[0] = (Cd(a)._Val[0]/Cd(b)._Val[0]); Cd(c)._Val[1]=(Cd(a)._Val[1]/Cd(b)._Val[1]);}
  173. #else
  174. #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
  175. #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
  176. #endif
  177. #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
  178. #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
  179. #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
  180. //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
  181. #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
  182. #define d_abs(x) (fabs(*(x)))
  183. #define d_acos(x) (acos(*(x)))
  184. #define d_asin(x) (asin(*(x)))
  185. #define d_atan(x) (atan(*(x)))
  186. #define d_atn2(x, y) (atan2(*(x),*(y)))
  187. #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
  188. #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
  189. #define d_cos(x) (cos(*(x)))
  190. #define d_cosh(x) (cosh(*(x)))
  191. #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
  192. #define d_exp(x) (exp(*(x)))
  193. #define d_imag(z) (cimag(Cd(z)))
  194. #define r_imag(z) (cimagf(Cf(z)))
  195. #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  196. #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  197. #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  198. #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  199. #define d_log(x) (log(*(x)))
  200. #define d_mod(x, y) (fmod(*(x), *(y)))
  201. #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
  202. #define d_nint(x) u_nint(*(x))
  203. #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
  204. #define d_sign(a,b) u_sign(*(a),*(b))
  205. #define r_sign(a,b) u_sign(*(a),*(b))
  206. #define d_sin(x) (sin(*(x)))
  207. #define d_sinh(x) (sinh(*(x)))
  208. #define d_sqrt(x) (sqrt(*(x)))
  209. #define d_tan(x) (tan(*(x)))
  210. #define d_tanh(x) (tanh(*(x)))
  211. #define i_abs(x) abs(*(x))
  212. #define i_dnnt(x) ((integer)u_nint(*(x)))
  213. #define i_len(s, n) (n)
  214. #define i_nint(x) ((integer)u_nint(*(x)))
  215. #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
  216. #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
  217. #define pow_si(B,E) spow_ui(*(B),*(E))
  218. #define pow_ri(B,E) spow_ui(*(B),*(E))
  219. #define pow_di(B,E) dpow_ui(*(B),*(E))
  220. #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
  221. #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
  222. #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
  223. #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
  224. #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
  225. #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
  226. #define sig_die(s, kill) { exit(1); }
  227. #define s_stop(s, n) {exit(0);}
  228. static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
  229. #define z_abs(z) (cabs(Cd(z)))
  230. #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
  231. #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
  232. #define myexit_() break;
  233. #define mycycle() continue;
  234. #define myceiling(w) {ceil(w)}
  235. #define myhuge(w) {HUGE_VAL}
  236. //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
  237. #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
  238. /* procedure parameter types for -A and -C++ */
  239. #ifdef __cplusplus
  240. typedef logical (*L_fp)(...);
  241. #else
  242. typedef logical (*L_fp)();
  243. #endif
  244. static float spow_ui(float x, integer n) {
  245. float pow=1.0; unsigned long int u;
  246. if(n != 0) {
  247. if(n < 0) n = -n, x = 1/x;
  248. for(u = n; ; ) {
  249. if(u & 01) pow *= x;
  250. if(u >>= 1) x *= x;
  251. else break;
  252. }
  253. }
  254. return pow;
  255. }
  256. static double dpow_ui(double x, integer n) {
  257. double pow=1.0; unsigned long int u;
  258. if(n != 0) {
  259. if(n < 0) n = -n, x = 1/x;
  260. for(u = n; ; ) {
  261. if(u & 01) pow *= x;
  262. if(u >>= 1) x *= x;
  263. else break;
  264. }
  265. }
  266. return pow;
  267. }
  268. #ifdef _MSC_VER
  269. static _Fcomplex cpow_ui(complex x, integer n) {
  270. complex pow={1.0,0.0}; unsigned long int u;
  271. if(n != 0) {
  272. if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
  273. for(u = n; ; ) {
  274. if(u & 01) pow.r *= x.r, pow.i *= x.i;
  275. if(u >>= 1) x.r *= x.r, x.i *= x.i;
  276. else break;
  277. }
  278. }
  279. _Fcomplex p={pow.r, pow.i};
  280. return p;
  281. }
  282. #else
  283. static _Complex float cpow_ui(_Complex float x, integer n) {
  284. _Complex float pow=1.0; unsigned long int u;
  285. if(n != 0) {
  286. if(n < 0) n = -n, x = 1/x;
  287. for(u = n; ; ) {
  288. if(u & 01) pow *= x;
  289. if(u >>= 1) x *= x;
  290. else break;
  291. }
  292. }
  293. return pow;
  294. }
  295. #endif
  296. #ifdef _MSC_VER
  297. static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
  298. _Dcomplex pow={1.0,0.0}; unsigned long int u;
  299. if(n != 0) {
  300. if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
  301. for(u = n; ; ) {
  302. if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
  303. if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
  304. else break;
  305. }
  306. }
  307. _Dcomplex p = {pow._Val[0], pow._Val[1]};
  308. return p;
  309. }
  310. #else
  311. static _Complex double zpow_ui(_Complex double x, integer n) {
  312. _Complex double pow=1.0; unsigned long int u;
  313. if(n != 0) {
  314. if(n < 0) n = -n, x = 1/x;
  315. for(u = n; ; ) {
  316. if(u & 01) pow *= x;
  317. if(u >>= 1) x *= x;
  318. else break;
  319. }
  320. }
  321. return pow;
  322. }
  323. #endif
  324. static integer pow_ii(integer x, integer n) {
  325. integer pow; unsigned long int u;
  326. if (n <= 0) {
  327. if (n == 0 || x == 1) pow = 1;
  328. else if (x != -1) pow = x == 0 ? 1/x : 0;
  329. else n = -n;
  330. }
  331. if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
  332. u = n;
  333. for(pow = 1; ; ) {
  334. if(u & 01) pow *= x;
  335. if(u >>= 1) x *= x;
  336. else break;
  337. }
  338. }
  339. return pow;
  340. }
  341. static integer dmaxloc_(double *w, integer s, integer e, integer *n)
  342. {
  343. double m; integer i, mi;
  344. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  345. if (w[i-1]>m) mi=i ,m=w[i-1];
  346. return mi-s+1;
  347. }
  348. static integer smaxloc_(float *w, integer s, integer e, integer *n)
  349. {
  350. float m; integer i, mi;
  351. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  352. if (w[i-1]>m) mi=i ,m=w[i-1];
  353. return mi-s+1;
  354. }
  355. static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  356. integer n = *n_, incx = *incx_, incy = *incy_, i;
  357. #ifdef _MSC_VER
  358. _Fcomplex zdotc = {0.0, 0.0};
  359. if (incx == 1 && incy == 1) {
  360. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  361. zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
  362. zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
  363. }
  364. } else {
  365. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  366. zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
  367. zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
  368. }
  369. }
  370. pCf(z) = zdotc;
  371. }
  372. #else
  373. _Complex float zdotc = 0.0;
  374. if (incx == 1 && incy == 1) {
  375. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  376. zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
  377. }
  378. } else {
  379. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  380. zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
  381. }
  382. }
  383. pCf(z) = zdotc;
  384. }
  385. #endif
  386. static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  387. integer n = *n_, incx = *incx_, incy = *incy_, i;
  388. #ifdef _MSC_VER
  389. _Dcomplex zdotc = {0.0, 0.0};
  390. if (incx == 1 && incy == 1) {
  391. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  392. zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
  393. zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
  394. }
  395. } else {
  396. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  397. zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
  398. zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
  399. }
  400. }
  401. pCd(z) = zdotc;
  402. }
  403. #else
  404. _Complex double zdotc = 0.0;
  405. if (incx == 1 && incy == 1) {
  406. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  407. zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
  408. }
  409. } else {
  410. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  411. zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
  412. }
  413. }
  414. pCd(z) = zdotc;
  415. }
  416. #endif
  417. static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  418. integer n = *n_, incx = *incx_, incy = *incy_, i;
  419. #ifdef _MSC_VER
  420. _Fcomplex zdotc = {0.0, 0.0};
  421. if (incx == 1 && incy == 1) {
  422. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  423. zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
  424. zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
  425. }
  426. } else {
  427. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  428. zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
  429. zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
  430. }
  431. }
  432. pCf(z) = zdotc;
  433. }
  434. #else
  435. _Complex float zdotc = 0.0;
  436. if (incx == 1 && incy == 1) {
  437. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  438. zdotc += Cf(&x[i]) * Cf(&y[i]);
  439. }
  440. } else {
  441. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  442. zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
  443. }
  444. }
  445. pCf(z) = zdotc;
  446. }
  447. #endif
  448. static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  449. integer n = *n_, incx = *incx_, incy = *incy_, i;
  450. #ifdef _MSC_VER
  451. _Dcomplex zdotc = {0.0, 0.0};
  452. if (incx == 1 && incy == 1) {
  453. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  454. zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
  455. zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
  456. }
  457. } else {
  458. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  459. zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
  460. zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
  461. }
  462. }
  463. pCd(z) = zdotc;
  464. }
  465. #else
  466. _Complex double zdotc = 0.0;
  467. if (incx == 1 && incy == 1) {
  468. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  469. zdotc += Cd(&x[i]) * Cd(&y[i]);
  470. }
  471. } else {
  472. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  473. zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
  474. }
  475. }
  476. pCd(z) = zdotc;
  477. }
  478. #endif
  479. /* -- translated by f2c (version 20000121).
  480. You must link the resulting object file with the libraries:
  481. -lf2c -lm (in that order)
  482. */
  483. /* Table of constant values */
  484. static integer c__1 = 1;
  485. static doublereal c_b34 = 0.;
  486. static doublereal c_b35 = 1.;
  487. static integer c__0 = 0;
  488. static integer c_n1 = -1;
  489. /* > \brief \b DGEJSV */
  490. /* =========== DOCUMENTATION =========== */
  491. /* Online html documentation available at */
  492. /* http://www.netlib.org/lapack/explore-html/ */
  493. /* > \htmlonly */
  494. /* > Download DGEJSV + dependencies */
  495. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/dgejsv.
  496. f"> */
  497. /* > [TGZ]</a> */
  498. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/dgejsv.
  499. f"> */
  500. /* > [ZIP]</a> */
  501. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/dgejsv.
  502. f"> */
  503. /* > [TXT]</a> */
  504. /* > \endhtmlonly */
  505. /* Definition: */
  506. /* =========== */
  507. /* SUBROUTINE DGEJSV( JOBA, JOBU, JOBV, JOBR, JOBT, JOBP, */
  508. /* M, N, A, LDA, SVA, U, LDU, V, LDV, */
  509. /* WORK, LWORK, IWORK, INFO ) */
  510. /* IMPLICIT NONE */
  511. /* INTEGER INFO, LDA, LDU, LDV, LWORK, M, N */
  512. /* DOUBLE PRECISION A( LDA, * ), SVA( N ), U( LDU, * ), V( LDV, * ), */
  513. /* $ WORK( LWORK ) */
  514. /* INTEGER IWORK( * ) */
  515. /* CHARACTER*1 JOBA, JOBP, JOBR, JOBT, JOBU, JOBV */
  516. /* > \par Purpose: */
  517. /* ============= */
  518. /* > */
  519. /* > \verbatim */
  520. /* > */
  521. /* > DGEJSV computes the singular value decomposition (SVD) of a real M-by-N */
  522. /* > matrix [A], where M >= N. The SVD of [A] is written as */
  523. /* > */
  524. /* > [A] = [U] * [SIGMA] * [V]^t, */
  525. /* > */
  526. /* > where [SIGMA] is an N-by-N (M-by-N) matrix which is zero except for its N */
  527. /* > diagonal elements, [U] is an M-by-N (or M-by-M) orthonormal matrix, and */
  528. /* > [V] is an N-by-N orthogonal matrix. The diagonal elements of [SIGMA] are */
  529. /* > the singular values of [A]. The columns of [U] and [V] are the left and */
  530. /* > the right singular vectors of [A], respectively. The matrices [U] and [V] */
  531. /* > are computed and stored in the arrays U and V, respectively. The diagonal */
  532. /* > of [SIGMA] is computed and stored in the array SVA. */
  533. /* > DGEJSV can sometimes compute tiny singular values and their singular vectors much */
  534. /* > more accurately than other SVD routines, see below under Further Details. */
  535. /* > \endverbatim */
  536. /* Arguments: */
  537. /* ========== */
  538. /* > \param[in] JOBA */
  539. /* > \verbatim */
  540. /* > JOBA is CHARACTER*1 */
  541. /* > Specifies the level of accuracy: */
  542. /* > = 'C': This option works well (high relative accuracy) if A = B * D, */
  543. /* > with well-conditioned B and arbitrary diagonal matrix D. */
  544. /* > The accuracy cannot be spoiled by COLUMN scaling. The */
  545. /* > accuracy of the computed output depends on the condition of */
  546. /* > B, and the procedure aims at the best theoretical accuracy. */
  547. /* > The relative error max_{i=1:N}|d sigma_i| / sigma_i is */
  548. /* > bounded by f(M,N)*epsilon* cond(B), independent of D. */
  549. /* > The input matrix is preprocessed with the QRF with column */
  550. /* > pivoting. This initial preprocessing and preconditioning by */
  551. /* > a rank revealing QR factorization is common for all values of */
  552. /* > JOBA. Additional actions are specified as follows: */
  553. /* > = 'E': Computation as with 'C' with an additional estimate of the */
  554. /* > condition number of B. It provides a realistic error bound. */
  555. /* > = 'F': If A = D1 * C * D2 with ill-conditioned diagonal scalings */
  556. /* > D1, D2, and well-conditioned matrix C, this option gives */
  557. /* > higher accuracy than the 'C' option. If the structure of the */
  558. /* > input matrix is not known, and relative accuracy is */
  559. /* > desirable, then this option is advisable. The input matrix A */
  560. /* > is preprocessed with QR factorization with FULL (row and */
  561. /* > column) pivoting. */
  562. /* > = 'G': Computation as with 'F' with an additional estimate of the */
  563. /* > condition number of B, where A=D*B. If A has heavily weighted */
  564. /* > rows, then using this condition number gives an overly pessimistic */
  565. /* > error bound. */
  566. /* > = 'A': Small singular values are the noise and the matrix is treated */
  567. /* > as numerically rank deficient. The error in the computed */
  568. /* > singular values is bounded by f(m,n)*epsilon*||A||. */
  569. /* > The computed SVD A = U * S * V^t restores A up to */
  570. /* > f(m,n)*epsilon*||A||. */
  571. /* > This gives the procedure the licence to discard (set to zero) */
  572. /* > all singular values below N*epsilon*||A||. */
  573. /* > = 'R': Similar as in 'A'. Rank revealing property of the initial */
  574. /* > QR factorization is used to reveal (using triangular factor) */
  575. /* > a gap sigma_{r+1} < epsilon * sigma_r in which case the */
  576. /* > numerical RANK is declared to be r. The SVD is computed with */
  577. /* > absolute error bounds, but more accurately than with 'A'. */
  578. /* > \endverbatim */
  579. /* > */
  580. /* > \param[in] JOBU */
  581. /* > \verbatim */
  582. /* > JOBU is CHARACTER*1 */
  583. /* > Specifies whether to compute the columns of U: */
  584. /* > = 'U': N columns of U are returned in the array U. */
  585. /* > = 'F': full set of M left sing. vectors is returned in the array U. */
  586. /* > = 'W': U may be used as workspace of length M*N. See the description */
  587. /* > of U. */
  588. /* > = 'N': U is not computed. */
  589. /* > \endverbatim */
  590. /* > */
  591. /* > \param[in] JOBV */
  592. /* > \verbatim */
  593. /* > JOBV is CHARACTER*1 */
  594. /* > Specifies whether to compute the matrix V: */
  595. /* > = 'V': N columns of V are returned in the array V; Jacobi rotations */
  596. /* > are not explicitly accumulated. */
  597. /* > = 'J': N columns of V are returned in the array V, but they are */
  598. /* > computed as the product of Jacobi rotations. This option is */
  599. /* > allowed only if JOBU .NE. 'N', i.e. in computing the full SVD. */
  600. /* > = 'W': V may be used as workspace of length N*N. See the description */
  601. /* > of V. */
  602. /* > = 'N': V is not computed. */
  603. /* > \endverbatim */
  604. /* > */
  605. /* > \param[in] JOBR */
  606. /* > \verbatim */
  607. /* > JOBR is CHARACTER*1 */
  608. /* > Specifies the RANGE for the singular values. Issues the licence to */
  609. /* > set to zero small positive singular values if they are outside */
  610. /* > specified range. If A .NE. 0 is scaled so that the largest singular */
  611. /* > value of c*A is around DSQRT(BIG), BIG=SLAMCH('O'), then JOBR issues */
  612. /* > the licence to kill columns of A whose norm in c*A is less than */
  613. /* > DSQRT(SFMIN) (for JOBR = 'R'), or less than SMALL=SFMIN/EPSLN, */
  614. /* > where SFMIN=SLAMCH('S'), EPSLN=SLAMCH('E'). */
  615. /* > = 'N': Do not kill small columns of c*A. This option assumes that */
  616. /* > BLAS and QR factorizations and triangular solvers are */
  617. /* > implemented to work in that range. If the condition of A */
  618. /* > is greater than BIG, use DGESVJ. */
  619. /* > = 'R': RESTRICTED range for sigma(c*A) is [DSQRT(SFMIN), DSQRT(BIG)] */
  620. /* > (roughly, as described above). This option is recommended. */
  621. /* > ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  622. /* > For computing the singular values in the FULL range [SFMIN,BIG] */
  623. /* > use DGESVJ. */
  624. /* > \endverbatim */
  625. /* > */
  626. /* > \param[in] JOBT */
  627. /* > \verbatim */
  628. /* > JOBT is CHARACTER*1 */
  629. /* > If the matrix is square then the procedure may determine to use */
  630. /* > transposed A if A^t seems to be better with respect to convergence. */
  631. /* > If the matrix is not square, JOBT is ignored. This is subject to */
  632. /* > changes in the future. */
  633. /* > The decision is based on two values of entropy over the adjoint */
  634. /* > orbit of A^t * A. See the descriptions of WORK(6) and WORK(7). */
  635. /* > = 'T': transpose if entropy test indicates possibly faster */
  636. /* > convergence of Jacobi process if A^t is taken as input. If A is */
  637. /* > replaced with A^t, then the row pivoting is included automatically. */
  638. /* > = 'N': do not speculate. */
  639. /* > This option can be used to compute only the singular values, or the */
  640. /* > full SVD (U, SIGMA and V). For only one set of singular vectors */
  641. /* > (U or V), the caller should provide both U and V, as one of the */
  642. /* > matrices is used as workspace if the matrix A is transposed. */
  643. /* > The implementer can easily remove this constraint and make the */
  644. /* > code more complicated. See the descriptions of U and V. */
  645. /* > \endverbatim */
  646. /* > */
  647. /* > \param[in] JOBP */
  648. /* > \verbatim */
  649. /* > JOBP is CHARACTER*1 */
  650. /* > Issues the licence to introduce structured perturbations to drown */
  651. /* > denormalized numbers. This licence should be active if the */
  652. /* > denormals are poorly implemented, causing slow computation, */
  653. /* > especially in cases of fast convergence (!). For details see [1,2]. */
  654. /* > For the sake of simplicity, these perturbations are included only */
  655. /* > when the full SVD or only the singular values are requested. The */
  656. /* > implementer/user can easily add the perturbation for the cases of */
  657. /* > computing one set of singular vectors. */
  658. /* > = 'P': introduce perturbation */
  659. /* > = 'N': do not perturb */
  660. /* > \endverbatim */
  661. /* > */
  662. /* > \param[in] M */
  663. /* > \verbatim */
  664. /* > M is INTEGER */
  665. /* > The number of rows of the input matrix A. M >= 0. */
  666. /* > \endverbatim */
  667. /* > */
  668. /* > \param[in] N */
  669. /* > \verbatim */
  670. /* > N is INTEGER */
  671. /* > The number of columns of the input matrix A. M >= N >= 0. */
  672. /* > \endverbatim */
  673. /* > */
  674. /* > \param[in,out] A */
  675. /* > \verbatim */
  676. /* > A is DOUBLE PRECISION array, dimension (LDA,N) */
  677. /* > On entry, the M-by-N matrix A. */
  678. /* > \endverbatim */
  679. /* > */
  680. /* > \param[in] LDA */
  681. /* > \verbatim */
  682. /* > LDA is INTEGER */
  683. /* > The leading dimension of the array A. LDA >= f2cmax(1,M). */
  684. /* > \endverbatim */
  685. /* > */
  686. /* > \param[out] SVA */
  687. /* > \verbatim */
  688. /* > SVA is DOUBLE PRECISION array, dimension (N) */
  689. /* > On exit, */
  690. /* > - For WORK(1)/WORK(2) = ONE: The singular values of A. During the */
  691. /* > computation SVA contains Euclidean column norms of the */
  692. /* > iterated matrices in the array A. */
  693. /* > - For WORK(1) .NE. WORK(2): The singular values of A are */
  694. /* > (WORK(1)/WORK(2)) * SVA(1:N). This factored form is used if */
  695. /* > sigma_max(A) overflows or if small singular values have been */
  696. /* > saved from underflow by scaling the input matrix A. */
  697. /* > - If JOBR='R' then some of the singular values may be returned */
  698. /* > as exact zeros obtained by "set to zero" because they are */
  699. /* > below the numerical rank threshold or are denormalized numbers. */
  700. /* > \endverbatim */
  701. /* > */
  702. /* > \param[out] U */
  703. /* > \verbatim */
  704. /* > U is DOUBLE PRECISION array, dimension ( LDU, N ) */
  705. /* > If JOBU = 'U', then U contains on exit the M-by-N matrix of */
  706. /* > the left singular vectors. */
  707. /* > If JOBU = 'F', then U contains on exit the M-by-M matrix of */
  708. /* > the left singular vectors, including an ONB */
  709. /* > of the orthogonal complement of the Range(A). */
  710. /* > If JOBU = 'W' .AND. (JOBV = 'V' .AND. JOBT = 'T' .AND. M = N), */
  711. /* > then U is used as workspace if the procedure */
  712. /* > replaces A with A^t. In that case, [V] is computed */
  713. /* > in U as left singular vectors of A^t and then */
  714. /* > copied back to the V array. This 'W' option is just */
  715. /* > a reminder to the caller that in this case U is */
  716. /* > reserved as workspace of length N*N. */
  717. /* > If JOBU = 'N' U is not referenced, unless JOBT='T'. */
  718. /* > \endverbatim */
  719. /* > */
  720. /* > \param[in] LDU */
  721. /* > \verbatim */
  722. /* > LDU is INTEGER */
  723. /* > The leading dimension of the array U, LDU >= 1. */
  724. /* > IF JOBU = 'U' or 'F' or 'W', then LDU >= M. */
  725. /* > \endverbatim */
  726. /* > */
  727. /* > \param[out] V */
  728. /* > \verbatim */
  729. /* > V is DOUBLE PRECISION array, dimension ( LDV, N ) */
  730. /* > If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of */
  731. /* > the right singular vectors; */
  732. /* > If JOBV = 'W', AND (JOBU = 'U' AND JOBT = 'T' AND M = N), */
  733. /* > then V is used as workspace if the procedure */
  734. /* > replaces A with A^t. In that case, [U] is computed */
  735. /* > in V as right singular vectors of A^t and then */
  736. /* > copied back to the U array. This 'W' option is just */
  737. /* > a reminder to the caller that in this case V is */
  738. /* > reserved as workspace of length N*N. */
  739. /* > If JOBV = 'N' V is not referenced, unless JOBT='T'. */
  740. /* > \endverbatim */
  741. /* > */
  742. /* > \param[in] LDV */
  743. /* > \verbatim */
  744. /* > LDV is INTEGER */
  745. /* > The leading dimension of the array V, LDV >= 1. */
  746. /* > If JOBV = 'V' or 'J' or 'W', then LDV >= N. */
  747. /* > \endverbatim */
  748. /* > */
  749. /* > \param[out] WORK */
  750. /* > \verbatim */
  751. /* > WORK is DOUBLE PRECISION array, dimension (LWORK) */
  752. /* > On exit, if N > 0 .AND. M > 0 (else not referenced), */
  753. /* > WORK(1) = SCALE = WORK(2) / WORK(1) is the scaling factor such */
  754. /* > that SCALE*SVA(1:N) are the computed singular values */
  755. /* > of A. (See the description of SVA().) */
  756. /* > WORK(2) = See the description of WORK(1). */
  757. /* > WORK(3) = SCONDA is an estimate for the condition number of */
  758. /* > column equilibrated A. (If JOBA = 'E' or 'G') */
  759. /* > SCONDA is an estimate of DSQRT(||(R^t * R)^(-1)||_1). */
  760. /* > It is computed using DPOCON. It holds */
  761. /* > N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
  762. /* > where R is the triangular factor from the QRF of A. */
  763. /* > However, if R is truncated and the numerical rank is */
  764. /* > determined to be strictly smaller than N, SCONDA is */
  765. /* > returned as -1, thus indicating that the smallest */
  766. /* > singular values might be lost. */
  767. /* > */
  768. /* > If full SVD is needed, the following two condition numbers are */
  769. /* > useful for the analysis of the algorithm. They are provided for */
  770. /* > a developer/implementer who is familiar with the details of */
  771. /* > the method. */
  772. /* > */
  773. /* > WORK(4) = an estimate of the scaled condition number of the */
  774. /* > triangular factor in the first QR factorization. */
  775. /* > WORK(5) = an estimate of the scaled condition number of the */
  776. /* > triangular factor in the second QR factorization. */
  777. /* > The following two parameters are computed if JOBT = 'T'. */
  778. /* > They are provided for a developer/implementer who is familiar */
  779. /* > with the details of the method. */
  780. /* > */
  781. /* > WORK(6) = the entropy of A^t*A :: this is the Shannon entropy */
  782. /* > of diag(A^t*A) / Trace(A^t*A) taken as point in the */
  783. /* > probability simplex. */
  784. /* > WORK(7) = the entropy of A*A^t. */
  785. /* > \endverbatim */
  786. /* > */
  787. /* > \param[in] LWORK */
  788. /* > \verbatim */
  789. /* > LWORK is INTEGER */
  790. /* > Length of WORK to confirm proper allocation of work space. */
  791. /* > LWORK depends on the job: */
  792. /* > */
  793. /* > If only SIGMA is needed (JOBU = 'N', JOBV = 'N') and */
  794. /* > -> .. no scaled condition estimate required (JOBE = 'N'): */
  795. /* > LWORK >= f2cmax(2*M+N,4*N+1,7). This is the minimal requirement. */
  796. /* > ->> For optimal performance (blocked code) the optimal value */
  797. /* > is LWORK >= f2cmax(2*M+N,3*N+(N+1)*NB,7). Here NB is the optimal */
  798. /* > block size for DGEQP3 and DGEQRF. */
  799. /* > In general, optimal LWORK is computed as */
  800. /* > LWORK >= f2cmax(2*M+N,N+LWORK(DGEQP3),N+LWORK(DGEQRF), 7). */
  801. /* > -> .. an estimate of the scaled condition number of A is */
  802. /* > required (JOBA='E', 'G'). In this case, LWORK is the maximum */
  803. /* > of the above and N*N+4*N, i.e. LWORK >= f2cmax(2*M+N,N*N+4*N,7). */
  804. /* > ->> For optimal performance (blocked code) the optimal value */
  805. /* > is LWORK >= f2cmax(2*M+N,3*N+(N+1)*NB, N*N+4*N, 7). */
  806. /* > In general, the optimal length LWORK is computed as */
  807. /* > LWORK >= f2cmax(2*M+N,N+LWORK(DGEQP3),N+LWORK(DGEQRF), */
  808. /* > N+N*N+LWORK(DPOCON),7). */
  809. /* > */
  810. /* > If SIGMA and the right singular vectors are needed (JOBV = 'V'), */
  811. /* > -> the minimal requirement is LWORK >= f2cmax(2*M+N,4*N+1,7). */
  812. /* > -> For optimal performance, LWORK >= f2cmax(2*M+N,3*N+(N+1)*NB,7), */
  813. /* > where NB is the optimal block size for DGEQP3, DGEQRF, DGELQF, */
  814. /* > DORMLQ. In general, the optimal length LWORK is computed as */
  815. /* > LWORK >= f2cmax(2*M+N,N+LWORK(DGEQP3), N+LWORK(DPOCON), */
  816. /* > N+LWORK(DGELQF), 2*N+LWORK(DGEQRF), N+LWORK(DORMLQ)). */
  817. /* > */
  818. /* > If SIGMA and the left singular vectors are needed */
  819. /* > -> the minimal requirement is LWORK >= f2cmax(2*M+N,4*N+1,7). */
  820. /* > -> For optimal performance: */
  821. /* > if JOBU = 'U' :: LWORK >= f2cmax(2*M+N,3*N+(N+1)*NB,7), */
  822. /* > if JOBU = 'F' :: LWORK >= f2cmax(2*M+N,3*N+(N+1)*NB,N+M*NB,7), */
  823. /* > where NB is the optimal block size for DGEQP3, DGEQRF, DORMQR. */
  824. /* > In general, the optimal length LWORK is computed as */
  825. /* > LWORK >= f2cmax(2*M+N,N+LWORK(DGEQP3),N+LWORK(DPOCON), */
  826. /* > 2*N+LWORK(DGEQRF), N+LWORK(DORMQR)). */
  827. /* > Here LWORK(DORMQR) equals N*NB (for JOBU = 'U') or */
  828. /* > M*NB (for JOBU = 'F'). */
  829. /* > */
  830. /* > If the full SVD is needed: (JOBU = 'U' or JOBU = 'F') and */
  831. /* > -> if JOBV = 'V' */
  832. /* > the minimal requirement is LWORK >= f2cmax(2*M+N,6*N+2*N*N). */
  833. /* > -> if JOBV = 'J' the minimal requirement is */
  834. /* > LWORK >= f2cmax(2*M+N, 4*N+N*N,2*N+N*N+6). */
  835. /* > -> For optimal performance, LWORK should be additionally */
  836. /* > larger than N+M*NB, where NB is the optimal block size */
  837. /* > for DORMQR. */
  838. /* > \endverbatim */
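/* > As an illustrative (hypothetical) example: for M = 1000, N = 100 and */
/* > only the singular values requested (JOBU = 'N', JOBV = 'N'), the */
/* > minimal requirement above evaluates to */
/* > LWORK >= f2cmax(2*M+N, 4*N+1, 7) = f2cmax(2100, 401, 7) = 2100. */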
  839. /* > */
  840. /* > \param[out] IWORK */
  841. /* > \verbatim */
  842. /* > IWORK is INTEGER array, dimension (M+3*N). */
  843. /* > On exit, */
  844. /* > IWORK(1) = the numerical rank determined after the initial */
  845. /* > QR factorization with pivoting. See the descriptions */
  846. /* > of JOBA and JOBR. */
  847. /* > IWORK(2) = the number of the computed nonzero singular values */
  848. /* > IWORK(3) = if nonzero, a warning message: */
  849. /* > If IWORK(3) = 1 then some of the column norms of A */
  850. /* > were denormalized floats. The requested high accuracy */
  851. /* > is not warranted by the data. */
  852. /* > \endverbatim */
  853. /* > */
  854. /* > \param[out] INFO */
  855. /* > \verbatim */
  856. /* > INFO is INTEGER */
  857. /* > < 0: if INFO = -i, then the i-th argument had an illegal value. */
  858. /* > = 0: successful exit; */
  859. /* > > 0: DGEJSV did not converge in the maximal allowed number */
  860. /* > of sweeps. The computed values may be inaccurate. */
  861. /* > \endverbatim */
  862. /* Authors: */
  863. /* ======== */
  864. /* > \author Univ. of Tennessee */
  865. /* > \author Univ. of California Berkeley */
  866. /* > \author Univ. of Colorado Denver */
  867. /* > \author NAG Ltd. */
  868. /* > \date June 2016 */
  869. /* > \ingroup doubleGEsing */
  870. /* > \par Further Details: */
  871. /* ===================== */
  872. /* > */
  873. /* > \verbatim */
  874. /* > */
  875. /* > DGEJSV implements a preconditioned Jacobi SVD algorithm. It uses DGEQP3, */
  876. /* > DGEQRF, and DGELQF as preprocessors and preconditioners. Optionally, an */
  877. /* > additional row pivoting can be used as a preprocessor, which in some */
  878. /* > cases results in much higher accuracy. An example is matrix A with the */
  879. /* > structure A = D1 * C * D2, where D1, D2 are arbitrarily ill-conditioned */
  880. /* > diagonal matrices and C is a well-conditioned matrix. In that case, complete */
  881. /* > pivoting in the first QR factorizations provides accuracy dependent on the */
  882. /* > condition number of C, and independent of D1, D2. Such higher accuracy is */
  883. /* > not completely understood theoretically, but it works well in practice. */
  884. /* > Further, if A can be written as A = B*D, with well-conditioned B and some */
  885. /* > diagonal D, then the high accuracy is guaranteed, both theoretically and */
  886. /* > in software, independent of D. For more details see [1], [2]. */
  887. /* > The computational range for the singular values can be the full range */
  888. /* > ( UNDERFLOW,OVERFLOW ), provided that the machine arithmetic and the BLAS */
  889. /* > & LAPACK routines called by DGEJSV are implemented to work in that range. */
  890. /* > If that is not the case, then the restriction for safe computation with */
  891. /* > the singular values in the range of normalized IEEE numbers is that the */
  892. /* > spectral condition number kappa(A)=sigma_max(A)/sigma_min(A) does not */
  893. /* > overflow. This code (DGEJSV) is best used in this restricted range, */
  894. /* > meaning that singular values of magnitude below ||A||_2 / DLAMCH('O') are */
  895. /* > returned as zeros. See JOBR for details on this. */
  896. /* > Further, this implementation is somewhat slower than the one described */
  897. /* > in [1,2] due to replacement of some non-LAPACK components, and because */
  898. /* > the choice of some tuning parameters in the iterative part (DGESVJ) is */
  899. /* > left to the implementer on a particular machine. */
  900. /* > The rank revealing QR factorization (in this code: DGEQP3) should be */
  901. /* > implemented as in [3]. We have a new version of DGEQP3 under development */
  902. /* > that is more robust than the current one in LAPACK, with a cleaner cut in */
  903. /* > rank deficient cases. It will be available in the SIGMA library [4]. */
  904. /* > If M is much larger than N, it is obvious that the initial QRF with */
  905. /* > column pivoting can be preprocessed by the QRF without pivoting. That */
  906. /* > well known trick is not used in DGEJSV because in some cases heavy row */
  907. /* > weighting can be treated with complete pivoting. The overhead in cases */
  908. /* > M much larger than N is then only due to pivoting, but the benefits in */
  909. /* > terms of accuracy have prevailed. The implementer/user can incorporate */
  910. /* > this extra QRF step easily. The implementer can also improve data movement */
  911. /* > (matrix transpose, matrix copy, matrix transposed copy) - this */
  912. /* > implementation of DGEJSV uses only the simplest, naive data movement. */
  913. /* > \endverbatim */
  914. /* > \par Contributors: */
  915. /* ================== */
  916. /* > */
  917. /* > Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
  918. /* > \par References: */
  919. /* ================ */
  920. /* > */
  921. /* > \verbatim */
  922. /* > */
  923. /* > [1] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. */
  924. /* > SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1322-1342. */
  925. /* > LAPACK Working note 169. */
  926. /* > [2] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. */
  927. /* > SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1343-1362. */
  928. /* > LAPACK Working note 170. */
  929. /* > [3] Z. Drmac and Z. Bujanovic: On the failure of rank-revealing QR */
  930. /* > factorization software - a case study. */
  931. /* > ACM Trans. Math. Softw. Vol. 35, No 2 (2008), pp. 1-28. */
  932. /* > LAPACK Working note 176. */
  933. /* > [4] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
  934. /* > QSVD, (H,K)-SVD computations. */
  935. /* > Department of Mathematics, University of Zagreb, 2008. */
  936. /* > \endverbatim */
  937. /* > \par Bugs, examples and comments: */
  938. /* ================================= */
  939. /* > */
  940. /* > Please report all bugs and send interesting examples and/or comments to */
  941. /* > drmac@math.hr. Thank you. */
  942. /* > */
  943. /* ===================================================================== */
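/* Illustrative calling sketch (hypothetical values, kept inside #if 0 so it is
   never compiled): it shows how the f2c-style interface documented above might
   be driven from C for a small 4-by-3 column-major matrix, requesting the
   singular values and the right singular vectors only. The JOB* settings, the
   LWORK/IWORK sizes and the rescaling of SVA follow the descriptions above. */
#if 0
static void dgejsv_example_(void)
{
    extern void dgejsv_(char *, char *, char *, char *, char *, char *,
            integer *, integer *, doublereal *, integer *, doublereal *,
            doublereal *, integer *, doublereal *, integer *, doublereal *,
            integer *, integer *, integer *);
    integer m = 4, n = 3, lda = 4, ldu = 4, ldv = 3, lwork = 13, info = 0, i;
    /* 4-by-3 matrix stored column by column */
    doublereal a[12] = { 1., 2., 3., 4.,  5., 6., 7., 8.,  9., 10., 11., 12. };
    doublereal sva[3], u[4 * 3], v[3 * 3];
    doublereal work[13];          /* >= f2cmax(2*M+N, 4*N+1, 7) = 13 here */
    integer iwork[4 + 3 * 3];     /* M + 3*N entries */
    dgejsv_("C", "N", "V", "R", "N", "N", &m, &n, a, &lda, sva,
            u, &ldu, v, &ldv, work, &lwork, iwork, &info);
    if (info == 0) {
        /* singular values are (WORK(1)/WORK(2)) * SVA(1:N), see SVA above */
        doublereal scale = work[0] / work[1];
        for (i = 0; i < n; i++)
            printf("sigma_%d = %g\n", (int) (i + 1), scale * sva[i]);
    }
}
#endif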
  944. /* Subroutine */ void dgejsv_(char *joba, char *jobu, char *jobv, char *jobr,
  945. char *jobt, char *jobp, integer *m, integer *n, doublereal *a,
  946. integer *lda, doublereal *sva, doublereal *u, integer *ldu,
  947. doublereal *v, integer *ldv, doublereal *work, integer *lwork,
  948. integer *iwork, integer *info)
  949. {
  950. /* System generated locals */
  951. integer a_dim1, a_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
  952. i__3, i__4, i__5, i__6, i__7, i__8, i__9, i__10, i__11, i__12;
  953. doublereal d__1, d__2, d__3, d__4;
  954. /* Local variables */
  955. logical defr;
  956. doublereal aapp, aaqq;
  957. logical kill;
  958. integer ierr;
  959. extern doublereal dnrm2_(integer *, doublereal *, integer *);
  960. doublereal temp1;
  961. integer p, q;
  962. logical jracc;
  963. extern /* Subroutine */ void dscal_(integer *, doublereal *, doublereal *,
  964. integer *);
  965. extern logical lsame_(char *, char *);
  966. doublereal small, entra, sfmin;
  967. logical lsvec;
  968. extern /* Subroutine */ void dcopy_(integer *, doublereal *, integer *,
  969. doublereal *, integer *), dswap_(integer *, doublereal *, integer
  970. *, doublereal *, integer *);
  971. doublereal epsln;
  972. logical rsvec;
  973. extern /* Subroutine */ void dtrsm_(char *, char *, char *, char *,
  974. integer *, integer *, doublereal *, doublereal *, integer *,
  975. doublereal *, integer *);
  976. integer n1;
  977. logical l2aber;
  978. extern /* Subroutine */ void dgeqp3_(integer *, integer *, doublereal *,
  979. integer *, integer *, doublereal *, doublereal *, integer *,
  980. integer *);
  981. doublereal condr1, condr2, uscal1, uscal2;
  982. logical l2kill, l2rank, l2tran, l2pert;
  983. extern doublereal dlamch_(char *);
  984. integer nr;
  985. extern /* Subroutine */ void dgelqf_(integer *, integer *, doublereal *,
  986. integer *, doublereal *, doublereal *, integer *, integer *);
  987. extern integer idamax_(integer *, doublereal *, integer *);
  988. doublereal scalem;
  989. extern /* Subroutine */ void dlascl_(char *, integer *, integer *,
  990. doublereal *, doublereal *, integer *, integer *, doublereal *,
  991. integer *, integer *);
  992. doublereal sconda;
  993. logical goscal;
  994. doublereal aatmin;
  995. extern /* Subroutine */ void dgeqrf_(integer *, integer *, doublereal *,
  996. integer *, doublereal *, doublereal *, integer *, integer *);
  997. doublereal aatmax;
  998. extern /* Subroutine */ void dlacpy_(char *, integer *, integer *,
  999. doublereal *, integer *, doublereal *, integer *),
  1000. dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
  1001. doublereal *, integer *);
  1002. extern int xerbla_(char *, integer *, ftnlen);
  1003. logical noscal;
  1004. extern /* Subroutine */ void dpocon_(char *, integer *, doublereal *,
  1005. integer *, doublereal *, doublereal *, doublereal *, integer *,
  1006. integer *), dgesvj_(char *, char *, char *, integer *,
  1007. integer *, doublereal *, integer *, doublereal *, integer *,
  1008. doublereal *, integer *, doublereal *, integer *, integer *), dlassq_(integer *, doublereal *, integer
  1009. *, doublereal *, doublereal *);
  1010. extern int dlaswp_(integer *, doublereal *,
  1011. integer *, integer *, integer *, integer *, integer *);
  1012. doublereal entrat;
  1013. logical almort;
  1014. extern /* Subroutine */ void dorgqr_(integer *, integer *, integer *,
  1015. doublereal *, integer *, doublereal *, doublereal *, integer *,
  1016. integer *), dormlq_(char *, char *, integer *, integer *, integer
  1017. *, doublereal *, integer *, doublereal *, doublereal *, integer *,
  1018. doublereal *, integer *, integer *);
  1019. doublereal maxprj;
  1020. logical errest;
  1021. extern /* Subroutine */ void dormqr_(char *, char *, integer *, integer *,
  1022. integer *, doublereal *, integer *, doublereal *, doublereal *,
  1023. integer *, doublereal *, integer *, integer *);
  1024. logical transp, rowpiv;
  1025. doublereal big, cond_ok__, xsc, big1;
  1026. integer warning, numrank;
  1027. /* -- LAPACK computational routine (version 3.7.1) -- */
  1028. /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
  1029. /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
  1030. /* June 2016 */
  1031. /* =========================================================================== */
  1032. /* Test the input arguments */
  1033. /* Parameter adjustments */
  1034. --sva;
  1035. a_dim1 = *lda;
  1036. a_offset = 1 + a_dim1 * 1;
  1037. a -= a_offset;
  1038. u_dim1 = *ldu;
  1039. u_offset = 1 + u_dim1 * 1;
  1040. u -= u_offset;
  1041. v_dim1 = *ldv;
  1042. v_offset = 1 + v_dim1 * 1;
  1043. v -= v_offset;
  1044. --work;
  1045. --iwork;
  1046. /* Function Body */
  1047. lsvec = lsame_(jobu, "U") || lsame_(jobu, "F");
  1048. jracc = lsame_(jobv, "J");
  1049. rsvec = lsame_(jobv, "V") || jracc;
  1050. rowpiv = lsame_(joba, "F") || lsame_(joba, "G");
  1051. l2rank = lsame_(joba, "R");
  1052. l2aber = lsame_(joba, "A");
  1053. errest = lsame_(joba, "E") || lsame_(joba, "G");
  1054. l2tran = lsame_(jobt, "T");
  1055. l2kill = lsame_(jobr, "R");
  1056. defr = lsame_(jobr, "N");
  1057. l2pert = lsame_(jobp, "P");
  1058. if (! (rowpiv || l2rank || l2aber || errest || lsame_(joba, "C"))) {
  1059. *info = -1;
  1060. } else if (! (lsvec || lsame_(jobu, "N") || lsame_(
  1061. jobu, "W"))) {
  1062. *info = -2;
  1063. } else if (! (rsvec || lsame_(jobv, "N") || lsame_(
  1064. jobv, "W")) || jracc && ! lsvec) {
  1065. *info = -3;
  1066. } else if (! (l2kill || defr)) {
  1067. *info = -4;
  1068. } else if (! (l2tran || lsame_(jobt, "N"))) {
  1069. *info = -5;
  1070. } else if (! (l2pert || lsame_(jobp, "N"))) {
  1071. *info = -6;
  1072. } else if (*m < 0) {
  1073. *info = -7;
  1074. } else if (*n < 0 || *n > *m) {
  1075. *info = -8;
  1076. } else if (*lda < *m) {
  1077. *info = -10;
  1078. } else if (lsvec && *ldu < *m) {
  1079. *info = -13;
  1080. } else if (rsvec && *ldv < *n) {
  1081. *info = -15;
  1082. } else /* if(complicated condition) */ {
  1083. /* Computing MAX */
  1084. i__1 = 7, i__2 = (*n << 2) + 1, i__1 = f2cmax(i__1,i__2), i__2 = (*m <<
  1085. 1) + *n;
  1086. /* Computing MAX */
  1087. i__3 = 7, i__4 = (*n << 2) + *n * *n, i__3 = f2cmax(i__3,i__4), i__4 = (*
  1088. m << 1) + *n;
  1089. /* Computing MAX */
  1090. i__5 = 7, i__6 = (*m << 1) + *n, i__5 = f2cmax(i__5,i__6), i__6 = (*n <<
  1091. 2) + 1;
  1092. /* Computing MAX */
  1093. i__7 = 7, i__8 = (*m << 1) + *n, i__7 = f2cmax(i__7,i__8), i__8 = (*n <<
  1094. 2) + 1;
  1095. /* Computing MAX */
  1096. i__9 = (*m << 1) + *n, i__10 = *n * 6 + (*n << 1) * *n;
  1097. /* Computing MAX */
  1098. i__11 = (*m << 1) + *n, i__12 = (*n << 2) + *n * *n, i__11 = f2cmax(
  1099. i__11,i__12), i__12 = (*n << 1) + *n * *n + 6;
  1100. if (! (lsvec || rsvec || errest) && *lwork < f2cmax(i__1,i__2) || ! (
  1101. lsvec || rsvec) && errest && *lwork < f2cmax(i__3,i__4) || lsvec
  1102. && ! rsvec && *lwork < f2cmax(i__5,i__6) || rsvec && ! lsvec && *
  1103. lwork < f2cmax(i__7,i__8) || lsvec && rsvec && ! jracc && *lwork
  1104. < f2cmax(i__9,i__10) || lsvec && rsvec && jracc && *lwork < f2cmax(
  1105. i__11,i__12)) {
  1106. *info = -17;
  1107. } else {
  1108. /* #:) */
  1109. *info = 0;
  1110. }
  1111. }
  1112. if (*info != 0) {
  1113. /* #:( */
  1114. i__1 = -(*info);
  1115. xerbla_("DGEJSV", &i__1, (ftnlen)6);
  1116. return;
  1117. }
  1118. /* Quick return for void matrix (Y3K safe) */
  1119. /* #:) */
  1120. if (*m == 0 || *n == 0) {
  1121. iwork[1] = 0;
  1122. iwork[2] = 0;
  1123. iwork[3] = 0;
  1124. work[1] = 0.;
  1125. work[2] = 0.;
  1126. work[3] = 0.;
  1127. work[4] = 0.;
  1128. work[5] = 0.;
  1129. work[6] = 0.;
  1130. work[7] = 0.;
  1131. return;
  1132. }
  1133. /* Determine whether the matrix U should be M x N or M x M */
  1134. if (lsvec) {
  1135. n1 = *n;
  1136. if (lsame_(jobu, "F")) {
  1137. n1 = *m;
  1138. }
  1139. }
  1140. /* Set numerical parameters */
  1141. /* ! NOTE: Make sure DLAMCH() does not fail on the target architecture. */
  1142. epsln = dlamch_("Epsilon");
  1143. sfmin = dlamch_("SafeMinimum");
  1144. small = sfmin / epsln;
  1145. big = dlamch_("O");
  1146. /* BIG = ONE / SFMIN */
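/* Note: EPSLN is the relative machine precision, SFMIN the safe minimum */
/* (smallest positive number whose reciprocal does not overflow), BIG the */
/* overflow threshold, and SMALL = SFMIN / EPSLN is used further below as */
/* a conservative cutoff for column norms and diagonal entries that carry */
/* no reliable information. */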
  1147. /* Initialize SVA(1:N) = diag( ||A e_i||_2 )_1^N */
  1148. /* (!) If necessary, scale SVA() to protect the largest norm from */
  1149. /* overflow. It is possible that this scaling pushes the smallest */
1150. /* column norm below the underflow threshold (extreme case). */
  1151. scalem = 1. / sqrt((doublereal) (*m) * (doublereal) (*n));
  1152. noscal = TRUE_;
  1153. goscal = TRUE_;
  1154. i__1 = *n;
  1155. for (p = 1; p <= i__1; ++p) {
  1156. aapp = 0.;
  1157. aaqq = 1.;
  1158. dlassq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
  1159. if (aapp > big) {
  1160. *info = -9;
  1161. i__2 = -(*info);
  1162. xerbla_("DGEJSV", &i__2, (ftnlen)6);
  1163. return;
  1164. }
  1165. aaqq = sqrt(aaqq);
  1166. if (aapp < big / aaqq && noscal) {
  1167. sva[p] = aapp * aaqq;
  1168. } else {
  1169. noscal = FALSE_;
  1170. sva[p] = aapp * (aaqq * scalem);
  1171. if (goscal) {
  1172. goscal = FALSE_;
  1173. i__2 = p - 1;
  1174. dscal_(&i__2, &scalem, &sva[1], &c__1);
  1175. }
  1176. }
  1177. /* L1874: */
  1178. }
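/* Illustrative sketch (not part of the routine): DLASSQ maintains a */
/* (scale, sumsq) pair with scale^2 * sumsq = sum_i x(i)^2, so a column */
/* norm can be formed without squaring large entries, e.g. */
/*     doublereal scl = 0., ssq = 1.; */
/*     dlassq_(m, &a[p * a_dim1 + 1], &c__1, &scl, &ssq); */
/*     doublereal nrm = scl * sqrt(ssq);   .. the 2-norm of column p */
/* which is essentially how SVA(p) = AAPP * AAQQ is formed in the loop */
/* above (up to the protective factor SCALEM). */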
  1179. if (noscal) {
  1180. scalem = 1.;
  1181. }
  1182. aapp = 0.;
  1183. aaqq = big;
  1184. i__1 = *n;
  1185. for (p = 1; p <= i__1; ++p) {
  1186. /* Computing MAX */
  1187. d__1 = aapp, d__2 = sva[p];
  1188. aapp = f2cmax(d__1,d__2);
  1189. if (sva[p] != 0.) {
  1190. /* Computing MIN */
  1191. d__1 = aaqq, d__2 = sva[p];
  1192. aaqq = f2cmin(d__1,d__2);
  1193. }
  1194. /* L4781: */
  1195. }
  1196. /* Quick return for zero M x N matrix */
  1197. /* #:) */
  1198. if (aapp == 0.) {
  1199. if (lsvec) {
  1200. dlaset_("G", m, &n1, &c_b34, &c_b35, &u[u_offset], ldu)
  1201. ;
  1202. }
  1203. if (rsvec) {
  1204. dlaset_("G", n, n, &c_b34, &c_b35, &v[v_offset], ldv);
  1205. }
  1206. work[1] = 1.;
  1207. work[2] = 1.;
  1208. if (errest) {
  1209. work[3] = 1.;
  1210. }
  1211. if (lsvec && rsvec) {
  1212. work[4] = 1.;
  1213. work[5] = 1.;
  1214. }
  1215. if (l2tran) {
  1216. work[6] = 0.;
  1217. work[7] = 0.;
  1218. }
  1219. iwork[1] = 0;
  1220. iwork[2] = 0;
  1221. iwork[3] = 0;
  1222. return;
  1223. }
1224. /* Issue a warning if denormalized column norms are detected. Override the */
  1225. /* high relative accuracy request. Issue licence to kill columns */
  1226. /* (set them to zero) whose norm is less than sigma_max / BIG (roughly). */
  1227. /* #:( */
  1228. warning = 0;
  1229. if (aaqq <= sfmin) {
  1230. l2rank = TRUE_;
  1231. l2kill = TRUE_;
  1232. warning = 1;
  1233. }
  1234. /* Quick return for one-column matrix */
  1235. /* #:) */
  1236. if (*n == 1) {
  1237. if (lsvec) {
  1238. dlascl_("G", &c__0, &c__0, &sva[1], &scalem, m, &c__1, &a[a_dim1
  1239. + 1], lda, &ierr);
  1240. dlacpy_("A", m, &c__1, &a[a_offset], lda, &u[u_offset], ldu);
  1241. /* computing all M left singular vectors of the M x 1 matrix */
  1242. if (n1 != *n) {
  1243. i__1 = *lwork - *n;
  1244. dgeqrf_(m, n, &u[u_offset], ldu, &work[1], &work[*n + 1], &
  1245. i__1, &ierr);
  1246. i__1 = *lwork - *n;
  1247. dorgqr_(m, &n1, &c__1, &u[u_offset], ldu, &work[1], &work[*n
  1248. + 1], &i__1, &ierr);
  1249. dcopy_(m, &a[a_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
  1250. }
  1251. }
  1252. if (rsvec) {
  1253. v[v_dim1 + 1] = 1.;
  1254. }
  1255. if (sva[1] < big * scalem) {
  1256. sva[1] /= scalem;
  1257. scalem = 1.;
  1258. }
  1259. work[1] = 1. / scalem;
  1260. work[2] = 1.;
  1261. if (sva[1] != 0.) {
  1262. iwork[1] = 1;
  1263. if (sva[1] / scalem >= sfmin) {
  1264. iwork[2] = 1;
  1265. } else {
  1266. iwork[2] = 0;
  1267. }
  1268. } else {
  1269. iwork[1] = 0;
  1270. iwork[2] = 0;
  1271. }
  1272. iwork[3] = 0;
  1273. if (errest) {
  1274. work[3] = 1.;
  1275. }
  1276. if (lsvec && rsvec) {
  1277. work[4] = 1.;
  1278. work[5] = 1.;
  1279. }
  1280. if (l2tran) {
  1281. work[6] = 0.;
  1282. work[7] = 0.;
  1283. }
  1284. return;
  1285. }
  1286. transp = FALSE_;
  1287. l2tran = l2tran && *m == *n;
  1288. aatmax = -1.;
  1289. aatmin = big;
  1290. if (rowpiv || l2tran) {
  1291. /* Compute the row norms, needed to determine row pivoting sequence */
  1292. /* (in the case of heavily row weighted A, row pivoting is strongly */
  1293. /* advised) and to collect information needed to compare the */
1294. /* structures of A * A^t and A^t * A (in the case L2TRAN = .TRUE.). */
  1295. if (l2tran) {
  1296. i__1 = *m;
  1297. for (p = 1; p <= i__1; ++p) {
  1298. xsc = 0.;
  1299. temp1 = 1.;
  1300. dlassq_(n, &a[p + a_dim1], lda, &xsc, &temp1);
  1301. /* DLASSQ gets both the ell_2 and the ell_infinity norm */
  1302. /* in one pass through the vector */
  1303. work[*m + *n + p] = xsc * scalem;
  1304. work[*n + p] = xsc * (scalem * sqrt(temp1));
  1305. /* Computing MAX */
  1306. d__1 = aatmax, d__2 = work[*n + p];
  1307. aatmax = f2cmax(d__1,d__2);
  1308. if (work[*n + p] != 0.) {
  1309. /* Computing MIN */
  1310. d__1 = aatmin, d__2 = work[*n + p];
  1311. aatmin = f2cmin(d__1,d__2);
  1312. }
  1313. /* L1950: */
  1314. }
  1315. } else {
  1316. i__1 = *m;
  1317. for (p = 1; p <= i__1; ++p) {
  1318. work[*m + *n + p] = scalem * (d__1 = a[p + idamax_(n, &a[p +
  1319. a_dim1], lda) * a_dim1], abs(d__1));
  1320. /* Computing MAX */
  1321. d__1 = aatmax, d__2 = work[*m + *n + p];
  1322. aatmax = f2cmax(d__1,d__2);
  1323. /* Computing MIN */
  1324. d__1 = aatmin, d__2 = work[*m + *n + p];
  1325. aatmin = f2cmin(d__1,d__2);
  1326. /* L1904: */
  1327. }
  1328. }
  1329. }
  1330. /* For square matrix A try to determine whether A^t would be better */
  1331. /* input for the preconditioned Jacobi SVD, with faster convergence. */
  1332. /* The decision is based on an O(N) function of the vector of column */
  1333. /* and row norms of A, based on the Shannon entropy. This should give */
  1334. /* the right choice in most cases when the difference actually matters. */
  1335. /* It may fail and pick the slower converging side. */
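/* In formulas: with p_i = SVA(i)^2 / ||A||_F^2 (a probability vector, */
/* since the p_i sum to one), the loops below compute the normalized */
/* Shannon entropies  ENTRA = -(1/log(N)) * sum_i p_i * log(p_i)  from */
/* the column norms and ENTRAT from the row norms in WORK(N+1:N+M); */
/* the side with the smaller entropy is taken as input. */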
  1336. entra = 0.;
  1337. entrat = 0.;
  1338. if (l2tran) {
  1339. xsc = 0.;
  1340. temp1 = 1.;
  1341. dlassq_(n, &sva[1], &c__1, &xsc, &temp1);
  1342. temp1 = 1. / temp1;
  1343. entra = 0.;
  1344. i__1 = *n;
  1345. for (p = 1; p <= i__1; ++p) {
  1346. /* Computing 2nd power */
  1347. d__1 = sva[p] / xsc;
  1348. big1 = d__1 * d__1 * temp1;
  1349. if (big1 != 0.) {
  1350. entra += big1 * log(big1);
  1351. }
  1352. /* L1113: */
  1353. }
  1354. entra = -entra / log((doublereal) (*n));
  1355. /* Now, SVA().^2/Trace(A^t * A) is a point in the probability simplex. */
  1356. /* It is derived from the diagonal of A^t * A. Do the same with the */
  1357. /* diagonal of A * A^t, compute the entropy of the corresponding */
  1358. /* probability distribution. Note that A * A^t and A^t * A have the */
  1359. /* same trace. */
  1360. entrat = 0.;
  1361. i__1 = *n + *m;
  1362. for (p = *n + 1; p <= i__1; ++p) {
  1363. /* Computing 2nd power */
  1364. d__1 = work[p] / xsc;
  1365. big1 = d__1 * d__1 * temp1;
  1366. if (big1 != 0.) {
  1367. entrat += big1 * log(big1);
  1368. }
  1369. /* L1114: */
  1370. }
  1371. entrat = -entrat / log((doublereal) (*m));
  1372. /* Analyze the entropies and decide A or A^t. Smaller entropy */
  1373. /* usually means better input for the algorithm. */
  1374. transp = entrat < entra;
  1375. /* If A^t is better than A, transpose A. */
  1376. if (transp) {
  1377. /* In an optimal implementation, this trivial transpose */
1378. /* should be replaced with a faster transpose. */
  1379. i__1 = *n - 1;
  1380. for (p = 1; p <= i__1; ++p) {
  1381. i__2 = *n;
  1382. for (q = p + 1; q <= i__2; ++q) {
  1383. temp1 = a[q + p * a_dim1];
  1384. a[q + p * a_dim1] = a[p + q * a_dim1];
  1385. a[p + q * a_dim1] = temp1;
  1386. /* L1116: */
  1387. }
  1388. /* L1115: */
  1389. }
  1390. i__1 = *n;
  1391. for (p = 1; p <= i__1; ++p) {
  1392. work[*m + *n + p] = sva[p];
  1393. sva[p] = work[*n + p];
  1394. /* L1117: */
  1395. }
  1396. temp1 = aapp;
  1397. aapp = aatmax;
  1398. aatmax = temp1;
  1399. temp1 = aaqq;
  1400. aaqq = aatmin;
  1401. aatmin = temp1;
  1402. kill = lsvec;
  1403. lsvec = rsvec;
  1404. rsvec = kill;
  1405. if (lsvec) {
  1406. n1 = *n;
  1407. }
  1408. rowpiv = TRUE_;
  1409. }
  1410. }
  1411. /* END IF L2TRAN */
  1412. /* Scale the matrix so that its maximal singular value remains less */
  1413. /* than DSQRT(BIG) -- the matrix is scaled so that its maximal column */
  1414. /* has Euclidean norm equal to DSQRT(BIG/N). The only reason to keep */
  1415. /* DSQRT(BIG) instead of BIG is the fact that DGEJSV uses LAPACK and */
  1416. /* BLAS routines that, in some implementations, are not capable of */
  1417. /* working in the full interval [SFMIN,BIG] and that they may provoke */
  1418. /* overflows in the intermediate results. If the singular values spread */
  1419. /* from SFMIN to BIG, then DGESVJ will compute them. So, in that case, */
  1420. /* one should use DGESVJ instead of DGEJSV. */
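/* (DLASCL('G',...,CFROM,CTO,...) multiplies its argument by CTO/CFROM */
/* without over/underflow; the two calls below use it to bring the */
/* largest column norm of the scaled A to DSQRT(BIG/N).) */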
  1421. big1 = sqrt(big);
  1422. temp1 = sqrt(big / (doublereal) (*n));
  1423. dlascl_("G", &c__0, &c__0, &aapp, &temp1, n, &c__1, &sva[1], n, &ierr);
  1424. if (aaqq > aapp * sfmin) {
  1425. aaqq = aaqq / aapp * temp1;
  1426. } else {
  1427. aaqq = aaqq * temp1 / aapp;
  1428. }
  1429. temp1 *= scalem;
  1430. dlascl_("G", &c__0, &c__0, &aapp, &temp1, m, n, &a[a_offset], lda, &ierr);
  1431. /* To undo scaling at the end of this procedure, multiply the */
  1432. /* computed singular values with USCAL2 / USCAL1. */
  1433. uscal1 = temp1;
  1434. uscal2 = aapp;
  1435. if (l2kill) {
  1436. /* L2KILL enforces computation of nonzero singular values in */
  1437. /* the restricted range of condition number of the initial A, */
  1438. /* sigma_max(A) / sigma_min(A) approx. DSQRT(BIG)/DSQRT(SFMIN). */
  1439. xsc = sqrt(sfmin);
  1440. } else {
  1441. xsc = small;
  1442. /* Now, if the condition number of A is too big, */
  1443. /* sigma_max(A) / sigma_min(A) .GT. DSQRT(BIG/N) * EPSLN / SFMIN, */
1444. /* as a precautionary measure, the full SVD is computed using DGESVJ */
  1445. /* with accumulated Jacobi rotations. This provides numerically */
  1446. /* more robust computation, at the cost of slightly increased run */
  1447. /* time. Depending on the concrete implementation of BLAS and LAPACK */
  1448. /* (i.e. how they behave in presence of extreme ill-conditioning) the */
  1449. /* implementor may decide to remove this switch. */
  1450. if (aaqq < sqrt(sfmin) && lsvec && rsvec) {
  1451. jracc = TRUE_;
  1452. }
  1453. }
  1454. if (aaqq < xsc) {
  1455. i__1 = *n;
  1456. for (p = 1; p <= i__1; ++p) {
  1457. if (sva[p] < xsc) {
  1458. dlaset_("A", m, &c__1, &c_b34, &c_b34, &a[p * a_dim1 + 1],
  1459. lda);
  1460. sva[p] = 0.;
  1461. }
  1462. /* L700: */
  1463. }
  1464. }
  1465. /* Preconditioning using QR factorization with pivoting */
  1466. if (rowpiv) {
  1467. /* Optional row permutation (Bjoerck row pivoting): */
  1468. /* A result by Cox and Higham shows that the Bjoerck's */
  1469. /* row pivoting combined with standard column pivoting */
  1470. /* has similar effect as Powell-Reid complete pivoting. */
  1471. /* The ell-infinity norms of A are made nonincreasing. */
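/* The loop below is, in effect, a selection sort on the stored */
/* ell-infinity row norms in WORK(M+N+1:2*M+N): at step p the row with */
/* the largest remaining norm is swapped into position p and the */
/* interchange is recorded in IWORK(2*N+p), so that DLASWP can apply */
/* (and later undo) the same sequence of row swaps. */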
  1472. i__1 = *m - 1;
  1473. for (p = 1; p <= i__1; ++p) {
  1474. i__2 = *m - p + 1;
  1475. q = idamax_(&i__2, &work[*m + *n + p], &c__1) + p - 1;
  1476. iwork[(*n << 1) + p] = q;
  1477. if (p != q) {
  1478. temp1 = work[*m + *n + p];
  1479. work[*m + *n + p] = work[*m + *n + q];
  1480. work[*m + *n + q] = temp1;
  1481. }
  1482. /* L1952: */
  1483. }
  1484. i__1 = *m - 1;
  1485. dlaswp_(n, &a[a_offset], lda, &c__1, &i__1, &iwork[(*n << 1) + 1], &
  1486. c__1);
  1487. }
  1488. /* End of the preparation phase (scaling, optional sorting and */
  1489. /* transposing, optional flushing of small columns). */
  1490. /* Preconditioning */
  1491. /* If the full SVD is needed, the right singular vectors are computed */
  1492. /* from a matrix equation, and for that we need theoretical analysis */
  1493. /* of the Businger-Golub pivoting. So we use DGEQP3 as the first RR QRF. */
  1494. /* In all other cases the first RR QRF can be chosen by other criteria */
1495. /* (e.g. speed, by replacing global with restricted window pivoting, such */
1496. /* as in SGEQPX from TOMS # 782). Good results will be obtained using */
1497. /* SGEQPX with properly (!) chosen numerical parameters. */
1498. /* Any improvement of DGEQP3 improves overall performance of DGEJSV. */
  1499. /* A * P1 = Q1 * [ R1^t 0]^t: */
  1500. i__1 = *n;
  1501. for (p = 1; p <= i__1; ++p) {
  1502. iwork[p] = 0;
  1503. /* L1963: */
  1504. }
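/* (Setting IWORK(1:N) = 0 marks every column as free, so DGEQP3 may */
/* pivot any of them; on exit IWORK(p) holds the index of the column of */
/* A that was moved to position p, i.e. the permutation P1.) */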
  1505. i__1 = *lwork - *n;
  1506. dgeqp3_(m, n, &a[a_offset], lda, &iwork[1], &work[1], &work[*n + 1], &
  1507. i__1, &ierr);
  1508. /* The upper triangular matrix R1 from the first QRF is inspected for */
  1509. /* rank deficiency and possibilities for deflation, or possible */
  1510. /* ill-conditioning. Depending on the user specified flag L2RANK, */
  1511. /* the procedure explores possibilities to reduce the numerical */
  1512. /* rank by inspecting the computed upper triangular factor. If */
  1513. /* L2RANK or L2ABER are up, then DGEJSV will compute the SVD of */
  1514. /* A + dA, where ||dA|| <= f(M,N)*EPSLN. */
  1515. nr = 1;
  1516. if (l2aber) {
  1517. /* Standard absolute error bound suffices. All sigma_i with */
  1518. /* sigma_i < N*EPSLN*||A|| are flushed to zero. This is an */
  1519. /* aggressive enforcement of lower numerical rank by introducing a */
  1520. /* backward error of the order of N*EPSLN*||A||. */
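/* Concretely, the scan below counts the leading diagonal entries of R1 */
/* with abs(R1(p,p)) >= DSQRT(N)*EPSLN * abs(R1(1,1)) and stops at the */
/* first entry that fails the test; NR is the resulting numerical rank. */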
  1521. temp1 = sqrt((doublereal) (*n)) * epsln;
  1522. i__1 = *n;
  1523. for (p = 2; p <= i__1; ++p) {
  1524. if ((d__2 = a[p + p * a_dim1], abs(d__2)) >= temp1 * (d__1 = a[
  1525. a_dim1 + 1], abs(d__1))) {
  1526. ++nr;
  1527. } else {
  1528. goto L3002;
  1529. }
  1530. /* L3001: */
  1531. }
  1532. L3002:
  1533. ;
  1534. } else if (l2rank) {
1535. /* A sudden drop on the diagonal of R1 is used as the criterion for */
1536. /* closeness to rank deficiency. */
  1537. temp1 = sqrt(sfmin);
  1538. i__1 = *n;
  1539. for (p = 2; p <= i__1; ++p) {
  1540. if ((d__2 = a[p + p * a_dim1], abs(d__2)) < epsln * (d__1 = a[p -
  1541. 1 + (p - 1) * a_dim1], abs(d__1)) || (d__3 = a[p + p *
  1542. a_dim1], abs(d__3)) < small || l2kill && (d__4 = a[p + p *
  1543. a_dim1], abs(d__4)) < temp1) {
  1544. goto L3402;
  1545. }
  1546. ++nr;
  1547. /* L3401: */
  1548. }
  1549. L3402:
  1550. ;
  1551. } else {
  1552. /* The goal is high relative accuracy. However, if the matrix */
  1553. /* has high scaled condition number the relative accuracy is in */
  1554. /* general not feasible. Later on, a condition number estimator */
  1555. /* will be deployed to estimate the scaled condition number. */
  1556. /* Here we just remove the underflowed part of the triangular */
  1557. /* factor. This prevents the situation in which the code is */
  1558. /* working hard to get the accuracy not warranted by the data. */
  1559. temp1 = sqrt(sfmin);
  1560. i__1 = *n;
  1561. for (p = 2; p <= i__1; ++p) {
  1562. if ((d__1 = a[p + p * a_dim1], abs(d__1)) < small || l2kill && (
  1563. d__2 = a[p + p * a_dim1], abs(d__2)) < temp1) {
  1564. goto L3302;
  1565. }
  1566. ++nr;
  1567. /* L3301: */
  1568. }
  1569. L3302:
  1570. ;
  1571. }
  1572. almort = FALSE_;
  1573. if (nr == *n) {
  1574. maxprj = 1.;
  1575. i__1 = *n;
  1576. for (p = 2; p <= i__1; ++p) {
  1577. temp1 = (d__1 = a[p + p * a_dim1], abs(d__1)) / sva[iwork[p]];
  1578. maxprj = f2cmin(maxprj,temp1);
  1579. /* L3051: */
  1580. }
  1581. /* Computing 2nd power */
  1582. d__1 = maxprj;
  1583. if (d__1 * d__1 >= 1. - (doublereal) (*n) * epsln) {
  1584. almort = TRUE_;
  1585. }
  1586. }
  1587. sconda = -1.;
  1588. condr1 = -1.;
  1589. condr2 = -1.;
  1590. if (errest) {
  1591. if (*n == nr) {
  1592. if (rsvec) {
  1593. dlacpy_("U", n, n, &a[a_offset], lda, &v[v_offset], ldv);
  1594. i__1 = *n;
  1595. for (p = 1; p <= i__1; ++p) {
  1596. temp1 = sva[iwork[p]];
  1597. d__1 = 1. / temp1;
  1598. dscal_(&p, &d__1, &v[p * v_dim1 + 1], &c__1);
  1599. /* L3053: */
  1600. }
  1601. dpocon_("U", n, &v[v_offset], ldv, &c_b35, &temp1, &work[*n +
  1602. 1], &iwork[(*n << 1) + *m + 1], &ierr);
  1603. } else if (lsvec) {
  1604. dlacpy_("U", n, n, &a[a_offset], lda, &u[u_offset], ldu);
  1605. i__1 = *n;
  1606. for (p = 1; p <= i__1; ++p) {
  1607. temp1 = sva[iwork[p]];
  1608. d__1 = 1. / temp1;
  1609. dscal_(&p, &d__1, &u[p * u_dim1 + 1], &c__1);
  1610. /* L3054: */
  1611. }
  1612. dpocon_("U", n, &u[u_offset], ldu, &c_b35, &temp1, &work[*n +
  1613. 1], &iwork[(*n << 1) + *m + 1], &ierr);
  1614. } else {
  1615. dlacpy_("U", n, n, &a[a_offset], lda, &work[*n + 1], n);
  1616. i__1 = *n;
  1617. for (p = 1; p <= i__1; ++p) {
  1618. temp1 = sva[iwork[p]];
  1619. d__1 = 1. / temp1;
  1620. dscal_(&p, &d__1, &work[*n + (p - 1) * *n + 1], &c__1);
  1621. /* L3052: */
  1622. }
  1623. dpocon_("U", n, &work[*n + 1], n, &c_b35, &temp1, &work[*n + *
  1624. n * *n + 1], &iwork[(*n << 1) + *m + 1], &ierr);
  1625. }
  1626. sconda = 1. / sqrt(temp1);
  1627. /* SCONDA is an estimate of DSQRT(||(R^t * R)^(-1)||_1). */
  1628. /* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
  1629. } else {
  1630. sconda = -1.;
  1631. }
  1632. }
  1633. l2pert = l2pert && (d__1 = a[a_dim1 + 1] / a[nr + nr * a_dim1], abs(d__1))
  1634. > sqrt(big1);
  1635. /* If there is no violent scaling, artificial perturbation is not needed. */
  1636. /* Phase 3: */
  1637. if (! (rsvec || lsvec)) {
  1638. /* Singular Values only */
  1639. /* Computing MIN */
  1640. i__2 = *n - 1;
  1641. i__1 = f2cmin(i__2,nr);
  1642. for (p = 1; p <= i__1; ++p) {
  1643. i__2 = *n - p;
  1644. dcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
  1645. a_dim1], &c__1);
  1646. /* L1946: */
  1647. }
  1648. /* The following two DO-loops introduce small relative perturbation */
  1649. /* into the strict upper triangle of the lower triangular matrix. */
  1650. /* Small entries below the main diagonal are also changed. */
  1651. /* This modification is useful if the computing environment does not */
  1652. /* provide/allow FLUSH TO ZERO underflow, for it prevents many */
  1653. /* annoying denormalized numbers in case of strongly scaled matrices. */
  1654. /* The perturbation is structured so that it does not introduce any */
  1655. /* new perturbation of the singular values, and it does not destroy */
  1656. /* the job done by the preconditioner. */
  1657. /* The licence for this perturbation is in the variable L2PERT, which */
  1658. /* should be .FALSE. if FLUSH TO ZERO underflow is active. */
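/* Roughly, the perturbation (when L2PERT) works as follows: with */
/* XSC = EPSLN/N, every entry below the diagonal whose magnitude does */
/* not exceed XSC*abs(A(q,q)), and every entry above the diagonal (no */
/* longer needed after the transposed copy above), is set to */
/* XSC*abs(A(q,q)) with the sign of the original entry; without L2PERT */
/* the strict upper triangle is simply zeroed by DLASET instead. */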
  1659. if (! almort) {
  1660. if (l2pert) {
  1661. /* XSC = DSQRT(SMALL) */
  1662. xsc = epsln / (doublereal) (*n);
  1663. i__1 = nr;
  1664. for (q = 1; q <= i__1; ++q) {
  1665. temp1 = xsc * (d__1 = a[q + q * a_dim1], abs(d__1));
  1666. i__2 = *n;
  1667. for (p = 1; p <= i__2; ++p) {
  1668. if (p > q && (d__1 = a[p + q * a_dim1], abs(d__1)) <=
  1669. temp1 || p < q) {
  1670. a[p + q * a_dim1] = d_sign(&temp1, &a[p + q *
  1671. a_dim1]);
  1672. }
  1673. /* L4949: */
  1674. }
  1675. /* L4947: */
  1676. }
  1677. } else {
  1678. i__1 = nr - 1;
  1679. i__2 = nr - 1;
  1680. dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &a[(a_dim1 << 1) +
  1681. 1], lda);
  1682. }
  1683. i__1 = *lwork - *n;
  1684. dgeqrf_(n, &nr, &a[a_offset], lda, &work[1], &work[*n + 1], &i__1,
  1685. &ierr);
  1686. i__1 = nr - 1;
  1687. for (p = 1; p <= i__1; ++p) {
  1688. i__2 = nr - p;
  1689. dcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
  1690. a_dim1], &c__1);
  1691. /* L1948: */
  1692. }
  1693. }
1694. /* Row-cyclic Jacobi SVD algorithm with column pivoting. */
1695. /* If L2PERT, a small perturbation is first added to drown denormals. */
  1696. if (l2pert) {
  1697. /* XSC = DSQRT(SMALL) */
  1698. xsc = epsln / (doublereal) (*n);
  1699. i__1 = nr;
  1700. for (q = 1; q <= i__1; ++q) {
  1701. temp1 = xsc * (d__1 = a[q + q * a_dim1], abs(d__1));
  1702. i__2 = nr;
  1703. for (p = 1; p <= i__2; ++p) {
  1704. if (p > q && (d__1 = a[p + q * a_dim1], abs(d__1)) <=
  1705. temp1 || p < q) {
  1706. a[p + q * a_dim1] = d_sign(&temp1, &a[p + q * a_dim1])
  1707. ;
  1708. }
  1709. /* L1949: */
  1710. }
  1711. /* L1947: */
  1712. }
  1713. } else {
  1714. i__1 = nr - 1;
  1715. i__2 = nr - 1;
  1716. dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &a[(a_dim1 << 1) + 1],
  1717. lda);
  1718. }
1719. /* The one-sided Jacobi SVD below is applied to a lower triangular */
1720. /* matrix (plus a perturbation, which is ignored where it destroys the triangular form (confusing?!)). */
  1721. dgesvj_("L", "NoU", "NoV", &nr, &nr, &a[a_offset], lda, &sva[1], n, &
  1722. v[v_offset], ldv, &work[1], lwork, info);
  1723. scalem = work[1];
  1724. numrank = i_dnnt(&work[2]);
  1725. } else if (rsvec && ! lsvec) {
  1726. /* -> Singular Values and Right Singular Vectors <- */
  1727. if (almort) {
  1728. i__1 = nr;
  1729. for (p = 1; p <= i__1; ++p) {
  1730. i__2 = *n - p + 1;
  1731. dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
  1732. c__1);
  1733. /* L1998: */
  1734. }
  1735. i__1 = nr - 1;
  1736. i__2 = nr - 1;
  1737. dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
  1738. 1], ldv);
  1739. dgesvj_("L", "U", "N", n, &nr, &v[v_offset], ldv, &sva[1], &nr, &
  1740. a[a_offset], lda, &work[1], lwork, info);
  1741. scalem = work[1];
  1742. numrank = i_dnnt(&work[2]);
  1743. } else {
1744. /* Two more QR factorizations ( one QRF is not enough, two require the accumulated product of Jacobi rotations, three are perfect ). */
  1745. i__1 = nr - 1;
  1746. i__2 = nr - 1;
  1747. dlaset_("Lower", &i__1, &i__2, &c_b34, &c_b34, &a[a_dim1 + 2],
  1748. lda);
  1749. i__1 = *lwork - *n;
  1750. dgelqf_(&nr, n, &a[a_offset], lda, &work[1], &work[*n + 1], &i__1,
  1751. &ierr);
  1752. dlacpy_("Lower", &nr, &nr, &a[a_offset], lda, &v[v_offset], ldv);
  1753. i__1 = nr - 1;
  1754. i__2 = nr - 1;
  1755. dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
  1756. 1], ldv);
  1757. i__1 = *lwork - (*n << 1);
  1758. dgeqrf_(&nr, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*n <<
  1759. 1) + 1], &i__1, &ierr);
  1760. i__1 = nr;
  1761. for (p = 1; p <= i__1; ++p) {
  1762. i__2 = nr - p + 1;
  1763. dcopy_(&i__2, &v[p + p * v_dim1], ldv, &v[p + p * v_dim1], &
  1764. c__1);
  1765. /* L8998: */
  1766. }
  1767. i__1 = nr - 1;
  1768. i__2 = nr - 1;
  1769. dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
  1770. 1], ldv);
  1771. dgesvj_("Lower", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[1], &
  1772. nr, &u[u_offset], ldu, &work[*n + 1], lwork, info);
  1773. scalem = work[*n + 1];
  1774. numrank = i_dnnt(&work[*n + 2]);
  1775. if (nr < *n) {
  1776. i__1 = *n - nr;
  1777. dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 + v_dim1],
  1778. ldv);
  1779. i__1 = *n - nr;
  1780. dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) * v_dim1
  1781. + 1], ldv);
  1782. i__1 = *n - nr;
  1783. i__2 = *n - nr;
  1784. dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1 + (nr +
  1785. 1) * v_dim1], ldv);
  1786. }
  1787. i__1 = *lwork - *n;
  1788. dormlq_("Left", "Transpose", n, n, &nr, &a[a_offset], lda, &work[
  1789. 1], &v[v_offset], ldv, &work[*n + 1], &i__1, &ierr);
  1790. }
  1791. i__1 = *n;
  1792. for (p = 1; p <= i__1; ++p) {
  1793. dcopy_(n, &v[p + v_dim1], ldv, &a[iwork[p] + a_dim1], lda);
  1794. /* L8991: */
  1795. }
  1796. dlacpy_("All", n, n, &a[a_offset], lda, &v[v_offset], ldv);
  1797. if (transp) {
  1798. dlacpy_("All", n, n, &v[v_offset], ldv, &u[u_offset], ldu);
  1799. }
  1800. } else if (lsvec && ! rsvec) {
1801. /* Second preconditioning step to avoid the need to accumulate the Jacobi rotations in the Jacobi iterations. */
  1802. i__1 = nr;
  1803. for (p = 1; p <= i__1; ++p) {
  1804. i__2 = *n - p + 1;
  1805. dcopy_(&i__2, &a[p + p * a_dim1], lda, &u[p + p * u_dim1], &c__1);
  1806. /* L1965: */
  1807. }
  1808. i__1 = nr - 1;
  1809. i__2 = nr - 1;
  1810. dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) + 1],
  1811. ldu);
  1812. i__1 = *lwork - (*n << 1);
  1813. dgeqrf_(n, &nr, &u[u_offset], ldu, &work[*n + 1], &work[(*n << 1) + 1]
  1814. , &i__1, &ierr);
  1815. i__1 = nr - 1;
  1816. for (p = 1; p <= i__1; ++p) {
  1817. i__2 = nr - p;
  1818. dcopy_(&i__2, &u[p + (p + 1) * u_dim1], ldu, &u[p + 1 + p *
  1819. u_dim1], &c__1);
  1820. /* L1967: */
  1821. }
  1822. i__1 = nr - 1;
  1823. i__2 = nr - 1;
  1824. dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) + 1],
  1825. ldu);
  1826. i__1 = *lwork - *n;
  1827. dgesvj_("Lower", "U", "N", &nr, &nr, &u[u_offset], ldu, &sva[1], &nr,
  1828. &a[a_offset], lda, &work[*n + 1], &i__1, info);
  1829. scalem = work[*n + 1];
  1830. numrank = i_dnnt(&work[*n + 2]);
  1831. if (nr < *m) {
  1832. i__1 = *m - nr;
  1833. dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 + u_dim1], ldu);
  1834. if (nr < n1) {
  1835. i__1 = n1 - nr;
  1836. dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) * u_dim1
  1837. + 1], ldu);
  1838. i__1 = *m - nr;
  1839. i__2 = n1 - nr;
  1840. dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1 + (nr +
  1841. 1) * u_dim1], ldu);
  1842. }
  1843. }
  1844. i__1 = *lwork - *n;
  1845. dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[1], &u[
  1846. u_offset], ldu, &work[*n + 1], &i__1, &ierr);
  1847. if (rowpiv) {
  1848. i__1 = *m - 1;
  1849. dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n << 1) +
  1850. 1], &c_n1);
  1851. }
  1852. i__1 = n1;
  1853. for (p = 1; p <= i__1; ++p) {
  1854. xsc = 1. / dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
  1855. dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  1856. /* L1974: */
  1857. }
  1858. if (transp) {
  1859. dlacpy_("All", n, n, &u[u_offset], ldu, &v[v_offset], ldv);
  1860. }
  1861. } else {
  1862. if (! jracc) {
  1863. if (! almort) {
  1864. /* Second Preconditioning Step (QRF [with pivoting]) */
  1865. /* Note that the composition of TRANSPOSE, QRF and TRANSPOSE is */
  1866. /* equivalent to an LQF CALL. Since in many libraries the QRF */
  1867. /* seems to be better optimized than the LQF, we do explicit */
  1868. /* transpose and use the QRF. This is subject to changes in an */
  1869. /* optimized implementation of DGEJSV. */
  1870. i__1 = nr;
  1871. for (p = 1; p <= i__1; ++p) {
  1872. i__2 = *n - p + 1;
  1873. dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1],
  1874. &c__1);
  1875. /* L1968: */
  1876. }
1877. /* Small entries are perturbed here before the second QR factorization, where they are */
  1878. /* as good as zeros. This is done to avoid painfully slow */
  1879. /* computation with denormals. The relative size of the perturbation */
  1880. /* is a parameter that can be changed by the implementer. */
  1881. /* This perturbation device will be obsolete on machines with */
  1882. /* properly implemented arithmetic. */
  1883. /* To switch it off, set L2PERT=.FALSE. To remove it from the */
  1884. /* code, remove the action under L2PERT=.TRUE., leave the ELSE part. */
  1885. /* The following two loops should be blocked and fused with the */
  1886. /* transposed copy above. */
  1887. if (l2pert) {
  1888. xsc = sqrt(small);
  1889. i__1 = nr;
  1890. for (q = 1; q <= i__1; ++q) {
  1891. temp1 = xsc * (d__1 = v[q + q * v_dim1], abs(d__1));
  1892. i__2 = *n;
  1893. for (p = 1; p <= i__2; ++p) {
  1894. if (p > q && (d__1 = v[p + q * v_dim1], abs(d__1))
  1895. <= temp1 || p < q) {
  1896. v[p + q * v_dim1] = d_sign(&temp1, &v[p + q *
  1897. v_dim1]);
  1898. }
  1899. if (p < q) {
  1900. v[p + q * v_dim1] = -v[p + q * v_dim1];
  1901. }
  1902. /* L2968: */
  1903. }
  1904. /* L2969: */
  1905. }
  1906. } else {
  1907. i__1 = nr - 1;
  1908. i__2 = nr - 1;
  1909. dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 <<
  1910. 1) + 1], ldv);
  1911. }
  1912. /* Estimate the row scaled condition number of R1 */
  1913. /* (If R1 is rectangular, N > NR, then the condition number */
  1914. /* of the leading NR x NR submatrix is estimated.) */
  1915. dlacpy_("L", &nr, &nr, &v[v_offset], ldv, &work[(*n << 1) + 1]
  1916. , &nr);
  1917. i__1 = nr;
  1918. for (p = 1; p <= i__1; ++p) {
  1919. i__2 = nr - p + 1;
  1920. temp1 = dnrm2_(&i__2, &work[(*n << 1) + (p - 1) * nr + p],
  1921. &c__1);
  1922. i__2 = nr - p + 1;
  1923. d__1 = 1. / temp1;
  1924. dscal_(&i__2, &d__1, &work[(*n << 1) + (p - 1) * nr + p],
  1925. &c__1);
  1926. /* L3950: */
  1927. }
  1928. dpocon_("Lower", &nr, &work[(*n << 1) + 1], &nr, &c_b35, &
  1929. temp1, &work[(*n << 1) + nr * nr + 1], &iwork[*m + (*
  1930. n << 1) + 1], &ierr);
  1931. condr1 = 1. / sqrt(temp1);
  1932. /* R1 is OK for inverse <=> CONDR1 .LT. DBLE(N) */
  1933. /* more conservative <=> CONDR1 .LT. DSQRT(DBLE(N)) */
  1934. cond_ok__ = sqrt((doublereal) nr);
  1935. /* [TP] COND_OK is a tuning parameter. */
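/* If CONDR1 < COND_OK, R1 is considered well conditioned and a plain */
/* (unpivoted) QRF of R1^t suffices; otherwise a column-pivoted QRF is */
/* used, followed by an LQ factorization R2 = L3 * Q3, and CONDR2 */
/* re-estimates the condition number of R2. */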
  1936. if (condr1 < cond_ok__) {
1937. /* The second QRF is computed without pivoting. Note: in an optimized implementation, this QRF should be implemented as the QRF */
  1938. /* of a lower triangular matrix. */
  1939. /* R1^t = Q2 * R2 */
  1940. i__1 = *lwork - (*n << 1);
  1941. dgeqrf_(n, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*
  1942. n << 1) + 1], &i__1, &ierr);
  1943. if (l2pert) {
  1944. xsc = sqrt(small) / epsln;
  1945. i__1 = nr;
  1946. for (p = 2; p <= i__1; ++p) {
  1947. i__2 = p - 1;
  1948. for (q = 1; q <= i__2; ++q) {
  1949. /* Computing MIN */
  1950. d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
  1951. d__4 = (d__2 = v[q + q * v_dim1], abs(
  1952. d__2));
  1953. temp1 = xsc * f2cmin(d__3,d__4);
  1954. if ((d__1 = v[q + p * v_dim1], abs(d__1)) <=
  1955. temp1) {
  1956. v[q + p * v_dim1] = d_sign(&temp1, &v[q +
  1957. p * v_dim1]);
  1958. }
  1959. /* L3958: */
  1960. }
  1961. /* L3959: */
  1962. }
  1963. }
  1964. if (nr != *n) {
  1965. dlacpy_("A", n, &nr, &v[v_offset], ldv, &work[(*n <<
  1966. 1) + 1], n);
  1967. }
  1968. i__1 = nr - 1;
  1969. for (p = 1; p <= i__1; ++p) {
  1970. i__2 = nr - p;
  1971. dcopy_(&i__2, &v[p + (p + 1) * v_dim1], ldv, &v[p + 1
  1972. + p * v_dim1], &c__1);
  1973. /* L1969: */
  1974. }
  1975. condr2 = condr1;
  1976. } else {
  1977. /* Note that windowed pivoting would be equally good */
  1978. /* numerically, and more run-time efficient. So, in */
  1979. /* an optimal implementation, the next call to DGEQP3 */
1980. /* should be replaced with, e.g., CALL SGEQPX (ACM TOMS #782) */
  1981. /* with properly (carefully) chosen parameters. */
  1982. /* R1^t * P2 = Q2 * R2 */
  1983. i__1 = nr;
  1984. for (p = 1; p <= i__1; ++p) {
  1985. iwork[*n + p] = 0;
  1986. /* L3003: */
  1987. }
  1988. i__1 = *lwork - (*n << 1);
  1989. dgeqp3_(n, &nr, &v[v_offset], ldv, &iwork[*n + 1], &work[*
  1990. n + 1], &work[(*n << 1) + 1], &i__1, &ierr);
  1991. /* * CALL DGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1), */
  1992. /* * $ LWORK-2*N, IERR ) */
  1993. if (l2pert) {
  1994. xsc = sqrt(small);
  1995. i__1 = nr;
  1996. for (p = 2; p <= i__1; ++p) {
  1997. i__2 = p - 1;
  1998. for (q = 1; q <= i__2; ++q) {
  1999. /* Computing MIN */
  2000. d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
  2001. d__4 = (d__2 = v[q + q * v_dim1], abs(
  2002. d__2));
  2003. temp1 = xsc * f2cmin(d__3,d__4);
  2004. if ((d__1 = v[q + p * v_dim1], abs(d__1)) <=
  2005. temp1) {
  2006. v[q + p * v_dim1] = d_sign(&temp1, &v[q +
  2007. p * v_dim1]);
  2008. }
  2009. /* L3968: */
  2010. }
  2011. /* L3969: */
  2012. }
  2013. }
  2014. dlacpy_("A", n, &nr, &v[v_offset], ldv, &work[(*n << 1) +
  2015. 1], n);
  2016. if (l2pert) {
  2017. xsc = sqrt(small);
  2018. i__1 = nr;
  2019. for (p = 2; p <= i__1; ++p) {
  2020. i__2 = p - 1;
  2021. for (q = 1; q <= i__2; ++q) {
  2022. /* Computing MIN */
  2023. d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
  2024. d__4 = (d__2 = v[q + q * v_dim1], abs(
  2025. d__2));
  2026. temp1 = xsc * f2cmin(d__3,d__4);
  2027. v[p + q * v_dim1] = -d_sign(&temp1, &v[q + p *
  2028. v_dim1]);
  2029. /* L8971: */
  2030. }
  2031. /* L8970: */
  2032. }
  2033. } else {
  2034. i__1 = nr - 1;
  2035. i__2 = nr - 1;
  2036. dlaset_("L", &i__1, &i__2, &c_b34, &c_b34, &v[v_dim1
  2037. + 2], ldv);
  2038. }
  2039. /* Now, compute R2 = L3 * Q3, the LQ factorization. */
  2040. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2041. dgelqf_(&nr, &nr, &v[v_offset], ldv, &work[(*n << 1) + *n
  2042. * nr + 1], &work[(*n << 1) + *n * nr + nr + 1], &
  2043. i__1, &ierr);
  2044. dlacpy_("L", &nr, &nr, &v[v_offset], ldv, &work[(*n << 1)
  2045. + *n * nr + nr + 1], &nr);
  2046. i__1 = nr;
  2047. for (p = 1; p <= i__1; ++p) {
  2048. temp1 = dnrm2_(&p, &work[(*n << 1) + *n * nr + nr + p]
  2049. , &nr);
  2050. d__1 = 1. / temp1;
  2051. dscal_(&p, &d__1, &work[(*n << 1) + *n * nr + nr + p],
  2052. &nr);
  2053. /* L4950: */
  2054. }
  2055. dpocon_("L", &nr, &work[(*n << 1) + *n * nr + nr + 1], &
  2056. nr, &c_b35, &temp1, &work[(*n << 1) + *n * nr +
  2057. nr + nr * nr + 1], &iwork[*m + (*n << 1) + 1], &
  2058. ierr);
  2059. condr2 = 1. / sqrt(temp1);
  2060. if (condr2 >= cond_ok__) {
2061. /* Save the Q3 Householder vectors from the LQ factorization */
2062. /* (this overwrites the copy of R2, as it will not be needed in */
2063. /* this branch, but it does not overwrite the Householder vectors of Q2). */
  2064. dlacpy_("U", &nr, &nr, &v[v_offset], ldv, &work[(*n <<
  2065. 1) + 1], n);
  2066. /* WORK(2*N+N*NR+1:2*N+N*NR+N) */
  2067. }
  2068. }
  2069. if (l2pert) {
  2070. xsc = sqrt(small);
  2071. i__1 = nr;
  2072. for (q = 2; q <= i__1; ++q) {
  2073. temp1 = xsc * v[q + q * v_dim1];
  2074. i__2 = q - 1;
  2075. for (p = 1; p <= i__2; ++p) {
  2076. /* V(p,q) = - DSIGN( TEMP1, V(q,p) ) */
  2077. v[p + q * v_dim1] = -d_sign(&temp1, &v[p + q *
  2078. v_dim1]);
  2079. /* L4969: */
  2080. }
  2081. /* L4968: */
  2082. }
  2083. } else {
  2084. i__1 = nr - 1;
  2085. i__2 = nr - 1;
  2086. dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 <<
  2087. 1) + 1], ldv);
  2088. }
  2089. /* Second preconditioning finished; continue with Jacobi SVD */
2090. /* The input matrix is lower triangular. */
  2091. /* Recover the right singular vectors as solution of a well */
  2092. /* conditioned triangular matrix equation. */
  2093. if (condr1 < cond_ok__) {
  2094. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2095. dgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
  2096. 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
  2097. nr + nr + 1], &i__1, info);
  2098. scalem = work[(*n << 1) + *n * nr + nr + 1];
  2099. numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
  2100. i__1 = nr;
  2101. for (p = 1; p <= i__1; ++p) {
  2102. dcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
  2103. + 1], &c__1);
  2104. dscal_(&nr, &sva[p], &v[p * v_dim1 + 1], &c__1);
  2105. /* L3970: */
  2106. }
  2107. if (nr == *n) {
  2108. /* :)) .. best case, R1 is inverted. The solution of this matrix */
  2109. /* equation is Q2*V2 = the product of the Jacobi rotations */
  2110. /* used in DGESVJ, premultiplied with the orthogonal matrix */
  2111. /* from the second QR factorization. */
  2112. dtrsm_("L", "U", "N", "N", &nr, &nr, &c_b35, &a[
  2113. a_offset], lda, &v[v_offset], ldv);
  2114. } else {
2115. /* R1 is well conditioned, but non-square. The transpose of R2 is inverted to get the product of the Jacobi rotations */
  2116. /* used in DGESVJ. The Q-factor from the second QR */
  2117. /* factorization is then built in explicitly. */
  2118. dtrsm_("L", "U", "T", "N", &nr, &nr, &c_b35, &work[(*
  2119. n << 1) + 1], n, &v[v_offset], ldv);
  2120. if (nr < *n) {
  2121. i__1 = *n - nr;
  2122. dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr +
  2123. 1 + v_dim1], ldv);
  2124. i__1 = *n - nr;
  2125. dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr +
  2126. 1) * v_dim1 + 1], ldv);
  2127. i__1 = *n - nr;
  2128. i__2 = *n - nr;
  2129. dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr
  2130. + 1 + (nr + 1) * v_dim1], ldv);
  2131. }
  2132. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2133. dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n,
  2134. &work[*n + 1], &v[v_offset], ldv, &work[(*n <<
  2135. 1) + *n * nr + nr + 1], &i__1, &ierr);
  2136. }
  2137. } else if (condr2 < cond_ok__) {
  2138. /* :) .. the input matrix A is very likely a relative of */
  2139. /* the Kahan matrix :) */
  2140. /* The matrix R2 is inverted. The solution of the matrix equation */
2141. /* is Q3^T*V3 = the product of the Jacobi rotations (applied to */
  2142. /* the lower triangular L3 from the LQ factorization of */
  2143. /* R2=L3*Q3), pre-multiplied with the transposed Q3. */
  2144. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2145. dgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
  2146. 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
  2147. nr + nr + 1], &i__1, info);
  2148. scalem = work[(*n << 1) + *n * nr + nr + 1];
  2149. numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
  2150. i__1 = nr;
  2151. for (p = 1; p <= i__1; ++p) {
  2152. dcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
  2153. + 1], &c__1);
  2154. dscal_(&nr, &sva[p], &u[p * u_dim1 + 1], &c__1);
  2155. /* L3870: */
  2156. }
  2157. dtrsm_("L", "U", "N", "N", &nr, &nr, &c_b35, &work[(*n <<
  2158. 1) + 1], n, &u[u_offset], ldu);
  2159. i__1 = nr;
  2160. for (q = 1; q <= i__1; ++q) {
  2161. i__2 = nr;
  2162. for (p = 1; p <= i__2; ++p) {
  2163. work[(*n << 1) + *n * nr + nr + iwork[*n + p]] =
  2164. u[p + q * u_dim1];
  2165. /* L872: */
  2166. }
  2167. i__2 = nr;
  2168. for (p = 1; p <= i__2; ++p) {
  2169. u[p + q * u_dim1] = work[(*n << 1) + *n * nr + nr
  2170. + p];
  2171. /* L874: */
  2172. }
  2173. /* L873: */
  2174. }
  2175. if (nr < *n) {
  2176. i__1 = *n - nr;
  2177. dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 +
  2178. v_dim1], ldv);
  2179. i__1 = *n - nr;
  2180. dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) *
  2181. v_dim1 + 1], ldv);
  2182. i__1 = *n - nr;
  2183. i__2 = *n - nr;
  2184. dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1
  2185. + (nr + 1) * v_dim1], ldv);
  2186. }
  2187. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2188. dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &
  2189. work[*n + 1], &v[v_offset], ldv, &work[(*n << 1)
  2190. + *n * nr + nr + 1], &i__1, &ierr);
  2191. } else {
  2192. /* Last line of defense. */
  2193. /* #:( This is a rather pathological case: no scaled condition */
2194. /* improvement after two pivoted QR factorizations. Another */
2195. /* possibility is that the rank-revealing QR factorization */
  2196. /* or the condition estimator has failed, or the COND_OK */
  2197. /* is set very close to ONE (which is unnecessary). Normally, */
  2198. /* this branch should never be executed, but in rare cases of */
  2199. /* failure of the RRQR or condition estimator, the last line of */
  2200. /* defense ensures that DGEJSV completes the task. */
  2201. /* Compute the full SVD of L3 using DGESVJ with explicit */
  2202. /* accumulation of Jacobi rotations. */
  2203. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2204. dgesvj_("L", "U", "V", &nr, &nr, &v[v_offset], ldv, &sva[
  2205. 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
  2206. nr + nr + 1], &i__1, info);
  2207. scalem = work[(*n << 1) + *n * nr + nr + 1];
  2208. numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
  2209. if (nr < *n) {
  2210. i__1 = *n - nr;
  2211. dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 +
  2212. v_dim1], ldv);
  2213. i__1 = *n - nr;
  2214. dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) *
  2215. v_dim1 + 1], ldv);
  2216. i__1 = *n - nr;
  2217. i__2 = *n - nr;
  2218. dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1
  2219. + (nr + 1) * v_dim1], ldv);
  2220. }
  2221. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2222. dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &
  2223. work[*n + 1], &v[v_offset], ldv, &work[(*n << 1)
  2224. + *n * nr + nr + 1], &i__1, &ierr);
  2225. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2226. dormlq_("L", "T", &nr, &nr, &nr, &work[(*n << 1) + 1], n,
  2227. &work[(*n << 1) + *n * nr + 1], &u[u_offset], ldu,
  2228. &work[(*n << 1) + *n * nr + nr + 1], &i__1, &
  2229. ierr);
  2230. i__1 = nr;
  2231. for (q = 1; q <= i__1; ++q) {
  2232. i__2 = nr;
  2233. for (p = 1; p <= i__2; ++p) {
  2234. work[(*n << 1) + *n * nr + nr + iwork[*n + p]] =
  2235. u[p + q * u_dim1];
  2236. /* L772: */
  2237. }
  2238. i__2 = nr;
  2239. for (p = 1; p <= i__2; ++p) {
  2240. u[p + q * u_dim1] = work[(*n << 1) + *n * nr + nr
  2241. + p];
  2242. /* L774: */
  2243. }
  2244. /* L773: */
  2245. }
  2246. }
  2247. /* Permute the rows of V using the (column) permutation from the */
  2248. /* first QRF. Also, scale the columns to make them unit in */
  2249. /* Euclidean norm. This applies to all cases. */
  2250. temp1 = sqrt((doublereal) (*n)) * epsln;
  2251. i__1 = *n;
  2252. for (q = 1; q <= i__1; ++q) {
  2253. i__2 = *n;
  2254. for (p = 1; p <= i__2; ++p) {
  2255. work[(*n << 1) + *n * nr + nr + iwork[p]] = v[p + q *
  2256. v_dim1];
  2257. /* L972: */
  2258. }
  2259. i__2 = *n;
  2260. for (p = 1; p <= i__2; ++p) {
  2261. v[p + q * v_dim1] = work[(*n << 1) + *n * nr + nr + p]
  2262. ;
  2263. /* L973: */
  2264. }
  2265. xsc = 1. / dnrm2_(n, &v[q * v_dim1 + 1], &c__1);
  2266. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  2267. dscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
  2268. }
  2269. /* L1972: */
  2270. }
  2271. /* At this moment, V contains the right singular vectors of A. */
  2272. /* Next, assemble the left singular vector matrix U (M x N). */
  2273. if (nr < *m) {
  2274. i__1 = *m - nr;
  2275. dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 +
  2276. u_dim1], ldu);
  2277. if (nr < n1) {
  2278. i__1 = n1 - nr;
  2279. dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) *
  2280. u_dim1 + 1], ldu);
  2281. i__1 = *m - nr;
  2282. i__2 = n1 - nr;
  2283. dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1
  2284. + (nr + 1) * u_dim1], ldu);
  2285. }
  2286. }
  2287. /* The Q matrix from the first QRF is built into the left singular */
  2288. /* matrix U. This applies to all cases. */
  2289. i__1 = *lwork - *n;
  2290. dormqr_("Left", "No_Tr", m, &n1, n, &a[a_offset], lda, &work[
  2291. 1], &u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
  2292. /* The columns of U are normalized. The cost is O(M*N) flops. */
  2293. temp1 = sqrt((doublereal) (*m)) * epsln;
  2294. i__1 = nr;
  2295. for (p = 1; p <= i__1; ++p) {
  2296. xsc = 1. / dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
  2297. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  2298. dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  2299. }
  2300. /* L1973: */
  2301. }
  2302. /* If the initial QRF is computed with row pivoting, the left */
  2303. /* singular vectors must be adjusted. */
  2304. if (rowpiv) {
  2305. i__1 = *m - 1;
  2306. dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n
  2307. << 1) + 1], &c_n1);
  2308. }
  2309. } else {
2310. /* A has almost orthogonal columns, so the second QRF is not needed. */
  2311. dlacpy_("Upper", n, n, &a[a_offset], lda, &work[*n + 1], n);
  2312. if (l2pert) {
  2313. xsc = sqrt(small);
  2314. i__1 = *n;
  2315. for (p = 2; p <= i__1; ++p) {
  2316. temp1 = xsc * work[*n + (p - 1) * *n + p];
  2317. i__2 = p - 1;
  2318. for (q = 1; q <= i__2; ++q) {
  2319. work[*n + (q - 1) * *n + p] = -d_sign(&temp1, &
  2320. work[*n + (p - 1) * *n + q]);
  2321. /* L5971: */
  2322. }
  2323. /* L5970: */
  2324. }
  2325. } else {
  2326. i__1 = *n - 1;
  2327. i__2 = *n - 1;
  2328. dlaset_("Lower", &i__1, &i__2, &c_b34, &c_b34, &work[*n +
  2329. 2], n);
  2330. }
  2331. i__1 = *lwork - *n - *n * *n;
  2332. dgesvj_("Upper", "U", "N", n, n, &work[*n + 1], n, &sva[1], n,
  2333. &u[u_offset], ldu, &work[*n + *n * *n + 1], &i__1,
  2334. info);
  2335. scalem = work[*n + *n * *n + 1];
  2336. numrank = i_dnnt(&work[*n + *n * *n + 2]);
  2337. i__1 = *n;
  2338. for (p = 1; p <= i__1; ++p) {
  2339. dcopy_(n, &work[*n + (p - 1) * *n + 1], &c__1, &u[p *
  2340. u_dim1 + 1], &c__1);
  2341. dscal_(n, &sva[p], &work[*n + (p - 1) * *n + 1], &c__1);
  2342. /* L6970: */
  2343. }
  2344. dtrsm_("Left", "Upper", "NoTrans", "No UD", n, n, &c_b35, &a[
  2345. a_offset], lda, &work[*n + 1], n);
  2346. i__1 = *n;
  2347. for (p = 1; p <= i__1; ++p) {
  2348. dcopy_(n, &work[*n + p], n, &v[iwork[p] + v_dim1], ldv);
  2349. /* L6972: */
  2350. }
  2351. temp1 = sqrt((doublereal) (*n)) * epsln;
  2352. i__1 = *n;
  2353. for (p = 1; p <= i__1; ++p) {
  2354. xsc = 1. / dnrm2_(n, &v[p * v_dim1 + 1], &c__1);
  2355. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  2356. dscal_(n, &xsc, &v[p * v_dim1 + 1], &c__1);
  2357. }
  2358. /* L6971: */
  2359. }
  2360. /* Assemble the left singular vector matrix U (M x N). */
  2361. if (*n < *m) {
  2362. i__1 = *m - *n;
  2363. dlaset_("A", &i__1, n, &c_b34, &c_b34, &u[*n + 1 + u_dim1]
  2364. , ldu);
  2365. if (*n < n1) {
  2366. i__1 = n1 - *n;
  2367. dlaset_("A", n, &i__1, &c_b34, &c_b34, &u[(*n + 1) *
  2368. u_dim1 + 1], ldu);
  2369. i__1 = *m - *n;
  2370. i__2 = n1 - *n;
  2371. dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[*n + 1
  2372. + (*n + 1) * u_dim1], ldu);
  2373. }
  2374. }
  2375. i__1 = *lwork - *n;
  2376. dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[
  2377. 1], &u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
  2378. temp1 = sqrt((doublereal) (*m)) * epsln;
  2379. i__1 = n1;
  2380. for (p = 1; p <= i__1; ++p) {
  2381. xsc = 1. / dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
  2382. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  2383. dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
  2384. }
  2385. /* L6973: */
  2386. }
  2387. if (rowpiv) {
  2388. i__1 = *m - 1;
  2389. dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n
  2390. << 1) + 1], &c_n1);
  2391. }
  2392. }
  2393. /* end of the >> almost orthogonal case << in the full SVD */
  2394. } else {
  2395. /* This branch deploys a preconditioned Jacobi SVD with explicitly */
  2396. /* accumulated rotations. It is included as optional, mainly for */
  2397. /* experimental purposes. It does perform well, and can also be used. */
  2398. /* In this implementation, this branch will be automatically activated */
  2399. /* if the condition number sigma_max(A) / sigma_min(A) is predicted */
  2400. /* to be greater than the overflow threshold. This is because the */
  2401. /* a posteriori computation of the singular vectors assumes robust */
  2402. /* implementation of BLAS and some LAPACK procedures, capable of working */
  2403. /* in presence of extreme values. Since that is not always the case, ... */
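/* Below, DGESVJ is called with JOBV = 'V', so the accumulated product */
/* of the Jacobi rotations is returned in V; the right singular vectors */
/* are then completed by applying the Q factor of the QRF computed below */
/* and the column permutation of the first QRF, while U is obtained by */
/* applying Q1 (and, if used, the row permutation) to the left vectors */
/* returned by DGESVJ. */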
  2404. i__1 = nr;
  2405. for (p = 1; p <= i__1; ++p) {
  2406. i__2 = *n - p + 1;
  2407. dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
  2408. c__1);
  2409. /* L7968: */
  2410. }
  2411. if (l2pert) {
  2412. xsc = sqrt(small / epsln);
  2413. i__1 = nr;
  2414. for (q = 1; q <= i__1; ++q) {
  2415. temp1 = xsc * (d__1 = v[q + q * v_dim1], abs(d__1));
  2416. i__2 = *n;
  2417. for (p = 1; p <= i__2; ++p) {
  2418. if (p > q && (d__1 = v[p + q * v_dim1], abs(d__1)) <=
  2419. temp1 || p < q) {
  2420. v[p + q * v_dim1] = d_sign(&temp1, &v[p + q *
  2421. v_dim1]);
  2422. }
  2423. if (p < q) {
  2424. v[p + q * v_dim1] = -v[p + q * v_dim1];
  2425. }
  2426. /* L5968: */
  2427. }
  2428. /* L5969: */
  2429. }
  2430. } else {
  2431. i__1 = nr - 1;
  2432. i__2 = nr - 1;
  2433. dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
  2434. 1], ldv);
  2435. }
  2436. i__1 = *lwork - (*n << 1);
  2437. dgeqrf_(n, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*n << 1)
  2438. + 1], &i__1, &ierr);
  2439. dlacpy_("L", n, &nr, &v[v_offset], ldv, &work[(*n << 1) + 1], n);
  2440. i__1 = nr;
  2441. for (p = 1; p <= i__1; ++p) {
  2442. i__2 = nr - p + 1;
  2443. dcopy_(&i__2, &v[p + p * v_dim1], ldv, &u[p + p * u_dim1], &
  2444. c__1);
  2445. /* L7969: */
  2446. }
  2447. if (l2pert) {
  2448. xsc = sqrt(small / epsln);
  2449. i__1 = nr;
  2450. for (q = 2; q <= i__1; ++q) {
  2451. i__2 = q - 1;
  2452. for (p = 1; p <= i__2; ++p) {
  2453. /* Computing MIN */
  2454. d__3 = (d__1 = u[p + p * u_dim1], abs(d__1)), d__4 = (
  2455. d__2 = u[q + q * u_dim1], abs(d__2));
  2456. temp1 = xsc * f2cmin(d__3,d__4);
  2457. u[p + q * u_dim1] = -d_sign(&temp1, &u[q + p * u_dim1]
  2458. );
  2459. /* L9971: */
  2460. }
  2461. /* L9970: */
  2462. }
  2463. } else {
  2464. i__1 = nr - 1;
  2465. i__2 = nr - 1;
  2466. dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) +
  2467. 1], ldu);
  2468. }
  2469. i__1 = *lwork - (*n << 1) - *n * nr;
  2470. dgesvj_("G", "U", "V", &nr, &nr, &u[u_offset], ldu, &sva[1], n, &
  2471. v[v_offset], ldv, &work[(*n << 1) + *n * nr + 1], &i__1,
  2472. info);
  2473. scalem = work[(*n << 1) + *n * nr + 1];
  2474. numrank = i_dnnt(&work[(*n << 1) + *n * nr + 2]);
  2475. if (nr < *n) {
  2476. i__1 = *n - nr;
  2477. dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 + v_dim1],
  2478. ldv);
  2479. i__1 = *n - nr;
  2480. dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) * v_dim1
  2481. + 1], ldv);
  2482. i__1 = *n - nr;
  2483. i__2 = *n - nr;
  2484. dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1 + (nr +
  2485. 1) * v_dim1], ldv);
  2486. }
  2487. i__1 = *lwork - (*n << 1) - *n * nr - nr;
  2488. dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &work[*n +
  2489. 1], &v[v_offset], ldv, &work[(*n << 1) + *n * nr + nr + 1]
  2490. , &i__1, &ierr);
  2491. /* Permute the rows of V using the (column) permutation from the */
  2492. /* first QRF. Also, scale the columns to make them unit in */
  2493. /* Euclidean norm. This applies to all cases. */
  2494. temp1 = sqrt((doublereal) (*n)) * epsln;
  2495. i__1 = *n;
  2496. for (q = 1; q <= i__1; ++q) {
  2497. i__2 = *n;
  2498. for (p = 1; p <= i__2; ++p) {
  2499. work[(*n << 1) + *n * nr + nr + iwork[p]] = v[p + q *
  2500. v_dim1];
  2501. /* L8972: */
  2502. }
  2503. i__2 = *n;
  2504. for (p = 1; p <= i__2; ++p) {
  2505. v[p + q * v_dim1] = work[(*n << 1) + *n * nr + nr + p];
  2506. /* L8973: */
  2507. }
  2508. xsc = 1. / dnrm2_(n, &v[q * v_dim1 + 1], &c__1);
  2509. if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
  2510. dscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
  2511. }
  2512. /* L7972: */
  2513. }
  2514. /* At this moment, V contains the right singular vectors of A. */
  2515. /* Next, assemble the left singular vector matrix U (M x N). */
  2516. if (nr < *m) {
  2517. i__1 = *m - nr;
  2518. dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 + u_dim1],
  2519. ldu);
  2520. if (nr < n1) {
  2521. i__1 = n1 - nr;
  2522. dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) *
  2523. u_dim1 + 1], ldu);
  2524. i__1 = *m - nr;
  2525. i__2 = n1 - nr;
  2526. dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1 + (
  2527. nr + 1) * u_dim1], ldu);
  2528. }
  2529. }
  2530. i__1 = *lwork - *n;
  2531. dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[1], &
  2532. u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
  2533. if (rowpiv) {
  2534. i__1 = *m - 1;
  2535. dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n << 1)
  2536. + 1], &c_n1);
  2537. }
  2538. }
  2539. if (transp) {
  2540. i__1 = *n;
  2541. for (p = 1; p <= i__1; ++p) {
  2542. dswap_(n, &u[p * u_dim1 + 1], &c__1, &v[p * v_dim1 + 1], &
  2543. c__1);
  2544. /* L6974: */
  2545. }
  2546. }
  2547. }
  2548. /* end of the full SVD */
  2549. /* Undo scaling, if necessary (and possible) */
  2550. if (uscal2 <= big / sva[1] * uscal1) {
  2551. dlascl_("G", &c__0, &c__0, &uscal1, &uscal2, &nr, &c__1, &sva[1], n, &
  2552. ierr);
  2553. uscal1 = 1.;
  2554. uscal2 = 1.;
  2555. }
  2556. if (nr < *n) {
  2557. i__1 = *n;
  2558. for (p = nr + 1; p <= i__1; ++p) {
  2559. sva[p] = 0.;
  2560. /* L3004: */
  2561. }
  2562. }
  2563. work[1] = uscal2 * scalem;
  2564. work[2] = uscal1;
  2565. if (errest) {
  2566. work[3] = sconda;
  2567. }
  2568. if (lsvec && rsvec) {
  2569. work[4] = condr1;
  2570. work[5] = condr2;
  2571. }
  2572. if (l2tran) {
  2573. work[6] = entra;
  2574. work[7] = entrat;
  2575. }
  2576. iwork[1] = nr;
  2577. iwork[2] = numrank;
  2578. iwork[3] = warning;
  2579. return;
  2580. } /* dgejsv_ */