
sgesdd.c 76 kB

  1. #include <math.h>
  2. #include <stdlib.h>
  3. #include <string.h>
  4. #include <stdio.h>
  5. #include <complex.h>
  6. #ifdef complex
  7. #undef complex
  8. #endif
  9. #ifdef I
  10. #undef I
  11. #endif
  12. #if defined(_WIN64)
  13. typedef long long BLASLONG;
  14. typedef unsigned long long BLASULONG;
  15. #else
  16. typedef long BLASLONG;
  17. typedef unsigned long BLASULONG;
  18. #endif
  19. #ifdef LAPACK_ILP64
  20. typedef BLASLONG blasint;
  21. #if defined(_WIN64)
  22. #define blasabs(x) llabs(x)
  23. #else
  24. #define blasabs(x) labs(x)
  25. #endif
  26. #else
  27. typedef int blasint;
  28. #define blasabs(x) abs(x)
  29. #endif
  30. typedef blasint integer;
  31. typedef unsigned int uinteger;
  32. typedef char *address;
  33. typedef short int shortint;
  34. typedef float real;
  35. typedef double doublereal;
  36. typedef struct { real r, i; } complex;
  37. typedef struct { doublereal r, i; } doublecomplex;
  38. #ifdef _MSC_VER
  39. static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
  40. static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
  41. static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
  42. static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
  43. #else
  44. static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
  45. static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
  46. static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
  47. static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
  48. #endif
  49. #define pCf(z) (*_pCf(z))
  50. #define pCd(z) (*_pCd(z))
  51. typedef int logical;
  52. typedef short int shortlogical;
  53. typedef char logical1;
  54. typedef char integer1;
  55. #define TRUE_ (1)
  56. #define FALSE_ (0)
  57. /* Extern is for use with -E */
  58. #ifndef Extern
  59. #define Extern extern
  60. #endif
  61. /* I/O stuff */
  62. typedef int flag;
  63. typedef int ftnlen;
  64. typedef int ftnint;
  65. /*external read, write*/
  66. typedef struct
  67. { flag cierr;
  68. ftnint ciunit;
  69. flag ciend;
  70. char *cifmt;
  71. ftnint cirec;
  72. } cilist;
  73. /*internal read, write*/
  74. typedef struct
  75. { flag icierr;
  76. char *iciunit;
  77. flag iciend;
  78. char *icifmt;
  79. ftnint icirlen;
  80. ftnint icirnum;
  81. } icilist;
  82. /*open*/
  83. typedef struct
  84. { flag oerr;
  85. ftnint ounit;
  86. char *ofnm;
  87. ftnlen ofnmlen;
  88. char *osta;
  89. char *oacc;
  90. char *ofm;
  91. ftnint orl;
  92. char *oblnk;
  93. } olist;
  94. /*close*/
  95. typedef struct
  96. { flag cerr;
  97. ftnint cunit;
  98. char *csta;
  99. } cllist;
  100. /*rewind, backspace, endfile*/
  101. typedef struct
  102. { flag aerr;
  103. ftnint aunit;
  104. } alist;
  105. /* inquire */
  106. typedef struct
  107. { flag inerr;
  108. ftnint inunit;
  109. char *infile;
  110. ftnlen infilen;
  111. ftnint *inex; /*parameters in standard's order*/
  112. ftnint *inopen;
  113. ftnint *innum;
  114. ftnint *innamed;
  115. char *inname;
  116. ftnlen innamlen;
  117. char *inacc;
  118. ftnlen inacclen;
  119. char *inseq;
  120. ftnlen inseqlen;
  121. char *indir;
  122. ftnlen indirlen;
  123. char *infmt;
  124. ftnlen infmtlen;
  125. char *inform;
  126. ftnint informlen;
  127. char *inunf;
  128. ftnlen inunflen;
  129. ftnint *inrecl;
  130. ftnint *innrec;
  131. char *inblank;
  132. ftnlen inblanklen;
  133. } inlist;
  134. #define VOID void
  135. union Multitype { /* for multiple entry points */
  136. integer1 g;
  137. shortint h;
  138. integer i;
  139. /* longint j; */
  140. real r;
  141. doublereal d;
  142. complex c;
  143. doublecomplex z;
  144. };
  145. typedef union Multitype Multitype;
  146. struct Vardesc { /* for Namelist */
  147. char *name;
  148. char *addr;
  149. ftnlen *dims;
  150. int type;
  151. };
  152. typedef struct Vardesc Vardesc;
  153. struct Namelist {
  154. char *name;
  155. Vardesc **vars;
  156. int nvars;
  157. };
  158. typedef struct Namelist Namelist;
  159. #define abs(x) ((x) >= 0 ? (x) : -(x))
  160. #define dabs(x) (fabs(x))
  161. #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
  162. #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
  163. #define dmin(a,b) (f2cmin(a,b))
  164. #define dmax(a,b) (f2cmax(a,b))
  165. #define bit_test(a,b) ((a) >> (b) & 1)
  166. #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
  167. #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
  168. #define abort_() { sig_die("Fortran abort routine called", 1); }
  169. #define c_abs(z) (cabsf(Cf(z)))
  170. #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
  171. #ifdef _MSC_VER
  172. #define c_div(c, a, b) {Cf(c)._Val[0] = (Cf(a)._Val[0]/Cf(b)._Val[0]); Cf(c)._Val[1]=(Cf(a)._Val[1]/Cf(b)._Val[1]);}
  173. #define z_div(c, a, b) {Cd(c)._Val[0] = (Cd(a)._Val[0]/Cd(b)._Val[0]); Cd(c)._Val[1]=(Cd(a)._Val[1]/Cd(b)._Val[1]);}
  174. #else
  175. #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
  176. #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
  177. #endif
  178. #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
  179. #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
  180. #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
  181. //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
  182. #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
  183. #define d_abs(x) (fabs(*(x)))
  184. #define d_acos(x) (acos(*(x)))
  185. #define d_asin(x) (asin(*(x)))
  186. #define d_atan(x) (atan(*(x)))
  187. #define d_atn2(x, y) (atan2(*(x),*(y)))
  188. #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
  189. #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
  190. #define d_cos(x) (cos(*(x)))
  191. #define d_cosh(x) (cosh(*(x)))
  192. #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
  193. #define d_exp(x) (exp(*(x)))
  194. #define d_imag(z) (cimag(Cd(z)))
  195. #define r_imag(z) (cimagf(Cf(z)))
  196. #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  197. #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  198. #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  199. #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  200. #define d_log(x) (log(*(x)))
  201. #define d_mod(x, y) (fmod(*(x), *(y)))
  202. #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
  203. #define d_nint(x) u_nint(*(x))
  204. #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
  205. #define d_sign(a,b) u_sign(*(a),*(b))
  206. #define r_sign(a,b) u_sign(*(a),*(b))
  207. #define d_sin(x) (sin(*(x)))
  208. #define d_sinh(x) (sinh(*(x)))
  209. #define d_sqrt(x) (sqrt(*(x)))
  210. #define d_tan(x) (tan(*(x)))
  211. #define d_tanh(x) (tanh(*(x)))
  212. #define i_abs(x) abs(*(x))
  213. #define i_dnnt(x) ((integer)u_nint(*(x)))
  214. #define i_len(s, n) (n)
  215. #define i_nint(x) ((integer)u_nint(*(x)))
  216. #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
  217. #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
  218. #define pow_si(B,E) spow_ui(*(B),*(E))
  219. #define pow_ri(B,E) spow_ui(*(B),*(E))
  220. #define pow_di(B,E) dpow_ui(*(B),*(E))
  221. #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
  222. #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
  223. #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
  224. #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
  225. #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
  226. #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
  227. #define sig_die(s, kill) { exit(1); }
  228. #define s_stop(s, n) {exit(0);}
  229. static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
  230. #define z_abs(z) (cabs(Cd(z)))
  231. #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
  232. #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
  233. #define myexit_() break;
  234. #define mycycle() continue;
  235. #define myceiling(w) {ceil(w)}
  236. #define myhuge(w) {HUGE_VAL}
  237. //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
  238. #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
  239. /* procedure parameter types for -A and -C++ */
  240. #define F2C_proc_par_types 1
  241. #ifdef __cplusplus
  242. typedef logical (*L_fp)(...);
  243. #else
  244. typedef logical (*L_fp)();
  245. #endif
  246. static float spow_ui(float x, integer n) {
  247. float pow=1.0; unsigned long int u;
  248. if(n != 0) {
  249. if(n < 0) n = -n, x = 1/x;
  250. for(u = n; ; ) {
  251. if(u & 01) pow *= x;
  252. if(u >>= 1) x *= x;
  253. else break;
  254. }
  255. }
  256. return pow;
  257. }
  258. static double dpow_ui(double x, integer n) {
  259. double pow=1.0; unsigned long int u;
  260. if(n != 0) {
  261. if(n < 0) n = -n, x = 1/x;
  262. for(u = n; ; ) {
  263. if(u & 01) pow *= x;
  264. if(u >>= 1) x *= x;
  265. else break;
  266. }
  267. }
  268. return pow;
  269. }
  270. #ifdef _MSC_VER
  271. static _Fcomplex cpow_ui(complex x, integer n) {
  272. complex pow={1.0,0.0}; unsigned long int u;
  273. if(n != 0) {
  274. if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
  275. for(u = n; ; ) {
  276. if(u & 01) pow.r *= x.r, pow.i *= x.i;
  277. if(u >>= 1) x.r *= x.r, x.i *= x.i;
  278. else break;
  279. }
  280. }
  281. _Fcomplex p={pow.r, pow.i};
  282. return p;
  283. }
  284. #else
  285. static _Complex float cpow_ui(_Complex float x, integer n) {
  286. _Complex float pow=1.0; unsigned long int u;
  287. if(n != 0) {
  288. if(n < 0) n = -n, x = 1/x;
  289. for(u = n; ; ) {
  290. if(u & 01) pow *= x;
  291. if(u >>= 1) x *= x;
  292. else break;
  293. }
  294. }
  295. return pow;
  296. }
  297. #endif
  298. #ifdef _MSC_VER
  299. static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
  300. _Dcomplex pow={1.0,0.0}; unsigned long int u;
  301. if(n != 0) {
  302. if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
  303. for(u = n; ; ) {
  304. if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
  305. if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
  306. else break;
  307. }
  308. }
  309. _Dcomplex p = {pow._Val[0], pow._Val[1]};
  310. return p;
  311. }
  312. #else
  313. static _Complex double zpow_ui(_Complex double x, integer n) {
  314. _Complex double pow=1.0; unsigned long int u;
  315. if(n != 0) {
  316. if(n < 0) n = -n, x = 1/x;
  317. for(u = n; ; ) {
  318. if(u & 01) pow *= x;
  319. if(u >>= 1) x *= x;
  320. else break;
  321. }
  322. }
  323. return pow;
  324. }
  325. #endif
  326. static integer pow_ii(integer x, integer n) {
  327. integer pow; unsigned long int u;
  328. if (n <= 0) {
  329. if (n == 0 || x == 1) pow = 1;
  330. else if (x != -1) pow = x == 0 ? 1/x : 0;
  331. else n = -n;
  332. }
  333. if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
  334. u = n;
  335. for(pow = 1; ; ) {
  336. if(u & 01) pow *= x;
  337. if(u >>= 1) x *= x;
  338. else break;
  339. }
  340. }
  341. return pow;
  342. }
  343. static integer dmaxloc_(double *w, integer s, integer e, integer *n)
  344. {
  345. double m; integer i, mi;
  346. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  347. if (w[i-1]>m) mi=i ,m=w[i-1];
  348. return mi-s+1;
  349. }
  350. static integer smaxloc_(float *w, integer s, integer e, integer *n)
  351. {
  352. float m; integer i, mi;
  353. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  354. if (w[i-1]>m) mi=i ,m=w[i-1];
  355. return mi-s+1;
  356. }
  357. static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  358. integer n = *n_, incx = *incx_, incy = *incy_, i;
  359. #ifdef _MSC_VER
  360. _Fcomplex zdotc = {0.0, 0.0};
  361. if (incx == 1 && incy == 1) {
  362. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  363. zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
  364. zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
  365. }
  366. } else {
  367. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  368. zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
  369. zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
  370. }
  371. }
  372. pCf(z) = zdotc;
  373. }
  374. #else
  375. _Complex float zdotc = 0.0;
  376. if (incx == 1 && incy == 1) {
  377. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  378. zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
  379. }
  380. } else {
  381. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  382. zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
  383. }
  384. }
  385. pCf(z) = zdotc;
  386. }
  387. #endif
  388. static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  389. integer n = *n_, incx = *incx_, incy = *incy_, i;
  390. #ifdef _MSC_VER
  391. _Dcomplex zdotc = {0.0, 0.0};
  392. if (incx == 1 && incy == 1) {
  393. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  394. zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
  395. zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
  396. }
  397. } else {
  398. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  399. zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
  400. zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
  401. }
  402. }
  403. pCd(z) = zdotc;
  404. }
  405. #else
  406. _Complex double zdotc = 0.0;
  407. if (incx == 1 && incy == 1) {
  408. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  409. zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
  410. }
  411. } else {
  412. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  413. zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
  414. }
  415. }
  416. pCd(z) = zdotc;
  417. }
  418. #endif
  419. static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  420. integer n = *n_, incx = *incx_, incy = *incy_, i;
  421. #ifdef _MSC_VER
  422. _Fcomplex zdotc = {0.0, 0.0};
  423. if (incx == 1 && incy == 1) {
  424. for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
  425. zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
  426. zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
  427. }
  428. } else {
  429. for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
  430. zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
  431. zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
  432. }
  433. }
  434. pCf(z) = zdotc;
  435. }
  436. #else
  437. _Complex float zdotc = 0.0;
  438. if (incx == 1 && incy == 1) {
  439. for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
  440. zdotc += Cf(&x[i]) * Cf(&y[i]);
  441. }
  442. } else {
  443. for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
  444. zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
  445. }
  446. }
  447. pCf(z) = zdotc;
  448. }
  449. #endif
  450. static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  451. integer n = *n_, incx = *incx_, incy = *incy_, i;
  452. #ifdef _MSC_VER
  453. _Dcomplex zdotc = {0.0, 0.0};
  454. if (incx == 1 && incy == 1) {
  455. for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
  456. zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
  457. zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
  458. }
  459. } else {
  460. for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
  461. zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
  462. zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
  463. }
  464. }
  465. pCd(z) = zdotc;
  466. }
  467. #else
  468. _Complex double zdotc = 0.0;
  469. if (incx == 1 && incy == 1) {
  470. for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
  471. zdotc += Cd(&x[i]) * Cd(&y[i]);
  472. }
  473. } else {
  474. for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
  475. zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
  476. }
  477. }
  478. pCd(z) = zdotc;
  479. }
  480. #endif
  481. /* -- translated by f2c (version 20000121).
  482. You must link the resulting object file with the libraries:
  483. -lf2c -lm (in that order)
  484. */
  485. /* Table of constant values */
  486. static integer c_n1 = -1;
  487. static integer c__0 = 0;
  488. static real c_b63 = 0.f;
  489. static integer c__1 = 1;
  490. static real c_b84 = 1.f;
  491. /* > \brief \b SGESDD */
  492. /* =========== DOCUMENTATION =========== */
  493. /* Online html documentation available at */
  494. /* http://www.netlib.org/lapack/explore-html/ */
  495. /* > \htmlonly */
  496. /* > Download SGESDD + dependencies */
  497. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/sgesdd.
  498. f"> */
  499. /* > [TGZ]</a> */
  500. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/sgesdd.
  501. f"> */
  502. /* > [ZIP]</a> */
  503. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/sgesdd.
  504. f"> */
  505. /* > [TXT]</a> */
  506. /* > \endhtmlonly */
  507. /* Definition: */
  508. /* =========== */
  509. /* SUBROUTINE SGESDD( JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, */
  510. /* WORK, LWORK, IWORK, INFO ) */
  511. /* CHARACTER JOBZ */
  512. /* INTEGER INFO, LDA, LDU, LDVT, LWORK, M, N */
  513. /* INTEGER IWORK( * ) */
  514. /* REAL A( LDA, * ), S( * ), U( LDU, * ), */
  515. /* $ VT( LDVT, * ), WORK( * ) */
  516. /* > \par Purpose: */
  517. /* ============= */
  518. /* > */
  519. /* > \verbatim */
  520. /* > */
  521. /* > SGESDD computes the singular value decomposition (SVD) of a real */
  522. /* > M-by-N matrix A, optionally computing the left and right singular */
  523. /* > vectors. If singular vectors are desired, it uses a */
  524. /* > divide-and-conquer algorithm. */
  525. /* > */
  526. /* > The SVD is written */
  527. /* > */
  528. /* > A = U * SIGMA * transpose(V) */
  529. /* > */
  530. /* > where SIGMA is an M-by-N matrix which is zero except for its */
  531. /* > f2cmin(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and */
  532. /* > V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA */
  533. /* > are the singular values of A; they are real and non-negative, and */
  534. /* > are returned in descending order. The first f2cmin(m,n) columns of */
  535. /* > U and V are the left and right singular vectors of A. */
  536. /* > */
  537. /* > Note that the routine returns VT = V**T, not V. */
  538. /* > */
  539. /* > The divide and conquer algorithm makes very mild assumptions about */
  540. /* > floating point arithmetic. It will work on machines with a guard */
  541. /* > digit in add/subtract, or on those binary machines without guard */
  542. /* > digits which subtract like the Cray X-MP, Cray Y-MP, Cray C-90, or */
  543. /* > Cray-2. It could conceivably fail on hexadecimal or decimal machines */
  544. /* > without guard digits, but we know of none. */
  545. /* > \endverbatim */
  546. /* Arguments: */
  547. /* ========== */
  548. /* > \param[in] JOBZ */
  549. /* > \verbatim */
  550. /* > JOBZ is CHARACTER*1 */
  551. /* > Specifies options for computing all or part of the matrix U: */
  552. /* > = 'A': all M columns of U and all N rows of V**T are */
  553. /* > returned in the arrays U and VT; */
  554. /* > = 'S': the first f2cmin(M,N) columns of U and the first */
  555. /* > f2cmin(M,N) rows of V**T are returned in the arrays U */
  556. /* > and VT; */
  557. /* > = 'O': If M >= N, the first N columns of U are overwritten */
  558. /* > on the array A and all rows of V**T are returned in */
  559. /* > the array VT; */
  560. /* > otherwise, all columns of U are returned in the */
  561. /* > array U and the first M rows of V**T are overwritten */
  562. /* > in the array A; */
  563. /* > = 'N': no columns of U or rows of V**T are computed. */
  564. /* > \endverbatim */
  565. /* > */
  566. /* > \param[in] M */
  567. /* > \verbatim */
  568. /* > M is INTEGER */
  569. /* > The number of rows of the input matrix A. M >= 0. */
  570. /* > \endverbatim */
  571. /* > */
  572. /* > \param[in] N */
  573. /* > \verbatim */
  574. /* > N is INTEGER */
  575. /* > The number of columns of the input matrix A. N >= 0. */
  576. /* > \endverbatim */
  577. /* > */
  578. /* > \param[in,out] A */
  579. /* > \verbatim */
  580. /* > A is REAL array, dimension (LDA,N) */
  581. /* > On entry, the M-by-N matrix A. */
  582. /* > On exit, */
  583. /* > if JOBZ = 'O', A is overwritten with the first N columns */
  584. /* > of U (the left singular vectors, stored */
  585. /* > columnwise) if M >= N; */
  586. /* > A is overwritten with the first M rows */
  587. /* > of V**T (the right singular vectors, stored */
  588. /* > rowwise) otherwise. */
  589. /* > if JOBZ .ne. 'O', the contents of A are destroyed. */
  590. /* > \endverbatim */
  591. /* > */
  592. /* > \param[in] LDA */
  593. /* > \verbatim */
  594. /* > LDA is INTEGER */
  595. /* > The leading dimension of the array A. LDA >= f2cmax(1,M). */
  596. /* > \endverbatim */
  597. /* > */
  598. /* > \param[out] S */
  599. /* > \verbatim */
  600. /* > S is REAL array, dimension (f2cmin(M,N)) */
  601. /* > The singular values of A, sorted so that S(i) >= S(i+1). */
  602. /* > \endverbatim */
  603. /* > */
  604. /* > \param[out] U */
  605. /* > \verbatim */
  606. /* > U is REAL array, dimension (LDU,UCOL) */
  607. /* > UCOL = M if JOBZ = 'A' or JOBZ = 'O' and M < N; */
  608. /* > UCOL = f2cmin(M,N) if JOBZ = 'S'. */
  609. /* > If JOBZ = 'A' or JOBZ = 'O' and M < N, U contains the M-by-M */
  610. /* > orthogonal matrix U; */
  611. /* > if JOBZ = 'S', U contains the first f2cmin(M,N) columns of U */
  612. /* > (the left singular vectors, stored columnwise); */
  613. /* > if JOBZ = 'O' and M >= N, or JOBZ = 'N', U is not referenced. */
  614. /* > \endverbatim */
  615. /* > */
  616. /* > \param[in] LDU */
  617. /* > \verbatim */
  618. /* > LDU is INTEGER */
  619. /* > The leading dimension of the array U. LDU >= 1; if */
  620. /* > JOBZ = 'S' or 'A' or JOBZ = 'O' and M < N, LDU >= M. */
  621. /* > \endverbatim */
  622. /* > */
  623. /* > \param[out] VT */
  624. /* > \verbatim */
  625. /* > VT is REAL array, dimension (LDVT,N) */
  626. /* > If JOBZ = 'A' or JOBZ = 'O' and M >= N, VT contains the */
  627. /* > N-by-N orthogonal matrix V**T; */
  628. /* > if JOBZ = 'S', VT contains the first f2cmin(M,N) rows of */
  629. /* > V**T (the right singular vectors, stored rowwise); */
  630. /* > if JOBZ = 'O' and M < N, or JOBZ = 'N', VT is not referenced. */
  631. /* > \endverbatim */
  632. /* > */
  633. /* > \param[in] LDVT */
  634. /* > \verbatim */
  635. /* > LDVT is INTEGER */
  636. /* > The leading dimension of the array VT. LDVT >= 1; */
  637. /* > if JOBZ = 'A' or JOBZ = 'O' and M >= N, LDVT >= N; */
  638. /* > if JOBZ = 'S', LDVT >= f2cmin(M,N). */
  639. /* > \endverbatim */
  640. /* > */
  641. /* > \param[out] WORK */
  642. /* > \verbatim */
  643. /* > WORK is REAL array, dimension (MAX(1,LWORK)) */
  644. /* > On exit, if INFO = 0, WORK(1) returns the optimal LWORK; */
  645. /* > \endverbatim */
  646. /* > */
  647. /* > \param[in] LWORK */
  648. /* > \verbatim */
  649. /* > LWORK is INTEGER */
  650. /* > The dimension of the array WORK. LWORK >= 1. */
  651. /* > If LWORK = -1, a workspace query is assumed. The optimal */
  652. /* > size for the WORK array is calculated and stored in WORK(1), */
  653. /* > and no other work except argument checking is performed. */
  654. /* > */
  655. /* > Let mx = f2cmax(M,N) and mn = f2cmin(M,N). */
  656. /* > If JOBZ = 'N', LWORK >= 3*mn + f2cmax( mx, 7*mn ). */
  657. /* > If JOBZ = 'O', LWORK >= 3*mn + f2cmax( mx, 5*mn*mn + 4*mn ). */
  658. /* > If JOBZ = 'S', LWORK >= 4*mn*mn + 7*mn. */
  659. /* > If JOBZ = 'A', LWORK >= 4*mn*mn + 6*mn + mx. */
  660. /* > These are not tight minimums in all cases; see comments inside code. */
  661. /* > For good performance, LWORK should generally be larger; */
  662. /* > a query is recommended. */
  663. /* > \endverbatim */
  664. /* > */
  665. /* > \param[out] IWORK */
  666. /* > \verbatim */
  667. /* > IWORK is INTEGER array, dimension (8*f2cmin(M,N)) */
  668. /* > \endverbatim */
  669. /* > */
  670. /* > \param[out] INFO */
  671. /* > \verbatim */
  672. /* > INFO is INTEGER */
  673. /* > = 0: successful exit. */
  674. /* > < 0: if INFO = -i, the i-th argument had an illegal value. */
  675. /* > > 0: SBDSDC did not converge, updating process failed. */
  676. /* > \endverbatim */
  677. /* Authors: */
  678. /* ======== */
  679. /* > \author Univ. of Tennessee */
  680. /* > \author Univ. of California Berkeley */
  681. /* > \author Univ. of Colorado Denver */
  682. /* > \author NAG Ltd. */
  683. /* > \date June 2016 */
  684. /* > \ingroup realGEsing */
  685. /* > \par Contributors: */
  686. /* ================== */
  687. /* > */
  688. /* > Ming Gu and Huan Ren, Computer Science Division, University of */
  689. /* > California at Berkeley, USA */
  690. /* > */
  691. /* ===================================================================== */
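/* Editor's note (not part of the f2c output): the sketch below shows how a
   caller might drive sgesdd_, including the LWORK = -1 workspace query
   described in the documentation above.  It is a minimal illustration only;
   the 4-by-3 test matrix, the JOBZ = 'S' choice, and the
   SGESDD_USAGE_EXAMPLE guard are assumptions made for the example, not part
   of this file's interface. */
#ifdef SGESDD_USAGE_EXAMPLE
int sgesdd_(char *jobz, integer *m, integer *n, real *a, integer *lda,
            real *s, real *u, integer *ldu, real *vt, integer *ldvt,
            real *work, integer *lwork, integer *iwork, integer *info);
static int sgesdd_example(void)
{
    integer m = 4, n = 3, lda = 4, ldu = 4, ldvt = 3, info = 0, lwork = -1;
    /* Column-major 4-by-3 input matrix A. */
    real a[12] = { 1.f, 2.f, 3.f, 4.f,
                   5.f, 6.f, 7.f, 8.f,
                   9.f, 10.f, 11.f, 12.f };
    real s[3], u[12], vt[9], wkopt;
    integer iwork[8 * 3];
    real *work;
    /* Workspace query: with LWORK = -1 only the optimal size is computed
       and returned in wkopt (WORK(1)). */
    sgesdd_("S", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, &wkopt, &lwork,
            iwork, &info);
    if (info != 0) return (int) info;
    lwork = (integer) wkopt;
    work = (real *) malloc(sizeof(real) * (size_t) lwork);
    if (work == NULL) return -1;
    /* Actual factorization: A = U * diag(S) * VT, economy-size vectors. */
    sgesdd_("S", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork,
            iwork, &info);
    free(work);
    return (int) info;
}
#endif /* SGESDD_USAGE_EXAMPLE */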
  692. /* Subroutine */ int sgesdd_(char *jobz, integer *m, integer *n, real *a,
  693. integer *lda, real *s, real *u, integer *ldu, real *vt, integer *ldvt,
  694. real *work, integer *lwork, integer *iwork, integer *info)
  695. {
  696. /* System generated locals */
  697. integer a_dim1, a_offset, u_dim1, u_offset, vt_dim1, vt_offset, i__1,
  698. i__2, i__3;
  699. /* Local variables */
  700. integer lwork_sgelqf_mn__, lwork_sgeqrf_mn__, iscl, lwork_sorglq_mn__,
  701. lwork_sorglq_nn__;
  702. real anrm;
  703. integer idum[1], ierr, itau, lwork_sorgqr_mm__, lwork_sorgqr_mn__,
  704. lwork_sormbr_qln_mm__, lwork_sormbr_qln_mn__,
  705. lwork_sormbr_qln_nn__, lwork_sormbr_prt_mm__,
  706. lwork_sormbr_prt_mn__, lwork_sormbr_prt_nn__, i__;
  707. extern logical lsame_(char *, char *);
  708. integer chunk;
  709. extern /* Subroutine */ int sgemm_(char *, char *, integer *, integer *,
  710. integer *, real *, real *, integer *, real *, integer *, real *,
  711. real *, integer *);
  712. integer minmn, wrkbl, itaup, itauq, mnthr;
  713. logical wntqa;
  714. integer nwork;
  715. logical wntqn, wntqo, wntqs;
  716. integer ie, il, ir, bdspac, iu, lwork_sorgbr_p_mm__;
  717. extern /* Subroutine */ int sbdsdc_(char *, char *, integer *, real *,
  718. real *, real *, integer *, real *, integer *, real *, integer *,
  719. real *, integer *, integer *);
  720. integer lwork_sorgbr_q_nn__;
  721. extern /* Subroutine */ int sgebrd_(integer *, integer *, real *, integer
  722. *, real *, real *, real *, real *, real *, integer *, integer *);
  723. extern real slamch_(char *), slange_(char *, integer *, integer *,
  724. real *, integer *, real *);
  725. extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
  726. real bignum;
  727. extern /* Subroutine */ int sgelqf_(integer *, integer *, real *, integer
  728. *, real *, real *, integer *, integer *), slascl_(char *, integer
  729. *, integer *, real *, real *, integer *, integer *, real *,
  730. integer *, integer *), sgeqrf_(integer *, integer *, real
  731. *, integer *, real *, real *, integer *, integer *), slacpy_(char
  732. *, integer *, integer *, real *, integer *, real *, integer *), slaset_(char *, integer *, integer *, real *, real *,
  733. real *, integer *);
  734. extern logical sisnan_(real *);
  735. extern /* Subroutine */ int sorgbr_(char *, integer *, integer *, integer
  736. *, real *, integer *, real *, real *, integer *, integer *);
  737. integer ldwrkl;
  738. extern /* Subroutine */ int sormbr_(char *, char *, char *, integer *,
  739. integer *, integer *, real *, integer *, real *, real *, integer *
  740. , real *, integer *, integer *);
  741. integer ldwrkr, minwrk, ldwrku, maxwrk;
  742. extern /* Subroutine */ int sorglq_(integer *, integer *, integer *, real
  743. *, integer *, real *, real *, integer *, integer *);
  744. integer ldwkvt;
  745. real smlnum;
  746. logical wntqas;
  747. extern /* Subroutine */ int sorgqr_(integer *, integer *, integer *, real
  748. *, integer *, real *, real *, integer *, integer *);
  749. logical lquery;
  750. integer blk;
  751. real dum[1], eps;
  752. integer ivt, lwork_sgebrd_mm__, lwork_sgebrd_mn__, lwork_sgebrd_nn__;
  753. /* -- LAPACK driver routine (version 3.7.0) -- */
  754. /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
  755. /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
  756. /* June 2016 */
  757. /* ===================================================================== */
  758. /* Test the input arguments */
  759. /* Parameter adjustments */
  760. a_dim1 = *lda;
  761. a_offset = 1 + a_dim1 * 1;
  762. a -= a_offset;
  763. --s;
  764. u_dim1 = *ldu;
  765. u_offset = 1 + u_dim1 * 1;
  766. u -= u_offset;
  767. vt_dim1 = *ldvt;
  768. vt_offset = 1 + vt_dim1 * 1;
  769. vt -= vt_offset;
  770. --work;
  771. --iwork;
  772. /* Function Body */
  773. *info = 0;
  774. minmn = f2cmin(*m,*n);
  775. wntqa = lsame_(jobz, "A");
  776. wntqs = lsame_(jobz, "S");
  777. wntqas = wntqa || wntqs;
  778. wntqo = lsame_(jobz, "O");
  779. wntqn = lsame_(jobz, "N");
  780. lquery = *lwork == -1;
  781. if (! (wntqa || wntqs || wntqo || wntqn)) {
  782. *info = -1;
  783. } else if (*m < 0) {
  784. *info = -2;
  785. } else if (*n < 0) {
  786. *info = -3;
  787. } else if (*lda < f2cmax(1,*m)) {
  788. *info = -5;
  789. } else if (*ldu < 1 || wntqas && *ldu < *m || wntqo && *m < *n && *ldu < *
  790. m) {
  791. *info = -8;
  792. } else if (*ldvt < 1 || wntqa && *ldvt < *n || wntqs && *ldvt < minmn ||
  793. wntqo && *m >= *n && *ldvt < *n) {
  794. *info = -10;
  795. }
  796. /* Compute workspace */
  797. /* Note: Comments in the code beginning "Workspace:" describe the */
  798. /* minimal amount of workspace allocated at that point in the code, */
  799. /* as well as the preferred amount for good performance. */
  800. /* NB refers to the optimal block size for the immediately */
  801. /* following subroutine, as returned by ILAENV. */
  802. if (*info == 0) {
  803. minwrk = 1;
  804. maxwrk = 1;
  805. bdspac = 0;
  806. mnthr = (integer) (minmn * 11.f / 6.f);
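/* mnthr is the crossover threshold: once the larger dimension is at least
   about 11/6 of the smaller one, the routine first reduces A by a QR (or LQ)
   factorization, as in Paths 1-4 / 1t-4t below. */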
  807. if (*m >= *n && minmn > 0) {
  808. /* Compute space needed for SBDSDC */
  809. if (wntqn) {
  810. /* sbdsdc needs only 4*N (or 6*N for uplo=L for LAPACK <= 3.6) */
  811. /* keep 7*N for backwards compatibility. */
  812. bdspac = *n * 7;
  813. } else {
  814. bdspac = *n * 3 * *n + (*n << 2);
  815. }
  816. /* Compute space preferred for each routine */
  817. sgebrd_(m, n, dum, m, dum, dum, dum, dum, dum, &c_n1, &ierr);
  818. lwork_sgebrd_mn__ = (integer) dum[0];
  819. sgebrd_(n, n, dum, n, dum, dum, dum, dum, dum, &c_n1, &ierr);
  820. lwork_sgebrd_nn__ = (integer) dum[0];
  821. sgeqrf_(m, n, dum, m, dum, dum, &c_n1, &ierr);
  822. lwork_sgeqrf_mn__ = (integer) dum[0];
  823. sorgbr_("Q", n, n, n, dum, n, dum, dum, &c_n1, &ierr);
  824. lwork_sorgbr_q_nn__ = (integer) dum[0];
  825. sorgqr_(m, m, n, dum, m, dum, dum, &c_n1, &ierr);
  826. lwork_sorgqr_mm__ = (integer) dum[0];
  827. sorgqr_(m, n, n, dum, m, dum, dum, &c_n1, &ierr);
  828. lwork_sorgqr_mn__ = (integer) dum[0];
  829. sormbr_("P", "R", "T", n, n, n, dum, n, dum, dum, n, dum, &c_n1, &
  830. ierr);
  831. lwork_sormbr_prt_nn__ = (integer) dum[0];
  832. sormbr_("Q", "L", "N", n, n, n, dum, n, dum, dum, n, dum, &c_n1, &
  833. ierr);
  834. lwork_sormbr_qln_nn__ = (integer) dum[0];
  835. sormbr_("Q", "L", "N", m, n, n, dum, m, dum, dum, m, dum, &c_n1, &
  836. ierr);
  837. lwork_sormbr_qln_mn__ = (integer) dum[0];
  838. sormbr_("Q", "L", "N", m, m, n, dum, m, dum, dum, m, dum, &c_n1, &
  839. ierr);
  840. lwork_sormbr_qln_mm__ = (integer) dum[0];
  841. if (*m >= mnthr) {
  842. if (wntqn) {
  843. /* Path 1 (M >> N, JOBZ='N') */
  844. wrkbl = *n + lwork_sgeqrf_mn__;
  845. /* Computing MAX */
  846. i__1 = wrkbl, i__2 = *n * 3 + lwork_sgebrd_nn__;
  847. wrkbl = f2cmax(i__1,i__2);
  848. /* Computing MAX */
  849. i__1 = wrkbl, i__2 = bdspac + *n;
  850. maxwrk = f2cmax(i__1,i__2);
  851. minwrk = bdspac + *n;
  852. } else if (wntqo) {
  853. /* Path 2 (M >> N, JOBZ='O') */
  854. wrkbl = *n + lwork_sgeqrf_mn__;
  855. /* Computing MAX */
  856. i__1 = wrkbl, i__2 = *n + lwork_sorgqr_mn__;
  857. wrkbl = f2cmax(i__1,i__2);
  858. /* Computing MAX */
  859. i__1 = wrkbl, i__2 = *n * 3 + lwork_sgebrd_nn__;
  860. wrkbl = f2cmax(i__1,i__2);
  861. /* Computing MAX */
  862. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_qln_nn__;
  863. wrkbl = f2cmax(i__1,i__2);
  864. /* Computing MAX */
  865. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_prt_nn__;
  866. wrkbl = f2cmax(i__1,i__2);
  867. /* Computing MAX */
  868. i__1 = wrkbl, i__2 = *n * 3 + bdspac;
  869. wrkbl = f2cmax(i__1,i__2);
  870. maxwrk = wrkbl + (*n << 1) * *n;
  871. minwrk = bdspac + (*n << 1) * *n + *n * 3;
  872. } else if (wntqs) {
  873. /* Path 3 (M >> N, JOBZ='S') */
  874. wrkbl = *n + lwork_sgeqrf_mn__;
  875. /* Computing MAX */
  876. i__1 = wrkbl, i__2 = *n + lwork_sorgqr_mn__;
  877. wrkbl = f2cmax(i__1,i__2);
  878. /* Computing MAX */
  879. i__1 = wrkbl, i__2 = *n * 3 + lwork_sgebrd_nn__;
  880. wrkbl = f2cmax(i__1,i__2);
  881. /* Computing MAX */
  882. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_qln_nn__;
  883. wrkbl = f2cmax(i__1,i__2);
  884. /* Computing MAX */
  885. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_prt_nn__;
  886. wrkbl = f2cmax(i__1,i__2);
  887. /* Computing MAX */
  888. i__1 = wrkbl, i__2 = *n * 3 + bdspac;
  889. wrkbl = f2cmax(i__1,i__2);
  890. maxwrk = wrkbl + *n * *n;
  891. minwrk = bdspac + *n * *n + *n * 3;
  892. } else if (wntqa) {
  893. /* Path 4 (M >> N, JOBZ='A') */
  894. wrkbl = *n + lwork_sgeqrf_mn__;
  895. /* Computing MAX */
  896. i__1 = wrkbl, i__2 = *n + lwork_sorgqr_mm__;
  897. wrkbl = f2cmax(i__1,i__2);
  898. /* Computing MAX */
  899. i__1 = wrkbl, i__2 = *n * 3 + lwork_sgebrd_nn__;
  900. wrkbl = f2cmax(i__1,i__2);
  901. /* Computing MAX */
  902. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_qln_nn__;
  903. wrkbl = f2cmax(i__1,i__2);
  904. /* Computing MAX */
  905. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_prt_nn__;
  906. wrkbl = f2cmax(i__1,i__2);
  907. /* Computing MAX */
  908. i__1 = wrkbl, i__2 = *n * 3 + bdspac;
  909. wrkbl = f2cmax(i__1,i__2);
  910. maxwrk = wrkbl + *n * *n;
  911. /* Computing MAX */
  912. i__1 = *n * 3 + bdspac, i__2 = *n + *m;
  913. minwrk = *n * *n + f2cmax(i__1,i__2);
  914. }
  915. } else {
  916. /* Path 5 (M >= N, but not much larger) */
  917. wrkbl = *n * 3 + lwork_sgebrd_mn__;
  918. if (wntqn) {
  919. /* Path 5n (M >= N, jobz='N') */
  920. /* Computing MAX */
  921. i__1 = wrkbl, i__2 = *n * 3 + bdspac;
  922. maxwrk = f2cmax(i__1,i__2);
  923. minwrk = *n * 3 + f2cmax(*m,bdspac);
  924. } else if (wntqo) {
  925. /* Path 5o (M >= N, jobz='O') */
  926. /* Computing MAX */
  927. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_prt_nn__;
  928. wrkbl = f2cmax(i__1,i__2);
  929. /* Computing MAX */
  930. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_qln_mn__;
  931. wrkbl = f2cmax(i__1,i__2);
  932. /* Computing MAX */
  933. i__1 = wrkbl, i__2 = *n * 3 + bdspac;
  934. wrkbl = f2cmax(i__1,i__2);
  935. maxwrk = wrkbl + *m * *n;
  936. /* Computing MAX */
  937. i__1 = *m, i__2 = *n * *n + bdspac;
  938. minwrk = *n * 3 + f2cmax(i__1,i__2);
  939. } else if (wntqs) {
  940. /* Path 5s (M >= N, jobz='S') */
  941. /* Computing MAX */
  942. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_qln_mn__;
  943. wrkbl = f2cmax(i__1,i__2);
  944. /* Computing MAX */
  945. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_prt_nn__;
  946. wrkbl = f2cmax(i__1,i__2);
  947. /* Computing MAX */
  948. i__1 = wrkbl, i__2 = *n * 3 + bdspac;
  949. maxwrk = f2cmax(i__1,i__2);
  950. minwrk = *n * 3 + f2cmax(*m,bdspac);
  951. } else if (wntqa) {
  952. /* Path 5a (M >= N, jobz='A') */
  953. /* Computing MAX */
  954. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_qln_mm__;
  955. wrkbl = f2cmax(i__1,i__2);
  956. /* Computing MAX */
  957. i__1 = wrkbl, i__2 = *n * 3 + lwork_sormbr_prt_nn__;
  958. wrkbl = f2cmax(i__1,i__2);
  959. /* Computing MAX */
  960. i__1 = wrkbl, i__2 = *n * 3 + bdspac;
  961. maxwrk = f2cmax(i__1,i__2);
  962. minwrk = *n * 3 + f2cmax(*m,bdspac);
  963. }
  964. }
  965. } else if (minmn > 0) {
  966. /* Compute space needed for SBDSDC */
  967. if (wntqn) {
  968. /* sbdsdc needs only 4*N (or 6*N for uplo=L for LAPACK <= 3.6) */
  969. /* keep 7*N for backwards compatibility. */
  970. bdspac = *m * 7;
  971. } else {
  972. bdspac = *m * 3 * *m + (*m << 2);
  973. }
  974. /* Compute space preferred for each routine */
  975. sgebrd_(m, n, dum, m, dum, dum, dum, dum, dum, &c_n1, &ierr);
  976. lwork_sgebrd_mn__ = (integer) dum[0];
  977. sgebrd_(m, m, &a[a_offset], m, &s[1], dum, dum, dum, dum, &c_n1, &
  978. ierr);
  979. lwork_sgebrd_mm__ = (integer) dum[0];
  980. sgelqf_(m, n, &a[a_offset], m, dum, dum, &c_n1, &ierr);
  981. lwork_sgelqf_mn__ = (integer) dum[0];
  982. sorglq_(n, n, m, dum, n, dum, dum, &c_n1, &ierr);
  983. lwork_sorglq_nn__ = (integer) dum[0];
  984. sorglq_(m, n, m, &a[a_offset], m, dum, dum, &c_n1, &ierr);
  985. lwork_sorglq_mn__ = (integer) dum[0];
  986. sorgbr_("P", m, m, m, &a[a_offset], n, dum, dum, &c_n1, &ierr);
  987. lwork_sorgbr_p_mm__ = (integer) dum[0];
  988. sormbr_("P", "R", "T", m, m, m, dum, m, dum, dum, m, dum, &c_n1, &
  989. ierr);
  990. lwork_sormbr_prt_mm__ = (integer) dum[0];
  991. sormbr_("P", "R", "T", m, n, m, dum, m, dum, dum, m, dum, &c_n1, &
  992. ierr);
  993. lwork_sormbr_prt_mn__ = (integer) dum[0];
  994. sormbr_("P", "R", "T", n, n, m, dum, n, dum, dum, n, dum, &c_n1, &
  995. ierr);
  996. lwork_sormbr_prt_nn__ = (integer) dum[0];
  997. sormbr_("Q", "L", "N", m, m, m, dum, m, dum, dum, m, dum, &c_n1, &
  998. ierr);
  999. lwork_sormbr_qln_mm__ = (integer) dum[0];
  1000. if (*n >= mnthr) {
  1001. if (wntqn) {
  1002. /* Path 1t (N >> M, JOBZ='N') */
  1003. wrkbl = *m + lwork_sgelqf_mn__;
  1004. /* Computing MAX */
  1005. i__1 = wrkbl, i__2 = *m * 3 + lwork_sgebrd_mm__;
  1006. wrkbl = f2cmax(i__1,i__2);
  1007. /* Computing MAX */
  1008. i__1 = wrkbl, i__2 = bdspac + *m;
  1009. maxwrk = f2cmax(i__1,i__2);
  1010. minwrk = bdspac + *m;
  1011. } else if (wntqo) {
  1012. /* Path 2t (N >> M, JOBZ='O') */
  1013. wrkbl = *m + lwork_sgelqf_mn__;
  1014. /* Computing MAX */
  1015. i__1 = wrkbl, i__2 = *m + lwork_sorglq_mn__;
  1016. wrkbl = f2cmax(i__1,i__2);
  1017. /* Computing MAX */
  1018. i__1 = wrkbl, i__2 = *m * 3 + lwork_sgebrd_mm__;
  1019. wrkbl = f2cmax(i__1,i__2);
  1020. /* Computing MAX */
  1021. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_qln_mm__;
  1022. wrkbl = f2cmax(i__1,i__2);
  1023. /* Computing MAX */
  1024. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_prt_mm__;
  1025. wrkbl = f2cmax(i__1,i__2);
  1026. /* Computing MAX */
  1027. i__1 = wrkbl, i__2 = *m * 3 + bdspac;
  1028. wrkbl = f2cmax(i__1,i__2);
  1029. maxwrk = wrkbl + (*m << 1) * *m;
  1030. minwrk = bdspac + (*m << 1) * *m + *m * 3;
  1031. } else if (wntqs) {
  1032. /* Path 3t (N >> M, JOBZ='S') */
  1033. wrkbl = *m + lwork_sgelqf_mn__;
  1034. /* Computing MAX */
  1035. i__1 = wrkbl, i__2 = *m + lwork_sorglq_mn__;
  1036. wrkbl = f2cmax(i__1,i__2);
  1037. /* Computing MAX */
  1038. i__1 = wrkbl, i__2 = *m * 3 + lwork_sgebrd_mm__;
  1039. wrkbl = f2cmax(i__1,i__2);
  1040. /* Computing MAX */
  1041. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_qln_mm__;
  1042. wrkbl = f2cmax(i__1,i__2);
  1043. /* Computing MAX */
  1044. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_prt_mm__;
  1045. wrkbl = f2cmax(i__1,i__2);
  1046. /* Computing MAX */
  1047. i__1 = wrkbl, i__2 = *m * 3 + bdspac;
  1048. wrkbl = f2cmax(i__1,i__2);
  1049. maxwrk = wrkbl + *m * *m;
  1050. minwrk = bdspac + *m * *m + *m * 3;
  1051. } else if (wntqa) {
  1052. /* Path 4t (N >> M, JOBZ='A') */
  1053. wrkbl = *m + lwork_sgelqf_mn__;
  1054. /* Computing MAX */
  1055. i__1 = wrkbl, i__2 = *m + lwork_sorglq_nn__;
  1056. wrkbl = f2cmax(i__1,i__2);
  1057. /* Computing MAX */
  1058. i__1 = wrkbl, i__2 = *m * 3 + lwork_sgebrd_mm__;
  1059. wrkbl = f2cmax(i__1,i__2);
  1060. /* Computing MAX */
  1061. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_qln_mm__;
  1062. wrkbl = f2cmax(i__1,i__2);
  1063. /* Computing MAX */
  1064. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_prt_mm__;
  1065. wrkbl = f2cmax(i__1,i__2);
  1066. /* Computing MAX */
  1067. i__1 = wrkbl, i__2 = *m * 3 + bdspac;
  1068. wrkbl = f2cmax(i__1,i__2);
  1069. maxwrk = wrkbl + *m * *m;
  1070. /* Computing MAX */
  1071. i__1 = *m * 3 + bdspac, i__2 = *m + *n;
  1072. minwrk = *m * *m + f2cmax(i__1,i__2);
  1073. }
  1074. } else {
  1075. /* Path 5t (N > M, but not much larger) */
  1076. wrkbl = *m * 3 + lwork_sgebrd_mn__;
  1077. if (wntqn) {
  1078. /* Path 5tn (N > M, jobz='N') */
  1079. /* Computing MAX */
  1080. i__1 = wrkbl, i__2 = *m * 3 + bdspac;
  1081. maxwrk = f2cmax(i__1,i__2);
  1082. minwrk = *m * 3 + f2cmax(*n,bdspac);
  1083. } else if (wntqo) {
  1084. /* Path 5to (N > M, jobz='O') */
  1085. /* Computing MAX */
  1086. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_qln_mm__;
  1087. wrkbl = f2cmax(i__1,i__2);
  1088. /* Computing MAX */
  1089. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_prt_mn__;
  1090. wrkbl = f2cmax(i__1,i__2);
  1091. /* Computing MAX */
  1092. i__1 = wrkbl, i__2 = *m * 3 + bdspac;
  1093. wrkbl = f2cmax(i__1,i__2);
  1094. maxwrk = wrkbl + *m * *n;
  1095. /* Computing MAX */
  1096. i__1 = *n, i__2 = *m * *m + bdspac;
  1097. minwrk = *m * 3 + f2cmax(i__1,i__2);
  1098. } else if (wntqs) {
  1099. /* Path 5ts (N > M, jobz='S') */
  1100. /* Computing MAX */
  1101. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_qln_mm__;
  1102. wrkbl = f2cmax(i__1,i__2);
  1103. /* Computing MAX */
  1104. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_prt_mn__;
  1105. wrkbl = f2cmax(i__1,i__2);
  1106. /* Computing MAX */
  1107. i__1 = wrkbl, i__2 = *m * 3 + bdspac;
  1108. maxwrk = f2cmax(i__1,i__2);
  1109. minwrk = *m * 3 + f2cmax(*n,bdspac);
  1110. } else if (wntqa) {
  1111. /* Path 5ta (N > M, jobz='A') */
  1112. /* Computing MAX */
  1113. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_qln_mm__;
  1114. wrkbl = f2cmax(i__1,i__2);
  1115. /* Computing MAX */
  1116. i__1 = wrkbl, i__2 = *m * 3 + lwork_sormbr_prt_nn__;
  1117. wrkbl = f2cmax(i__1,i__2);
  1118. /* Computing MAX */
  1119. i__1 = wrkbl, i__2 = *m * 3 + bdspac;
  1120. maxwrk = f2cmax(i__1,i__2);
  1121. minwrk = *m * 3 + f2cmax(*n,bdspac);
  1122. }
  1123. }
  1124. }
  1125. maxwrk = f2cmax(maxwrk,minwrk);
  1126. work[1] = (real) maxwrk;
  1127. if (*lwork < minwrk && ! lquery) {
  1128. *info = -12;
  1129. }
  1130. }
  1131. if (*info != 0) {
  1132. i__1 = -(*info);
  1133. xerbla_("SGESDD", &i__1, (ftnlen)6);
  1134. return 0;
  1135. } else if (lquery) {
  1136. return 0;
  1137. }
  1138. /* Quick return if possible */
  1139. if (*m == 0 || *n == 0) {
  1140. return 0;
  1141. }
  1142. /* Get machine constants */
  1143. eps = slamch_("P");
  1144. smlnum = sqrt(slamch_("S")) / eps;
  1145. bignum = 1.f / smlnum;
  1146. /* Scale A if f2cmax element outside range [SMLNUM,BIGNUM] */
  1147. anrm = slange_("M", m, n, &a[a_offset], lda, dum);
  1148. if (sisnan_(&anrm)) {
  1149. *info = -4;
  1150. return 0;
  1151. }
  1152. iscl = 0;
  1153. if (anrm > 0.f && anrm < smlnum) {
  1154. iscl = 1;
  1155. slascl_("G", &c__0, &c__0, &anrm, &smlnum, m, n, &a[a_offset], lda, &
  1156. ierr);
  1157. } else if (anrm > bignum) {
  1158. iscl = 1;
  1159. slascl_("G", &c__0, &c__0, &anrm, &bignum, m, n, &a[a_offset], lda, &
  1160. ierr);
  1161. }
  1162. if (*m >= *n) {
  1163. /* A has at least as many rows as columns. If A has sufficiently */
  1164. /* more rows than columns, first reduce using the QR */
  1165. /* decomposition (if sufficient workspace available) */
  1166. if (*m >= mnthr) {
  1167. if (wntqn) {
  1168. /* Path 1 (M >> N, JOBZ='N') */
  1169. /* No singular vectors to be computed */
  1170. itau = 1;
  1171. nwork = itau + *n;
  1172. /* Compute A=Q*R */
  1173. /* Workspace: need N [tau] + N [work] */
  1174. /* Workspace: prefer N [tau] + N*NB [work] */
  1175. i__1 = *lwork - nwork + 1;
  1176. sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
  1177. i__1, &ierr);
  1178. /* Zero out below R */
  1179. i__1 = *n - 1;
  1180. i__2 = *n - 1;
  1181. slaset_("L", &i__1, &i__2, &c_b63, &c_b63, &a[a_dim1 + 2],
  1182. lda);
  1183. ie = 1;
  1184. itauq = ie + *n;
  1185. itaup = itauq + *n;
  1186. nwork = itaup + *n;
  1187. /* Bidiagonalize R in A */
  1188. /* Workspace: need 3*N [e, tauq, taup] + N [work] */
  1189. /* Workspace: prefer 3*N [e, tauq, taup] + 2*N*NB [work] */
  1190. i__1 = *lwork - nwork + 1;
  1191. sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
  1192. itauq], &work[itaup], &work[nwork], &i__1, &ierr);
  1193. nwork = ie + *n;
  1194. /* Perform bidiagonal SVD, computing singular values only */
  1195. /* Workspace: need N [e] + BDSPAC */
  1196. sbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
  1197. dum, idum, &work[nwork], &iwork[1], info);
  1198. } else if (wntqo) {
  1199. /* Path 2 (M >> N, JOBZ = 'O') */
  1200. /* N left singular vectors to be overwritten on A and */
  1201. /* N right singular vectors to be computed in VT */
  1202. ir = 1;
  1203. /* WORK(IR) is LDWRKR by N */
  1204. if (*lwork >= *lda * *n + *n * *n + *n * 3 + bdspac) {
  1205. ldwrkr = *lda;
  1206. } else {
  1207. ldwrkr = (*lwork - *n * *n - *n * 3 - bdspac) / *n;
  1208. }
                itau = ir + ldwrkr * *n;
                nwork = itau + *n;
                /* Compute A=Q*R */
                /* Workspace: need N*N [R] + N [tau] + N [work] */
                /* Workspace: prefer N*N [R] + N [tau] + N*NB [work] */
                i__1 = *lwork - nwork + 1;
                sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
                        i__1, &ierr);
                /* Copy R to WORK(IR), zeroing out below it */
                slacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
                i__1 = *n - 1;
                i__2 = *n - 1;
                slaset_("L", &i__1, &i__2, &c_b63, &c_b63, &work[ir + 1], &
                        ldwrkr);
                /* Generate Q in A */
                /* Workspace: need N*N [R] + N [tau] + N [work] */
                /* Workspace: prefer N*N [R] + N [tau] + N*NB [work] */
                i__1 = *lwork - nwork + 1;
                sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
                        &i__1, &ierr);
                ie = itau;
                itauq = ie + *n;
                itaup = itauq + *n;
                nwork = itaup + *n;
                /* Bidiagonalize R in WORK(IR) */
                /* Workspace: need N*N [R] + 3*N [e, tauq, taup] + N [work] */
                /* Workspace: prefer N*N [R] + 3*N [e, tauq, taup] + 2*N*NB [work] */
                i__1 = *lwork - nwork + 1;
                sgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
                        itauq], &work[itaup], &work[nwork], &i__1, &ierr);
                /* WORK(IU) is N by N */
                iu = nwork;
                nwork = iu + *n * *n;
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in WORK(IU) and computing right */
                /* singular vectors of bidiagonal matrix in VT */
                /* Workspace: need N*N [R] + 3*N [e, tauq, taup] + N*N [U] + BDSPAC */
                sbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
                        vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
                        info);
                /* Overwrite WORK(IU) by left singular vectors of R */
                /* and VT by right singular vectors of R */
                /* Workspace: need N*N [R] + 3*N [e, tauq, taup] + N*N [U] + N [work] */
                /* Workspace: prefer N*N [R] + 3*N [e, tauq, taup] + N*N [U] + N*NB [work] */
                i__1 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
                        itauq], &work[iu], n, &work[nwork], &i__1, &ierr);
                i__1 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
                        itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
                        ierr);
                /* Multiply Q in A by left singular vectors of R in */
                /* WORK(IU), storing result in WORK(IR) and copying to A */
                /* Workspace: need N*N [R] + 3*N [e, tauq, taup] + N*N [U] */
                /* Workspace: prefer M*N [R] + 3*N [e, tauq, taup] + N*N [U] */
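                /* Q is the M by N factor held in A and WORK(IU) holds the */
                /* N by N left singular vectors of R; form their product in */
                /* blocks of at most LDWRKR rows, buffering each block in */
                /* WORK(IR) before writing it back over A. */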
                i__1 = *m;
                i__2 = ldwrkr;
                for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
                        i__2) {
                    /* Computing MIN */
                    i__3 = *m - i__ + 1;
                    chunk = f2cmin(i__3,ldwrkr);
                    sgemm_("N", "N", &chunk, n, n, &c_b84, &a[i__ + a_dim1],
                            lda, &work[iu], n, &c_b63, &work[ir], &ldwrkr);
                    slacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
                            a_dim1], lda);
                    /* L10: */
                }
            } else if (wntqs) {
                /* Path 3 (M >> N, JOBZ='S') */
                /* N left singular vectors to be computed in U and */
                /* N right singular vectors to be computed in VT */
                ir = 1;
                /* WORK(IR) is N by N */
                ldwrkr = *n;
                itau = ir + ldwrkr * *n;
                nwork = itau + *n;
                /* Compute A=Q*R */
                /* Workspace: need N*N [R] + N [tau] + N [work] */
                /* Workspace: prefer N*N [R] + N [tau] + N*NB [work] */
                i__2 = *lwork - nwork + 1;
                sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
                        i__2, &ierr);
                /* Copy R to WORK(IR), zeroing out below it */
                slacpy_("U", n, n, &a[a_offset], lda, &work[ir], &ldwrkr);
                i__2 = *n - 1;
                i__1 = *n - 1;
                slaset_("L", &i__2, &i__1, &c_b63, &c_b63, &work[ir + 1], &
                        ldwrkr);
                /* Generate Q in A */
                /* Workspace: need N*N [R] + N [tau] + N [work] */
                /* Workspace: prefer N*N [R] + N [tau] + N*NB [work] */
                i__2 = *lwork - nwork + 1;
                sorgqr_(m, n, n, &a[a_offset], lda, &work[itau], &work[nwork],
                        &i__2, &ierr);
                ie = itau;
                itauq = ie + *n;
                itaup = itauq + *n;
                nwork = itaup + *n;
                /* Bidiagonalize R in WORK(IR) */
                /* Workspace: need N*N [R] + 3*N [e, tauq, taup] + N [work] */
                /* Workspace: prefer N*N [R] + 3*N [e, tauq, taup] + 2*N*NB [work] */
                i__2 = *lwork - nwork + 1;
                sgebrd_(n, n, &work[ir], &ldwrkr, &s[1], &work[ie], &work[
                        itauq], &work[itaup], &work[nwork], &i__2, &ierr);
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in U and computing right singular */
                /* vectors of bidiagonal matrix in VT */
                /* Workspace: need N*N [R] + 3*N [e, tauq, taup] + BDSPAC */
                sbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
                        vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
                        info);
                /* Overwrite U by left singular vectors of R and VT */
                /* by right singular vectors of R */
                /* Workspace: need N*N [R] + 3*N [e, tauq, taup] + N [work] */
                /* Workspace: prefer N*N [R] + 3*N [e, tauq, taup] + N*NB [work] */
                i__2 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", n, n, n, &work[ir], &ldwrkr, &work[
                        itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
                i__2 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", n, n, n, &work[ir], &ldwrkr, &work[
                        itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
                        ierr);
                /* Multiply Q in A by left singular vectors of R in */
                /* WORK(IR), storing result in U */
                /* Workspace: need N*N [R] */
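                /* U currently holds the N by N left singular vectors of R; */
                /* copy them into WORK(IR) so that U can receive the M by N */
                /* product of Q (still stored in A) with those vectors. */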
                slacpy_("F", n, n, &u[u_offset], ldu, &work[ir], &ldwrkr);
                sgemm_("N", "N", m, n, n, &c_b84, &a[a_offset], lda, &work[ir]
                        , &ldwrkr, &c_b63, &u[u_offset], ldu);
            } else if (wntqa) {
                /* Path 4 (M >> N, JOBZ='A') */
                /* M left singular vectors to be computed in U and */
                /* N right singular vectors to be computed in VT */
                iu = 1;
                /* WORK(IU) is N by N */
                ldwrku = *n;
                itau = iu + ldwrku * *n;
                nwork = itau + *n;
                /* Compute A=Q*R, copying result to U */
                /* Workspace: need N*N [U] + N [tau] + N [work] */
                /* Workspace: prefer N*N [U] + N [tau] + N*NB [work] */
                i__2 = *lwork - nwork + 1;
                sgeqrf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
                        i__2, &ierr);
                slacpy_("L", m, n, &a[a_offset], lda, &u[u_offset], ldu);
                /* Generate Q in U */
                /* Workspace: need N*N [U] + N [tau] + M [work] */
                /* Workspace: prefer N*N [U] + N [tau] + M*NB [work] */
                i__2 = *lwork - nwork + 1;
                sorgqr_(m, m, n, &u[u_offset], ldu, &work[itau], &work[nwork],
                        &i__2, &ierr);
                /* Produce R in A, zeroing out other entries */
                i__2 = *n - 1;
                i__1 = *n - 1;
                slaset_("L", &i__2, &i__1, &c_b63, &c_b63, &a[a_dim1 + 2],
                        lda);
                ie = itau;
                itauq = ie + *n;
                itaup = itauq + *n;
                nwork = itaup + *n;
                /* Bidiagonalize R in A */
                /* Workspace: need N*N [U] + 3*N [e, tauq, taup] + N [work] */
                /* Workspace: prefer N*N [U] + 3*N [e, tauq, taup] + 2*N*NB [work] */
                i__2 = *lwork - nwork + 1;
                sgebrd_(n, n, &a[a_offset], lda, &s[1], &work[ie], &work[
                        itauq], &work[itaup], &work[nwork], &i__2, &ierr);
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in WORK(IU) and computing right */
                /* singular vectors of bidiagonal matrix in VT */
                /* Workspace: need N*N [U] + 3*N [e, tauq, taup] + BDSPAC */
                sbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], n, &vt[
                        vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
                        info);
                /* Overwrite WORK(IU) by left singular vectors of R and VT */
                /* by right singular vectors of R */
                /* Workspace: need N*N [U] + 3*N [e, tauq, taup] + N [work] */
                /* Workspace: prefer N*N [U] + 3*N [e, tauq, taup] + N*NB [work] */
                i__2 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", n, n, n, &a[a_offset], lda, &work[
                        itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
                        ierr);
                i__2 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
                        itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
                        ierr);
                /* Multiply Q in U by left singular vectors of R in */
                /* WORK(IU), storing result in A */
                /* Workspace: need N*N [U] */
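                /* Only the first N columns of the M by M matrix Q in U */
                /* enter this product; columns N+1:M of U are left as */
                /* generated by SORGQR and already complete the basis. */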
                sgemm_("N", "N", m, n, n, &c_b84, &u[u_offset], ldu, &work[iu]
                        , &ldwrku, &c_b63, &a[a_offset], lda);
                /* Copy left singular vectors of A from A to U */
                slacpy_("F", m, n, &a[a_offset], lda, &u[u_offset], ldu);
            }
        } else {
            /* M .LT. MNTHR */
            /* Path 5 (M >= N, but not much larger) */
            /* Reduce to bidiagonal form without QR decomposition */
            ie = 1;
            itauq = ie + *n;
            itaup = itauq + *n;
            nwork = itaup + *n;
            /* Bidiagonalize A */
            /* Workspace: need 3*N [e, tauq, taup] + M [work] */
            /* Workspace: prefer 3*N [e, tauq, taup] + (M+N)*NB [work] */
            i__2 = *lwork - nwork + 1;
            sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
                    work[itaup], &work[nwork], &i__2, &ierr);
            if (wntqn) {
                /* Path 5n (M >= N, JOBZ='N') */
                /* Perform bidiagonal SVD, only computing singular values */
                /* Workspace: need 3*N [e, tauq, taup] + BDSPAC */
                sbdsdc_("U", "N", n, &s[1], &work[ie], dum, &c__1, dum, &c__1,
                        dum, idum, &work[nwork], &iwork[1], info);
            } else if (wntqo) {
                /* Path 5o (M >= N, JOBZ='O') */
                iu = nwork;
                if (*lwork >= *m * *n + *n * 3 + bdspac) {
                    /* WORK( IU ) is M by N */
                    ldwrku = *m;
                    nwork = iu + ldwrku * *n;
                    slaset_("F", m, n, &c_b63, &c_b63, &work[iu], &ldwrku);
                    /* IR is unused; silence compile warnings */
                    ir = -1;
                } else {
                    /* WORK( IU ) is N by N */
                    ldwrku = *n;
                    nwork = iu + ldwrku * *n;
                    /* WORK(IR) is LDWRKR by N */
                    ir = nwork;
                    ldwrkr = (*lwork - *n * *n - *n * 3) / *n;
                }
                nwork = iu + ldwrku * *n;
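                /* With enough workspace WORK(IU) is a full M by N matrix */
                /* and the left singular vectors are assembled in it */
                /* directly; otherwise WORK(IU) is only N by N and A is */
                /* updated in row blocks through WORK(IR) further below. */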
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in WORK(IU) and computing right */
                /* singular vectors of bidiagonal matrix in VT */
                /* Workspace: need 3*N [e, tauq, taup] + N*N [U] + BDSPAC */
                sbdsdc_("U", "I", n, &s[1], &work[ie], &work[iu], &ldwrku, &
                        vt[vt_offset], ldvt, dum, idum, &work[nwork], &iwork[
                        1], info);
                /* Overwrite VT by right singular vectors of A */
                /* Workspace: need 3*N [e, tauq, taup] + N*N [U] + N [work] */
                /* Workspace: prefer 3*N [e, tauq, taup] + N*N [U] + N*NB [work] */
                i__2 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
                        itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
                        ierr);
                if (*lwork >= *m * *n + *n * 3 + bdspac) {
                    /* Path 5o-fast */
                    /* Overwrite WORK(IU) by left singular vectors of A */
                    /* Workspace: need 3*N [e, tauq, taup] + M*N [U] + N [work] */
                    /* Workspace: prefer 3*N [e, tauq, taup] + M*N [U] + N*NB [work] */
                    i__2 = *lwork - nwork + 1;
                    sormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
                            itauq], &work[iu], &ldwrku, &work[nwork], &i__2, &
                            ierr);
                    /* Copy left singular vectors of A from WORK(IU) to A */
                    slacpy_("F", m, n, &work[iu], &ldwrku, &a[a_offset], lda);
                } else {
                    /* Path 5o-slow */
                    /* Generate Q in A */
                    /* Workspace: need 3*N [e, tauq, taup] + N*N [U] + N [work] */
                    /* Workspace: prefer 3*N [e, tauq, taup] + N*N [U] + N*NB [work] */
                    i__2 = *lwork - nwork + 1;
                    sorgbr_("Q", m, n, n, &a[a_offset], lda, &work[itauq], &
                            work[nwork], &i__2, &ierr);
                    /* Multiply Q in A by left singular vectors of */
                    /* bidiagonal matrix in WORK(IU), storing result in */
                    /* WORK(IR) and copying to A */
                    /* Workspace: need 3*N [e, tauq, taup] + N*N [U] + NB*N [R] */
                    /* Workspace: prefer 3*N [e, tauq, taup] + N*N [U] + M*N [R] */
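                    /* Apply the explicit Q to the N by N vectors LDWRKR */
                    /* rows at a time, forming each block in WORK(IR) and */
                    /* copying it back over the corresponding rows of A. */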
                    i__2 = *m;
                    i__1 = ldwrkr;
                    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
                            i__1) {
                        /* Computing MIN */
                        i__3 = *m - i__ + 1;
                        chunk = f2cmin(i__3,ldwrkr);
                        sgemm_("N", "N", &chunk, n, n, &c_b84, &a[i__ +
                                a_dim1], lda, &work[iu], &ldwrku, &c_b63, &
                                work[ir], &ldwrkr);
                        slacpy_("F", &chunk, n, &work[ir], &ldwrkr, &a[i__ +
                                a_dim1], lda);
                        /* L20: */
                    }
                }
            } else if (wntqs) {
                /* Path 5s (M >= N, JOBZ='S') */
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in U and computing right singular */
                /* vectors of bidiagonal matrix in VT */
                /* Workspace: need 3*N [e, tauq, taup] + BDSPAC */
                slaset_("F", m, n, &c_b63, &c_b63, &u[u_offset], ldu);
                sbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
                        vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
                        info);
                /* Overwrite U by left singular vectors of A and VT */
                /* by right singular vectors of A */
                /* Workspace: need 3*N [e, tauq, taup] + N [work] */
                /* Workspace: prefer 3*N [e, tauq, taup] + N*NB [work] */
                i__1 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", m, n, n, &a[a_offset], lda, &work[
                        itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
                i__1 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", n, n, n, &a[a_offset], lda, &work[
                        itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
                        ierr);
            } else if (wntqa) {
                /* Path 5a (M >= N, JOBZ='A') */
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in U and computing right singular */
                /* vectors of bidiagonal matrix in VT */
                /* Workspace: need 3*N [e, tauq, taup] + BDSPAC */
                slaset_("F", m, m, &c_b63, &c_b63, &u[u_offset], ldu);
                sbdsdc_("U", "I", n, &s[1], &work[ie], &u[u_offset], ldu, &vt[
                        vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
                        info);
                /* Set the right corner of U to identity matrix */
                if (*m > *n) {
                    i__1 = *m - *n;
                    i__2 = *m - *n;
                    slaset_("F", &i__1, &i__2, &c_b63, &c_b84, &u[*n + 1 + (*
                            n + 1) * u_dim1], ldu);
                }
                /* Overwrite U by left singular vectors of A and VT */
                /* by right singular vectors of A */
                /* Workspace: need 3*N [e, tauq, taup] + M [work] */
                /* Workspace: prefer 3*N [e, tauq, taup] + M*NB [work] */
                i__1 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
                        itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
                i__1 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
                        itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
                        ierr);
            }
        }
    } else {
        /* A has more columns than rows. If A has sufficiently more */
        /* columns than rows, first reduce using the LQ decomposition (if */
        /* sufficient workspace available) */
        if (*n >= mnthr) {
            if (wntqn) {
                /* Path 1t (N >> M, JOBZ='N') */
                /* No singular vectors to be computed */
                itau = 1;
                nwork = itau + *m;
                /* Compute A=L*Q */
                /* Workspace: need M [tau] + M [work] */
                /* Workspace: prefer M [tau] + M*NB [work] */
                i__1 = *lwork - nwork + 1;
                sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
                        i__1, &ierr);
                /* Zero out above L */
                i__1 = *m - 1;
                i__2 = *m - 1;
                slaset_("U", &i__1, &i__2, &c_b63, &c_b63, &a[(a_dim1 << 1) +
                        1], lda);
                ie = 1;
                itauq = ie + *m;
                itaup = itauq + *m;
                nwork = itaup + *m;
                /* Bidiagonalize L in A */
                /* Workspace: need 3*M [e, tauq, taup] + M [work] */
                /* Workspace: prefer 3*M [e, tauq, taup] + 2*M*NB [work] */
                i__1 = *lwork - nwork + 1;
                sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
                        itauq], &work[itaup], &work[nwork], &i__1, &ierr);
                nwork = ie + *m;
                /* Perform bidiagonal SVD, computing singular values only */
                /* Workspace: need M [e] + BDSPAC */
                sbdsdc_("U", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
                        dum, idum, &work[nwork], &iwork[1], info);
            } else if (wntqo) {
                /* Path 2t (N >> M, JOBZ='O') */
                /* M right singular vectors to be overwritten on A and */
                /* M left singular vectors to be computed in U */
                ivt = 1;
                /* WORK(IVT) is M by M */
                /* WORK(IL) is M by M; it is later resized to M by chunk for gemm */
                il = ivt + *m * *m;
                if (*lwork >= *m * *n + *m * *m + *m * 3 + bdspac) {
                    ldwrkl = *m;
                    chunk = *n;
                } else {
                    ldwrkl = *m;
                    chunk = (*lwork - *m * *m) / *m;
                }
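                /* CHUNK is the number of columns of Q processed per pass in */
                /* the final multiply below; with enough workspace the whole */
                /* matrix is handled in one pass (CHUNK = N). */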
                itau = il + ldwrkl * *m;
                nwork = itau + *m;
                /* Compute A=L*Q */
                /* Workspace: need M*M [VT] + M*M [L] + M [tau] + M [work] */
                /* Workspace: prefer M*M [VT] + M*M [L] + M [tau] + M*NB [work] */
                i__1 = *lwork - nwork + 1;
                sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
                        i__1, &ierr);
                /* Copy L to WORK(IL), zeroing out above it */
                slacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
                i__1 = *m - 1;
                i__2 = *m - 1;
                slaset_("U", &i__1, &i__2, &c_b63, &c_b63, &work[il + ldwrkl],
                        &ldwrkl);
                /* Generate Q in A */
                /* Workspace: need M*M [VT] + M*M [L] + M [tau] + M [work] */
                /* Workspace: prefer M*M [VT] + M*M [L] + M [tau] + M*NB [work] */
                i__1 = *lwork - nwork + 1;
                sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
                        &i__1, &ierr);
                ie = itau;
                itauq = ie + *m;
                itaup = itauq + *m;
                nwork = itaup + *m;
                /* Bidiagonalize L in WORK(IL) */
                /* Workspace: need M*M [VT] + M*M [L] + 3*M [e, tauq, taup] + M [work] */
                /* Workspace: prefer M*M [VT] + M*M [L] + 3*M [e, tauq, taup] + 2*M*NB [work] */
                i__1 = *lwork - nwork + 1;
                sgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
                        itauq], &work[itaup], &work[nwork], &i__1, &ierr);
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in U, and computing right singular */
                /* vectors of bidiagonal matrix in WORK(IVT) */
                /* Workspace: need M*M [VT] + M*M [L] + 3*M [e, tauq, taup] + BDSPAC */
                sbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
                        work[ivt], m, dum, idum, &work[nwork], &iwork[1],
                        info);
                /* Overwrite U by left singular vectors of L and WORK(IVT) */
                /* by right singular vectors of L */
                /* Workspace: need M*M [VT] + M*M [L] + 3*M [e, tauq, taup] + M [work] */
                /* Workspace: prefer M*M [VT] + M*M [L] + 3*M [e, tauq, taup] + M*NB [work] */
                i__1 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
                        itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
                i__1 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
                        itaup], &work[ivt], m, &work[nwork], &i__1, &ierr);
                /* Multiply right singular vectors of L in WORK(IVT) by Q */
                /* in A, storing result in WORK(IL) and copying to A */
                /* Workspace: need M*M [VT] + M*M [L] */
                /* Workspace: prefer M*M [VT] + M*N [L] */
                /* At this point, L is resized as M by chunk. */
                i__1 = *n;
                i__2 = chunk;
                for (i__ = 1; i__2 < 0 ? i__ >= i__1 : i__ <= i__1; i__ +=
                        i__2) {
                    /* Computing MIN */
                    i__3 = *n - i__ + 1;
                    blk = f2cmin(i__3,chunk);
                    sgemm_("N", "N", m, &blk, m, &c_b84, &work[ivt], m, &a[
                            i__ * a_dim1 + 1], lda, &c_b63, &work[il], &
                            ldwrkl);
                    slacpy_("F", m, &blk, &work[il], &ldwrkl, &a[i__ * a_dim1
                            + 1], lda);
                    /* L30: */
                }
            } else if (wntqs) {
                /* Path 3t (N >> M, JOBZ='S') */
                /* M right singular vectors to be computed in VT and */
                /* M left singular vectors to be computed in U */
                il = 1;
                /* WORK(IL) is M by M */
                ldwrkl = *m;
                itau = il + ldwrkl * *m;
                nwork = itau + *m;
                /* Compute A=L*Q */
                /* Workspace: need M*M [L] + M [tau] + M [work] */
                /* Workspace: prefer M*M [L] + M [tau] + M*NB [work] */
                i__2 = *lwork - nwork + 1;
                sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
                        i__2, &ierr);
                /* Copy L to WORK(IL), zeroing out above it */
                slacpy_("L", m, m, &a[a_offset], lda, &work[il], &ldwrkl);
                i__2 = *m - 1;
                i__1 = *m - 1;
                slaset_("U", &i__2, &i__1, &c_b63, &c_b63, &work[il + ldwrkl],
                        &ldwrkl);
                /* Generate Q in A */
                /* Workspace: need M*M [L] + M [tau] + M [work] */
                /* Workspace: prefer M*M [L] + M [tau] + M*NB [work] */
                i__2 = *lwork - nwork + 1;
                sorglq_(m, n, m, &a[a_offset], lda, &work[itau], &work[nwork],
                        &i__2, &ierr);
                ie = itau;
                itauq = ie + *m;
                itaup = itauq + *m;
                nwork = itaup + *m;
                /* Bidiagonalize L in WORK(IL) */
                /* Workspace: need M*M [L] + 3*M [e, tauq, taup] + M [work] */
                /* Workspace: prefer M*M [L] + 3*M [e, tauq, taup] + 2*M*NB [work] */
                i__2 = *lwork - nwork + 1;
                sgebrd_(m, m, &work[il], &ldwrkl, &s[1], &work[ie], &work[
                        itauq], &work[itaup], &work[nwork], &i__2, &ierr);
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in U and computing right singular */
                /* vectors of bidiagonal matrix in VT */
                /* Workspace: need M*M [L] + 3*M [e, tauq, taup] + BDSPAC */
                sbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
                        vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
                        info);
                /* Overwrite U by left singular vectors of L and VT */
                /* by right singular vectors of L */
                /* Workspace: need M*M [L] + 3*M [e, tauq, taup] + M [work] */
                /* Workspace: prefer M*M [L] + 3*M [e, tauq, taup] + M*NB [work] */
                i__2 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", m, m, m, &work[il], &ldwrkl, &work[
                        itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
                i__2 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", m, m, m, &work[il], &ldwrkl, &work[
                        itaup], &vt[vt_offset], ldvt, &work[nwork], &i__2, &
                        ierr);
                /* Multiply right singular vectors of L in WORK(IL) by */
                /* Q in A, storing result in VT */
                /* Workspace: need M*M [L] */
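                /* VT currently holds the M by M right singular vectors of */
                /* L; copy them into WORK(IL) so that VT can receive their */
                /* product with the M by N factor Q stored in A. */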
                slacpy_("F", m, m, &vt[vt_offset], ldvt, &work[il], &ldwrkl);
                sgemm_("N", "N", m, n, m, &c_b84, &work[il], &ldwrkl, &a[
                        a_offset], lda, &c_b63, &vt[vt_offset], ldvt);
            } else if (wntqa) {
                /* Path 4t (N >> M, JOBZ='A') */
                /* N right singular vectors to be computed in VT and */
                /* M left singular vectors to be computed in U */
                ivt = 1;
                /* WORK(IVT) is M by M */
                ldwkvt = *m;
                itau = ivt + ldwkvt * *m;
                nwork = itau + *m;
                /* Compute A=L*Q, copying result to VT */
                /* Workspace: need M*M [VT] + M [tau] + M [work] */
                /* Workspace: prefer M*M [VT] + M [tau] + M*NB [work] */
                i__2 = *lwork - nwork + 1;
                sgelqf_(m, n, &a[a_offset], lda, &work[itau], &work[nwork], &
                        i__2, &ierr);
                slacpy_("U", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
                /* Generate Q in VT */
                /* Workspace: need M*M [VT] + M [tau] + N [work] */
                /* Workspace: prefer M*M [VT] + M [tau] + N*NB [work] */
                i__2 = *lwork - nwork + 1;
                sorglq_(n, n, m, &vt[vt_offset], ldvt, &work[itau], &work[
                        nwork], &i__2, &ierr);
                /* Produce L in A, zeroing out other entries */
                i__2 = *m - 1;
                i__1 = *m - 1;
                slaset_("U", &i__2, &i__1, &c_b63, &c_b63, &a[(a_dim1 << 1) +
                        1], lda);
                ie = itau;
                itauq = ie + *m;
                itaup = itauq + *m;
                nwork = itaup + *m;
                /* Bidiagonalize L in A */
                /* Workspace: need M*M [VT] + 3*M [e, tauq, taup] + M [work] */
                /* Workspace: prefer M*M [VT] + 3*M [e, tauq, taup] + 2*M*NB [work] */
                i__2 = *lwork - nwork + 1;
                sgebrd_(m, m, &a[a_offset], lda, &s[1], &work[ie], &work[
                        itauq], &work[itaup], &work[nwork], &i__2, &ierr);
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in U and computing right singular */
                /* vectors of bidiagonal matrix in WORK(IVT) */
                /* Workspace: need M*M [VT] + 3*M [e, tauq, taup] + BDSPAC */
                sbdsdc_("U", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
                        work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
                        , info);
                /* Overwrite U by left singular vectors of L and WORK(IVT) */
                /* by right singular vectors of L */
                /* Workspace: need M*M [VT] + 3*M [e, tauq, taup] + M [work] */
                /* Workspace: prefer M*M [VT] + 3*M [e, tauq, taup] + M*NB [work] */
                i__2 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", m, m, m, &a[a_offset], lda, &work[
                        itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
                i__2 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", m, m, m, &a[a_offset], lda, &work[
                        itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2, &
                        ierr);
                /* Multiply right singular vectors of L in WORK(IVT) by */
                /* Q in VT, storing result in A */
                /* Workspace: need M*M [VT] */
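                /* Only the first M rows of the N by N matrix Q in VT enter */
                /* this product; rows M+1:N of VT are left as generated by */
                /* SORGLQ and already complete the basis. */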
                sgemm_("N", "N", m, n, m, &c_b84, &work[ivt], &ldwkvt, &vt[
                        vt_offset], ldvt, &c_b63, &a[a_offset], lda);
                /* Copy right singular vectors of A from A to VT */
                slacpy_("F", m, n, &a[a_offset], lda, &vt[vt_offset], ldvt);
            }
        } else {
            /* N .LT. MNTHR */
            /* Path 5t (N > M, but not much larger) */
            /* Reduce to bidiagonal form without LQ decomposition */
            ie = 1;
            itauq = ie + *m;
            itaup = itauq + *m;
            nwork = itaup + *m;
            /* Bidiagonalize A */
            /* Workspace: need 3*M [e, tauq, taup] + N [work] */
            /* Workspace: prefer 3*M [e, tauq, taup] + (M+N)*NB [work] */
            i__2 = *lwork - nwork + 1;
            sgebrd_(m, n, &a[a_offset], lda, &s[1], &work[ie], &work[itauq], &
                    work[itaup], &work[nwork], &i__2, &ierr);
            if (wntqn) {
                /* Path 5tn (N > M, JOBZ='N') */
                /* Perform bidiagonal SVD, only computing singular values */
                /* Workspace: need 3*M [e, tauq, taup] + BDSPAC */
                sbdsdc_("L", "N", m, &s[1], &work[ie], dum, &c__1, dum, &c__1,
                        dum, idum, &work[nwork], &iwork[1], info);
            } else if (wntqo) {
                /* Path 5to (N > M, JOBZ='O') */
                ldwkvt = *m;
                ivt = nwork;
                if (*lwork >= *m * *n + *m * 3 + bdspac) {
                    /* WORK( IVT ) is M by N */
                    slaset_("F", m, n, &c_b63, &c_b63, &work[ivt], &ldwkvt);
                    nwork = ivt + ldwkvt * *n;
                    /* IL is unused; silence compile warnings */
                    il = -1;
                } else {
                    /* WORK( IVT ) is M by M */
                    nwork = ivt + ldwkvt * *m;
                    il = nwork;
                    /* WORK(IL) is M by CHUNK */
                    chunk = (*lwork - *m * *m - *m * 3) / *m;
                }
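                /* With enough workspace WORK(IVT) is a full M by N matrix */
                /* and the right singular vectors are assembled in it */
                /* directly; otherwise WORK(IVT) is only M by M and A is */
                /* updated in column blocks of width CHUNK through WORK(IL) */
                /* further below. */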
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in U and computing right singular */
                /* vectors of bidiagonal matrix in WORK(IVT) */
                /* Workspace: need 3*M [e, tauq, taup] + M*M [VT] + BDSPAC */
                sbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &
                        work[ivt], &ldwkvt, dum, idum, &work[nwork], &iwork[1]
                        , info);
                /* Overwrite U by left singular vectors of A */
                /* Workspace: need 3*M [e, tauq, taup] + M*M [VT] + M [work] */
                /* Workspace: prefer 3*M [e, tauq, taup] + M*M [VT] + M*NB [work] */
                i__2 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
                        itauq], &u[u_offset], ldu, &work[nwork], &i__2, &ierr);
                if (*lwork >= *m * *n + *m * 3 + bdspac) {
                    /* Path 5to-fast */
                    /* Overwrite WORK(IVT) by right singular vectors of A */
                    /* Workspace: need 3*M [e, tauq, taup] + M*N [VT] + M [work] */
                    /* Workspace: prefer 3*M [e, tauq, taup] + M*N [VT] + M*NB [work] */
                    i__2 = *lwork - nwork + 1;
                    sormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
                            itaup], &work[ivt], &ldwkvt, &work[nwork], &i__2,
                            &ierr);
                    /* Copy right singular vectors of A from WORK(IVT) to A */
                    slacpy_("F", m, n, &work[ivt], &ldwkvt, &a[a_offset], lda);
                } else {
                    /* Path 5to-slow */
                    /* Generate P**T in A */
                    /* Workspace: need 3*M [e, tauq, taup] + M*M [VT] + M [work] */
                    /* Workspace: prefer 3*M [e, tauq, taup] + M*M [VT] + M*NB [work] */
                    i__2 = *lwork - nwork + 1;
                    sorgbr_("P", m, n, m, &a[a_offset], lda, &work[itaup], &
                            work[nwork], &i__2, &ierr);
                    /* Multiply right singular vectors of bidiagonal matrix */
                    /* in WORK(IVT) by P**T in A, storing result in */
                    /* WORK(IL) and copying to A */
                    /* Workspace: need 3*M [e, tauq, taup] + M*M [VT] + M*NB [L] */
                    /* Workspace: prefer 3*M [e, tauq, taup] + M*M [VT] + M*N [L] */
                    i__2 = *n;
                    i__1 = chunk;
                    for (i__ = 1; i__1 < 0 ? i__ >= i__2 : i__ <= i__2; i__ +=
                            i__1) {
                        /* Computing MIN */
                        i__3 = *n - i__ + 1;
                        blk = f2cmin(i__3,chunk);
                        sgemm_("N", "N", m, &blk, m, &c_b84, &work[ivt], &
                                ldwkvt, &a[i__ * a_dim1 + 1], lda, &c_b63, &
                                work[il], m);
                        slacpy_("F", m, &blk, &work[il], m, &a[i__ * a_dim1 +
                                1], lda);
                        /* L40: */
                    }
                }
            } else if (wntqs) {
                /* Path 5ts (N > M, JOBZ='S') */
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in U and computing right singular */
                /* vectors of bidiagonal matrix in VT */
                /* Workspace: need 3*M [e, tauq, taup] + BDSPAC */
                slaset_("F", m, n, &c_b63, &c_b63, &vt[vt_offset], ldvt);
                sbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
                        vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
                        info);
                /* Overwrite U by left singular vectors of A and VT */
                /* by right singular vectors of A */
                /* Workspace: need 3*M [e, tauq, taup] + M [work] */
                /* Workspace: prefer 3*M [e, tauq, taup] + M*NB [work] */
                i__1 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
                        itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
                i__1 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", m, n, m, &a[a_offset], lda, &work[
                        itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
                        ierr);
            } else if (wntqa) {
                /* Path 5ta (N > M, JOBZ='A') */
                /* Perform bidiagonal SVD, computing left singular vectors */
                /* of bidiagonal matrix in U and computing right singular */
                /* vectors of bidiagonal matrix in VT */
                /* Workspace: need 3*M [e, tauq, taup] + BDSPAC */
                slaset_("F", n, n, &c_b63, &c_b63, &vt[vt_offset], ldvt);
                sbdsdc_("L", "I", m, &s[1], &work[ie], &u[u_offset], ldu, &vt[
                        vt_offset], ldvt, dum, idum, &work[nwork], &iwork[1],
                        info);
                /* Set the right corner of VT to identity matrix */
                if (*n > *m) {
                    i__1 = *n - *m;
                    i__2 = *n - *m;
                    slaset_("F", &i__1, &i__2, &c_b63, &c_b84, &vt[*m + 1 + (*
                            m + 1) * vt_dim1], ldvt);
                }
                /* Overwrite U by left singular vectors of A and VT */
                /* by right singular vectors of A */
                /* Workspace: need 3*M [e, tauq, taup] + N [work] */
                /* Workspace: prefer 3*M [e, tauq, taup] + N*NB [work] */
                i__1 = *lwork - nwork + 1;
                sormbr_("Q", "L", "N", m, m, n, &a[a_offset], lda, &work[
                        itauq], &u[u_offset], ldu, &work[nwork], &i__1, &ierr);
                i__1 = *lwork - nwork + 1;
                sormbr_("P", "R", "T", n, n, m, &a[a_offset], lda, &work[
                        itaup], &vt[vt_offset], ldvt, &work[nwork], &i__1, &
                        ierr);
            }
        }
    }
    /* Undo scaling if necessary */
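    /* The input matrix was scaled into the range [SMLNUM, BIGNUM] before */
    /* the reduction when its norm fell outside that range, so the computed */
    /* singular values are rescaled here by the inverse factor. */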
    if (iscl == 1) {
        if (anrm > bignum) {
            slascl_("G", &c__0, &c__0, &bignum, &anrm, &minmn, &c__1, &s[1], &
                    minmn, &ierr);
        }
        if (anrm < smlnum) {
            slascl_("G", &c__0, &c__0, &smlnum, &anrm, &minmn, &c__1, &s[1], &
                    minmn, &ierr);
        }
    }
    /* Return optimal workspace in WORK(1) */
    work[1] = (real) maxwrk;
    return 0;
    /* End of SGESDD */
} /* sgesdd_ */
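
#ifdef SGESDD_EXAMPLE
/* Illustrative only: a minimal sketch of the usual calling sequence for */
/* sgesdd_, kept behind a guard macro so it is never compiled as part of */
/* this file.  The macro name SGESDD_EXAMPLE and the helper function below */
/* are arbitrary and not part of LAPACK.  The sketch shows the two-call */
/* pattern: a workspace query with LWORK = -1 followed by the actual */
/* factorization with JOBZ = 'S' (compact SVD, A = U * diag(S) * VT). */
/* IWORK needs at least 8*min(M,N) entries; error handling is omitted. */
#include <stdlib.h>

static int sgesdd_example(integer m, integer n, real *a, integer lda,
        real *s, real *u, integer ldu, real *vt, integer ldvt, integer *info)
{
    integer minmn = f2cmin(m,n);
    integer *iwork = (integer *) malloc(sizeof(integer) * 8 * minmn);
    real wkopt, *work;
    integer lwork = -1;

    /* Workspace query: only WORK(1) is set, to the optimal LWORK */
    sgesdd_("S", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, &wkopt, &lwork,
            iwork, info);
    lwork = (integer) wkopt;
    work = (real *) malloc(sizeof(real) * lwork);

    /* Actual decomposition */
    sgesdd_("S", &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork,
            iwork, info);

    free(work);
    free(iwork);
    return (int) *info;
}
#endif /* SGESDD_EXAMPLE */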