You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

zlaqr5.c 63 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909
  1. #include <math.h>
  2. #include <stdlib.h>
  3. #include <string.h>
  4. #include <stdio.h>
  5. #include <complex.h>
  6. #ifdef complex
  7. #undef complex
  8. #endif
  9. #ifdef I
  10. #undef I
  11. #endif
  12. #if defined(_WIN64)
  13. typedef long long BLASLONG;
  14. typedef unsigned long long BLASULONG;
  15. #else
  16. typedef long BLASLONG;
  17. typedef unsigned long BLASULONG;
  18. #endif
  19. #ifdef LAPACK_ILP64
  20. typedef BLASLONG blasint;
  21. #if defined(_WIN64)
  22. #define blasabs(x) llabs(x)
  23. #else
  24. #define blasabs(x) labs(x)
  25. #endif
  26. #else
  27. typedef int blasint;
  28. #define blasabs(x) abs(x)
  29. #endif
  30. typedef blasint integer;
  31. typedef unsigned int uinteger;
  32. typedef char *address;
  33. typedef short int shortint;
  34. typedef float real;
  35. typedef double doublereal;
  36. typedef struct { real r, i; } complex;
  37. typedef struct { doublereal r, i; } doublecomplex;
  38. #ifdef _MSC_VER
  39. static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
  40. static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
  41. static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
  42. static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
  43. #else
  44. static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
  45. static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
  46. static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
  47. static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
  48. #endif
  49. #define pCf(z) (*_pCf(z))
  50. #define pCd(z) (*_pCd(z))
  51. typedef blasint logical;
  52. typedef char logical1;
  53. typedef char integer1;
  54. #define TRUE_ (1)
  55. #define FALSE_ (0)
  56. /* Extern is for use with -E */
  57. #ifndef Extern
  58. #define Extern extern
  59. #endif
  60. /* I/O stuff */
  61. typedef int flag;
  62. typedef int ftnlen;
  63. typedef int ftnint;
  64. /*external read, write*/
  65. typedef struct
  66. { flag cierr;
  67. ftnint ciunit;
  68. flag ciend;
  69. char *cifmt;
  70. ftnint cirec;
  71. } cilist;
  72. /*internal read, write*/
  73. typedef struct
  74. { flag icierr;
  75. char *iciunit;
  76. flag iciend;
  77. char *icifmt;
  78. ftnint icirlen;
  79. ftnint icirnum;
  80. } icilist;
  81. /*open*/
  82. typedef struct
  83. { flag oerr;
  84. ftnint ounit;
  85. char *ofnm;
  86. ftnlen ofnmlen;
  87. char *osta;
  88. char *oacc;
  89. char *ofm;
  90. ftnint orl;
  91. char *oblnk;
  92. } olist;
  93. /*close*/
  94. typedef struct
  95. { flag cerr;
  96. ftnint cunit;
  97. char *csta;
  98. } cllist;
  99. /*rewind, backspace, endfile*/
  100. typedef struct
  101. { flag aerr;
  102. ftnint aunit;
  103. } alist;
  104. /* inquire */
  105. typedef struct
  106. { flag inerr;
  107. ftnint inunit;
  108. char *infile;
  109. ftnlen infilen;
  110. ftnint *inex; /*parameters in standard's order*/
  111. ftnint *inopen;
  112. ftnint *innum;
  113. ftnint *innamed;
  114. char *inname;
  115. ftnlen innamlen;
  116. char *inacc;
  117. ftnlen inacclen;
  118. char *inseq;
  119. ftnlen inseqlen;
  120. char *indir;
  121. ftnlen indirlen;
  122. char *infmt;
  123. ftnlen infmtlen;
  124. char *inform;
  125. ftnint informlen;
  126. char *inunf;
  127. ftnlen inunflen;
  128. ftnint *inrecl;
  129. ftnint *innrec;
  130. char *inblank;
  131. ftnlen inblanklen;
  132. } inlist;
  133. #define VOID void
  134. union Multitype { /* for multiple entry points */
  135. integer1 g;
  136. shortint h;
  137. integer i;
  138. /* longint j; */
  139. real r;
  140. doublereal d;
  141. complex c;
  142. doublecomplex z;
  143. };
  144. typedef union Multitype Multitype;
  145. struct Vardesc { /* for Namelist */
  146. char *name;
  147. char *addr;
  148. ftnlen *dims;
  149. int type;
  150. };
  151. typedef struct Vardesc Vardesc;
  152. struct Namelist {
  153. char *name;
  154. Vardesc **vars;
  155. int nvars;
  156. };
  157. typedef struct Namelist Namelist;
  158. #define abs(x) ((x) >= 0 ? (x) : -(x))
  159. #define dabs(x) (fabs(x))
  160. #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
  161. #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
  162. #define dmin(a,b) (f2cmin(a,b))
  163. #define dmax(a,b) (f2cmax(a,b))
  164. #define bit_test(a,b) ((a) >> (b) & 1)
  165. #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
  166. #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
  167. #define abort_() { sig_die("Fortran abort routine called", 1); }
  168. #define c_abs(z) (cabsf(Cf(z)))
  169. #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
  170. #ifdef _MSC_VER
  171. #define c_div(c, a, b) {Cf(c)._Val[0] = (Cf(a)._Val[0]/Cf(b)._Val[0]); Cf(c)._Val[1]=(Cf(a)._Val[1]/Cf(b)._Val[1]);}
  172. #define z_div(c, a, b) {Cd(c)._Val[0] = (Cd(a)._Val[0]/Cd(b)._Val[0]); Cd(c)._Val[1]=(Cd(a)._Val[1]/Cd(b)._Val[1]);}
  173. #else
  174. #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
  175. #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
  176. #endif
  177. #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
  178. #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
  179. #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
  180. //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
  181. #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
  182. #define d_abs(x) (fabs(*(x)))
  183. #define d_acos(x) (acos(*(x)))
  184. #define d_asin(x) (asin(*(x)))
  185. #define d_atan(x) (atan(*(x)))
  186. #define d_atn2(x, y) (atan2(*(x),*(y)))
  187. #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
  188. #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
  189. #define d_cos(x) (cos(*(x)))
  190. #define d_cosh(x) (cosh(*(x)))
  191. #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
  192. #define d_exp(x) (exp(*(x)))
  193. #define d_imag(z) (cimag(Cd(z)))
  194. #define r_imag(z) (cimagf(Cf(z)))
  195. #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  196. #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  197. #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  198. #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  199. #define d_log(x) (log(*(x)))
  200. #define d_mod(x, y) (fmod(*(x), *(y)))
  201. #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
  202. #define d_nint(x) u_nint(*(x))
  203. #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
  204. #define d_sign(a,b) u_sign(*(a),*(b))
  205. #define r_sign(a,b) u_sign(*(a),*(b))
  206. #define d_sin(x) (sin(*(x)))
  207. #define d_sinh(x) (sinh(*(x)))
  208. #define d_sqrt(x) (sqrt(*(x)))
  209. #define d_tan(x) (tan(*(x)))
  210. #define d_tanh(x) (tanh(*(x)))
  211. #define i_abs(x) abs(*(x))
  212. #define i_dnnt(x) ((integer)u_nint(*(x)))
  213. #define i_len(s, n) (n)
  214. #define i_nint(x) ((integer)u_nint(*(x)))
  215. #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
  216. #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
  217. #define pow_si(B,E) spow_ui(*(B),*(E))
  218. #define pow_ri(B,E) spow_ui(*(B),*(E))
  219. #define pow_di(B,E) dpow_ui(*(B),*(E))
  220. #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
  221. #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
  222. #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
  223. #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
  224. #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
  225. #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
  226. #define sig_die(s, kill) { exit(1); }
  227. #define s_stop(s, n) {exit(0);}
  228. static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
  229. #define z_abs(z) (cabs(Cd(z)))
  230. #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
  231. #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
  232. #define myexit_() break;
  233. #define mycycle_() continue;
  234. #define myceiling_(w) {ceil(w)}
  235. #define myhuge_(w) {HUGE_VAL}
  236. //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
  237. #define mymaxloc_(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
  238. /* procedure parameter types for -A and -C++ */
  239. #ifdef __cplusplus
  240. typedef logical (*L_fp)(...);
  241. #else
  242. typedef logical (*L_fp)();
  243. #endif
  244. static float spow_ui(float x, integer n) {
  245. float pow=1.0; unsigned long int u;
  246. if(n != 0) {
  247. if(n < 0) n = -n, x = 1/x;
  248. for(u = n; ; ) {
  249. if(u & 01) pow *= x;
  250. if(u >>= 1) x *= x;
  251. else break;
  252. }
  253. }
  254. return pow;
  255. }
  256. static double dpow_ui(double x, integer n) {
  257. double pow=1.0; unsigned long int u;
  258. if(n != 0) {
  259. if(n < 0) n = -n, x = 1/x;
  260. for(u = n; ; ) {
  261. if(u & 01) pow *= x;
  262. if(u >>= 1) x *= x;
  263. else break;
  264. }
  265. }
  266. return pow;
  267. }
  268. #ifdef _MSC_VER
  269. static _Fcomplex cpow_ui(complex x, integer n) {
  270. complex pow={1.0,0.0}; unsigned long int u;
  271. if(n != 0) {
  272. if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
  273. for(u = n; ; ) {
  274. if(u & 01) pow.r *= x.r, pow.i *= x.i;
  275. if(u >>= 1) x.r *= x.r, x.i *= x.i;
  276. else break;
  277. }
  278. }
  279. _Fcomplex p={pow.r, pow.i};
  280. return p;
  281. }
  282. #else
  283. static _Complex float cpow_ui(_Complex float x, integer n) {
  284. _Complex float pow=1.0; unsigned long int u;
  285. if(n != 0) {
  286. if(n < 0) n = -n, x = 1/x;
  287. for(u = n; ; ) {
  288. if(u & 01) pow *= x;
  289. if(u >>= 1) x *= x;
  290. else break;
  291. }
  292. }
  293. return pow;
  294. }
  295. #endif
  296. #ifdef _MSC_VER
  297. static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
  298. _Dcomplex pow={1.0,0.0}; unsigned long int u;
  299. if(n != 0) {
  300. if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
  301. for(u = n; ; ) {
  302. if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
  303. if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
  304. else break;
  305. }
  306. }
  307. _Dcomplex p = {pow._Val[0], pow._Val[1]};
  308. return p;
  309. }
  310. #else
  311. static _Complex double zpow_ui(_Complex double x, integer n) {
  312. _Complex double pow=1.0; unsigned long int u;
  313. if(n != 0) {
  314. if(n < 0) n = -n, x = 1/x;
  315. for(u = n; ; ) {
  316. if(u & 01) pow *= x;
  317. if(u >>= 1) x *= x;
  318. else break;
  319. }
  320. }
  321. return pow;
  322. }
  323. #endif
  324. static integer pow_ii(integer x, integer n) {
  325. integer pow; unsigned long int u;
  326. if (n <= 0) {
  327. if (n == 0 || x == 1) pow = 1;
  328. else if (x != -1) pow = x == 0 ? 1/x : 0;
  329. else n = -n;
  330. }
  331. if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
  332. u = n;
  333. for(pow = 1; ; ) {
  334. if(u & 01) pow *= x;
  335. if(u >>= 1) x *= x;
  336. else break;
  337. }
  338. }
  339. return pow;
  340. }
  341. static integer dmaxloc_(double *w, integer s, integer e, integer *n)
  342. {
  343. double m; integer i, mi;
  344. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  345. if (w[i-1]>m) mi=i ,m=w[i-1];
  346. return mi-s+1;
  347. }
  348. static integer smaxloc_(float *w, integer s, integer e, integer *n)
  349. {
  350. float m; integer i, mi;
  351. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  352. if (w[i-1]>m) mi=i ,m=w[i-1];
  353. return mi-s+1;
  354. }
  355. static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  356. integer n = *n_, incx = *incx_, incy = *incy_, i;
  357. #ifdef _MSC_VER
  358. _Fcomplex zdotc = {0.0, 0.0};
  359. if (incx == 1 && incy == 1) {
  360. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  361. zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
  362. zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
  363. }
  364. } else {
  365. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  366. zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
  367. zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
  368. }
  369. }
  370. pCf(z) = zdotc;
  371. }
  372. #else
  373. _Complex float zdotc = 0.0;
  374. if (incx == 1 && incy == 1) {
  375. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  376. zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
  377. }
  378. } else {
  379. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  380. zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
  381. }
  382. }
  383. pCf(z) = zdotc;
  384. }
  385. #endif
  386. static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  387. integer n = *n_, incx = *incx_, incy = *incy_, i;
  388. #ifdef _MSC_VER
  389. _Dcomplex zdotc = {0.0, 0.0};
  390. if (incx == 1 && incy == 1) {
  391. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  392. zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
  393. zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
  394. }
  395. } else {
  396. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  397. zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
  398. zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
  399. }
  400. }
  401. pCd(z) = zdotc;
  402. }
  403. #else
  404. _Complex double zdotc = 0.0;
  405. if (incx == 1 && incy == 1) {
  406. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  407. zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
  408. }
  409. } else {
  410. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  411. zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
  412. }
  413. }
  414. pCd(z) = zdotc;
  415. }
  416. #endif
  417. static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  418. integer n = *n_, incx = *incx_, incy = *incy_, i;
  419. #ifdef _MSC_VER
  420. _Fcomplex zdotc = {0.0, 0.0};
  421. if (incx == 1 && incy == 1) {
  422. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  423. zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
  424. zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
  425. }
  426. } else {
  427. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  428. zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
  429. zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
  430. }
  431. }
  432. pCf(z) = zdotc;
  433. }
  434. #else
  435. _Complex float zdotc = 0.0;
  436. if (incx == 1 && incy == 1) {
  437. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  438. zdotc += Cf(&x[i]) * Cf(&y[i]);
  439. }
  440. } else {
  441. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  442. zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
  443. }
  444. }
  445. pCf(z) = zdotc;
  446. }
  447. #endif
  448. static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  449. integer n = *n_, incx = *incx_, incy = *incy_, i;
  450. #ifdef _MSC_VER
  451. _Dcomplex zdotc = {0.0, 0.0};
  452. if (incx == 1 && incy == 1) {
  453. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  454. zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
  455. zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
  456. }
  457. } else {
  458. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  459. zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
  460. zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
  461. }
  462. }
  463. pCd(z) = zdotc;
  464. }
  465. #else
  466. _Complex double zdotc = 0.0;
  467. if (incx == 1 && incy == 1) {
  468. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  469. zdotc += Cd(&x[i]) * Cd(&y[i]);
  470. }
  471. } else {
  472. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  473. zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
  474. }
  475. }
  476. pCd(z) = zdotc;
  477. }
  478. #endif
  479. /* -- translated by f2c (version 20000121).
  480. You must link the resulting object file with the libraries:
  481. -lf2c -lm (in that order)
  482. */
  483. /* Table of constant values */
  484. static doublecomplex c_b1 = {0.,0.};
  485. static doublecomplex c_b2 = {1.,0.};
  486. static integer c__2 = 2;
  487. static integer c__1 = 1;
  488. static integer c__3 = 3;
  489. /* > \brief \b ZLAQR5 performs a single small-bulge multi-shift QR sweep. */
  490. /* =========== DOCUMENTATION =========== */
  491. /* Online html documentation available at */
  492. /* http://www.netlib.org/lapack/explore-html/ */
  493. /* > \htmlonly */
  494. /* > Download ZLAQR5 + dependencies */
  495. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/zlaqr5.
  496. f"> */
  497. /* > [TGZ]</a> */
  498. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/zlaqr5.
  499. f"> */
  500. /* > [ZIP]</a> */
  501. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/zlaqr5.
  502. f"> */
  503. /* > [TXT]</a> */
  504. /* > \endhtmlonly */
  505. /* Definition: */
  506. /* =========== */
  507. /* SUBROUTINE ZLAQR5( WANTT, WANTZ, KACC22, N, KTOP, KBOT, NSHFTS, S, */
  508. /* H, LDH, ILOZ, IHIZ, Z, LDZ, V, LDV, U, LDU, NV, */
  509. /* WV, LDWV, NH, WH, LDWH ) */
  510. /* INTEGER IHIZ, ILOZ, KACC22, KBOT, KTOP, LDH, LDU, LDV, */
  511. /* $ LDWH, LDWV, LDZ, N, NH, NSHFTS, NV */
  512. /* LOGICAL WANTT, WANTZ */
  513. /* COMPLEX*16 H( LDH, * ), S( * ), U( LDU, * ), V( LDV, * ), */
  514. /* $ WH( LDWH, * ), WV( LDWV, * ), Z( LDZ, * ) */
  515. /* > \par Purpose: */
  516. /* ============= */
  517. /* > */
  518. /* > \verbatim */
  519. /* > */
  520. /* > ZLAQR5, called by ZLAQR0, performs a */
  521. /* > single small-bulge multi-shift QR sweep. */
  522. /* > \endverbatim */
  523. /* Arguments: */
  524. /* ========== */
  525. /* > \param[in] WANTT */
  526. /* > \verbatim */
  527. /* > WANTT is LOGICAL */
  528. /* > WANTT = .true. if the triangular Schur factor */
  529. /* > is being computed. WANTT is set to .false. otherwise. */
  530. /* > \endverbatim */
  531. /* > */
  532. /* > \param[in] WANTZ */
  533. /* > \verbatim */
  534. /* > WANTZ is LOGICAL */
  535. /* > WANTZ = .true. if the unitary Schur factor is being */
  536. /* > computed. WANTZ is set to .false. otherwise. */
  537. /* > \endverbatim */
  538. /* > */
  539. /* > \param[in] KACC22 */
  540. /* > \verbatim */
  541. /* > KACC22 is INTEGER with value 0, 1, or 2. */
  542. /* > Specifies the computation mode of far-from-diagonal */
  543. /* > orthogonal updates. */
  544. /* > = 0: ZLAQR5 does not accumulate reflections and does not */
  545. /* > use matrix-matrix multiply to update far-from-diagonal */
  546. /* > matrix entries. */
  547. /* > = 1: ZLAQR5 accumulates reflections and uses matrix-matrix */
  548. /* > multiply to update the far-from-diagonal matrix entries. */
  549. /* > = 2: Same as KACC22 = 1. This option used to enable exploiting */
  550. /* > the 2-by-2 structure during matrix multiplications, but */
  551. /* > this is no longer supported. */
  552. /* > \endverbatim */
  553. /* > */
  554. /* > \param[in] N */
  555. /* > \verbatim */
  556. /* > N is INTEGER */
  557. /* > N is the order of the Hessenberg matrix H upon which this */
  558. /* > subroutine operates. */
  559. /* > \endverbatim */
  560. /* > */
  561. /* > \param[in] KTOP */
  562. /* > \verbatim */
  563. /* > KTOP is INTEGER */
  564. /* > \endverbatim */
  565. /* > */
  566. /* > \param[in] KBOT */
  567. /* > \verbatim */
  568. /* > KBOT is INTEGER */
  569. /* > These are the first and last rows and columns of an */
  570. /* > isolated diagonal block upon which the QR sweep is to be */
  571. /* > applied. It is assumed without a check that */
  572. /* > either KTOP = 1 or H(KTOP,KTOP-1) = 0 */
  573. /* > and */
  574. /* > either KBOT = N or H(KBOT+1,KBOT) = 0. */
  575. /* > \endverbatim */
  576. /* > */
  577. /* > \param[in] NSHFTS */
  578. /* > \verbatim */
  579. /* > NSHFTS is INTEGER */
  580. /* > NSHFTS gives the number of simultaneous shifts. NSHFTS */
  581. /* > must be positive and even. */
  582. /* > \endverbatim */
  583. /* > */
  584. /* > \param[in,out] S */
  585. /* > \verbatim */
  586. /* > S is COMPLEX*16 array, dimension (NSHFTS) */
  587. /* > S contains the shifts of origin that define the multi- */
  588. /* > shift QR sweep. On output S may be reordered. */
  589. /* > \endverbatim */
  590. /* > */
  591. /* > \param[in,out] H */
  592. /* > \verbatim */
  593. /* > H is COMPLEX*16 array, dimension (LDH,N) */
  594. /* > On input H contains a Hessenberg matrix. On output a */
  595. /* > multi-shift QR sweep with shifts SR(J)+i*SI(J) is applied */
  596. /* > to the isolated diagonal block in rows and columns KTOP */
  597. /* > through KBOT. */
  598. /* > \endverbatim */
  599. /* > */
  600. /* > \param[in] LDH */
  601. /* > \verbatim */
  602. /* > LDH is INTEGER */
  603. /* > LDH is the leading dimension of H just as declared in the */
  604. /* > calling procedure. LDH >= MAX(1,N). */
  605. /* > \endverbatim */
  606. /* > */
  607. /* > \param[in] ILOZ */
  608. /* > \verbatim */
  609. /* > ILOZ is INTEGER */
  610. /* > \endverbatim */
  611. /* > */
  612. /* > \param[in] IHIZ */
  613. /* > \verbatim */
  614. /* > IHIZ is INTEGER */
  615. /* > Specify the rows of Z to which transformations must be */
  616. /* > applied if WANTZ is .TRUE.. 1 <= ILOZ <= IHIZ <= N */
  617. /* > \endverbatim */
  618. /* > */
  619. /* > \param[in,out] Z */
  620. /* > \verbatim */
  621. /* > Z is COMPLEX*16 array, dimension (LDZ,IHIZ) */
  622. /* > If WANTZ = .TRUE., then the QR Sweep unitary */
  623. /* > similarity transformation is accumulated into */
  624. /* > Z(ILOZ:IHIZ,ILOZ:IHIZ) from the right. */
  625. /* > If WANTZ = .FALSE., then Z is unreferenced. */
  626. /* > \endverbatim */
  627. /* > */
  628. /* > \param[in] LDZ */
  629. /* > \verbatim */
  630. /* > LDZ is INTEGER */
  631. /* > LDZ is the leading dimension of Z just as declared in */
  632. /* > the calling procedure. LDZ >= N. */
  633. /* > \endverbatim */
  634. /* > */
  635. /* > \param[out] V */
  636. /* > \verbatim */
  637. /* > V is COMPLEX*16 array, dimension (LDV,NSHFTS/2) */
  638. /* > \endverbatim */
  639. /* > */
  640. /* > \param[in] LDV */
  641. /* > \verbatim */
  642. /* > LDV is INTEGER */
  643. /* > LDV is the leading dimension of V as declared in the */
  644. /* > calling procedure. LDV >= 3. */
  645. /* > \endverbatim */
  646. /* > */
  647. /* > \param[out] U */
  648. /* > \verbatim */
  649. /* > U is COMPLEX*16 array, dimension (LDU,2*NSHFTS) */
  650. /* > \endverbatim */
  651. /* > */
  652. /* > \param[in] LDU */
  653. /* > \verbatim */
  654. /* > LDU is INTEGER */
  655. /* > LDU is the leading dimension of U just as declared in the */
  656. /* > in the calling subroutine. LDU >= 2*NSHFTS. */
  657. /* > \endverbatim */
  658. /* > */
  659. /* > \param[in] NV */
  660. /* > \verbatim */
  661. /* > NV is INTEGER */
  662. /* > NV is the number of rows in WV available for workspace. */
  663. /* > NV >= 1. */
  664. /* > \endverbatim */
  665. /* > */
  666. /* > \param[out] WV */
  667. /* > \verbatim */
  668. /* > WV is COMPLEX*16 array, dimension (LDWV,2*NSHFTS) */
  669. /* > \endverbatim */
  670. /* > */
  671. /* > \param[in] LDWV */
  672. /* > \verbatim */
  673. /* > LDWV is INTEGER */
  674. /* > LDWV is the leading dimension of WV as declared in the */
  675. /* > in the calling subroutine. LDWV >= NV. */
  676. /* > \endverbatim */
  677. /* > \param[in] NH */
  678. /* > \verbatim */
  679. /* > NH is INTEGER */
  680. /* > NH is the number of columns in array WH available for */
  681. /* > workspace. NH >= 1. */
  682. /* > \endverbatim */
  683. /* > */
  684. /* > \param[out] WH */
  685. /* > \verbatim */
  686. /* > WH is COMPLEX*16 array, dimension (LDWH,NH) */
  687. /* > \endverbatim */
  688. /* > */
  689. /* > \param[in] LDWH */
  690. /* > \verbatim */
  691. /* > LDWH is INTEGER */
  692. /* > Leading dimension of WH just as declared in the */
  693. /* > calling procedure. LDWH >= 2*NSHFTS. */
  694. /* > \endverbatim */
  695. /* > */
  696. /* Authors: */
  697. /* ======== */
  698. /* > \author Univ. of Tennessee */
  699. /* > \author Univ. of California Berkeley */
  700. /* > \author Univ. of Colorado Denver */
  701. /* > \author NAG Ltd. */
  702. /* > \date January 2021 */
  703. /* > \ingroup complex16OTHERauxiliary */
  704. /* > \par Contributors: */
  705. /* ================== */
  706. /* > */
  707. /* > Karen Braman and Ralph Byers, Department of Mathematics, */
  708. /* > University of Kansas, USA */
  709. /* > */
  710. /* > Lars Karlsson, Daniel Kressner, and Bruno Lang */
  711. /* > */
  712. /* > Thijs Steel, Department of Computer science, */
  713. /* > KU Leuven, Belgium */
  714. /* > \par References: */
  715. /* ================ */
  716. /* > */
  717. /* > K. Braman, R. Byers and R. Mathias, The Multi-Shift QR */
  718. /* > Algorithm Part I: Maintaining Well Focused Shifts, and Level 3 */
  719. /* > Performance, SIAM Journal of Matrix Analysis, volume 23, pages */
  720. /* > 929--947, 2002. */
  721. /* > */
  722. /* > Lars Karlsson, Daniel Kressner, and Bruno Lang, Optimally packed */
  723. /* > chains of bulges in multishift QR algorithms. */
  724. /* > ACM Trans. Math. Softw. 40, 2, Article 12 (February 2014). */
  725. /* > */
  726. /* ===================================================================== */
  727. /* Subroutine */ void zlaqr5_(logical *wantt, logical *wantz, integer *kacc22,
  728. integer *n, integer *ktop, integer *kbot, integer *nshfts,
  729. doublecomplex *s, doublecomplex *h__, integer *ldh, integer *iloz,
  730. integer *ihiz, doublecomplex *z__, integer *ldz, doublecomplex *v,
  731. integer *ldv, doublecomplex *u, integer *ldu, integer *nv,
  732. doublecomplex *wv, integer *ldwv, integer *nh, doublecomplex *wh,
  733. integer *ldwh)
  734. {
  735. /* System generated locals */
  736. integer h_dim1, h_offset, u_dim1, u_offset, v_dim1, v_offset, wh_dim1,
  737. wh_offset, wv_dim1, wv_offset, z_dim1, z_offset, i__1, i__2, i__3,
  738. i__4, i__5, i__6, i__7, i__8, i__9, i__10, i__11;
  739. doublereal d__1, d__2, d__3, d__4, d__5, d__6, d__7, d__8, d__9, d__10;
  740. doublecomplex z__1, z__2, z__3, z__4, z__5, z__6, z__7, z__8;
  741. /* Local variables */
  742. doublecomplex beta;
  743. logical bmp22;
  744. integer jcol, jlen, jbot, mbot, jtop, jrow, mtop, j, k, m;
  745. doublecomplex alpha;
  746. logical accum;
  747. integer ndcol, incol, krcol, nbmps;
  748. extern /* Subroutine */ void zgemm_(char *, char *, integer *, integer *,
  749. integer *, doublecomplex *, doublecomplex *, integer *,
  750. doublecomplex *, integer *, doublecomplex *, doublecomplex *,
  751. integer *);
  752. integer i2, k1, i4;
  753. extern /* Subroutine */ void dlabad_(doublereal *, doublereal *);
  754. doublereal h11, h12, h21, h22;
  755. extern /* Subroutine */ void zlaqr1_(integer *, doublecomplex *, integer *,
  756. doublecomplex *, doublecomplex *, doublecomplex *);
  757. integer m22;
  758. extern doublereal dlamch_(char *);
  759. integer ns, nu;
  760. doublecomplex vt[3];
  761. doublereal safmin, safmax;
  762. extern /* Subroutine */ void zlarfg_(integer *, doublecomplex *,
  763. doublecomplex *, integer *, doublecomplex *);
  764. doublecomplex refsum;
  765. extern /* Subroutine */ void zlacpy_(char *, integer *, integer *,
  766. doublecomplex *, integer *, doublecomplex *, integer *),
  767. zlaset_(char *, integer *, integer *, doublecomplex *,
  768. doublecomplex *, doublecomplex *, integer *);
  769. doublereal smlnum, scl;
  770. integer kdu, kms;
  771. doublereal ulp;
  772. doublereal tst1, tst2;
  773. /* -- LAPACK auxiliary routine (version 3.7.1) -- */
  774. /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
  775. /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
  776. /* June 2016 */
  777. /* ================================================================ */
  778. /* ==== If there are no shifts, then there is nothing to do. ==== */
  779. /* Parameter adjustments */
  780. --s;
  781. h_dim1 = *ldh;
  782. h_offset = 1 + h_dim1 * 1;
  783. h__ -= h_offset;
  784. z_dim1 = *ldz;
  785. z_offset = 1 + z_dim1 * 1;
  786. z__ -= z_offset;
  787. v_dim1 = *ldv;
  788. v_offset = 1 + v_dim1 * 1;
  789. v -= v_offset;
  790. u_dim1 = *ldu;
  791. u_offset = 1 + u_dim1 * 1;
  792. u -= u_offset;
  793. wv_dim1 = *ldwv;
  794. wv_offset = 1 + wv_dim1 * 1;
  795. wv -= wv_offset;
  796. wh_dim1 = *ldwh;
  797. wh_offset = 1 + wh_dim1 * 1;
  798. wh -= wh_offset;
  799. /* Function Body */
  800. if (*nshfts < 2) {
  801. return;
  802. }
  803. /* ==== If the active block is empty or 1-by-1, then there */
  804. /* . is nothing to do. ==== */
  805. if (*ktop >= *kbot) {
  806. return;
  807. }
  808. /* ==== NSHFTS is supposed to be even, but if it is odd, */
  809. /* . then simply reduce it by one. ==== */
  810. ns = *nshfts - *nshfts % 2;
  811. /* ==== Machine constants for deflation ==== */
  812. safmin = dlamch_("SAFE MINIMUM");
  813. safmax = 1. / safmin;
  814. dlabad_(&safmin, &safmax);
  815. ulp = dlamch_("PRECISION");
  816. smlnum = safmin * ((doublereal) (*n) / ulp);
  817. /* ==== Use accumulated reflections to update far-from-diagonal */
  818. /* . entries ? ==== */
  819. accum = *kacc22 == 1 || *kacc22 == 2;
  820. /* ==== clear trash ==== */
  821. if (*ktop + 2 <= *kbot) {
  822. i__1 = *ktop + 2 + *ktop * h_dim1;
  823. h__[i__1].r = 0., h__[i__1].i = 0.;
  824. }
  825. /* ==== NBMPS = number of 2-shift bulges in the chain ==== */
  826. nbmps = ns / 2;
  827. /* ==== KDU = width of slab ==== */
  828. kdu = nbmps << 2;
  829. /* ==== Create and chase chains of NBMPS bulges ==== */
  830. i__1 = *kbot - 2;
  831. i__2 = nbmps << 1;
  832. for (incol = *ktop - (nbmps << 1) + 1; i__2 < 0 ? incol >= i__1 : incol <=
  833. i__1; incol += i__2) {
  834. /* JTOP = Index from which updates from the right start. */
  835. if (accum) {
  836. jtop = f2cmax(*ktop,incol);
  837. } else if (*wantt) {
  838. jtop = 1;
  839. } else {
  840. jtop = *ktop;
  841. }
  842. ndcol = incol + kdu;
  843. if (accum) {
  844. zlaset_("ALL", &kdu, &kdu, &c_b1, &c_b2, &u[u_offset], ldu);
  845. }
  846. /* ==== Near-the-diagonal bulge chase. The following loop */
  847. /* . performs the near-the-diagonal part of a small bulge */
  848. /* . multi-shift QR sweep. Each 4*NBMPS column diagonal */
  849. /* . chunk extends from column INCOL to column NDCOL */
  850. /* . (including both column INCOL and column NDCOL). The */
  851. /* . following loop chases a 2*NBMPS+1 column long chain of */
  852. /* . NBMPS bulges 2*NBMPS columns to the right. (INCOL */
  853. /* . may be less than KTOP and and NDCOL may be greater than */
  854. /* . KBOT indicating phantom columns from which to chase */
  855. /* . bulges before they are actually introduced or to which */
  856. /* . to chase bulges beyond column KBOT.) ==== */
  857. /* Computing MIN */
  858. i__4 = incol + (nbmps << 1) - 1, i__5 = *kbot - 2;
  859. i__3 = f2cmin(i__4,i__5);
  860. for (krcol = incol; krcol <= i__3; ++krcol) {
  861. /* ==== Bulges number MTOP to MBOT are active double implicit */
  862. /* . shift bulges. There may or may not also be small */
  863. /* . 2-by-2 bulge, if there is room. The inactive bulges */
  864. /* . (if any) must wait until the active bulges have moved */
  865. /* . down the diagonal to make room. The phantom matrix */
  866. /* . paradigm described above helps keep track. ==== */
  867. /* Computing MAX */
  868. i__4 = 1, i__5 = (*ktop - krcol) / 2 + 1;
  869. mtop = f2cmax(i__4,i__5);
  870. /* Computing MIN */
  871. i__4 = nbmps, i__5 = (*kbot - krcol - 1) / 2;
  872. mbot = f2cmin(i__4,i__5);
  873. m22 = mbot + 1;
  874. bmp22 = mbot < nbmps && krcol + (m22 - 1 << 1) == *kbot - 2;
  875. /* ==== Generate reflections to chase the chain right */
  876. /* . one column. (The minimum value of K is KTOP-1.) ==== */
  877. if (bmp22) {
  878. /* ==== Special case: 2-by-2 reflection at bottom treated */
  879. /* . separately ==== */
  880. k = krcol + (m22 - 1 << 1);
  881. if (k == *ktop - 1) {
  882. zlaqr1_(&c__2, &h__[k + 1 + (k + 1) * h_dim1], ldh, &s[(
  883. m22 << 1) - 1], &s[m22 * 2], &v[m22 * v_dim1 + 1])
  884. ;
  885. i__4 = m22 * v_dim1 + 1;
  886. beta.r = v[i__4].r, beta.i = v[i__4].i;
  887. zlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22
  888. * v_dim1 + 1]);
  889. } else {
  890. i__4 = k + 1 + k * h_dim1;
  891. beta.r = h__[i__4].r, beta.i = h__[i__4].i;
  892. i__4 = m22 * v_dim1 + 2;
  893. i__5 = k + 2 + k * h_dim1;
  894. v[i__4].r = h__[i__5].r, v[i__4].i = h__[i__5].i;
  895. zlarfg_(&c__2, &beta, &v[m22 * v_dim1 + 2], &c__1, &v[m22
  896. * v_dim1 + 1]);
  897. i__4 = k + 1 + k * h_dim1;
  898. h__[i__4].r = beta.r, h__[i__4].i = beta.i;
  899. i__4 = k + 2 + k * h_dim1;
  900. h__[i__4].r = 0., h__[i__4].i = 0.;
  901. }
  902. /* ==== Perform update from right within */
  903. /* . computational window. ==== */
  904. /* Computing MIN */
  905. i__5 = *kbot, i__6 = k + 3;
  906. i__4 = f2cmin(i__5,i__6);
  907. for (j = jtop; j <= i__4; ++j) {
  908. i__5 = m22 * v_dim1 + 1;
  909. i__6 = j + (k + 1) * h_dim1;
  910. i__7 = m22 * v_dim1 + 2;
  911. i__8 = j + (k + 2) * h_dim1;
  912. z__3.r = v[i__7].r * h__[i__8].r - v[i__7].i * h__[i__8]
  913. .i, z__3.i = v[i__7].r * h__[i__8].i + v[i__7].i *
  914. h__[i__8].r;
  915. z__2.r = h__[i__6].r + z__3.r, z__2.i = h__[i__6].i +
  916. z__3.i;
  917. z__1.r = v[i__5].r * z__2.r - v[i__5].i * z__2.i, z__1.i =
  918. v[i__5].r * z__2.i + v[i__5].i * z__2.r;
  919. refsum.r = z__1.r, refsum.i = z__1.i;
  920. i__5 = j + (k + 1) * h_dim1;
  921. i__6 = j + (k + 1) * h_dim1;
  922. z__1.r = h__[i__6].r - refsum.r, z__1.i = h__[i__6].i -
  923. refsum.i;
  924. h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
  925. i__5 = j + (k + 2) * h_dim1;
  926. i__6 = j + (k + 2) * h_dim1;
  927. d_cnjg(&z__3, &v[m22 * v_dim1 + 2]);
  928. z__2.r = refsum.r * z__3.r - refsum.i * z__3.i, z__2.i =
  929. refsum.r * z__3.i + refsum.i * z__3.r;
  930. z__1.r = h__[i__6].r - z__2.r, z__1.i = h__[i__6].i -
  931. z__2.i;
  932. h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
  933. /* L30: */
  934. }
  935. /* ==== Perform update from left within */
  936. /* . computational window. ==== */
  937. if (accum) {
  938. jbot = f2cmin(ndcol,*kbot);
  939. } else if (*wantt) {
  940. jbot = *n;
  941. } else {
  942. jbot = *kbot;
  943. }
  944. i__4 = jbot;
  945. for (j = k + 1; j <= i__4; ++j) {
  946. d_cnjg(&z__2, &v[m22 * v_dim1 + 1]);
  947. i__5 = k + 1 + j * h_dim1;
  948. d_cnjg(&z__5, &v[m22 * v_dim1 + 2]);
  949. i__6 = k + 2 + j * h_dim1;
  950. z__4.r = z__5.r * h__[i__6].r - z__5.i * h__[i__6].i,
  951. z__4.i = z__5.r * h__[i__6].i + z__5.i * h__[i__6]
  952. .r;
  953. z__3.r = h__[i__5].r + z__4.r, z__3.i = h__[i__5].i +
  954. z__4.i;
  955. z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i =
  956. z__2.r * z__3.i + z__2.i * z__3.r;
  957. refsum.r = z__1.r, refsum.i = z__1.i;
  958. i__5 = k + 1 + j * h_dim1;
  959. i__6 = k + 1 + j * h_dim1;
  960. z__1.r = h__[i__6].r - refsum.r, z__1.i = h__[i__6].i -
  961. refsum.i;
  962. h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
  963. i__5 = k + 2 + j * h_dim1;
  964. i__6 = k + 2 + j * h_dim1;
  965. i__7 = m22 * v_dim1 + 2;
  966. z__2.r = refsum.r * v[i__7].r - refsum.i * v[i__7].i,
  967. z__2.i = refsum.r * v[i__7].i + refsum.i * v[i__7]
  968. .r;
  969. z__1.r = h__[i__6].r - z__2.r, z__1.i = h__[i__6].i -
  970. z__2.i;
  971. h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
  972. /* L40: */
  973. }
  974. /* ==== The following convergence test requires that */
  975. /* . the tradition small-compared-to-nearby-diagonals */
  976. /* . criterion and the Ahues & Tisseur (LAWN 122, 1997) */
  977. /* . criteria both be satisfied. The latter improves */
  978. /* . accuracy in some examples. Falling back on an */
  979. /* . alternate convergence criterion when TST1 or TST2 */
  980. /* . is zero (as done here) is traditional but probably */
  981. /* . unnecessary. ==== */
  982. if (k >= *ktop) {
  983. i__4 = k + 1 + k * h_dim1;
  984. if (h__[i__4].r != 0. || h__[i__4].i != 0.) {
  985. i__4 = k + k * h_dim1;
  986. i__5 = k + 1 + (k + 1) * h_dim1;
  987. tst1 = (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  988. d_imag(&h__[k + k * h_dim1]), abs(d__2)) + ((
  989. d__3 = h__[i__5].r, abs(d__3)) + (d__4 =
  990. d_imag(&h__[k + 1 + (k + 1) * h_dim1]), abs(
  991. d__4)));
  992. if (tst1 == 0.) {
  993. if (k >= *ktop + 1) {
  994. i__4 = k + (k - 1) * h_dim1;
  995. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (
  996. d__2 = d_imag(&h__[k + (k - 1) *
  997. h_dim1]), abs(d__2));
  998. }
  999. if (k >= *ktop + 2) {
  1000. i__4 = k + (k - 2) * h_dim1;
  1001. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (
  1002. d__2 = d_imag(&h__[k + (k - 2) *
  1003. h_dim1]), abs(d__2));
  1004. }
  1005. if (k >= *ktop + 3) {
  1006. i__4 = k + (k - 3) * h_dim1;
  1007. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (
  1008. d__2 = d_imag(&h__[k + (k - 3) *
  1009. h_dim1]), abs(d__2));
  1010. }
  1011. if (k <= *kbot - 2) {
  1012. i__4 = k + 2 + (k + 1) * h_dim1;
  1013. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (
  1014. d__2 = d_imag(&h__[k + 2 + (k + 1) *
  1015. h_dim1]), abs(d__2));
  1016. }
  1017. if (k <= *kbot - 3) {
  1018. i__4 = k + 3 + (k + 1) * h_dim1;
  1019. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (
  1020. d__2 = d_imag(&h__[k + 3 + (k + 1) *
  1021. h_dim1]), abs(d__2));
  1022. }
  1023. if (k <= *kbot - 4) {
  1024. i__4 = k + 4 + (k + 1) * h_dim1;
  1025. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (
  1026. d__2 = d_imag(&h__[k + 4 + (k + 1) *
  1027. h_dim1]), abs(d__2));
  1028. }
  1029. }
  1030. i__4 = k + 1 + k * h_dim1;
  1031. /* Computing MAX */
  1032. d__3 = smlnum, d__4 = ulp * tst1;
  1033. if ((d__1 = h__[i__4].r, abs(d__1)) + (d__2 = d_imag(&
  1034. h__[k + 1 + k * h_dim1]), abs(d__2)) <= f2cmax(
  1035. d__3,d__4)) {
  1036. /* Computing MAX */
  1037. i__4 = k + 1 + k * h_dim1;
  1038. i__5 = k + (k + 1) * h_dim1;
  1039. d__5 = (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  1040. d_imag(&h__[k + 1 + k * h_dim1]), abs(
  1041. d__2)), d__6 = (d__3 = h__[i__5].r, abs(
  1042. d__3)) + (d__4 = d_imag(&h__[k + (k + 1) *
  1043. h_dim1]), abs(d__4));
  1044. h12 = f2cmax(d__5,d__6);
  1045. /* Computing MIN */
  1046. i__4 = k + 1 + k * h_dim1;
  1047. i__5 = k + (k + 1) * h_dim1;
  1048. d__5 = (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  1049. d_imag(&h__[k + 1 + k * h_dim1]), abs(
  1050. d__2)), d__6 = (d__3 = h__[i__5].r, abs(
  1051. d__3)) + (d__4 = d_imag(&h__[k + (k + 1) *
  1052. h_dim1]), abs(d__4));
  1053. h21 = f2cmin(d__5,d__6);
  1054. i__4 = k + k * h_dim1;
  1055. i__5 = k + 1 + (k + 1) * h_dim1;
  1056. z__2.r = h__[i__4].r - h__[i__5].r, z__2.i = h__[
  1057. i__4].i - h__[i__5].i;
  1058. z__1.r = z__2.r, z__1.i = z__2.i;
  1059. /* Computing MAX */
  1060. i__6 = k + 1 + (k + 1) * h_dim1;
  1061. d__5 = (d__1 = h__[i__6].r, abs(d__1)) + (d__2 =
  1062. d_imag(&h__[k + 1 + (k + 1) * h_dim1]),
  1063. abs(d__2)), d__6 = (d__3 = z__1.r, abs(
  1064. d__3)) + (d__4 = d_imag(&z__1), abs(d__4))
  1065. ;
  1066. h11 = f2cmax(d__5,d__6);
  1067. i__4 = k + k * h_dim1;
  1068. i__5 = k + 1 + (k + 1) * h_dim1;
  1069. z__2.r = h__[i__4].r - h__[i__5].r, z__2.i = h__[
  1070. i__4].i - h__[i__5].i;
  1071. z__1.r = z__2.r, z__1.i = z__2.i;
  1072. /* Computing MIN */
  1073. i__6 = k + 1 + (k + 1) * h_dim1;
  1074. d__5 = (d__1 = h__[i__6].r, abs(d__1)) + (d__2 =
  1075. d_imag(&h__[k + 1 + (k + 1) * h_dim1]),
  1076. abs(d__2)), d__6 = (d__3 = z__1.r, abs(
  1077. d__3)) + (d__4 = d_imag(&z__1), abs(d__4))
  1078. ;
  1079. h22 = f2cmin(d__5,d__6);
  1080. scl = h11 + h12;
  1081. tst2 = h22 * (h11 / scl);
  1082. /* Computing MAX */
  1083. d__1 = smlnum, d__2 = ulp * tst2;
  1084. if (tst2 == 0. || h21 * (h12 / scl) <= f2cmax(d__1,
  1085. d__2)) {
  1086. i__4 = k + 1 + k * h_dim1;
  1087. h__[i__4].r = 0., h__[i__4].i = 0.;
  1088. }
  1089. }
  1090. }
  1091. }
  1092. /* ==== Accumulate orthogonal transformations. ==== */
  1093. if (accum) {
  1094. kms = k - incol;
  1095. /* Computing MAX */
  1096. i__4 = 1, i__5 = *ktop - incol;
  1097. i__6 = kdu;
  1098. for (j = f2cmax(i__4,i__5); j <= i__6; ++j) {
  1099. i__4 = m22 * v_dim1 + 1;
  1100. i__5 = j + (kms + 1) * u_dim1;
  1101. i__7 = m22 * v_dim1 + 2;
  1102. i__8 = j + (kms + 2) * u_dim1;
  1103. z__3.r = v[i__7].r * u[i__8].r - v[i__7].i * u[i__8]
  1104. .i, z__3.i = v[i__7].r * u[i__8].i + v[i__7]
  1105. .i * u[i__8].r;
  1106. z__2.r = u[i__5].r + z__3.r, z__2.i = u[i__5].i +
  1107. z__3.i;
  1108. z__1.r = v[i__4].r * z__2.r - v[i__4].i * z__2.i,
  1109. z__1.i = v[i__4].r * z__2.i + v[i__4].i *
  1110. z__2.r;
  1111. refsum.r = z__1.r, refsum.i = z__1.i;
  1112. i__4 = j + (kms + 1) * u_dim1;
  1113. i__5 = j + (kms + 1) * u_dim1;
  1114. z__1.r = u[i__5].r - refsum.r, z__1.i = u[i__5].i -
  1115. refsum.i;
  1116. u[i__4].r = z__1.r, u[i__4].i = z__1.i;
  1117. i__4 = j + (kms + 2) * u_dim1;
  1118. i__5 = j + (kms + 2) * u_dim1;
  1119. d_cnjg(&z__3, &v[m22 * v_dim1 + 2]);
  1120. z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
  1121. z__2.i = refsum.r * z__3.i + refsum.i *
  1122. z__3.r;
  1123. z__1.r = u[i__5].r - z__2.r, z__1.i = u[i__5].i -
  1124. z__2.i;
  1125. u[i__4].r = z__1.r, u[i__4].i = z__1.i;
  1126. /* L50: */
  1127. }
  1128. } else if (*wantz) {
  1129. i__6 = *ihiz;
  1130. for (j = *iloz; j <= i__6; ++j) {
  1131. i__4 = m22 * v_dim1 + 1;
  1132. i__5 = j + (k + 1) * z_dim1;
  1133. i__7 = m22 * v_dim1 + 2;
  1134. i__8 = j + (k + 2) * z_dim1;
  1135. z__3.r = v[i__7].r * z__[i__8].r - v[i__7].i * z__[
  1136. i__8].i, z__3.i = v[i__7].r * z__[i__8].i + v[
  1137. i__7].i * z__[i__8].r;
  1138. z__2.r = z__[i__5].r + z__3.r, z__2.i = z__[i__5].i +
  1139. z__3.i;
  1140. z__1.r = v[i__4].r * z__2.r - v[i__4].i * z__2.i,
  1141. z__1.i = v[i__4].r * z__2.i + v[i__4].i *
  1142. z__2.r;
  1143. refsum.r = z__1.r, refsum.i = z__1.i;
  1144. i__4 = j + (k + 1) * z_dim1;
  1145. i__5 = j + (k + 1) * z_dim1;
  1146. z__1.r = z__[i__5].r - refsum.r, z__1.i = z__[i__5].i
  1147. - refsum.i;
  1148. z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
  1149. i__4 = j + (k + 2) * z_dim1;
  1150. i__5 = j + (k + 2) * z_dim1;
  1151. d_cnjg(&z__3, &v[m22 * v_dim1 + 2]);
  1152. z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
  1153. z__2.i = refsum.r * z__3.i + refsum.i *
  1154. z__3.r;
  1155. z__1.r = z__[i__5].r - z__2.r, z__1.i = z__[i__5].i -
  1156. z__2.i;
  1157. z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
  1158. /* L60: */
  1159. }
  1160. }
  1161. }
  1162. /* ==== Normal case: Chain of 3-by-3 reflections ==== */
  1163. i__6 = mtop;
  1164. for (m = mbot; m >= i__6; --m) {
  1165. k = krcol + (m - 1 << 1);
  1166. if (k == *ktop - 1) {
  1167. zlaqr1_(&c__3, &h__[*ktop + *ktop * h_dim1], ldh, &s[(m <<
  1168. 1) - 1], &s[m * 2], &v[m * v_dim1 + 1]);
  1169. i__4 = m * v_dim1 + 1;
  1170. alpha.r = v[i__4].r, alpha.i = v[i__4].i;
  1171. zlarfg_(&c__3, &alpha, &v[m * v_dim1 + 2], &c__1, &v[m *
  1172. v_dim1 + 1]);
  1173. } else {
  1174. /* ==== Perform delayed transformation of row below */
  1175. /* . Mth bulge. Exploit fact that first two elements */
  1176. /* . of row are actually zero. ==== */
  1177. i__4 = m * v_dim1 + 1;
  1178. i__5 = m * v_dim1 + 3;
  1179. z__2.r = v[i__4].r * v[i__5].r - v[i__4].i * v[i__5].i,
  1180. z__2.i = v[i__4].r * v[i__5].i + v[i__4].i * v[
  1181. i__5].r;
  1182. i__7 = k + 3 + (k + 2) * h_dim1;
  1183. z__1.r = z__2.r * h__[i__7].r - z__2.i * h__[i__7].i,
  1184. z__1.i = z__2.r * h__[i__7].i + z__2.i * h__[i__7]
  1185. .r;
  1186. refsum.r = z__1.r, refsum.i = z__1.i;
  1187. i__4 = k + 3 + k * h_dim1;
  1188. z__1.r = -refsum.r, z__1.i = -refsum.i;
  1189. h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
  1190. i__4 = k + 3 + (k + 1) * h_dim1;
  1191. z__2.r = -refsum.r, z__2.i = -refsum.i;
  1192. d_cnjg(&z__3, &v[m * v_dim1 + 2]);
  1193. z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i =
  1194. z__2.r * z__3.i + z__2.i * z__3.r;
  1195. h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
  1196. i__4 = k + 3 + (k + 2) * h_dim1;
  1197. i__5 = k + 3 + (k + 2) * h_dim1;
  1198. d_cnjg(&z__3, &v[m * v_dim1 + 3]);
  1199. z__2.r = refsum.r * z__3.r - refsum.i * z__3.i, z__2.i =
  1200. refsum.r * z__3.i + refsum.i * z__3.r;
  1201. z__1.r = h__[i__5].r - z__2.r, z__1.i = h__[i__5].i -
  1202. z__2.i;
  1203. h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
  1204. /* ==== Calculate reflection to move */
  1205. /* . Mth bulge one step. ==== */
  1206. i__4 = k + 1 + k * h_dim1;
  1207. beta.r = h__[i__4].r, beta.i = h__[i__4].i;
  1208. i__4 = m * v_dim1 + 2;
  1209. i__5 = k + 2 + k * h_dim1;
  1210. v[i__4].r = h__[i__5].r, v[i__4].i = h__[i__5].i;
  1211. i__4 = m * v_dim1 + 3;
  1212. i__5 = k + 3 + k * h_dim1;
  1213. v[i__4].r = h__[i__5].r, v[i__4].i = h__[i__5].i;
  1214. zlarfg_(&c__3, &beta, &v[m * v_dim1 + 2], &c__1, &v[m *
  1215. v_dim1 + 1]);
  1216. /* ==== A Bulge may collapse because of vigilant */
  1217. /* . deflation or destructive underflow. In the */
  1218. /* . underflow case, try the two-small-subdiagonals */
  1219. /* . trick to try to reinflate the bulge. ==== */
  1220. i__4 = k + 3 + k * h_dim1;
  1221. i__5 = k + 3 + (k + 1) * h_dim1;
  1222. i__7 = k + 3 + (k + 2) * h_dim1;
  1223. if (h__[i__4].r != 0. || h__[i__4].i != 0. || (h__[i__5]
  1224. .r != 0. || h__[i__5].i != 0.) || h__[i__7].r ==
  1225. 0. && h__[i__7].i == 0.) {
  1226. /* ==== Typical case: not collapsed (yet). ==== */
  1227. i__4 = k + 1 + k * h_dim1;
  1228. h__[i__4].r = beta.r, h__[i__4].i = beta.i;
  1229. i__4 = k + 2 + k * h_dim1;
  1230. h__[i__4].r = 0., h__[i__4].i = 0.;
  1231. i__4 = k + 3 + k * h_dim1;
  1232. h__[i__4].r = 0., h__[i__4].i = 0.;
  1233. } else {
  1234. /* ==== Atypical case: collapsed. Attempt to */
  1235. /* . reintroduce ignoring H(K+1,K) and H(K+2,K). */
  1236. /* . If the fill resulting from the new */
  1237. /* . reflector is too large, then abandon it. */
  1238. /* . Otherwise, use the new one. ==== */
  1239. zlaqr1_(&c__3, &h__[k + 1 + (k + 1) * h_dim1], ldh, &
  1240. s[(m << 1) - 1], &s[m * 2], vt);
  1241. alpha.r = vt[0].r, alpha.i = vt[0].i;
  1242. zlarfg_(&c__3, &alpha, &vt[1], &c__1, vt);
  1243. d_cnjg(&z__2, vt);
  1244. i__4 = k + 1 + k * h_dim1;
  1245. d_cnjg(&z__5, &vt[1]);
  1246. i__5 = k + 2 + k * h_dim1;
  1247. z__4.r = z__5.r * h__[i__5].r - z__5.i * h__[i__5].i,
  1248. z__4.i = z__5.r * h__[i__5].i + z__5.i * h__[
  1249. i__5].r;
  1250. z__3.r = h__[i__4].r + z__4.r, z__3.i = h__[i__4].i +
  1251. z__4.i;
  1252. z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i =
  1253. z__2.r * z__3.i + z__2.i * z__3.r;
  1254. refsum.r = z__1.r, refsum.i = z__1.i;
  1255. i__4 = k + 2 + k * h_dim1;
  1256. z__3.r = refsum.r * vt[1].r - refsum.i * vt[1].i,
  1257. z__3.i = refsum.r * vt[1].i + refsum.i * vt[1]
  1258. .r;
  1259. z__2.r = h__[i__4].r - z__3.r, z__2.i = h__[i__4].i -
  1260. z__3.i;
  1261. z__1.r = z__2.r, z__1.i = z__2.i;
  1262. z__5.r = refsum.r * vt[2].r - refsum.i * vt[2].i,
  1263. z__5.i = refsum.r * vt[2].i + refsum.i * vt[2]
  1264. .r;
  1265. z__4.r = z__5.r, z__4.i = z__5.i;
  1266. i__5 = k + k * h_dim1;
  1267. i__7 = k + 1 + (k + 1) * h_dim1;
  1268. i__8 = k + 2 + (k + 2) * h_dim1;
  1269. if ((d__1 = z__1.r, abs(d__1)) + (d__2 = d_imag(&z__1)
  1270. , abs(d__2)) + ((d__3 = z__4.r, abs(d__3)) + (
  1271. d__4 = d_imag(&z__4), abs(d__4))) > ulp * ((
  1272. d__5 = h__[i__5].r, abs(d__5)) + (d__6 =
  1273. d_imag(&h__[k + k * h_dim1]), abs(d__6)) + ((
  1274. d__7 = h__[i__7].r, abs(d__7)) + (d__8 =
  1275. d_imag(&h__[k + 1 + (k + 1) * h_dim1]), abs(
  1276. d__8))) + ((d__9 = h__[i__8].r, abs(d__9)) + (
  1277. d__10 = d_imag(&h__[k + 2 + (k + 2) * h_dim1])
  1278. , abs(d__10))))) {
  1279. /* ==== Starting a new bulge here would */
  1280. /* . create non-negligible fill. Use */
  1281. /* . the old one with trepidation. ==== */
  1282. i__4 = k + 1 + k * h_dim1;
  1283. h__[i__4].r = beta.r, h__[i__4].i = beta.i;
  1284. i__4 = k + 2 + k * h_dim1;
  1285. h__[i__4].r = 0., h__[i__4].i = 0.;
  1286. i__4 = k + 3 + k * h_dim1;
  1287. h__[i__4].r = 0., h__[i__4].i = 0.;
  1288. } else {
  1289. /* ==== Starting a new bulge here would */
  1290. /* . create only negligible fill. */
  1291. /* . Replace the old reflector with */
  1292. /* . the new one. ==== */
  1293. i__4 = k + 1 + k * h_dim1;
  1294. i__5 = k + 1 + k * h_dim1;
  1295. z__1.r = h__[i__5].r - refsum.r, z__1.i = h__[
  1296. i__5].i - refsum.i;
  1297. h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
  1298. i__4 = k + 2 + k * h_dim1;
  1299. h__[i__4].r = 0., h__[i__4].i = 0.;
  1300. i__4 = k + 3 + k * h_dim1;
  1301. h__[i__4].r = 0., h__[i__4].i = 0.;
  1302. i__4 = m * v_dim1 + 1;
  1303. v[i__4].r = vt[0].r, v[i__4].i = vt[0].i;
  1304. i__4 = m * v_dim1 + 2;
  1305. v[i__4].r = vt[1].r, v[i__4].i = vt[1].i;
  1306. i__4 = m * v_dim1 + 3;
  1307. v[i__4].r = vt[2].r, v[i__4].i = vt[2].i;
  1308. }
  1309. }
  1310. }
  1311. /* ==== Apply reflection from the right and */
  1312. /* . the first column of update from the left. */
  1313. /* . These updates are required for the vigilant */
  1314. /* . deflation check. We still delay most of the */
  1315. /* . updates from the left for efficiency. ==== */
  1316. /* Computing MIN */
  1317. i__5 = *kbot, i__7 = k + 3;
  1318. i__4 = f2cmin(i__5,i__7);
  1319. for (j = jtop; j <= i__4; ++j) {
  1320. i__5 = m * v_dim1 + 1;
  1321. i__7 = j + (k + 1) * h_dim1;
  1322. i__8 = m * v_dim1 + 2;
  1323. i__9 = j + (k + 2) * h_dim1;
  1324. z__4.r = v[i__8].r * h__[i__9].r - v[i__8].i * h__[i__9]
  1325. .i, z__4.i = v[i__8].r * h__[i__9].i + v[i__8].i *
  1326. h__[i__9].r;
  1327. z__3.r = h__[i__7].r + z__4.r, z__3.i = h__[i__7].i +
  1328. z__4.i;
  1329. i__10 = m * v_dim1 + 3;
  1330. i__11 = j + (k + 3) * h_dim1;
  1331. z__5.r = v[i__10].r * h__[i__11].r - v[i__10].i * h__[
  1332. i__11].i, z__5.i = v[i__10].r * h__[i__11].i + v[
  1333. i__10].i * h__[i__11].r;
  1334. z__2.r = z__3.r + z__5.r, z__2.i = z__3.i + z__5.i;
  1335. z__1.r = v[i__5].r * z__2.r - v[i__5].i * z__2.i, z__1.i =
  1336. v[i__5].r * z__2.i + v[i__5].i * z__2.r;
  1337. refsum.r = z__1.r, refsum.i = z__1.i;
  1338. i__5 = j + (k + 1) * h_dim1;
  1339. i__7 = j + (k + 1) * h_dim1;
  1340. z__1.r = h__[i__7].r - refsum.r, z__1.i = h__[i__7].i -
  1341. refsum.i;
  1342. h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
  1343. i__5 = j + (k + 2) * h_dim1;
  1344. i__7 = j + (k + 2) * h_dim1;
  1345. d_cnjg(&z__3, &v[m * v_dim1 + 2]);
  1346. z__2.r = refsum.r * z__3.r - refsum.i * z__3.i, z__2.i =
  1347. refsum.r * z__3.i + refsum.i * z__3.r;
  1348. z__1.r = h__[i__7].r - z__2.r, z__1.i = h__[i__7].i -
  1349. z__2.i;
  1350. h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
  1351. i__5 = j + (k + 3) * h_dim1;
  1352. i__7 = j + (k + 3) * h_dim1;
  1353. d_cnjg(&z__3, &v[m * v_dim1 + 3]);
  1354. z__2.r = refsum.r * z__3.r - refsum.i * z__3.i, z__2.i =
  1355. refsum.r * z__3.i + refsum.i * z__3.r;
  1356. z__1.r = h__[i__7].r - z__2.r, z__1.i = h__[i__7].i -
  1357. z__2.i;
  1358. h__[i__5].r = z__1.r, h__[i__5].i = z__1.i;
  1359. /* L70: */
  1360. }
  1361. /* ==== Perform update from left for subsequent */
  1362. /* . column. ==== */
  1363. d_cnjg(&z__2, &v[m * v_dim1 + 1]);
  1364. i__4 = k + 1 + (k + 1) * h_dim1;
  1365. d_cnjg(&z__6, &v[m * v_dim1 + 2]);
  1366. i__5 = k + 2 + (k + 1) * h_dim1;
  1367. z__5.r = z__6.r * h__[i__5].r - z__6.i * h__[i__5].i, z__5.i =
  1368. z__6.r * h__[i__5].i + z__6.i * h__[i__5].r;
  1369. z__4.r = h__[i__4].r + z__5.r, z__4.i = h__[i__4].i + z__5.i;
  1370. d_cnjg(&z__8, &v[m * v_dim1 + 3]);
  1371. i__7 = k + 3 + (k + 1) * h_dim1;
  1372. z__7.r = z__8.r * h__[i__7].r - z__8.i * h__[i__7].i, z__7.i =
  1373. z__8.r * h__[i__7].i + z__8.i * h__[i__7].r;
  1374. z__3.r = z__4.r + z__7.r, z__3.i = z__4.i + z__7.i;
  1375. z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i = z__2.r *
  1376. z__3.i + z__2.i * z__3.r;
  1377. refsum.r = z__1.r, refsum.i = z__1.i;
  1378. i__4 = k + 1 + (k + 1) * h_dim1;
  1379. i__5 = k + 1 + (k + 1) * h_dim1;
  1380. z__1.r = h__[i__5].r - refsum.r, z__1.i = h__[i__5].i -
  1381. refsum.i;
  1382. h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
  1383. i__4 = k + 2 + (k + 1) * h_dim1;
  1384. i__5 = k + 2 + (k + 1) * h_dim1;
  1385. i__7 = m * v_dim1 + 2;
  1386. z__2.r = refsum.r * v[i__7].r - refsum.i * v[i__7].i, z__2.i =
  1387. refsum.r * v[i__7].i + refsum.i * v[i__7].r;
  1388. z__1.r = h__[i__5].r - z__2.r, z__1.i = h__[i__5].i - z__2.i;
  1389. h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
  1390. i__4 = k + 3 + (k + 1) * h_dim1;
  1391. i__5 = k + 3 + (k + 1) * h_dim1;
  1392. i__7 = m * v_dim1 + 3;
  1393. z__2.r = refsum.r * v[i__7].r - refsum.i * v[i__7].i, z__2.i =
  1394. refsum.r * v[i__7].i + refsum.i * v[i__7].r;
  1395. z__1.r = h__[i__5].r - z__2.r, z__1.i = h__[i__5].i - z__2.i;
  1396. h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
  1397. /* ==== The following convergence test requires that */
  1398. /* . the tradition small-compared-to-nearby-diagonals */
  1399. /* . criterion and the Ahues & Tisseur (LAWN 122, 1997) */
  1400. /* . criteria both be satisfied. The latter improves */
  1401. /* . accuracy in some examples. Falling back on an */
  1402. /* . alternate convergence criterion when TST1 or TST2 */
  1403. /* . is zero (as done here) is traditional but probably */
  1404. /* . unnecessary. ==== */
  1405. if (k < *ktop) {
  1406. mycycle_();
  1407. }
  1408. i__4 = k + 1 + k * h_dim1;
  1409. if (h__[i__4].r != 0. || h__[i__4].i != 0.) {
  1410. i__4 = k + k * h_dim1;
  1411. i__5 = k + 1 + (k + 1) * h_dim1;
  1412. tst1 = (d__1 = h__[i__4].r, abs(d__1)) + (d__2 = d_imag(&
  1413. h__[k + k * h_dim1]), abs(d__2)) + ((d__3 = h__[
  1414. i__5].r, abs(d__3)) + (d__4 = d_imag(&h__[k + 1 +
  1415. (k + 1) * h_dim1]), abs(d__4)));
  1416. if (tst1 == 0.) {
  1417. if (k >= *ktop + 1) {
  1418. i__4 = k + (k - 1) * h_dim1;
  1419. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  1420. d_imag(&h__[k + (k - 1) * h_dim1]), abs(
  1421. d__2));
  1422. }
  1423. if (k >= *ktop + 2) {
  1424. i__4 = k + (k - 2) * h_dim1;
  1425. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  1426. d_imag(&h__[k + (k - 2) * h_dim1]), abs(
  1427. d__2));
  1428. }
  1429. if (k >= *ktop + 3) {
  1430. i__4 = k + (k - 3) * h_dim1;
  1431. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  1432. d_imag(&h__[k + (k - 3) * h_dim1]), abs(
  1433. d__2));
  1434. }
  1435. if (k <= *kbot - 2) {
  1436. i__4 = k + 2 + (k + 1) * h_dim1;
  1437. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  1438. d_imag(&h__[k + 2 + (k + 1) * h_dim1]),
  1439. abs(d__2));
  1440. }
  1441. if (k <= *kbot - 3) {
  1442. i__4 = k + 3 + (k + 1) * h_dim1;
  1443. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  1444. d_imag(&h__[k + 3 + (k + 1) * h_dim1]),
  1445. abs(d__2));
  1446. }
  1447. if (k <= *kbot - 4) {
  1448. i__4 = k + 4 + (k + 1) * h_dim1;
  1449. tst1 += (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  1450. d_imag(&h__[k + 4 + (k + 1) * h_dim1]),
  1451. abs(d__2));
  1452. }
  1453. }
  1454. i__4 = k + 1 + k * h_dim1;
  1455. /* Computing MAX */
  1456. d__3 = smlnum, d__4 = ulp * tst1;
  1457. if ((d__1 = h__[i__4].r, abs(d__1)) + (d__2 = d_imag(&h__[
  1458. k + 1 + k * h_dim1]), abs(d__2)) <= f2cmax(d__3,d__4)
  1459. ) {
  1460. /* Computing MAX */
  1461. i__4 = k + 1 + k * h_dim1;
  1462. i__5 = k + (k + 1) * h_dim1;
  1463. d__5 = (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  1464. d_imag(&h__[k + 1 + k * h_dim1]), abs(d__2)),
  1465. d__6 = (d__3 = h__[i__5].r, abs(d__3)) + (
  1466. d__4 = d_imag(&h__[k + (k + 1) * h_dim1]),
  1467. abs(d__4));
  1468. h12 = f2cmax(d__5,d__6);
  1469. /* Computing MIN */
  1470. i__4 = k + 1 + k * h_dim1;
  1471. i__5 = k + (k + 1) * h_dim1;
  1472. d__5 = (d__1 = h__[i__4].r, abs(d__1)) + (d__2 =
  1473. d_imag(&h__[k + 1 + k * h_dim1]), abs(d__2)),
  1474. d__6 = (d__3 = h__[i__5].r, abs(d__3)) + (
  1475. d__4 = d_imag(&h__[k + (k + 1) * h_dim1]),
  1476. abs(d__4));
  1477. h21 = f2cmin(d__5,d__6);
  1478. i__4 = k + k * h_dim1;
  1479. i__5 = k + 1 + (k + 1) * h_dim1;
  1480. z__2.r = h__[i__4].r - h__[i__5].r, z__2.i = h__[i__4]
  1481. .i - h__[i__5].i;
  1482. z__1.r = z__2.r, z__1.i = z__2.i;
  1483. /* Computing MAX */
  1484. i__7 = k + 1 + (k + 1) * h_dim1;
  1485. d__5 = (d__1 = h__[i__7].r, abs(d__1)) + (d__2 =
  1486. d_imag(&h__[k + 1 + (k + 1) * h_dim1]), abs(
  1487. d__2)), d__6 = (d__3 = z__1.r, abs(d__3)) + (
  1488. d__4 = d_imag(&z__1), abs(d__4));
  1489. h11 = f2cmax(d__5,d__6);
  1490. i__4 = k + k * h_dim1;
  1491. i__5 = k + 1 + (k + 1) * h_dim1;
  1492. z__2.r = h__[i__4].r - h__[i__5].r, z__2.i = h__[i__4]
  1493. .i - h__[i__5].i;
  1494. z__1.r = z__2.r, z__1.i = z__2.i;
  1495. /* Computing MIN */
  1496. i__7 = k + 1 + (k + 1) * h_dim1;
  1497. d__5 = (d__1 = h__[i__7].r, abs(d__1)) + (d__2 =
  1498. d_imag(&h__[k + 1 + (k + 1) * h_dim1]), abs(
  1499. d__2)), d__6 = (d__3 = z__1.r, abs(d__3)) + (
  1500. d__4 = d_imag(&z__1), abs(d__4));
  1501. h22 = f2cmin(d__5,d__6);
  1502. scl = h11 + h12;
  1503. tst2 = h22 * (h11 / scl);
  1504. /* Computing MAX */
  1505. d__1 = smlnum, d__2 = ulp * tst2;
  1506. if (tst2 == 0. || h21 * (h12 / scl) <= f2cmax(d__1,d__2))
  1507. {
  1508. i__4 = k + 1 + k * h_dim1;
  1509. h__[i__4].r = 0., h__[i__4].i = 0.;
  1510. }
  1511. }
  1512. }
  1513. /* L80: */
  1514. }
  1515. /* ==== Multiply H by reflections from the left ==== */
  1516. if (accum) {
  1517. jbot = f2cmin(ndcol,*kbot);
  1518. } else if (*wantt) {
  1519. jbot = *n;
  1520. } else {
  1521. jbot = *kbot;
  1522. }
  1523. i__6 = mtop;
  1524. for (m = mbot; m >= i__6; --m) {
  1525. k = krcol + (m - 1 << 1);
  1526. /* Computing MAX */
  1527. i__4 = *ktop, i__5 = krcol + (m << 1);
  1528. i__7 = jbot;
  1529. for (j = f2cmax(i__4,i__5); j <= i__7; ++j) {
  1530. d_cnjg(&z__2, &v[m * v_dim1 + 1]);
  1531. i__4 = k + 1 + j * h_dim1;
  1532. d_cnjg(&z__6, &v[m * v_dim1 + 2]);
  1533. i__5 = k + 2 + j * h_dim1;
  1534. z__5.r = z__6.r * h__[i__5].r - z__6.i * h__[i__5].i,
  1535. z__5.i = z__6.r * h__[i__5].i + z__6.i * h__[i__5]
  1536. .r;
  1537. z__4.r = h__[i__4].r + z__5.r, z__4.i = h__[i__4].i +
  1538. z__5.i;
  1539. d_cnjg(&z__8, &v[m * v_dim1 + 3]);
  1540. i__8 = k + 3 + j * h_dim1;
  1541. z__7.r = z__8.r * h__[i__8].r - z__8.i * h__[i__8].i,
  1542. z__7.i = z__8.r * h__[i__8].i + z__8.i * h__[i__8]
  1543. .r;
  1544. z__3.r = z__4.r + z__7.r, z__3.i = z__4.i + z__7.i;
  1545. z__1.r = z__2.r * z__3.r - z__2.i * z__3.i, z__1.i =
  1546. z__2.r * z__3.i + z__2.i * z__3.r;
  1547. refsum.r = z__1.r, refsum.i = z__1.i;
  1548. i__4 = k + 1 + j * h_dim1;
  1549. i__5 = k + 1 + j * h_dim1;
  1550. z__1.r = h__[i__5].r - refsum.r, z__1.i = h__[i__5].i -
  1551. refsum.i;
  1552. h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
  1553. i__4 = k + 2 + j * h_dim1;
  1554. i__5 = k + 2 + j * h_dim1;
  1555. i__8 = m * v_dim1 + 2;
  1556. z__2.r = refsum.r * v[i__8].r - refsum.i * v[i__8].i,
  1557. z__2.i = refsum.r * v[i__8].i + refsum.i * v[i__8]
  1558. .r;
  1559. z__1.r = h__[i__5].r - z__2.r, z__1.i = h__[i__5].i -
  1560. z__2.i;
  1561. h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
  1562. i__4 = k + 3 + j * h_dim1;
  1563. i__5 = k + 3 + j * h_dim1;
  1564. i__8 = m * v_dim1 + 3;
  1565. z__2.r = refsum.r * v[i__8].r - refsum.i * v[i__8].i,
  1566. z__2.i = refsum.r * v[i__8].i + refsum.i * v[i__8]
  1567. .r;
  1568. z__1.r = h__[i__5].r - z__2.r, z__1.i = h__[i__5].i -
  1569. z__2.i;
  1570. h__[i__4].r = z__1.r, h__[i__4].i = z__1.i;
  1571. /* L90: */
  1572. }
  1573. /* L100: */
  1574. }
  1575. /* ==== Accumulate orthogonal transformations. ==== */
  1576. if (accum) {
  1577. /* ==== Accumulate U. (If needed, update Z later */
  1578. /* . with an efficient matrix-matrix */
  1579. /* . multiply.) ==== */
  1580. i__6 = mtop;
  1581. for (m = mbot; m >= i__6; --m) {
  1582. k = krcol + (m - 1 << 1);
  1583. kms = k - incol;
  1584. /* Computing MAX */
  1585. i__7 = 1, i__4 = *ktop - incol;
  1586. i2 = f2cmax(i__7,i__4);
  1587. /* Computing MAX */
  1588. i__7 = i2, i__4 = kms - (krcol - incol) + 1;
  1589. i2 = f2cmax(i__7,i__4);
  1590. /* Computing MIN */
  1591. i__7 = kdu, i__4 = krcol + (mbot - 1 << 1) - incol + 5;
  1592. i4 = f2cmin(i__7,i__4);
  1593. i__7 = i4;
  1594. for (j = i2; j <= i__7; ++j) {
  1595. i__4 = m * v_dim1 + 1;
  1596. i__5 = j + (kms + 1) * u_dim1;
  1597. i__8 = m * v_dim1 + 2;
  1598. i__9 = j + (kms + 2) * u_dim1;
  1599. z__4.r = v[i__8].r * u[i__9].r - v[i__8].i * u[i__9]
  1600. .i, z__4.i = v[i__8].r * u[i__9].i + v[i__8]
  1601. .i * u[i__9].r;
  1602. z__3.r = u[i__5].r + z__4.r, z__3.i = u[i__5].i +
  1603. z__4.i;
  1604. i__10 = m * v_dim1 + 3;
  1605. i__11 = j + (kms + 3) * u_dim1;
  1606. z__5.r = v[i__10].r * u[i__11].r - v[i__10].i * u[
  1607. i__11].i, z__5.i = v[i__10].r * u[i__11].i +
  1608. v[i__10].i * u[i__11].r;
  1609. z__2.r = z__3.r + z__5.r, z__2.i = z__3.i + z__5.i;
  1610. z__1.r = v[i__4].r * z__2.r - v[i__4].i * z__2.i,
  1611. z__1.i = v[i__4].r * z__2.i + v[i__4].i *
  1612. z__2.r;
  1613. refsum.r = z__1.r, refsum.i = z__1.i;
  1614. i__4 = j + (kms + 1) * u_dim1;
  1615. i__5 = j + (kms + 1) * u_dim1;
  1616. z__1.r = u[i__5].r - refsum.r, z__1.i = u[i__5].i -
  1617. refsum.i;
  1618. u[i__4].r = z__1.r, u[i__4].i = z__1.i;
  1619. i__4 = j + (kms + 2) * u_dim1;
  1620. i__5 = j + (kms + 2) * u_dim1;
  1621. d_cnjg(&z__3, &v[m * v_dim1 + 2]);
  1622. z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
  1623. z__2.i = refsum.r * z__3.i + refsum.i *
  1624. z__3.r;
  1625. z__1.r = u[i__5].r - z__2.r, z__1.i = u[i__5].i -
  1626. z__2.i;
  1627. u[i__4].r = z__1.r, u[i__4].i = z__1.i;
  1628. i__4 = j + (kms + 3) * u_dim1;
  1629. i__5 = j + (kms + 3) * u_dim1;
  1630. d_cnjg(&z__3, &v[m * v_dim1 + 3]);
  1631. z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
  1632. z__2.i = refsum.r * z__3.i + refsum.i *
  1633. z__3.r;
  1634. z__1.r = u[i__5].r - z__2.r, z__1.i = u[i__5].i -
  1635. z__2.i;
  1636. u[i__4].r = z__1.r, u[i__4].i = z__1.i;
  1637. /* L110: */
  1638. }
  1639. /* L120: */
  1640. }
  1641. } else if (*wantz) {
  1642. /* ==== U is not accumulated, so update Z */
  1643. /* . now by multiplying by reflections */
  1644. /* . from the right. ==== */
  1645. i__6 = mtop;
  1646. for (m = mbot; m >= i__6; --m) {
  1647. k = krcol + (m - 1 << 1);
  1648. i__7 = *ihiz;
  1649. for (j = *iloz; j <= i__7; ++j) {
  1650. i__4 = m * v_dim1 + 1;
  1651. i__5 = j + (k + 1) * z_dim1;
  1652. i__8 = m * v_dim1 + 2;
  1653. i__9 = j + (k + 2) * z_dim1;
  1654. z__4.r = v[i__8].r * z__[i__9].r - v[i__8].i * z__[
  1655. i__9].i, z__4.i = v[i__8].r * z__[i__9].i + v[
  1656. i__8].i * z__[i__9].r;
  1657. z__3.r = z__[i__5].r + z__4.r, z__3.i = z__[i__5].i +
  1658. z__4.i;
  1659. i__10 = m * v_dim1 + 3;
  1660. i__11 = j + (k + 3) * z_dim1;
  1661. z__5.r = v[i__10].r * z__[i__11].r - v[i__10].i * z__[
  1662. i__11].i, z__5.i = v[i__10].r * z__[i__11].i
  1663. + v[i__10].i * z__[i__11].r;
  1664. z__2.r = z__3.r + z__5.r, z__2.i = z__3.i + z__5.i;
  1665. z__1.r = v[i__4].r * z__2.r - v[i__4].i * z__2.i,
  1666. z__1.i = v[i__4].r * z__2.i + v[i__4].i *
  1667. z__2.r;
  1668. refsum.r = z__1.r, refsum.i = z__1.i;
  1669. i__4 = j + (k + 1) * z_dim1;
  1670. i__5 = j + (k + 1) * z_dim1;
  1671. z__1.r = z__[i__5].r - refsum.r, z__1.i = z__[i__5].i
  1672. - refsum.i;
  1673. z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
  1674. i__4 = j + (k + 2) * z_dim1;
  1675. i__5 = j + (k + 2) * z_dim1;
  1676. d_cnjg(&z__3, &v[m * v_dim1 + 2]);
  1677. z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
  1678. z__2.i = refsum.r * z__3.i + refsum.i *
  1679. z__3.r;
  1680. z__1.r = z__[i__5].r - z__2.r, z__1.i = z__[i__5].i -
  1681. z__2.i;
  1682. z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
  1683. i__4 = j + (k + 3) * z_dim1;
  1684. i__5 = j + (k + 3) * z_dim1;
  1685. d_cnjg(&z__3, &v[m * v_dim1 + 3]);
  1686. z__2.r = refsum.r * z__3.r - refsum.i * z__3.i,
  1687. z__2.i = refsum.r * z__3.i + refsum.i *
  1688. z__3.r;
  1689. z__1.r = z__[i__5].r - z__2.r, z__1.i = z__[i__5].i -
  1690. z__2.i;
  1691. z__[i__4].r = z__1.r, z__[i__4].i = z__1.i;
  1692. /* L130: */
  1693. }
  1694. /* L140: */
  1695. }
  1696. }
  1697. /* ==== End of near-the-diagonal bulge chase. ==== */
  1698. /* L145: */
  1699. }
  1700. /* ==== Use U (if accumulated) to update far-from-diagonal */
  1701. /* . entries in H. If required, use U to update Z as */
  1702. /* . well. ==== */
  1703. if (accum) {
  1704. if (*wantt) {
  1705. jtop = 1;
  1706. jbot = *n;
  1707. } else {
  1708. jtop = *ktop;
  1709. jbot = *kbot;
  1710. }
  1711. /* Computing MAX */
  1712. i__3 = 1, i__6 = *ktop - incol;
  1713. k1 = f2cmax(i__3,i__6);
  1714. /* Computing MAX */
  1715. i__3 = 0, i__6 = ndcol - *kbot;
  1716. nu = kdu - f2cmax(i__3,i__6) - k1 + 1;
  1717. /* ==== Horizontal Multiply ==== */
  1718. i__3 = jbot;
  1719. i__6 = *nh;
  1720. for (jcol = f2cmin(ndcol,*kbot) + 1; i__6 < 0 ? jcol >= i__3 : jcol
  1721. <= i__3; jcol += i__6) {
  1722. /* Computing MIN */
  1723. i__7 = *nh, i__4 = jbot - jcol + 1;
  1724. jlen = f2cmin(i__7,i__4);
  1725. zgemm_("C", "N", &nu, &jlen, &nu, &c_b2, &u[k1 + k1 * u_dim1],
  1726. ldu, &h__[incol + k1 + jcol * h_dim1], ldh, &c_b1, &
  1727. wh[wh_offset], ldwh);
  1728. zlacpy_("ALL", &nu, &jlen, &wh[wh_offset], ldwh, &h__[incol +
  1729. k1 + jcol * h_dim1], ldh);
  1730. /* L150: */
  1731. }
  1732. /* ==== Vertical multiply ==== */
  1733. i__6 = f2cmax(*ktop,incol) - 1;
  1734. i__3 = *nv;
  1735. for (jrow = jtop; i__3 < 0 ? jrow >= i__6 : jrow <= i__6; jrow +=
  1736. i__3) {
  1737. /* Computing MIN */
  1738. i__7 = *nv, i__4 = f2cmax(*ktop,incol) - jrow;
  1739. jlen = f2cmin(i__7,i__4);
  1740. zgemm_("N", "N", &jlen, &nu, &nu, &c_b2, &h__[jrow + (incol +
  1741. k1) * h_dim1], ldh, &u[k1 + k1 * u_dim1], ldu, &c_b1,
  1742. &wv[wv_offset], ldwv);
  1743. zlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &h__[jrow + (
  1744. incol + k1) * h_dim1], ldh);
  1745. /* L160: */
  1746. }
  1747. /* ==== Z multiply (also vertical) ==== */
  1748. if (*wantz) {
  1749. i__3 = *ihiz;
  1750. i__6 = *nv;
  1751. for (jrow = *iloz; i__6 < 0 ? jrow >= i__3 : jrow <= i__3;
  1752. jrow += i__6) {
  1753. /* Computing MIN */
  1754. i__7 = *nv, i__4 = *ihiz - jrow + 1;
  1755. jlen = f2cmin(i__7,i__4);
  1756. zgemm_("N", "N", &jlen, &nu, &nu, &c_b2, &z__[jrow + (
  1757. incol + k1) * z_dim1], ldz, &u[k1 + k1 * u_dim1],
  1758. ldu, &c_b1, &wv[wv_offset], ldwv);
  1759. zlacpy_("ALL", &jlen, &nu, &wv[wv_offset], ldwv, &z__[
  1760. jrow + (incol + k1) * z_dim1], ldz);
  1761. /* L170: */
  1762. }
  1763. }
  1764. }
  1765. /* L180: */
  1766. }
  1767. /* ==== End of ZLAQR5 ==== */
  1768. return;
  1769. } /* zlaqr5_ */