
zgghd3.c 51 kB

  1. /* f2c.h -- Standard Fortran to C header file */
  2. /** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed."
  3. - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */
  4. #ifndef F2C_INCLUDE
  5. #define F2C_INCLUDE
  6. #include <math.h>
  7. #include <stdlib.h>
  8. #include <string.h>
  9. #include <stdio.h>
  10. #include <complex.h>
  11. #ifdef complex
  12. #undef complex
  13. #endif
  14. #ifdef I
  15. #undef I
  16. #endif
  17. #if defined(_WIN64)
  18. typedef long long BLASLONG;
  19. typedef unsigned long long BLASULONG;
  20. #else
  21. typedef long BLASLONG;
  22. typedef unsigned long BLASULONG;
  23. #endif
  24. #ifdef LAPACK_ILP64
  25. typedef BLASLONG blasint;
  26. #if defined(_WIN64)
  27. #define blasabs(x) llabs(x)
  28. #else
  29. #define blasabs(x) labs(x)
  30. #endif
  31. #else
  32. typedef int blasint;
  33. #define blasabs(x) abs(x)
  34. #endif
  35. typedef blasint integer;
  36. typedef unsigned int uinteger;
  37. typedef char *address;
  38. typedef short int shortint;
  39. typedef float real;
  40. typedef double doublereal;
  41. typedef struct { real r, i; } complex;
  42. typedef struct { doublereal r, i; } doublecomplex;
  43. static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
  44. static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
  45. static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
  46. static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
  47. #define pCf(z) (*_pCf(z))
  48. #define pCd(z) (*_pCd(z))
  49. typedef int logical;
  50. typedef short int shortlogical;
  51. typedef char logical1;
  52. typedef char integer1;
  53. #define TRUE_ (1)
  54. #define FALSE_ (0)
  55. /* Extern is for use with -E */
  56. #ifndef Extern
  57. #define Extern extern
  58. #endif
  59. /* I/O stuff */
  60. typedef int flag;
  61. typedef int ftnlen;
  62. typedef int ftnint;
  63. /*external read, write*/
  64. typedef struct
  65. { flag cierr;
  66. ftnint ciunit;
  67. flag ciend;
  68. char *cifmt;
  69. ftnint cirec;
  70. } cilist;
  71. /*internal read, write*/
  72. typedef struct
  73. { flag icierr;
  74. char *iciunit;
  75. flag iciend;
  76. char *icifmt;
  77. ftnint icirlen;
  78. ftnint icirnum;
  79. } icilist;
  80. /*open*/
  81. typedef struct
  82. { flag oerr;
  83. ftnint ounit;
  84. char *ofnm;
  85. ftnlen ofnmlen;
  86. char *osta;
  87. char *oacc;
  88. char *ofm;
  89. ftnint orl;
  90. char *oblnk;
  91. } olist;
  92. /*close*/
  93. typedef struct
  94. { flag cerr;
  95. ftnint cunit;
  96. char *csta;
  97. } cllist;
  98. /*rewind, backspace, endfile*/
  99. typedef struct
  100. { flag aerr;
  101. ftnint aunit;
  102. } alist;
  103. /* inquire */
  104. typedef struct
  105. { flag inerr;
  106. ftnint inunit;
  107. char *infile;
  108. ftnlen infilen;
  109. ftnint *inex; /*parameters in standard's order*/
  110. ftnint *inopen;
  111. ftnint *innum;
  112. ftnint *innamed;
  113. char *inname;
  114. ftnlen innamlen;
  115. char *inacc;
  116. ftnlen inacclen;
  117. char *inseq;
  118. ftnlen inseqlen;
  119. char *indir;
  120. ftnlen indirlen;
  121. char *infmt;
  122. ftnlen infmtlen;
  123. char *inform;
  124. ftnint informlen;
  125. char *inunf;
  126. ftnlen inunflen;
  127. ftnint *inrecl;
  128. ftnint *innrec;
  129. char *inblank;
  130. ftnlen inblanklen;
  131. } inlist;
  132. #define VOID void
  133. union Multitype { /* for multiple entry points */
  134. integer1 g;
  135. shortint h;
  136. integer i;
  137. /* longint j; */
  138. real r;
  139. doublereal d;
  140. complex c;
  141. doublecomplex z;
  142. };
  143. typedef union Multitype Multitype;
  144. struct Vardesc { /* for Namelist */
  145. char *name;
  146. char *addr;
  147. ftnlen *dims;
  148. int type;
  149. };
  150. typedef struct Vardesc Vardesc;
  151. struct Namelist {
  152. char *name;
  153. Vardesc **vars;
  154. int nvars;
  155. };
  156. typedef struct Namelist Namelist;
  157. #define abs(x) ((x) >= 0 ? (x) : -(x))
  158. #define dabs(x) (fabs(x))
  159. #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
  160. #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
  161. #define dmin(a,b) (f2cmin(a,b))
  162. #define dmax(a,b) (f2cmax(a,b))
  163. #define bit_test(a,b) ((a) >> (b) & 1)
  164. #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
  165. #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
  166. #define abort_() { sig_die("Fortran abort routine called", 1); }
  167. #define c_abs(z) (cabsf(Cf(z)))
  168. #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
  169. #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
  170. #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
  171. #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
  172. #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
  173. #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
  174. //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
  175. #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
  176. #define d_abs(x) (fabs(*(x)))
  177. #define d_acos(x) (acos(*(x)))
  178. #define d_asin(x) (asin(*(x)))
  179. #define d_atan(x) (atan(*(x)))
  180. #define d_atn2(x, y) (atan2(*(x),*(y)))
  181. #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
  182. #define r_cnjg(R, Z) { pCf(R) = conj(Cf(Z)); }
  183. #define d_cos(x) (cos(*(x)))
  184. #define d_cosh(x) (cosh(*(x)))
  185. #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
  186. #define d_exp(x) (exp(*(x)))
  187. #define d_imag(z) (cimag(Cd(z)))
  188. #define r_imag(z) (cimag(Cf(z)))
  189. #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  190. #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  191. #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  192. #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  193. #define d_log(x) (log(*(x)))
  194. #define d_mod(x, y) (fmod(*(x), *(y)))
  195. #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
  196. #define d_nint(x) u_nint(*(x))
  197. #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
  198. #define d_sign(a,b) u_sign(*(a),*(b))
  199. #define r_sign(a,b) u_sign(*(a),*(b))
  200. #define d_sin(x) (sin(*(x)))
  201. #define d_sinh(x) (sinh(*(x)))
  202. #define d_sqrt(x) (sqrt(*(x)))
  203. #define d_tan(x) (tan(*(x)))
  204. #define d_tanh(x) (tanh(*(x)))
  205. #define i_abs(x) abs(*(x))
  206. #define i_dnnt(x) ((integer)u_nint(*(x)))
  207. #define i_len(s, n) (n)
  208. #define i_nint(x) ((integer)u_nint(*(x)))
  209. #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
  210. #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
  211. #define pow_si(B,E) spow_ui(*(B),*(E))
  212. #define pow_ri(B,E) spow_ui(*(B),*(E))
  213. #define pow_di(B,E) dpow_ui(*(B),*(E))
  214. #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
  215. #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
  216. #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
  217. #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
  218. #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
  219. #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
  220. #define sig_die(s, kill) { exit(1); }
  221. #define s_stop(s, n) {exit(0);}
  222. static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
  223. #define z_abs(z) (cabs(Cd(z)))
  224. #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
  225. #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
  226. #define myexit_() break;
  227. #define mycycle() continue;
  228. #define myceiling(w) {ceil(w)}
  229. #define myhuge(w) {HUGE_VAL}
  230. //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
  231. #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
  232. /* procedure parameter types for -A and -C++ */
  233. #define F2C_proc_par_types 1
  234. #ifdef __cplusplus
  235. typedef logical (*L_fp)(...);
  236. #else
  237. typedef logical (*L_fp)();
  238. #endif
  239. static float spow_ui(float x, integer n) {
  240. float pow=1.0; unsigned long int u;
  241. if(n != 0) {
  242. if(n < 0) n = -n, x = 1/x;
  243. for(u = n; ; ) {
  244. if(u & 01) pow *= x;
  245. if(u >>= 1) x *= x;
  246. else break;
  247. }
  248. }
  249. return pow;
  250. }
  251. static double dpow_ui(double x, integer n) {
  252. double pow=1.0; unsigned long int u;
  253. if(n != 0) {
  254. if(n < 0) n = -n, x = 1/x;
  255. for(u = n; ; ) {
  256. if(u & 01) pow *= x;
  257. if(u >>= 1) x *= x;
  258. else break;
  259. }
  260. }
  261. return pow;
  262. }
  263. static _Complex float cpow_ui(_Complex float x, integer n) {
  264. _Complex float pow=1.0; unsigned long int u;
  265. if(n != 0) {
  266. if(n < 0) n = -n, x = 1/x;
  267. for(u = n; ; ) {
  268. if(u & 01) pow *= x;
  269. if(u >>= 1) x *= x;
  270. else break;
  271. }
  272. }
  273. return pow;
  274. }
  275. static _Complex double zpow_ui(_Complex double x, integer n) {
  276. _Complex double pow=1.0; unsigned long int u;
  277. if(n != 0) {
  278. if(n < 0) n = -n, x = 1/x;
  279. for(u = n; ; ) {
  280. if(u & 01) pow *= x;
  281. if(u >>= 1) x *= x;
  282. else break;
  283. }
  284. }
  285. return pow;
  286. }
  287. static integer pow_ii(integer x, integer n) {
  288. integer pow; unsigned long int u;
  289. if (n <= 0) {
  290. if (n == 0 || x == 1) pow = 1;
  291. else if (x != -1) pow = x == 0 ? 1/x : 0;
  292. else n = -n;
  293. }
  294. if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
  295. u = n;
  296. for(pow = 1; ; ) {
  297. if(u & 01) pow *= x;
  298. if(u >>= 1) x *= x;
  299. else break;
  300. }
  301. }
  302. return pow;
  303. }
  304. static integer dmaxloc_(double *w, integer s, integer e, integer *n)
  305. {
  306. double m; integer i, mi;
  307. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  308. if (w[i-1]>m) mi=i ,m=w[i-1];
  309. return mi-s+1;
  310. }
  311. static integer smaxloc_(float *w, integer s, integer e, integer *n)
  312. {
  313. float m; integer i, mi;
  314. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  315. if (w[i-1]>m) mi=i ,m=w[i-1];
  316. return mi-s+1;
  317. }
  318. static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  319. integer n = *n_, incx = *incx_, incy = *incy_, i;
  320. _Complex float zdotc = 0.0;
  321. if (incx == 1 && incy == 1) {
  322. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  323. zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
  324. }
  325. } else {
  326. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  327. zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
  328. }
  329. }
  330. pCf(z) = zdotc;
  331. }
  332. static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  333. integer n = *n_, incx = *incx_, incy = *incy_, i;
  334. _Complex double zdotc = 0.0;
  335. if (incx == 1 && incy == 1) {
  336. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  337. zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
  338. }
  339. } else {
  340. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  341. zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
  342. }
  343. }
  344. pCd(z) = zdotc;
  345. }
  346. static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  347. integer n = *n_, incx = *incx_, incy = *incy_, i;
  348. _Complex float zdotc = 0.0;
  349. if (incx == 1 && incy == 1) {
  350. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  351. zdotc += Cf(&x[i]) * Cf(&y[i]);
  352. }
  353. } else {
  354. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  355. zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
  356. }
  357. }
  358. pCf(z) = zdotc;
  359. }
  360. static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  361. integer n = *n_, incx = *incx_, incy = *incy_, i;
  362. _Complex double zdotc = 0.0;
  363. if (incx == 1 && incy == 1) {
  364. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  365. zdotc += Cd(&x[i]) * Cd(&y[i]);
  366. }
  367. } else {
  368. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  369. zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
  370. }
  371. }
  372. pCd(z) = zdotc;
  373. }
  374. #endif
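/* End of the inlined f2c.h compatibility layer.  Everything above this
   point (the integer/complex typedefs, the Fortran I/O list structs, the
   intrinsic macros such as d_sign and pow_dd, and the small static helpers
   spow_ui/dpow_ui/pow_ii/dmaxloc_/cdotc_/zdotc_) exists only so that the
   machine-translated routine below can run without a Fortran runtime. */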
  375. /* -- translated by f2c (version 20000121).
  376. You must link the resulting object file with the libraries:
  377. -lf2c -lm (in that order)
  378. */
  379. /* Table of constant values */
  380. static doublecomplex c_b1 = {1.,0.};
  381. static doublecomplex c_b2 = {0.,0.};
  382. static integer c__1 = 1;
  383. static integer c_n1 = -1;
  384. static integer c__2 = 2;
  385. static integer c__3 = 3;
  386. static integer c__16 = 16;
  387. /* > \brief \b ZGGHD3 */
  388. /* =========== DOCUMENTATION =========== */
  389. /* Online html documentation available at */
  390. /* http://www.netlib.org/lapack/explore-html/ */
  391. /* > \htmlonly */
  392. /* > Download ZGGHD3 + dependencies */
  393. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/zgghd3.
  394. f"> */
  395. /* > [TGZ]</a> */
  396. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/zgghd3.
  397. f"> */
  398. /* > [ZIP]</a> */
  399. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/zgghd3.
  400. f"> */
  401. /* > [TXT]</a> */
  402. /* > \endhtmlonly */
  403. /* Definition: */
  404. /* =========== */
  405. /* SUBROUTINE ZGGHD3( COMPQ, COMPZ, N, ILO, IHI, A, LDA, B, LDB, Q, */
  406. /* LDQ, Z, LDZ, WORK, LWORK, INFO ) */
  407. /* CHARACTER COMPQ, COMPZ */
  408. /* INTEGER IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, N, LWORK */
  409. /* COMPLEX*16 A( LDA, * ), B( LDB, * ), Q( LDQ, * ), */
  410. /* $ Z( LDZ, * ), WORK( * ) */
  411. /* > \par Purpose: */
  412. /* ============= */
  413. /* > */
  414. /* > \verbatim */
  415. /* > */
  416. /* > ZGGHD3 reduces a pair of complex matrices (A,B) to generalized upper */
  417. /* > Hessenberg form using unitary transformations, where A is a */
  418. /* > general matrix and B is upper triangular. The form of the */
  419. /* > generalized eigenvalue problem is */
  420. /* > A*x = lambda*B*x, */
  421. /* > and B is typically made upper triangular by computing its QR */
  422. /* > factorization and moving the unitary matrix Q to the left side */
  423. /* > of the equation. */
  424. /* > */
  425. /* > This subroutine simultaneously reduces A to a Hessenberg matrix H: */
  426. /* > Q**H*A*Z = H */
  427. /* > and transforms B to another upper triangular matrix T: */
  428. /* > Q**H*B*Z = T */
  429. /* > in order to reduce the problem to its standard form */
  430. /* > H*y = lambda*T*y */
  431. /* > where y = Z**H*x. */
  432. /* > */
  433. /* > The unitary matrices Q and Z are determined as products of Givens */
  434. /* > rotations. They may either be formed explicitly, or they may be */
  435. /* > postmultiplied into input matrices Q1 and Z1, so that */
  436. /* > Q1 * A * Z1**H = (Q1*Q) * H * (Z1*Z)**H */
  437. /* > Q1 * B * Z1**H = (Q1*Q) * T * (Z1*Z)**H */
  438. /* > If Q1 is the unitary matrix from the QR factorization of B in the */
  439. /* > original equation A*x = lambda*B*x, then ZGGHD3 reduces the original */
  440. /* > problem to generalized Hessenberg form. */
  441. /* > */
  442. /* > This is a blocked variant of ZGGHRD, using matrix-matrix */
  443. /* > multiplications for parts of the computation to enhance performance. */
  444. /* > \endverbatim */
  445. /* Arguments: */
  446. /* ========== */
  447. /* > \param[in] COMPQ */
  448. /* > \verbatim */
  449. /* > COMPQ is CHARACTER*1 */
  450. /* > = 'N': do not compute Q; */
  451. /* > = 'I': Q is initialized to the unit matrix, and the */
  452. /* > unitary matrix Q is returned; */
  453. /* > = 'V': Q must contain a unitary matrix Q1 on entry, */
  454. /* > and the product Q1*Q is returned. */
  455. /* > \endverbatim */
  456. /* > */
  457. /* > \param[in] COMPZ */
  458. /* > \verbatim */
  459. /* > COMPZ is CHARACTER*1 */
  460. /* > = 'N': do not compute Z; */
  461. /* > = 'I': Z is initialized to the unit matrix, and the */
  462. /* > unitary matrix Z is returned; */
  463. /* > = 'V': Z must contain a unitary matrix Z1 on entry, */
  464. /* > and the product Z1*Z is returned. */
  465. /* > \endverbatim */
  466. /* > */
  467. /* > \param[in] N */
  468. /* > \verbatim */
  469. /* > N is INTEGER */
  470. /* > The order of the matrices A and B. N >= 0. */
  471. /* > \endverbatim */
  472. /* > */
  473. /* > \param[in] ILO */
  474. /* > \verbatim */
  475. /* > ILO is INTEGER */
  476. /* > \endverbatim */
  477. /* > */
  478. /* > \param[in] IHI */
  479. /* > \verbatim */
  480. /* > IHI is INTEGER */
  481. /* > */
  482. /* > ILO and IHI mark the rows and columns of A which are to be */
  483. /* > reduced. It is assumed that A is already upper triangular */
  484. /* > in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are */
  485. /* > normally set by a previous call to ZGGBAL; otherwise they */
  486. /* > should be set to 1 and N respectively. */
  487. /* > 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
  488. /* > \endverbatim */
  489. /* > */
  490. /* > \param[in,out] A */
  491. /* > \verbatim */
  492. /* > A is COMPLEX*16 array, dimension (LDA, N) */
  493. /* > On entry, the N-by-N general matrix to be reduced. */
  494. /* > On exit, the upper triangle and the first subdiagonal of A */
  495. /* > are overwritten with the upper Hessenberg matrix H, and the */
  496. /* > rest is set to zero. */
  497. /* > \endverbatim */
  498. /* > */
  499. /* > \param[in] LDA */
  500. /* > \verbatim */
  501. /* > LDA is INTEGER */
  502. /* > The leading dimension of the array A. LDA >= f2cmax(1,N). */
  503. /* > \endverbatim */
  504. /* > */
  505. /* > \param[in,out] B */
  506. /* > \verbatim */
  507. /* > B is COMPLEX*16 array, dimension (LDB, N) */
  508. /* > On entry, the N-by-N upper triangular matrix B. */
  509. /* > On exit, the upper triangular matrix T = Q**H B Z. The */
  510. /* > elements below the diagonal are set to zero. */
  511. /* > \endverbatim */
  512. /* > */
  513. /* > \param[in] LDB */
  514. /* > \verbatim */
  515. /* > LDB is INTEGER */
  516. /* > The leading dimension of the array B. LDB >= f2cmax(1,N). */
  517. /* > \endverbatim */
  518. /* > */
  519. /* > \param[in,out] Q */
  520. /* > \verbatim */
  521. /* > Q is COMPLEX*16 array, dimension (LDQ, N) */
  522. /* > On entry, if COMPQ = 'V', the unitary matrix Q1, typically */
  523. /* > from the QR factorization of B. */
  524. /* > On exit, if COMPQ='I', the unitary matrix Q, and if */
  525. /* > COMPQ = 'V', the product Q1*Q. */
  526. /* > Not referenced if COMPQ='N'. */
  527. /* > \endverbatim */
  528. /* > */
  529. /* > \param[in] LDQ */
  530. /* > \verbatim */
  531. /* > LDQ is INTEGER */
  532. /* > The leading dimension of the array Q. */
  533. /* > LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. */
  534. /* > \endverbatim */
  535. /* > */
  536. /* > \param[in,out] Z */
  537. /* > \verbatim */
  538. /* > Z is COMPLEX*16 array, dimension (LDZ, N) */
  539. /* > On entry, if COMPZ = 'V', the unitary matrix Z1. */
  540. /* > On exit, if COMPZ='I', the unitary matrix Z, and if */
  541. /* > COMPZ = 'V', the product Z1*Z. */
  542. /* > Not referenced if COMPZ='N'. */
  543. /* > \endverbatim */
  544. /* > */
  545. /* > \param[in] LDZ */
  546. /* > \verbatim */
  547. /* > LDZ is INTEGER */
  548. /* > The leading dimension of the array Z. */
  549. /* > LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise. */
  550. /* > \endverbatim */
  551. /* > */
  552. /* > \param[out] WORK */
  553. /* > \verbatim */
  554. /* > WORK is COMPLEX*16 array, dimension (LWORK) */
  555. /* > On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
  556. /* > \endverbatim */
  557. /* > */
  558. /* > \param[in] LWORK */
  559. /* > \verbatim */
  560. /* > LWORK is INTEGER */
  561. /* > The length of the array WORK. LWORK >= 1. */
  562. /* > For optimum performance LWORK >= 6*N*NB, where NB is the */
  563. /* > optimal blocksize. */
  564. /* > */
  565. /* > If LWORK = -1, then a workspace query is assumed; the routine */
  566. /* > only calculates the optimal size of the WORK array, returns */
  567. /* > this value as the first entry of the WORK array, and no error */
  568. /* > message related to LWORK is issued by XERBLA. */
  569. /* > \endverbatim */
  570. /* > */
  571. /* > \param[out] INFO */
  572. /* > \verbatim */
  573. /* > INFO is INTEGER */
  574. /* > = 0: successful exit. */
  575. /* > < 0: if INFO = -i, the i-th argument had an illegal value. */
  576. /* > \endverbatim */
  577. /* Authors: */
  578. /* ======== */
  579. /* > \author Univ. of Tennessee */
  580. /* > \author Univ. of California Berkeley */
  581. /* > \author Univ. of Colorado Denver */
  582. /* > \author NAG Ltd. */
  583. /* > \date January 2015 */
  584. /* > \ingroup complex16OTHERcomputational */
  585. /* > \par Further Details: */
  586. /* ===================== */
  587. /* > */
  588. /* > \verbatim */
  589. /* > */
  590. /* > This routine reduces A to Hessenberg form and maintains B in */
  591. /* > triangular form using a blocked variant of Moler and Stewart's original algorithm, */
  592. /* > as described by Kagstrom, Kressner, Quintana-Orti, and Quintana-Orti */
  593. /* > (BIT 2008). */
  594. /* > \endverbatim */
  595. /* > */
  596. /* ===================================================================== */
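/* Typical call sequence (a hedged sketch, not part of the original source):
   for a general pair (A,B) a driver usually balances with ZGGBAL, factors
   B = Q1*R with ZGEQRF, applies Q1**H to A with ZUNMQR, generates Q1
   explicitly with ZUNGQR when eigenvectors are wanted, and then calls
   ZGGHD3 with COMPQ = 'V' (Q holding Q1) and COMPZ = 'I' to obtain the
   Hessenberg-triangular pair (H,T) passed on to the QZ iteration ZHGEQZ.
   A concrete workspace-query example is sketched at the end of this file. */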
  597. /* Subroutine */ int zgghd3_(char *compq, char *compz, integer *n, integer *
  598. ilo, integer *ihi, doublecomplex *a, integer *lda, doublecomplex *b,
  599. integer *ldb, doublecomplex *q, integer *ldq, doublecomplex *z__,
  600. integer *ldz, doublecomplex *work, integer *lwork, integer *info)
  601. {
  602. /* System generated locals */
  603. integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
  604. z_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8, i__9;
  605. doublecomplex z__1, z__2, z__3, z__4;
  606. /* Local variables */
  607. logical blk22;
  608. integer cola, jcol, ierr;
  609. doublecomplex temp;
  610. integer jrow, topq, ppwo;
  611. extern /* Subroutine */ int zrot_(integer *, doublecomplex *, integer *,
  612. doublecomplex *, integer *, doublereal *, doublecomplex *);
  613. doublecomplex temp1, temp2, temp3;
  614. doublereal c__;
  615. integer kacc22, i__, j, k;
  616. doublecomplex s;
  617. extern logical lsame_(char *, char *);
  618. integer nbmin;
  619. doublecomplex ctemp;
  620. extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
  621. integer *, doublecomplex *, doublecomplex *, integer *,
  622. doublecomplex *, integer *, doublecomplex *, doublecomplex *,
  623. integer *);
  624. integer nblst;
  625. logical initq;
  626. doublecomplex c1, c2;
  627. extern /* Subroutine */ int zgemv_(char *, integer *, integer *,
  628. doublecomplex *, doublecomplex *, integer *, doublecomplex *,
  629. integer *, doublecomplex *, doublecomplex *, integer *);
  630. logical wantq;
  631. integer j0;
  632. logical initz;
  633. extern /* Subroutine */ int zunm22_(char *, char *, integer *, integer *,
  634. integer *, integer *, doublecomplex *, integer *, doublecomplex *,
  635. integer *, doublecomplex *, integer *, integer *)
  636. ;
  637. logical wantz;
  638. doublecomplex s1, s2;
  639. extern /* Subroutine */ int ztrmv_(char *, char *, char *, integer *,
  640. doublecomplex *, integer *, doublecomplex *, integer *);
  641. char compq2[1], compz2[1];
  642. integer nb, jj, nh, nx, pw;
  643. extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
  644. extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
  645. integer *, integer *, ftnlen, ftnlen);
  646. extern /* Subroutine */ int zgghrd_(char *, char *, integer *, integer *,
  647. integer *, doublecomplex *, integer *, doublecomplex *, integer *,
  648. doublecomplex *, integer *, doublecomplex *, integer *, integer *
  649. ), zlaset_(char *, integer *, integer *,
  650. doublecomplex *, doublecomplex *, doublecomplex *, integer *), zlartg_(doublecomplex *, doublecomplex *, doublereal *,
  651. doublecomplex *, doublecomplex *), zlacpy_(char *, integer *,
  652. integer *, doublecomplex *, integer *, doublecomplex *, integer *);
  653. integer lwkopt;
  654. logical lquery;
  655. integer nnb, len, top, ppw, n2nb;
  656. /* -- LAPACK computational routine (version 3.8.0) -- */
  657. /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
  658. /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
  659. /* January 2015 */
  660. /* ===================================================================== */
  661. /* Decode and test the input parameters. */
  662. /* Parameter adjustments */
  663. a_dim1 = *lda;
  664. a_offset = 1 + a_dim1 * 1;
  665. a -= a_offset;
  666. b_dim1 = *ldb;
  667. b_offset = 1 + b_dim1 * 1;
  668. b -= b_offset;
  669. q_dim1 = *ldq;
  670. q_offset = 1 + q_dim1 * 1;
  671. q -= q_offset;
  672. z_dim1 = *ldz;
  673. z_offset = 1 + z_dim1 * 1;
  674. z__ -= z_offset;
  675. --work;
  676. /* Function Body */
  677. *info = 0;
  678. nb = ilaenv_(&c__1, "ZGGHD3", " ", n, ilo, ihi, &c_n1, (ftnlen)6, (ftnlen)
  679. 1);
  680. /* Computing MAX */
  681. i__1 = *n * 6 * nb;
  682. lwkopt = f2cmax(i__1,1);
  683. z__1.r = (doublereal) lwkopt, z__1.i = 0.;
  684. work[1].r = z__1.r, work[1].i = z__1.i;
  685. initq = lsame_(compq, "I");
  686. wantq = initq || lsame_(compq, "V");
  687. initz = lsame_(compz, "I");
  688. wantz = initz || lsame_(compz, "V");
  689. lquery = *lwork == -1;
  690. if (! lsame_(compq, "N") && ! wantq) {
  691. *info = -1;
  692. } else if (! lsame_(compz, "N") && ! wantz) {
  693. *info = -2;
  694. } else if (*n < 0) {
  695. *info = -3;
  696. } else if (*ilo < 1) {
  697. *info = -4;
  698. } else if (*ihi > *n || *ihi < *ilo - 1) {
  699. *info = -5;
  700. } else if (*lda < f2cmax(1,*n)) {
  701. *info = -7;
  702. } else if (*ldb < f2cmax(1,*n)) {
  703. *info = -9;
  704. } else if (wantq && *ldq < *n || *ldq < 1) {
  705. *info = -11;
  706. } else if (wantz && *ldz < *n || *ldz < 1) {
  707. *info = -13;
  708. } else if (*lwork < 1 && ! lquery) {
  709. *info = -15;
  710. }
  711. if (*info != 0) {
  712. i__1 = -(*info);
  713. xerbla_("ZGGHD3", &i__1, (ftnlen)6);
  714. return 0;
  715. } else if (lquery) {
  716. return 0;
  717. }
  718. /* Initialize Q and Z if desired. */
  719. if (initq) {
  720. zlaset_("All", n, n, &c_b2, &c_b1, &q[q_offset], ldq);
  721. }
  722. if (initz) {
  723. zlaset_("All", n, n, &c_b2, &c_b1, &z__[z_offset], ldz);
  724. }
  725. /* Zero out lower triangle of B. */
  726. if (*n > 1) {
  727. i__1 = *n - 1;
  728. i__2 = *n - 1;
  729. zlaset_("Lower", &i__1, &i__2, &c_b2, &c_b2, &b[b_dim1 + 2], ldb);
  730. }
  731. /* Quick return if possible */
  732. nh = *ihi - *ilo + 1;
  733. if (nh <= 1) {
  734. work[1].r = 1., work[1].i = 0.;
  735. return 0;
  736. }
  737. /* Determine the blocksize. */
  738. nbmin = ilaenv_(&c__2, "ZGGHD3", " ", n, ilo, ihi, &c_n1, (ftnlen)6, (
  739. ftnlen)1);
  740. if (nb > 1 && nb < nh) {
  741. /* Determine when to use unblocked instead of blocked code. */
  742. /* Computing MAX */
  743. i__1 = nb, i__2 = ilaenv_(&c__3, "ZGGHD3", " ", n, ilo, ihi, &c_n1, (
  744. ftnlen)6, (ftnlen)1);
  745. nx = f2cmax(i__1,i__2);
  746. if (nx < nh) {
  747. /* Determine if workspace is large enough for blocked code. */
  748. if (*lwork < lwkopt) {
  749. /* Not enough workspace to use optimal NB: determine the */
  750. /* minimum value of NB, and reduce NB or force use of */
  751. /* unblocked code. */
  752. /* Computing MAX */
  753. i__1 = 2, i__2 = ilaenv_(&c__2, "ZGGHD3", " ", n, ilo, ihi, &
  754. c_n1, (ftnlen)6, (ftnlen)1);
  755. nbmin = f2cmax(i__1,i__2);
  756. if (*lwork >= *n * 6 * nbmin) {
  757. nb = *lwork / (*n * 6);
  758. } else {
  759. nb = 1;
  760. }
  761. }
  762. }
  763. }
  764. if (nb < nbmin || nb >= nh) {
  765. /* Use unblocked code below */
  766. jcol = *ilo;
  767. } else {
  768. /* Use blocked code */
  769. kacc22 = ilaenv_(&c__16, "ZGGHD3", " ", n, ilo, ihi, &c_n1, (ftnlen)6,
  770. (ftnlen)1);
  771. blk22 = kacc22 == 2;
  772. i__1 = *ihi - 2;
  773. i__2 = nb;
  774. for (jcol = *ilo; i__2 < 0 ? jcol >= i__1 : jcol <= i__1; jcol +=
  775. i__2) {
  776. /* Computing MIN */
  777. i__3 = nb, i__4 = *ihi - jcol - 1;
  778. nnb = f2cmin(i__3,i__4);
  779. /* Initialize small unitary factors that will hold the */
  780. /* accumulated Givens rotations in workspace. */
  781. /* N2NB denotes the number of 2*NNB-by-2*NNB factors */
  782. /* NBLST denotes the (possibly smaller) order of the last */
  783. /* factor. */
  784. n2nb = (*ihi - jcol - 1) / nnb - 1;
  785. nblst = *ihi - jcol - n2nb * nnb;
  786. zlaset_("All", &nblst, &nblst, &c_b2, &c_b1, &work[1], &nblst);
  787. pw = nblst * nblst + 1;
  788. i__3 = n2nb;
  789. for (i__ = 1; i__ <= i__3; ++i__) {
  790. i__4 = nnb << 1;
  791. i__5 = nnb << 1;
  792. i__6 = nnb << 1;
  793. zlaset_("All", &i__4, &i__5, &c_b2, &c_b1, &work[pw], &i__6);
  794. pw += (nnb << 2) * nnb;
  795. }
  796. /* Reduce columns JCOL:JCOL+NNB-1 of A to Hessenberg form. */
  797. i__3 = jcol + nnb - 1;
  798. for (j = jcol; j <= i__3; ++j) {
  799. /* Reduce Jth column of A. Store cosines and sines in Jth */
  800. /* column of A and B, respectively. */
  801. i__4 = j + 2;
  802. for (i__ = *ihi; i__ >= i__4; --i__) {
  803. i__5 = i__ - 1 + j * a_dim1;
  804. temp.r = a[i__5].r, temp.i = a[i__5].i;
  805. zlartg_(&temp, &a[i__ + j * a_dim1], &c__, &s, &a[i__ - 1
  806. + j * a_dim1]);
  807. i__5 = i__ + j * a_dim1;
  808. z__1.r = c__, z__1.i = 0.;
  809. a[i__5].r = z__1.r, a[i__5].i = z__1.i;
  810. i__5 = i__ + j * b_dim1;
  811. b[i__5].r = s.r, b[i__5].i = s.i;
  812. }
  813. /* Accumulate Givens rotations into workspace array. */
  814. ppw = (nblst + 1) * (nblst - 2) - j + jcol + 1;
  815. len = j + 2 - jcol;
  816. jrow = j + n2nb * nnb + 2;
  817. i__4 = jrow;
  818. for (i__ = *ihi; i__ >= i__4; --i__) {
  819. i__5 = i__ + j * a_dim1;
  820. ctemp.r = a[i__5].r, ctemp.i = a[i__5].i;
  821. i__5 = i__ + j * b_dim1;
  822. s.r = b[i__5].r, s.i = b[i__5].i;
  823. i__5 = ppw + len - 1;
  824. for (jj = ppw; jj <= i__5; ++jj) {
  825. i__6 = jj + nblst;
  826. temp.r = work[i__6].r, temp.i = work[i__6].i;
  827. i__6 = jj + nblst;
  828. z__2.r = ctemp.r * temp.r - ctemp.i * temp.i, z__2.i =
  829. ctemp.r * temp.i + ctemp.i * temp.r;
  830. i__7 = jj;
  831. z__3.r = s.r * work[i__7].r - s.i * work[i__7].i,
  832. z__3.i = s.r * work[i__7].i + s.i * work[i__7]
  833. .r;
  834. z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
  835. work[i__6].r = z__1.r, work[i__6].i = z__1.i;
  836. i__6 = jj;
  837. d_cnjg(&z__3, &s);
  838. z__2.r = z__3.r * temp.r - z__3.i * temp.i, z__2.i =
  839. z__3.r * temp.i + z__3.i * temp.r;
  840. i__7 = jj;
  841. z__4.r = ctemp.r * work[i__7].r - ctemp.i * work[i__7]
  842. .i, z__4.i = ctemp.r * work[i__7].i + ctemp.i
  843. * work[i__7].r;
  844. z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
  845. work[i__6].r = z__1.r, work[i__6].i = z__1.i;
  846. }
  847. ++len;
  848. ppw = ppw - nblst - 1;
  849. }
  850. ppwo = nblst * nblst + (nnb + j - jcol - 1 << 1) * nnb + nnb;
  851. j0 = jrow - nnb;
  852. i__4 = j + 2;
  853. i__5 = -nnb;
  854. for (jrow = j0; i__5 < 0 ? jrow >= i__4 : jrow <= i__4; jrow
  855. += i__5) {
  856. ppw = ppwo;
  857. len = j + 2 - jcol;
  858. i__6 = jrow;
  859. for (i__ = jrow + nnb - 1; i__ >= i__6; --i__) {
  860. i__7 = i__ + j * a_dim1;
  861. ctemp.r = a[i__7].r, ctemp.i = a[i__7].i;
  862. i__7 = i__ + j * b_dim1;
  863. s.r = b[i__7].r, s.i = b[i__7].i;
  864. i__7 = ppw + len - 1;
  865. for (jj = ppw; jj <= i__7; ++jj) {
  866. i__8 = jj + (nnb << 1);
  867. temp.r = work[i__8].r, temp.i = work[i__8].i;
  868. i__8 = jj + (nnb << 1);
  869. z__2.r = ctemp.r * temp.r - ctemp.i * temp.i,
  870. z__2.i = ctemp.r * temp.i + ctemp.i *
  871. temp.r;
  872. i__9 = jj;
  873. z__3.r = s.r * work[i__9].r - s.i * work[i__9].i,
  874. z__3.i = s.r * work[i__9].i + s.i * work[
  875. i__9].r;
  876. z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
  877. z__3.i;
  878. work[i__8].r = z__1.r, work[i__8].i = z__1.i;
  879. i__8 = jj;
  880. d_cnjg(&z__3, &s);
  881. z__2.r = z__3.r * temp.r - z__3.i * temp.i,
  882. z__2.i = z__3.r * temp.i + z__3.i *
  883. temp.r;
  884. i__9 = jj;
  885. z__4.r = ctemp.r * work[i__9].r - ctemp.i * work[
  886. i__9].i, z__4.i = ctemp.r * work[i__9].i
  887. + ctemp.i * work[i__9].r;
  888. z__1.r = z__2.r + z__4.r, z__1.i = z__2.i +
  889. z__4.i;
  890. work[i__8].r = z__1.r, work[i__8].i = z__1.i;
  891. }
  892. ++len;
  893. ppw = ppw - (nnb << 1) - 1;
  894. }
  895. ppwo += (nnb << 2) * nnb;
  896. }
  897. /* TOP denotes the number of top rows in A and B that will */
  898. /* not be updated during the next steps. */
  899. if (jcol <= 2) {
  900. top = 0;
  901. } else {
  902. top = jcol;
  903. }
  904. /* Propagate transformations through B and replace stored */
  905. /* left sines/cosines by right sines/cosines. */
  906. i__5 = j + 1;
  907. for (jj = *n; jj >= i__5; --jj) {
  908. /* Update JJth column of B. */
  909. /* Computing MIN */
  910. i__4 = jj + 1;
  911. i__6 = j + 2;
  912. for (i__ = f2cmin(i__4,*ihi); i__ >= i__6; --i__) {
  913. i__4 = i__ + j * a_dim1;
  914. ctemp.r = a[i__4].r, ctemp.i = a[i__4].i;
  915. i__4 = i__ + j * b_dim1;
  916. s.r = b[i__4].r, s.i = b[i__4].i;
  917. i__4 = i__ + jj * b_dim1;
  918. temp.r = b[i__4].r, temp.i = b[i__4].i;
  919. i__4 = i__ + jj * b_dim1;
  920. z__2.r = ctemp.r * temp.r - ctemp.i * temp.i, z__2.i =
  921. ctemp.r * temp.i + ctemp.i * temp.r;
  922. d_cnjg(&z__4, &s);
  923. i__7 = i__ - 1 + jj * b_dim1;
  924. z__3.r = z__4.r * b[i__7].r - z__4.i * b[i__7].i,
  925. z__3.i = z__4.r * b[i__7].i + z__4.i * b[i__7]
  926. .r;
  927. z__1.r = z__2.r - z__3.r, z__1.i = z__2.i - z__3.i;
  928. b[i__4].r = z__1.r, b[i__4].i = z__1.i;
  929. i__4 = i__ - 1 + jj * b_dim1;
  930. z__2.r = s.r * temp.r - s.i * temp.i, z__2.i = s.r *
  931. temp.i + s.i * temp.r;
  932. i__7 = i__ - 1 + jj * b_dim1;
  933. z__3.r = ctemp.r * b[i__7].r - ctemp.i * b[i__7].i,
  934. z__3.i = ctemp.r * b[i__7].i + ctemp.i * b[
  935. i__7].r;
  936. z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
  937. b[i__4].r = z__1.r, b[i__4].i = z__1.i;
  938. }
  939. /* Annihilate B( JJ+1, JJ ). */
  940. if (jj < *ihi) {
  941. i__6 = jj + 1 + (jj + 1) * b_dim1;
  942. temp.r = b[i__6].r, temp.i = b[i__6].i;
  943. zlartg_(&temp, &b[jj + 1 + jj * b_dim1], &c__, &s, &b[
  944. jj + 1 + (jj + 1) * b_dim1]);
  945. i__6 = jj + 1 + jj * b_dim1;
  946. b[i__6].r = 0., b[i__6].i = 0.;
  947. i__6 = jj - top;
  948. zrot_(&i__6, &b[top + 1 + (jj + 1) * b_dim1], &c__1, &
  949. b[top + 1 + jj * b_dim1], &c__1, &c__, &s);
  950. i__6 = jj + 1 + j * a_dim1;
  951. z__1.r = c__, z__1.i = 0.;
  952. a[i__6].r = z__1.r, a[i__6].i = z__1.i;
  953. i__6 = jj + 1 + j * b_dim1;
  954. d_cnjg(&z__2, &s);
  955. z__1.r = -z__2.r, z__1.i = -z__2.i;
  956. b[i__6].r = z__1.r, b[i__6].i = z__1.i;
  957. }
  958. }
  959. /* Update A by transformations from right. */
  960. jj = (*ihi - j - 1) % 3;
  961. i__5 = jj + 1;
  962. for (i__ = *ihi - j - 3; i__ >= i__5; i__ += -3) {
  963. i__6 = j + 1 + i__ + j * a_dim1;
  964. ctemp.r = a[i__6].r, ctemp.i = a[i__6].i;
  965. i__6 = j + 1 + i__ + j * b_dim1;
  966. z__1.r = -b[i__6].r, z__1.i = -b[i__6].i;
  967. s.r = z__1.r, s.i = z__1.i;
  968. i__6 = j + 2 + i__ + j * a_dim1;
  969. c1.r = a[i__6].r, c1.i = a[i__6].i;
  970. i__6 = j + 2 + i__ + j * b_dim1;
  971. z__1.r = -b[i__6].r, z__1.i = -b[i__6].i;
  972. s1.r = z__1.r, s1.i = z__1.i;
  973. i__6 = j + 3 + i__ + j * a_dim1;
  974. c2.r = a[i__6].r, c2.i = a[i__6].i;
  975. i__6 = j + 3 + i__ + j * b_dim1;
  976. z__1.r = -b[i__6].r, z__1.i = -b[i__6].i;
  977. s2.r = z__1.r, s2.i = z__1.i;
  978. i__6 = *ihi;
  979. for (k = top + 1; k <= i__6; ++k) {
  980. i__4 = k + (j + i__) * a_dim1;
  981. temp.r = a[i__4].r, temp.i = a[i__4].i;
  982. i__4 = k + (j + i__ + 1) * a_dim1;
  983. temp1.r = a[i__4].r, temp1.i = a[i__4].i;
  984. i__4 = k + (j + i__ + 2) * a_dim1;
  985. temp2.r = a[i__4].r, temp2.i = a[i__4].i;
  986. i__4 = k + (j + i__ + 3) * a_dim1;
  987. temp3.r = a[i__4].r, temp3.i = a[i__4].i;
  988. i__4 = k + (j + i__ + 3) * a_dim1;
  989. z__2.r = c2.r * temp3.r - c2.i * temp3.i, z__2.i =
  990. c2.r * temp3.i + c2.i * temp3.r;
  991. d_cnjg(&z__4, &s2);
  992. z__3.r = z__4.r * temp2.r - z__4.i * temp2.i, z__3.i =
  993. z__4.r * temp2.i + z__4.i * temp2.r;
  994. z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
  995. a[i__4].r = z__1.r, a[i__4].i = z__1.i;
  996. z__3.r = -s2.r, z__3.i = -s2.i;
  997. z__2.r = z__3.r * temp3.r - z__3.i * temp3.i, z__2.i =
  998. z__3.r * temp3.i + z__3.i * temp3.r;
  999. z__4.r = c2.r * temp2.r - c2.i * temp2.i, z__4.i =
  1000. c2.r * temp2.i + c2.i * temp2.r;
  1001. z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
  1002. temp2.r = z__1.r, temp2.i = z__1.i;
  1003. i__4 = k + (j + i__ + 2) * a_dim1;
  1004. z__2.r = c1.r * temp2.r - c1.i * temp2.i, z__2.i =
  1005. c1.r * temp2.i + c1.i * temp2.r;
  1006. d_cnjg(&z__4, &s1);
  1007. z__3.r = z__4.r * temp1.r - z__4.i * temp1.i, z__3.i =
  1008. z__4.r * temp1.i + z__4.i * temp1.r;
  1009. z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
  1010. a[i__4].r = z__1.r, a[i__4].i = z__1.i;
  1011. z__3.r = -s1.r, z__3.i = -s1.i;
  1012. z__2.r = z__3.r * temp2.r - z__3.i * temp2.i, z__2.i =
  1013. z__3.r * temp2.i + z__3.i * temp2.r;
  1014. z__4.r = c1.r * temp1.r - c1.i * temp1.i, z__4.i =
  1015. c1.r * temp1.i + c1.i * temp1.r;
  1016. z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
  1017. temp1.r = z__1.r, temp1.i = z__1.i;
  1018. i__4 = k + (j + i__ + 1) * a_dim1;
  1019. z__2.r = ctemp.r * temp1.r - ctemp.i * temp1.i,
  1020. z__2.i = ctemp.r * temp1.i + ctemp.i *
  1021. temp1.r;
  1022. d_cnjg(&z__4, &s);
  1023. z__3.r = z__4.r * temp.r - z__4.i * temp.i, z__3.i =
  1024. z__4.r * temp.i + z__4.i * temp.r;
  1025. z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
  1026. a[i__4].r = z__1.r, a[i__4].i = z__1.i;
  1027. i__4 = k + (j + i__) * a_dim1;
  1028. z__3.r = -s.r, z__3.i = -s.i;
  1029. z__2.r = z__3.r * temp1.r - z__3.i * temp1.i, z__2.i =
  1030. z__3.r * temp1.i + z__3.i * temp1.r;
  1031. z__4.r = ctemp.r * temp.r - ctemp.i * temp.i, z__4.i =
  1032. ctemp.r * temp.i + ctemp.i * temp.r;
  1033. z__1.r = z__2.r + z__4.r, z__1.i = z__2.i + z__4.i;
  1034. a[i__4].r = z__1.r, a[i__4].i = z__1.i;
  1035. }
  1036. }
  1037. if (jj > 0) {
  1038. for (i__ = jj; i__ >= 1; --i__) {
  1039. i__5 = j + 1 + i__ + j * a_dim1;
  1040. c__ = a[i__5].r;
  1041. i__5 = *ihi - top;
  1042. d_cnjg(&z__2, &b[j + 1 + i__ + j * b_dim1]);
  1043. z__1.r = -z__2.r, z__1.i = -z__2.i;
  1044. zrot_(&i__5, &a[top + 1 + (j + i__ + 1) * a_dim1], &
  1045. c__1, &a[top + 1 + (j + i__) * a_dim1], &c__1,
  1046. &c__, &z__1);
  1047. }
  1048. }
  1049. /* Update (J+1)th column of A by transformations from left. */
  1050. if (j < jcol + nnb - 1) {
  1051. len = j + 1 - jcol;
  1052. /* Multiply with the trailing accumulated unitary */
  1053. /* matrix, which takes the form */
  1054. /* [ U11 U12 ] */
  1055. /* U = [ ], */
  1056. /* [ U21 U22 ] */
  1057. /* where U21 is a LEN-by-LEN matrix and U12 is lower */
  1058. /* triangular. */
  1059. jrow = *ihi - nblst + 1;
  1060. zgemv_("Conjugate", &nblst, &len, &c_b1, &work[1], &nblst,
  1061. &a[jrow + (j + 1) * a_dim1], &c__1, &c_b2, &work[
  1062. pw], &c__1);
  1063. ppw = pw + len;
  1064. i__5 = jrow + nblst - len - 1;
  1065. for (i__ = jrow; i__ <= i__5; ++i__) {
  1066. i__6 = ppw;
  1067. i__4 = i__ + (j + 1) * a_dim1;
  1068. work[i__6].r = a[i__4].r, work[i__6].i = a[i__4].i;
  1069. ++ppw;
  1070. }
  1071. i__5 = nblst - len;
  1072. ztrmv_("Lower", "Conjugate", "Non-unit", &i__5, &work[len
  1073. * nblst + 1], &nblst, &work[pw + len], &c__1);
  1074. i__5 = nblst - len;
  1075. zgemv_("Conjugate", &len, &i__5, &c_b1, &work[(len + 1) *
  1076. nblst - len + 1], &nblst, &a[jrow + nblst - len +
  1077. (j + 1) * a_dim1], &c__1, &c_b1, &work[pw + len],
  1078. &c__1);
  1079. ppw = pw;
  1080. i__5 = jrow + nblst - 1;
  1081. for (i__ = jrow; i__ <= i__5; ++i__) {
  1082. i__6 = i__ + (j + 1) * a_dim1;
  1083. i__4 = ppw;
  1084. a[i__6].r = work[i__4].r, a[i__6].i = work[i__4].i;
  1085. ++ppw;
  1086. }
  1087. /* Multiply with the other accumulated unitary */
  1088. /* matrices, which take the form */
  1089. /* [ U11 U12 0 ] */
  1090. /* [ ] */
  1091. /* U = [ U21 U22 0 ], */
  1092. /* [ ] */
  1093. /* [ 0 0 I ] */
  1094. /* where I denotes the (NNB-LEN)-by-(NNB-LEN) identity */
  1095. /* matrix, U21 is a LEN-by-LEN upper triangular matrix */
  1096. /* and U12 is an NNB-by-NNB lower triangular matrix. */
  1097. ppwo = nblst * nblst + 1;
  1098. j0 = jrow - nnb;
  1099. i__5 = jcol + 1;
  1100. i__6 = -nnb;
  1101. for (jrow = j0; i__6 < 0 ? jrow >= i__5 : jrow <= i__5;
  1102. jrow += i__6) {
  1103. ppw = pw + len;
  1104. i__4 = jrow + nnb - 1;
  1105. for (i__ = jrow; i__ <= i__4; ++i__) {
  1106. i__7 = ppw;
  1107. i__8 = i__ + (j + 1) * a_dim1;
  1108. work[i__7].r = a[i__8].r, work[i__7].i = a[i__8]
  1109. .i;
  1110. ++ppw;
  1111. }
  1112. ppw = pw;
  1113. i__4 = jrow + nnb + len - 1;
  1114. for (i__ = jrow + nnb; i__ <= i__4; ++i__) {
  1115. i__7 = ppw;
  1116. i__8 = i__ + (j + 1) * a_dim1;
  1117. work[i__7].r = a[i__8].r, work[i__7].i = a[i__8]
  1118. .i;
  1119. ++ppw;
  1120. }
  1121. i__4 = nnb << 1;
  1122. ztrmv_("Upper", "Conjugate", "Non-unit", &len, &work[
  1123. ppwo + nnb], &i__4, &work[pw], &c__1);
  1124. i__4 = nnb << 1;
  1125. ztrmv_("Lower", "Conjugate", "Non-unit", &nnb, &work[
  1126. ppwo + (len << 1) * nnb], &i__4, &work[pw +
  1127. len], &c__1);
  1128. i__4 = nnb << 1;
  1129. zgemv_("Conjugate", &nnb, &len, &c_b1, &work[ppwo], &
  1130. i__4, &a[jrow + (j + 1) * a_dim1], &c__1, &
  1131. c_b1, &work[pw], &c__1);
  1132. i__4 = nnb << 1;
  1133. zgemv_("Conjugate", &len, &nnb, &c_b1, &work[ppwo + (
  1134. len << 1) * nnb + nnb], &i__4, &a[jrow + nnb
  1135. + (j + 1) * a_dim1], &c__1, &c_b1, &work[pw +
  1136. len], &c__1);
  1137. ppw = pw;
  1138. i__4 = jrow + len + nnb - 1;
  1139. for (i__ = jrow; i__ <= i__4; ++i__) {
  1140. i__7 = i__ + (j + 1) * a_dim1;
  1141. i__8 = ppw;
  1142. a[i__7].r = work[i__8].r, a[i__7].i = work[i__8]
  1143. .i;
  1144. ++ppw;
  1145. }
  1146. ppwo += (nnb << 2) * nnb;
  1147. }
  1148. }
  1149. }
  1150. /* Apply accumulated unitary matrices to A. */
  1151. cola = *n - jcol - nnb + 1;
  1152. j = *ihi - nblst + 1;
  1153. zgemm_("Conjugate", "No Transpose", &nblst, &cola, &nblst, &c_b1,
  1154. &work[1], &nblst, &a[j + (jcol + nnb) * a_dim1], lda, &
  1155. c_b2, &work[pw], &nblst);
  1156. zlacpy_("All", &nblst, &cola, &work[pw], &nblst, &a[j + (jcol +
  1157. nnb) * a_dim1], lda);
  1158. ppwo = nblst * nblst + 1;
  1159. j0 = j - nnb;
  1160. i__3 = jcol + 1;
  1161. i__6 = -nnb;
  1162. for (j = j0; i__6 < 0 ? j >= i__3 : j <= i__3; j += i__6) {
  1163. if (blk22) {
  1164. /* Exploit the structure of */
  1165. /* [ U11 U12 ] */
  1166. /* U = [ ] */
  1167. /* [ U21 U22 ], */
  1168. /* where all blocks are NNB-by-NNB, U21 is upper */
  1169. /* triangular and U12 is lower triangular. */
  1170. i__5 = nnb << 1;
  1171. i__4 = nnb << 1;
  1172. i__7 = *lwork - pw + 1;
  1173. zunm22_("Left", "Conjugate", &i__5, &cola, &nnb, &nnb, &
  1174. work[ppwo], &i__4, &a[j + (jcol + nnb) * a_dim1],
  1175. lda, &work[pw], &i__7, &ierr);
  1176. } else {
  1177. /* Ignore the structure of U. */
  1178. i__5 = nnb << 1;
  1179. i__4 = nnb << 1;
  1180. i__7 = nnb << 1;
  1181. i__8 = nnb << 1;
  1182. zgemm_("Conjugate", "No Transpose", &i__5, &cola, &i__4, &
  1183. c_b1, &work[ppwo], &i__7, &a[j + (jcol + nnb) *
  1184. a_dim1], lda, &c_b2, &work[pw], &i__8);
  1185. i__5 = nnb << 1;
  1186. i__4 = nnb << 1;
  1187. zlacpy_("All", &i__5, &cola, &work[pw], &i__4, &a[j + (
  1188. jcol + nnb) * a_dim1], lda);
  1189. }
  1190. ppwo += (nnb << 2) * nnb;
  1191. }
  1192. /* Apply accumulated unitary matrices to Q. */
  1193. if (wantq) {
  1194. j = *ihi - nblst + 1;
  1195. if (initq) {
  1196. /* Computing MAX */
  1197. i__6 = 2, i__3 = j - jcol + 1;
  1198. topq = f2cmax(i__6,i__3);
  1199. nh = *ihi - topq + 1;
  1200. } else {
  1201. topq = 1;
  1202. nh = *n;
  1203. }
  1204. zgemm_("No Transpose", "No Transpose", &nh, &nblst, &nblst, &
  1205. c_b1, &q[topq + j * q_dim1], ldq, &work[1], &nblst, &
  1206. c_b2, &work[pw], &nh);
  1207. zlacpy_("All", &nh, &nblst, &work[pw], &nh, &q[topq + j *
  1208. q_dim1], ldq);
  1209. ppwo = nblst * nblst + 1;
  1210. j0 = j - nnb;
  1211. i__6 = jcol + 1;
  1212. i__3 = -nnb;
  1213. for (j = j0; i__3 < 0 ? j >= i__6 : j <= i__6; j += i__3) {
  1214. if (initq) {
  1215. /* Computing MAX */
  1216. i__5 = 2, i__4 = j - jcol + 1;
  1217. topq = f2cmax(i__5,i__4);
  1218. nh = *ihi - topq + 1;
  1219. }
  1220. if (blk22) {
  1221. /* Exploit the structure of U. */
  1222. i__5 = nnb << 1;
  1223. i__4 = nnb << 1;
  1224. i__7 = *lwork - pw + 1;
  1225. zunm22_("Right", "No Transpose", &nh, &i__5, &nnb, &
  1226. nnb, &work[ppwo], &i__4, &q[topq + j * q_dim1]
  1227. , ldq, &work[pw], &i__7, &ierr);
  1228. } else {
  1229. /* Ignore the structure of U. */
  1230. i__5 = nnb << 1;
  1231. i__4 = nnb << 1;
  1232. i__7 = nnb << 1;
  1233. zgemm_("No Transpose", "No Transpose", &nh, &i__5, &
  1234. i__4, &c_b1, &q[topq + j * q_dim1], ldq, &
  1235. work[ppwo], &i__7, &c_b2, &work[pw], &nh);
  1236. i__5 = nnb << 1;
  1237. zlacpy_("All", &nh, &i__5, &work[pw], &nh, &q[topq +
  1238. j * q_dim1], ldq);
  1239. }
  1240. ppwo += (nnb << 2) * nnb;
  1241. }
  1242. }
  1243. /* Accumulate right Givens rotations if required. */
  1244. if (wantz || top > 0) {
  1245. /* Initialize small unitary factors that will hold the */
  1246. /* accumulated Givens rotations in workspace. */
  1247. zlaset_("All", &nblst, &nblst, &c_b2, &c_b1, &work[1], &nblst);
  1248. pw = nblst * nblst + 1;
  1249. i__3 = n2nb;
  1250. for (i__ = 1; i__ <= i__3; ++i__) {
  1251. i__6 = nnb << 1;
  1252. i__5 = nnb << 1;
  1253. i__4 = nnb << 1;
  1254. zlaset_("All", &i__6, &i__5, &c_b2, &c_b1, &work[pw], &
  1255. i__4);
  1256. pw += (nnb << 2) * nnb;
  1257. }
  1258. /* Accumulate Givens rotations into workspace array. */
  1259. i__3 = jcol + nnb - 1;
  1260. for (j = jcol; j <= i__3; ++j) {
  1261. ppw = (nblst + 1) * (nblst - 2) - j + jcol + 1;
  1262. len = j + 2 - jcol;
  1263. jrow = j + n2nb * nnb + 2;
  1264. i__6 = jrow;
  1265. for (i__ = *ihi; i__ >= i__6; --i__) {
  1266. i__5 = i__ + j * a_dim1;
  1267. ctemp.r = a[i__5].r, ctemp.i = a[i__5].i;
  1268. i__5 = i__ + j * a_dim1;
  1269. a[i__5].r = 0., a[i__5].i = 0.;
  1270. i__5 = i__ + j * b_dim1;
  1271. s.r = b[i__5].r, s.i = b[i__5].i;
  1272. i__5 = i__ + j * b_dim1;
  1273. b[i__5].r = 0., b[i__5].i = 0.;
  1274. i__5 = ppw + len - 1;
  1275. for (jj = ppw; jj <= i__5; ++jj) {
  1276. i__4 = jj + nblst;
  1277. temp.r = work[i__4].r, temp.i = work[i__4].i;
  1278. i__4 = jj + nblst;
  1279. z__2.r = ctemp.r * temp.r - ctemp.i * temp.i,
  1280. z__2.i = ctemp.r * temp.i + ctemp.i *
  1281. temp.r;
  1282. d_cnjg(&z__4, &s);
  1283. i__7 = jj;
  1284. z__3.r = z__4.r * work[i__7].r - z__4.i * work[
  1285. i__7].i, z__3.i = z__4.r * work[i__7].i +
  1286. z__4.i * work[i__7].r;
  1287. z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
  1288. z__3.i;
  1289. work[i__4].r = z__1.r, work[i__4].i = z__1.i;
  1290. i__4 = jj;
  1291. z__2.r = s.r * temp.r - s.i * temp.i, z__2.i =
  1292. s.r * temp.i + s.i * temp.r;
  1293. i__7 = jj;
  1294. z__3.r = ctemp.r * work[i__7].r - ctemp.i * work[
  1295. i__7].i, z__3.i = ctemp.r * work[i__7].i
  1296. + ctemp.i * work[i__7].r;
  1297. z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
  1298. z__3.i;
  1299. work[i__4].r = z__1.r, work[i__4].i = z__1.i;
  1300. }
  1301. ++len;
  1302. ppw = ppw - nblst - 1;
  1303. }
  1304. ppwo = nblst * nblst + (nnb + j - jcol - 1 << 1) * nnb +
  1305. nnb;
  1306. j0 = jrow - nnb;
  1307. i__6 = j + 2;
  1308. i__5 = -nnb;
  1309. for (jrow = j0; i__5 < 0 ? jrow >= i__6 : jrow <= i__6;
  1310. jrow += i__5) {
  1311. ppw = ppwo;
  1312. len = j + 2 - jcol;
  1313. i__4 = jrow;
  1314. for (i__ = jrow + nnb - 1; i__ >= i__4; --i__) {
  1315. i__7 = i__ + j * a_dim1;
  1316. ctemp.r = a[i__7].r, ctemp.i = a[i__7].i;
  1317. i__7 = i__ + j * a_dim1;
  1318. a[i__7].r = 0., a[i__7].i = 0.;
  1319. i__7 = i__ + j * b_dim1;
  1320. s.r = b[i__7].r, s.i = b[i__7].i;
  1321. i__7 = i__ + j * b_dim1;
  1322. b[i__7].r = 0., b[i__7].i = 0.;
  1323. i__7 = ppw + len - 1;
  1324. for (jj = ppw; jj <= i__7; ++jj) {
  1325. i__8 = jj + (nnb << 1);
  1326. temp.r = work[i__8].r, temp.i = work[i__8].i;
  1327. i__8 = jj + (nnb << 1);
  1328. z__2.r = ctemp.r * temp.r - ctemp.i * temp.i,
  1329. z__2.i = ctemp.r * temp.i + ctemp.i *
  1330. temp.r;
  1331. d_cnjg(&z__4, &s);
  1332. i__9 = jj;
  1333. z__3.r = z__4.r * work[i__9].r - z__4.i *
  1334. work[i__9].i, z__3.i = z__4.r * work[
  1335. i__9].i + z__4.i * work[i__9].r;
  1336. z__1.r = z__2.r - z__3.r, z__1.i = z__2.i -
  1337. z__3.i;
  1338. work[i__8].r = z__1.r, work[i__8].i = z__1.i;
  1339. i__8 = jj;
  1340. z__2.r = s.r * temp.r - s.i * temp.i, z__2.i =
  1341. s.r * temp.i + s.i * temp.r;
  1342. i__9 = jj;
  1343. z__3.r = ctemp.r * work[i__9].r - ctemp.i *
  1344. work[i__9].i, z__3.i = ctemp.r * work[
  1345. i__9].i + ctemp.i * work[i__9].r;
  1346. z__1.r = z__2.r + z__3.r, z__1.i = z__2.i +
  1347. z__3.i;
  1348. work[i__8].r = z__1.r, work[i__8].i = z__1.i;
  1349. }
  1350. ++len;
  1351. ppw = ppw - (nnb << 1) - 1;
  1352. }
  1353. ppwo += (nnb << 2) * nnb;
  1354. }
  1355. }
  1356. } else {
  1357. i__3 = *ihi - jcol - 1;
  1358. zlaset_("Lower", &i__3, &nnb, &c_b2, &c_b2, &a[jcol + 2 +
  1359. jcol * a_dim1], lda);
  1360. i__3 = *ihi - jcol - 1;
  1361. zlaset_("Lower", &i__3, &nnb, &c_b2, &c_b2, &b[jcol + 2 +
  1362. jcol * b_dim1], ldb);
  1363. }
  1364. /* Apply accumulated unitary matrices to A and B. */
  1365. if (top > 0) {
  1366. j = *ihi - nblst + 1;
  1367. zgemm_("No Transpose", "No Transpose", &top, &nblst, &nblst, &
  1368. c_b1, &a[j * a_dim1 + 1], lda, &work[1], &nblst, &
  1369. c_b2, &work[pw], &top);
  1370. zlacpy_("All", &top, &nblst, &work[pw], &top, &a[j * a_dim1 +
  1371. 1], lda);
  1372. ppwo = nblst * nblst + 1;
  1373. j0 = j - nnb;
  1374. i__3 = jcol + 1;
  1375. i__5 = -nnb;
  1376. for (j = j0; i__5 < 0 ? j >= i__3 : j <= i__3; j += i__5) {
  1377. if (blk22) {
  1378. /* Exploit the structure of U. */
  1379. i__6 = nnb << 1;
  1380. i__4 = nnb << 1;
  1381. i__7 = *lwork - pw + 1;
  1382. zunm22_("Right", "No Transpose", &top, &i__6, &nnb, &
  1383. nnb, &work[ppwo], &i__4, &a[j * a_dim1 + 1],
  1384. lda, &work[pw], &i__7, &ierr);
  1385. } else {
  1386. /* Ignore the structure of U. */
  1387. i__6 = nnb << 1;
  1388. i__4 = nnb << 1;
  1389. i__7 = nnb << 1;
  1390. zgemm_("No Transpose", "No Transpose", &top, &i__6, &
  1391. i__4, &c_b1, &a[j * a_dim1 + 1], lda, &work[
  1392. ppwo], &i__7, &c_b2, &work[pw], &top);
  1393. i__6 = nnb << 1;
  1394. zlacpy_("All", &top, &i__6, &work[pw], &top, &a[j *
  1395. a_dim1 + 1], lda);
  1396. }
  1397. ppwo += (nnb << 2) * nnb;
  1398. }
  1399. j = *ihi - nblst + 1;
  1400. zgemm_("No Transpose", "No Transpose", &top, &nblst, &nblst, &
  1401. c_b1, &b[j * b_dim1 + 1], ldb, &work[1], &nblst, &
  1402. c_b2, &work[pw], &top);
  1403. zlacpy_("All", &top, &nblst, &work[pw], &top, &b[j * b_dim1 +
  1404. 1], ldb);
  1405. ppwo = nblst * nblst + 1;
  1406. j0 = j - nnb;
  1407. i__5 = jcol + 1;
  1408. i__3 = -nnb;
  1409. for (j = j0; i__3 < 0 ? j >= i__5 : j <= i__5; j += i__3) {
  1410. if (blk22) {
  1411. /* Exploit the structure of U. */
  1412. i__6 = nnb << 1;
  1413. i__4 = nnb << 1;
  1414. i__7 = *lwork - pw + 1;
  1415. zunm22_("Right", "No Transpose", &top, &i__6, &nnb, &
  1416. nnb, &work[ppwo], &i__4, &b[j * b_dim1 + 1],
  1417. ldb, &work[pw], &i__7, &ierr);
  1418. } else {
  1419. /* Ignore the structure of U. */
  1420. i__6 = nnb << 1;
  1421. i__4 = nnb << 1;
  1422. i__7 = nnb << 1;
  1423. zgemm_("No Transpose", "No Transpose", &top, &i__6, &
  1424. i__4, &c_b1, &b[j * b_dim1 + 1], ldb, &work[
  1425. ppwo], &i__7, &c_b2, &work[pw], &top);
  1426. i__6 = nnb << 1;
  1427. zlacpy_("All", &top, &i__6, &work[pw], &top, &b[j *
  1428. b_dim1 + 1], ldb);
  1429. }
  1430. ppwo += (nnb << 2) * nnb;
  1431. }
  1432. }
  1433. /* Apply accumulated unitary matrices to Z. */
  1434. if (wantz) {
  1435. j = *ihi - nblst + 1;
  1436. if (initq) {
  1437. /* Computing MAX */
  1438. i__3 = 2, i__5 = j - jcol + 1;
  1439. topq = f2cmax(i__3,i__5);
  1440. nh = *ihi - topq + 1;
  1441. } else {
  1442. topq = 1;
  1443. nh = *n;
  1444. }
  1445. zgemm_("No Transpose", "No Transpose", &nh, &nblst, &nblst, &
  1446. c_b1, &z__[topq + j * z_dim1], ldz, &work[1], &nblst,
  1447. &c_b2, &work[pw], &nh);
  1448. zlacpy_("All", &nh, &nblst, &work[pw], &nh, &z__[topq + j *
  1449. z_dim1], ldz);
  1450. ppwo = nblst * nblst + 1;
  1451. j0 = j - nnb;
  1452. i__3 = jcol + 1;
  1453. i__5 = -nnb;
  1454. for (j = j0; i__5 < 0 ? j >= i__3 : j <= i__3; j += i__5) {
  1455. if (initq) {
  1456. /* Computing MAX */
  1457. i__6 = 2, i__4 = j - jcol + 1;
  1458. topq = f2cmax(i__6,i__4);
  1459. nh = *ihi - topq + 1;
  1460. }
  1461. if (blk22) {
  1462. /* Exploit the structure of U. */
  1463. i__6 = nnb << 1;
  1464. i__4 = nnb << 1;
  1465. i__7 = *lwork - pw + 1;
  1466. zunm22_("Right", "No Transpose", &nh, &i__6, &nnb, &
  1467. nnb, &work[ppwo], &i__4, &z__[topq + j *
  1468. z_dim1], ldz, &work[pw], &i__7, &ierr);
  1469. } else {
  1470. /* Ignore the structure of U. */
  1471. i__6 = nnb << 1;
  1472. i__4 = nnb << 1;
  1473. i__7 = nnb << 1;
  1474. zgemm_("No Transpose", "No Transpose", &nh, &i__6, &
  1475. i__4, &c_b1, &z__[topq + j * z_dim1], ldz, &
  1476. work[ppwo], &i__7, &c_b2, &work[pw], &nh);
  1477. i__6 = nnb << 1;
  1478. zlacpy_("All", &nh, &i__6, &work[pw], &nh, &z__[topq
  1479. + j * z_dim1], ldz);
  1480. }
  1481. ppwo += (nnb << 2) * nnb;
  1482. }
  1483. }
  1484. }
  1485. }
  1486. /* Use unblocked code to reduce the rest of the matrix */
  1487. /* Avoid re-initialization of modified Q and Z. */
  1488. *(unsigned char *)compq2 = *(unsigned char *)compq;
  1489. *(unsigned char *)compz2 = *(unsigned char *)compz;
  1490. if (jcol != *ilo) {
  1491. if (wantq) {
  1492. *(unsigned char *)compq2 = 'V';
  1493. }
  1494. if (wantz) {
  1495. *(unsigned char *)compz2 = 'V';
  1496. }
  1497. }
  1498. if (jcol < *ihi) {
  1499. zgghrd_(compq2, compz2, n, &jcol, ihi, &a[a_offset], lda, &b[b_offset]
  1500. , ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &ierr);
  1501. }
  1502. z__1.r = (doublereal) lwkopt, z__1.i = 0.;
  1503. work[1].r = z__1.r, work[1].i = z__1.i;
  1504. return 0;
  1505. /* End of ZGGHD3 */
  1506. } /* zgghd3_ */
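/* ------------------------------------------------------------------------ */
/* Illustrative sketch (not part of the original LAPACK source): a minimal  */
/* caller showing the customary workspace query (LWORK = -1) followed by    */
/* the actual reduction.  The function name zgghd3_example_, the use of     */
/* ILO = 1 / IHI = N, square leading dimensions, and the COMPQ/COMPZ        */
/* choices are assumptions for the example only.  On entry, b is assumed    */
/* already upper triangular and q is assumed to hold the unitary factor Q1  */
/* from the QR factorization of B (e.g. via ZGEQRF + ZUNGQR).               */
static int zgghd3_example_(integer n, doublecomplex *a, doublecomplex *b,
                           doublecomplex *q, doublecomplex *z)
{
    integer ilo = 1, ihi = n, lda = n, ldb = n, ldq = n, ldz = n;
    integer lwork = -1, info = 0;
    doublecomplex wkopt;
    doublecomplex *work;

    /* Workspace query: only WORK(1) is set, to the optimal LWORK. */
    zgghd3_("V", "I", &n, &ilo, &ihi, a, &lda, b, &ldb,
            q, &ldq, z, &ldz, &wkopt, &lwork, &info);
    if (info != 0) return (int) info;

    lwork = (integer) wkopt.r;
    work = (doublecomplex *) malloc((size_t) lwork * sizeof(doublecomplex));
    if (work == NULL) return -1;

    /* Actual blocked Hessenberg-triangular reduction of (A,B). */
    zgghd3_("V", "I", &n, &ilo, &ihi, a, &lda, b, &ldb,
            q, &ldq, z, &ldz, work, &lwork, &info);

    free(work);
    return (int) info;
}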