
sgghd3.c

  1. #include <math.h>
  2. #include <stdlib.h>
  3. #include <string.h>
  4. #include <stdio.h>
  5. #include <complex.h>
  6. #ifdef complex
  7. #undef complex
  8. #endif
  9. #ifdef I
  10. #undef I
  11. #endif
  12. #if defined(_WIN64)
  13. typedef long long BLASLONG;
  14. typedef unsigned long long BLASULONG;
  15. #else
  16. typedef long BLASLONG;
  17. typedef unsigned long BLASULONG;
  18. #endif
  19. #ifdef LAPACK_ILP64
  20. typedef BLASLONG blasint;
  21. #if defined(_WIN64)
  22. #define blasabs(x) llabs(x)
  23. #else
  24. #define blasabs(x) labs(x)
  25. #endif
  26. #else
  27. typedef int blasint;
  28. #define blasabs(x) abs(x)
  29. #endif
  30. typedef blasint integer;
  31. typedef unsigned int uinteger;
  32. typedef char *address;
  33. typedef short int shortint;
  34. typedef float real;
  35. typedef double doublereal;
  36. typedef struct { real r, i; } complex;
  37. typedef struct { doublereal r, i; } doublecomplex;
  38. #ifdef _MSC_VER
  39. static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
  40. static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
  41. static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
  42. static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
  43. #else
  44. static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
  45. static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
  46. static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
  47. static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
  48. #endif
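/* Cf()/Cd() read an f2c complex/doublecomplex struct as a native complex value;
   _pCf()/_pCd() reinterpret a pointer to such a struct as a pointer to the native
   complex type, so the pCf()/pCd() macros below can write results back in place. */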
  49. #define pCf(z) (*_pCf(z))
  50. #define pCd(z) (*_pCd(z))
  51. typedef blasint logical;
  52. typedef char logical1;
  53. typedef char integer1;
  54. #define TRUE_ (1)
  55. #define FALSE_ (0)
  56. /* Extern is for use with -E */
  57. #ifndef Extern
  58. #define Extern extern
  59. #endif
  60. /* I/O stuff */
  61. typedef int flag;
  62. typedef int ftnlen;
  63. typedef int ftnint;
  64. /*external read, write*/
  65. typedef struct
  66. { flag cierr;
  67. ftnint ciunit;
  68. flag ciend;
  69. char *cifmt;
  70. ftnint cirec;
  71. } cilist;
  72. /*internal read, write*/
  73. typedef struct
  74. { flag icierr;
  75. char *iciunit;
  76. flag iciend;
  77. char *icifmt;
  78. ftnint icirlen;
  79. ftnint icirnum;
  80. } icilist;
  81. /*open*/
  82. typedef struct
  83. { flag oerr;
  84. ftnint ounit;
  85. char *ofnm;
  86. ftnlen ofnmlen;
  87. char *osta;
  88. char *oacc;
  89. char *ofm;
  90. ftnint orl;
  91. char *oblnk;
  92. } olist;
  93. /*close*/
  94. typedef struct
  95. { flag cerr;
  96. ftnint cunit;
  97. char *csta;
  98. } cllist;
  99. /*rewind, backspace, endfile*/
  100. typedef struct
  101. { flag aerr;
  102. ftnint aunit;
  103. } alist;
  104. /* inquire */
  105. typedef struct
  106. { flag inerr;
  107. ftnint inunit;
  108. char *infile;
  109. ftnlen infilen;
  110. ftnint *inex; /*parameters in standard's order*/
  111. ftnint *inopen;
  112. ftnint *innum;
  113. ftnint *innamed;
  114. char *inname;
  115. ftnlen innamlen;
  116. char *inacc;
  117. ftnlen inacclen;
  118. char *inseq;
  119. ftnlen inseqlen;
  120. char *indir;
  121. ftnlen indirlen;
  122. char *infmt;
  123. ftnlen infmtlen;
  124. char *inform;
  125. ftnint informlen;
  126. char *inunf;
  127. ftnlen inunflen;
  128. ftnint *inrecl;
  129. ftnint *innrec;
  130. char *inblank;
  131. ftnlen inblanklen;
  132. } inlist;
  133. #define VOID void
  134. union Multitype { /* for multiple entry points */
  135. integer1 g;
  136. shortint h;
  137. integer i;
  138. /* longint j; */
  139. real r;
  140. doublereal d;
  141. complex c;
  142. doublecomplex z;
  143. };
  144. typedef union Multitype Multitype;
  145. struct Vardesc { /* for Namelist */
  146. char *name;
  147. char *addr;
  148. ftnlen *dims;
  149. int type;
  150. };
  151. typedef struct Vardesc Vardesc;
  152. struct Namelist {
  153. char *name;
  154. Vardesc **vars;
  155. int nvars;
  156. };
  157. typedef struct Namelist Namelist;
  158. #define abs(x) ((x) >= 0 ? (x) : -(x))
  159. #define dabs(x) (fabs(x))
  160. #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
  161. #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
  162. #define dmin(a,b) (f2cmin(a,b))
  163. #define dmax(a,b) (f2cmax(a,b))
  164. #define bit_test(a,b) ((a) >> (b) & 1)
  165. #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
  166. #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
  167. #define abort_() { sig_die("Fortran abort routine called", 1); }
  168. #define c_abs(z) (cabsf(Cf(z)))
  169. #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
  170. #ifdef _MSC_VER
  171. #define c_div(c, a, b) {Cf(c)._Val[0] = (Cf(a)._Val[0]/Cf(b)._Val[0]); Cf(c)._Val[1]=(Cf(a)._Val[1]/Cf(b)._Val[1]);}
  172. #define z_div(c, a, b) {Cd(c)._Val[0] = (Cd(a)._Val[0]/Cd(b)._Val[0]); Cd(c)._Val[1]=(Cd(a)._Val[1]/Cd(b)._Val[1]);}
  173. #else
  174. #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
  175. #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
  176. #endif
  177. #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
  178. #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
  179. #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
  180. //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
  181. #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
  182. #define d_abs(x) (fabs(*(x)))
  183. #define d_acos(x) (acos(*(x)))
  184. #define d_asin(x) (asin(*(x)))
  185. #define d_atan(x) (atan(*(x)))
  186. #define d_atn2(x, y) (atan2(*(x),*(y)))
  187. #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
  188. #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
  189. #define d_cos(x) (cos(*(x)))
  190. #define d_cosh(x) (cosh(*(x)))
  191. #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
  192. #define d_exp(x) (exp(*(x)))
  193. #define d_imag(z) (cimag(Cd(z)))
  194. #define r_imag(z) (cimagf(Cf(z)))
  195. #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  196. #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  197. #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  198. #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  199. #define d_log(x) (log(*(x)))
  200. #define d_mod(x, y) (fmod(*(x), *(y)))
  201. #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
  202. #define d_nint(x) u_nint(*(x))
  203. #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
  204. #define d_sign(a,b) u_sign(*(a),*(b))
  205. #define r_sign(a,b) u_sign(*(a),*(b))
  206. #define d_sin(x) (sin(*(x)))
  207. #define d_sinh(x) (sinh(*(x)))
  208. #define d_sqrt(x) (sqrt(*(x)))
  209. #define d_tan(x) (tan(*(x)))
  210. #define d_tanh(x) (tanh(*(x)))
  211. #define i_abs(x) abs(*(x))
  212. #define i_dnnt(x) ((integer)u_nint(*(x)))
  213. #define i_len(s, n) (n)
  214. #define i_nint(x) ((integer)u_nint(*(x)))
  215. #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
  216. #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
  217. #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
  218. #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
  219. #define sig_die(s, kill) { exit(1); }
  220. #define s_stop(s, n) {exit(0);}
  221. #define z_abs(z) (cabs(Cd(z)))
  222. #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
  223. #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
  224. #define myexit_() break;
  225. #define mycycle() continue;
  226. #define myceiling(w) {ceil(w)}
  227. #define myhuge(w) {HUGE_VAL}
  228. //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
  229. #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
  230. /* -- translated by f2c (version 20000121).
  231. You must link the resulting object file with the libraries:
  232. -lf2c -lm (in that order)
  233. */
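/* As an illustration of that requirement, a standalone build of this translated
   file might be linked along the lines of
       cc my_driver.c sgghd3.c -lf2c -lm
   where my_driver.c is a hypothetical caller; inside a full LAPACK/OpenBLAS
   build the surrounding project supplies the equivalent runtime support. */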
  234. /* Table of constant values */
  235. static integer c__1 = 1;
  236. static integer c_n1 = -1;
  237. static real c_b14 = 0.f;
  238. static real c_b15 = 1.f;
  239. static integer c__2 = 2;
  240. static integer c__3 = 3;
  241. static integer c__16 = 16;
  242. /* > \brief \b SGGHD3 */
  243. /* =========== DOCUMENTATION =========== */
  244. /* Online html documentation available at */
  245. /* http://www.netlib.org/lapack/explore-html/ */
  246. /* > \htmlonly */
  247. /* > Download SGGHD3 + dependencies */
  248. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/sgghd3.
  249. f"> */
  250. /* > [TGZ]</a> */
  251. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/sgghd3.
  252. f"> */
  253. /* > [ZIP]</a> */
  254. /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/sgghd3.
  255. f"> */
  256. /* > [TXT]</a> */
  257. /* > \endhtmlonly */
  258. /* Definition: */
  259. /* =========== */
  260. /* SUBROUTINE SGGHD3( COMPQ, COMPZ, N, ILO, IHI, A, LDA, B, LDB, Q, */
  261. /* LDQ, Z, LDZ, WORK, LWORK, INFO ) */
  262. /* CHARACTER COMPQ, COMPZ */
  263. /* INTEGER IHI, ILO, INFO, LDA, LDB, LDQ, LDZ, N, LWORK */
  264. /* REAL A( LDA, * ), B( LDB, * ), Q( LDQ, * ), */
  265. /* $ Z( LDZ, * ), WORK( * ) */
  266. /* > \par Purpose: */
  267. /* ============= */
  268. /* > */
  269. /* > \verbatim */
  270. /* > */
  271. /* > SGGHD3 reduces a pair of real matrices (A,B) to generalized upper */
  272. /* > Hessenberg form using orthogonal transformations, where A is a */
  273. /* > general matrix and B is upper triangular. The form of the */
  274. /* > generalized eigenvalue problem is */
  275. /* > A*x = lambda*B*x, */
  276. /* > and B is typically made upper triangular by computing its QR */
  277. /* > factorization and moving the orthogonal matrix Q to the left side */
  278. /* > of the equation. */
  279. /* > */
  280. /* > This subroutine simultaneously reduces A to a Hessenberg matrix H: */
  281. /* > Q**T*A*Z = H */
  282. /* > and transforms B to another upper triangular matrix T: */
  283. /* > Q**T*B*Z = T */
  284. /* > in order to reduce the problem to its standard form */
  285. /* > H*y = lambda*T*y */
  286. /* > where y = Z**T*x. */
  287. /* > */
  288. /* > The orthogonal matrices Q and Z are determined as products of Givens */
  289. /* > rotations. They may either be formed explicitly, or they may be */
  290. /* > postmultiplied into input matrices Q1 and Z1, so that */
  291. /* > */
  292. /* > Q1 * A * Z1**T = (Q1*Q) * H * (Z1*Z)**T */
  293. /* > */
  294. /* > Q1 * B * Z1**T = (Q1*Q) * T * (Z1*Z)**T */
  295. /* > */
  296. /* > If Q1 is the orthogonal matrix from the QR factorization of B in the */
  297. /* > original equation A*x = lambda*B*x, then SGGHD3 reduces the original */
  298. /* > problem to generalized Hessenberg form. */
  299. /* > */
  300. /* > This is a blocked variant of SGGHRD, using matrix-matrix */
  301. /* > multiplications for parts of the computation to enhance performance. */
  302. /* > \endverbatim */
  303. /* Arguments: */
  304. /* ========== */
  305. /* > \param[in] COMPQ */
  306. /* > \verbatim */
  307. /* > COMPQ is CHARACTER*1 */
  308. /* > = 'N': do not compute Q; */
  309. /* > = 'I': Q is initialized to the unit matrix, and the */
  310. /* > orthogonal matrix Q is returned; */
  311. /* > = 'V': Q must contain an orthogonal matrix Q1 on entry, */
  312. /* > and the product Q1*Q is returned. */
  313. /* > \endverbatim */
  314. /* > */
  315. /* > \param[in] COMPZ */
  316. /* > \verbatim */
  317. /* > COMPZ is CHARACTER*1 */
  318. /* > = 'N': do not compute Z; */
  319. /* > = 'I': Z is initialized to the unit matrix, and the */
  320. /* > orthogonal matrix Z is returned; */
  321. /* > = 'V': Z must contain an orthogonal matrix Z1 on entry, */
  322. /* > and the product Z1*Z is returned. */
  323. /* > \endverbatim */
  324. /* > */
  325. /* > \param[in] N */
  326. /* > \verbatim */
  327. /* > N is INTEGER */
  328. /* > The order of the matrices A and B. N >= 0. */
  329. /* > \endverbatim */
  330. /* > */
  331. /* > \param[in] ILO */
  332. /* > \verbatim */
  333. /* > ILO is INTEGER */
  334. /* > \endverbatim */
  335. /* > */
  336. /* > \param[in] IHI */
  337. /* > \verbatim */
  338. /* > IHI is INTEGER */
  339. /* > */
  340. /* > ILO and IHI mark the rows and columns of A which are to be */
  341. /* > reduced. It is assumed that A is already upper triangular */
  342. /* > in rows and columns 1:ILO-1 and IHI+1:N. ILO and IHI are */
  343. /* > normally set by a previous call to SGGBAL; otherwise they */
  344. /* > should be set to 1 and N respectively. */
  345. /* > 1 <= ILO <= IHI <= N, if N > 0; ILO=1 and IHI=0, if N=0. */
  346. /* > \endverbatim */
  347. /* > */
  348. /* > \param[in,out] A */
  349. /* > \verbatim */
  350. /* > A is REAL array, dimension (LDA, N) */
  351. /* > On entry, the N-by-N general matrix to be reduced. */
  352. /* > On exit, the upper triangle and the first subdiagonal of A */
  353. /* > are overwritten with the upper Hessenberg matrix H, and the */
  354. /* > rest is set to zero. */
  355. /* > \endverbatim */
  356. /* > */
  357. /* > \param[in] LDA */
  358. /* > \verbatim */
  359. /* > LDA is INTEGER */
  360. /* > The leading dimension of the array A. LDA >= f2cmax(1,N). */
  361. /* > \endverbatim */
  362. /* > */
  363. /* > \param[in,out] B */
  364. /* > \verbatim */
  365. /* > B is REAL array, dimension (LDB, N) */
  366. /* > On entry, the N-by-N upper triangular matrix B. */
  367. /* > On exit, the upper triangular matrix T = Q**T B Z. The */
  368. /* > elements below the diagonal are set to zero. */
  369. /* > \endverbatim */
  370. /* > */
  371. /* > \param[in] LDB */
  372. /* > \verbatim */
  373. /* > LDB is INTEGER */
  374. /* > The leading dimension of the array B. LDB >= f2cmax(1,N). */
  375. /* > \endverbatim */
  376. /* > */
  377. /* > \param[in,out] Q */
  378. /* > \verbatim */
  379. /* > Q is REAL array, dimension (LDQ, N) */
  380. /* > On entry, if COMPQ = 'V', the orthogonal matrix Q1, */
  381. /* > typically from the QR factorization of B. */
  382. /* > On exit, if COMPQ='I', the orthogonal matrix Q, and if */
  383. /* > COMPQ = 'V', the product Q1*Q. */
  384. /* > Not referenced if COMPQ='N'. */
  385. /* > \endverbatim */
  386. /* > */
  387. /* > \param[in] LDQ */
  388. /* > \verbatim */
  389. /* > LDQ is INTEGER */
  390. /* > The leading dimension of the array Q. */
  391. /* > LDQ >= N if COMPQ='V' or 'I'; LDQ >= 1 otherwise. */
  392. /* > \endverbatim */
  393. /* > */
  394. /* > \param[in,out] Z */
  395. /* > \verbatim */
  396. /* > Z is REAL array, dimension (LDZ, N) */
  397. /* > On entry, if COMPZ = 'V', the orthogonal matrix Z1. */
  398. /* > On exit, if COMPZ='I', the orthogonal matrix Z, and if */
  399. /* > COMPZ = 'V', the product Z1*Z. */
  400. /* > Not referenced if COMPZ='N'. */
  401. /* > \endverbatim */
  402. /* > */
  403. /* > \param[in] LDZ */
  404. /* > \verbatim */
  405. /* > LDZ is INTEGER */
  406. /* > The leading dimension of the array Z. */
  407. /* > LDZ >= N if COMPZ='V' or 'I'; LDZ >= 1 otherwise. */
  408. /* > \endverbatim */
  409. /* > */
  410. /* > \param[out] WORK */
  411. /* > \verbatim */
  412. /* > WORK is REAL array, dimension (LWORK) */
  413. /* > On exit, if INFO = 0, WORK(1) returns the optimal LWORK. */
  414. /* > \endverbatim */
  415. /* > */
  416. /* > \param[in] LWORK */
  417. /* > \verbatim */
  418. /* > LWORK is INTEGER */
  419. /* > The length of the array WORK. LWORK >= 1. */
  420. /* > For optimum performance LWORK >= 6*N*NB, where NB is the */
  421. /* > optimal blocksize. */
  422. /* > */
  423. /* > If LWORK = -1, then a workspace query is assumed; the routine */
  424. /* > only calculates the optimal size of the WORK array, returns */
  425. /* > this value as the first entry of the WORK array, and no error */
  426. /* > message related to LWORK is issued by XERBLA. */
  427. /* > \endverbatim */
  428. /* > */
  429. /* > \param[out] INFO */
  430. /* > \verbatim */
  431. /* > INFO is INTEGER */
  432. /* > = 0: successful exit. */
  433. /* > < 0: if INFO = -i, the i-th argument had an illegal value. */
  434. /* > \endverbatim */
  435. /* Authors: */
  436. /* ======== */
  437. /* > \author Univ. of Tennessee */
  438. /* > \author Univ. of California Berkeley */
  439. /* > \author Univ. of Colorado Denver */
  440. /* > \author NAG Ltd. */
  441. /* > \date January 2015 */
  442. /* > \ingroup realOTHERcomputational */
  443. /* > \par Further Details: */
  444. /* ===================== */
  445. /* > */
  446. /* > \verbatim */
  447. /* > */
  448. /* > This routine reduces A to Hessenberg form and maintains B in */
  449. /* > using a blocked variant of Moler and Stewart's original algorithm, */
  450. /* > as described by Kagstrom, Kressner, Quintana-Orti, and Quintana-Orti */
  451. /* > (BIT 2008). */
  452. /* > \endverbatim */
  453. /* > */
  454. /* ===================================================================== */
  455. /* Subroutine */ void sgghd3_(char *compq, char *compz, integer *n, integer *
  456. ilo, integer *ihi, real *a, integer *lda, real *b, integer *ldb, real
  457. *q, integer *ldq, real *z__, integer *ldz, real *work, integer *lwork,
  458. integer *info)
  459. {
  460. /* System generated locals */
  461. integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
  462. z_offset, i__1, i__2, i__3, i__4, i__5, i__6, i__7, i__8;
  463. real r__1;
  464. /* Local variables */
  465. logical blk22;
  466. integer cola, jcol, ierr;
  467. real temp;
  468. integer jrow, topq, ppwo;
  469. extern /* Subroutine */ void srot_(integer *, real *, integer *, real *,
  470. integer *, real *, real *);
  471. real temp1, temp2, temp3, c__;
  472. integer kacc22, i__, j, k;
  473. real s;
  474. extern logical lsame_(char *, char *);
  475. integer nbmin;
  476. extern /* Subroutine */ void sgemm_(char *, char *, integer *, integer *,
  477. integer *, real *, real *, integer *, real *, integer *, real *,
  478. real *, integer *), sgemv_(char *, integer *,
  479. integer *, real *, real *, integer *, real *, integer *, real *,
  480. real *, integer *);
  481. integer nblst;
  482. logical initq;
  483. real c1, c2;
  484. extern /* Subroutine */ void sorm22_(char *, char *, integer *, integer *,
  485. integer *, integer *, real *, integer *, real *, integer *, real *
  486. , integer *, integer *);
  487. logical wantq;
  488. integer j0;
  489. logical initz, wantz;
  490. real s1, s2;
  491. extern /* Subroutine */ void strmv_(char *, char *, char *, integer *,
  492. real *, integer *, real *, integer *);
  493. char compq2[1], compz2[1];
  494. integer nb, jj, nh, nx, pw;
  495. extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
  496. extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
  497. integer *, integer *, ftnlen, ftnlen);
  498. extern /* Subroutine */ void sgghrd_(char *, char *, integer *, integer *,
  499. integer *, real *, integer *, real *, integer *, real *, integer *
  500. , real *, integer *, integer *), slaset_(char *,
  501. integer *, integer *, real *, real *, real *, integer *),
  502. slartg_(real *, real *, real *, real *, real *), slacpy_(char *,
  503. integer *, integer *, real *, integer *, real *, integer *);
  504. integer lwkopt;
  505. logical lquery;
  506. integer nnb, len, top, ppw, n2nb;
  507. /* -- LAPACK computational routine (version 3.8.0) -- */
  508. /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
  509. /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
  510. /* January 2015 */
  511. /* ===================================================================== */
  512. /* Decode and test the input parameters. */
  513. /* Parameter adjustments */
  514. a_dim1 = *lda;
  515. a_offset = 1 + a_dim1 * 1;
  516. a -= a_offset;
  517. b_dim1 = *ldb;
  518. b_offset = 1 + b_dim1 * 1;
  519. b -= b_offset;
  520. q_dim1 = *ldq;
  521. q_offset = 1 + q_dim1 * 1;
  522. q -= q_offset;
  523. z_dim1 = *ldz;
  524. z_offset = 1 + z_dim1 * 1;
  525. z__ -= z_offset;
  526. --work;
  527. /* Function Body */
  528. *info = 0;
  529. nb = ilaenv_(&c__1, "SGGHD3", " ", n, ilo, ihi, &c_n1, (ftnlen)6, (ftnlen)
  530. 1);
  531. /* Computing MAX */
  532. i__1 = *n * 6 * nb;
  533. lwkopt = f2cmax(i__1,1);
  534. work[1] = (real) lwkopt;
  535. initq = lsame_(compq, "I");
  536. wantq = initq || lsame_(compq, "V");
  537. initz = lsame_(compz, "I");
  538. wantz = initz || lsame_(compz, "V");
  539. lquery = *lwork == -1;
  540. if (! lsame_(compq, "N") && ! wantq) {
  541. *info = -1;
  542. } else if (! lsame_(compz, "N") && ! wantz) {
  543. *info = -2;
  544. } else if (*n < 0) {
  545. *info = -3;
  546. } else if (*ilo < 1) {
  547. *info = -4;
  548. } else if (*ihi > *n || *ihi < *ilo - 1) {
  549. *info = -5;
  550. } else if (*lda < f2cmax(1,*n)) {
  551. *info = -7;
  552. } else if (*ldb < f2cmax(1,*n)) {
  553. *info = -9;
  554. } else if (wantq && *ldq < *n || *ldq < 1) {
  555. *info = -11;
  556. } else if (wantz && *ldz < *n || *ldz < 1) {
  557. *info = -13;
  558. } else if (*lwork < 1 && ! lquery) {
  559. *info = -15;
  560. }
  561. if (*info != 0) {
  562. i__1 = -(*info);
  563. xerbla_("SGGHD3", &i__1, (ftnlen)6);
  564. return;
  565. } else if (lquery) {
  566. return;
  567. }
  568. /* Initialize Q and Z if desired. */
  569. if (initq) {
  570. slaset_("All", n, n, &c_b14, &c_b15, &q[q_offset], ldq);
  571. }
  572. if (initz) {
  573. slaset_("All", n, n, &c_b14, &c_b15, &z__[z_offset], ldz);
  574. }
  575. /* Zero out lower triangle of B. */
  576. if (*n > 1) {
  577. i__1 = *n - 1;
  578. i__2 = *n - 1;
  579. slaset_("Lower", &i__1, &i__2, &c_b14, &c_b14, &b[b_dim1 + 2], ldb);
  580. }
  581. /* Quick return if possible */
  582. nh = *ihi - *ilo + 1;
  583. if (nh <= 1) {
  584. work[1] = 1.f;
  585. return;
  586. }
  587. /* Determine the blocksize. */
  588. nbmin = ilaenv_(&c__2, "SGGHD3", " ", n, ilo, ihi, &c_n1, (ftnlen)6, (
  589. ftnlen)1);
  590. if (nb > 1 && nb < nh) {
  591. /* Determine when to use unblocked instead of blocked code. */
  592. /* Computing MAX */
  593. i__1 = nb, i__2 = ilaenv_(&c__3, "SGGHD3", " ", n, ilo, ihi, &c_n1, (
  594. ftnlen)6, (ftnlen)1);
  595. nx = f2cmax(i__1,i__2);
  596. if (nx < nh) {
  597. /* Determine if workspace is large enough for blocked code. */
  598. if (*lwork < lwkopt) {
  599. /* Not enough workspace to use optimal NB: determine the */
  600. /* minimum value of NB, and reduce NB or force use of */
  601. /* unblocked code. */
  602. /* Computing MAX */
  603. i__1 = 2, i__2 = ilaenv_(&c__2, "SGGHD3", " ", n, ilo, ihi, &
  604. c_n1, (ftnlen)6, (ftnlen)1);
  605. nbmin = f2cmax(i__1,i__2);
  606. if (*lwork >= *n * 6 * nbmin) {
  607. nb = *lwork / (*n * 6);
  608. } else {
  609. nb = 1;
  610. }
  611. }
  612. }
  613. }
  614. if (nb < nbmin || nb >= nh) {
  615. /* Use unblocked code below */
  616. jcol = *ilo;
  617. } else {
  618. /* Use blocked code */
  619. kacc22 = ilaenv_(&c__16, "SGGHD3", " ", n, ilo, ihi, &c_n1, (ftnlen)6,
  620. (ftnlen)1);
  621. blk22 = kacc22 == 2;
  622. i__1 = *ihi - 2;
  623. i__2 = nb;
  624. for (jcol = *ilo; i__2 < 0 ? jcol >= i__1 : jcol <= i__1; jcol +=
  625. i__2) {
  626. /* Computing MIN */
  627. i__3 = nb, i__4 = *ihi - jcol - 1;
  628. nnb = f2cmin(i__3,i__4);
  629. /* Initialize small orthogonal factors that will hold the */
  630. /* accumulated Givens rotations in workspace. */
  631. /* N2NB denotes the number of 2*NNB-by-2*NNB factors */
  632. /* NBLST denotes the (possibly smaller) order of the last */
  633. /* factor. */
  634. n2nb = (*ihi - jcol - 1) / nnb - 1;
  635. nblst = *ihi - jcol - n2nb * nnb;
  636. slaset_("All", &nblst, &nblst, &c_b14, &c_b15, &work[1], &nblst);
  637. pw = nblst * nblst + 1;
  638. i__3 = n2nb;
  639. for (i__ = 1; i__ <= i__3; ++i__) {
  640. i__4 = nnb << 1;
  641. i__5 = nnb << 1;
  642. i__6 = nnb << 1;
  643. slaset_("All", &i__4, &i__5, &c_b14, &c_b15, &work[pw], &i__6);
  644. pw += (nnb << 2) * nnb;
  645. }
  646. /* Reduce columns JCOL:JCOL+NNB-1 of A to Hessenberg form. */
  647. i__3 = jcol + nnb - 1;
  648. for (j = jcol; j <= i__3; ++j) {
  649. /* Reduce Jth column of A. Store cosines and sines in Jth */
  650. /* column of A and B, respectively. */
  651. i__4 = j + 2;
  652. for (i__ = *ihi; i__ >= i__4; --i__) {
  653. temp = a[i__ - 1 + j * a_dim1];
  654. slartg_(&temp, &a[i__ + j * a_dim1], &c__, &s, &a[i__ - 1
  655. + j * a_dim1]);
  656. a[i__ + j * a_dim1] = c__;
  657. b[i__ + j * b_dim1] = s;
  658. }
  659. /* Accumulate Givens rotations into workspace array. */
  660. ppw = (nblst + 1) * (nblst - 2) - j + jcol + 1;
  661. len = j + 2 - jcol;
  662. jrow = j + n2nb * nnb + 2;
  663. i__4 = jrow;
  664. for (i__ = *ihi; i__ >= i__4; --i__) {
  665. c__ = a[i__ + j * a_dim1];
  666. s = b[i__ + j * b_dim1];
  667. i__5 = ppw + len - 1;
  668. for (jj = ppw; jj <= i__5; ++jj) {
  669. temp = work[jj + nblst];
  670. work[jj + nblst] = c__ * temp - s * work[jj];
  671. work[jj] = s * temp + c__ * work[jj];
  672. }
  673. ++len;
  674. ppw = ppw - nblst - 1;
  675. }
  676. ppwo = nblst * nblst + (nnb + j - jcol - 1 << 1) * nnb + nnb;
  677. j0 = jrow - nnb;
  678. i__4 = j + 2;
  679. i__5 = -nnb;
  680. for (jrow = j0; i__5 < 0 ? jrow >= i__4 : jrow <= i__4; jrow
  681. += i__5) {
  682. ppw = ppwo;
  683. len = j + 2 - jcol;
  684. i__6 = jrow;
  685. for (i__ = jrow + nnb - 1; i__ >= i__6; --i__) {
  686. c__ = a[i__ + j * a_dim1];
  687. s = b[i__ + j * b_dim1];
  688. i__7 = ppw + len - 1;
  689. for (jj = ppw; jj <= i__7; ++jj) {
  690. temp = work[jj + (nnb << 1)];
  691. work[jj + (nnb << 1)] = c__ * temp - s * work[jj];
  692. work[jj] = s * temp + c__ * work[jj];
  693. }
  694. ++len;
  695. ppw = ppw - (nnb << 1) - 1;
  696. }
  697. ppwo += (nnb << 2) * nnb;
  698. }
  699. /* TOP denotes the number of top rows in A and B that will */
  700. /* not be updated during the next steps. */
  701. if (jcol <= 2) {
  702. top = 0;
  703. } else {
  704. top = jcol;
  705. }
  706. /* Propagate transformations through B and replace stored */
  707. /* left sines/cosines by right sines/cosines. */
  708. i__5 = j + 1;
  709. for (jj = *n; jj >= i__5; --jj) {
  710. /* Update JJth column of B. */
  711. /* Computing MIN */
  712. i__4 = jj + 1;
  713. i__6 = j + 2;
  714. for (i__ = f2cmin(i__4,*ihi); i__ >= i__6; --i__) {
  715. c__ = a[i__ + j * a_dim1];
  716. s = b[i__ + j * b_dim1];
  717. temp = b[i__ + jj * b_dim1];
  718. b[i__ + jj * b_dim1] = c__ * temp - s * b[i__ - 1 +
  719. jj * b_dim1];
  720. b[i__ - 1 + jj * b_dim1] = s * temp + c__ * b[i__ - 1
  721. + jj * b_dim1];
  722. }
  723. /* Annihilate B( JJ+1, JJ ). */
  724. if (jj < *ihi) {
  725. temp = b[jj + 1 + (jj + 1) * b_dim1];
  726. slartg_(&temp, &b[jj + 1 + jj * b_dim1], &c__, &s, &b[
  727. jj + 1 + (jj + 1) * b_dim1]);
  728. b[jj + 1 + jj * b_dim1] = 0.f;
  729. i__6 = jj - top;
  730. srot_(&i__6, &b[top + 1 + (jj + 1) * b_dim1], &c__1, &
  731. b[top + 1 + jj * b_dim1], &c__1, &c__, &s);
  732. a[jj + 1 + j * a_dim1] = c__;
  733. b[jj + 1 + j * b_dim1] = -s;
  734. }
  735. }
  736. /* Update A by transformations from right. */
  737. /* Explicit loop unrolling provides better performance */
  738. /* compared to SLASR. */
  739. /* CALL SLASR( 'Right', 'Variable', 'Backward', IHI-TOP, */
  740. /* $ IHI-J, A( J+2, J ), B( J+2, J ), */
  741. /* $ A( TOP+1, J+1 ), LDA ) */
  742. jj = (*ihi - j - 1) % 3;
  743. i__5 = jj + 1;
  744. for (i__ = *ihi - j - 3; i__ >= i__5; i__ += -3) {
  745. c__ = a[j + 1 + i__ + j * a_dim1];
  746. s = -b[j + 1 + i__ + j * b_dim1];
  747. c1 = a[j + 2 + i__ + j * a_dim1];
  748. s1 = -b[j + 2 + i__ + j * b_dim1];
  749. c2 = a[j + 3 + i__ + j * a_dim1];
  750. s2 = -b[j + 3 + i__ + j * b_dim1];
  751. i__6 = *ihi;
  752. for (k = top + 1; k <= i__6; ++k) {
  753. temp = a[k + (j + i__) * a_dim1];
  754. temp1 = a[k + (j + i__ + 1) * a_dim1];
  755. temp2 = a[k + (j + i__ + 2) * a_dim1];
  756. temp3 = a[k + (j + i__ + 3) * a_dim1];
  757. a[k + (j + i__ + 3) * a_dim1] = c2 * temp3 + s2 *
  758. temp2;
  759. temp2 = -s2 * temp3 + c2 * temp2;
  760. a[k + (j + i__ + 2) * a_dim1] = c1 * temp2 + s1 *
  761. temp1;
  762. temp1 = -s1 * temp2 + c1 * temp1;
  763. a[k + (j + i__ + 1) * a_dim1] = c__ * temp1 + s *
  764. temp;
  765. a[k + (j + i__) * a_dim1] = -s * temp1 + c__ * temp;
  766. }
  767. }
  768. if (jj > 0) {
  769. for (i__ = jj; i__ >= 1; --i__) {
  770. i__5 = *ihi - top;
  771. r__1 = -b[j + 1 + i__ + j * b_dim1];
  772. srot_(&i__5, &a[top + 1 + (j + i__ + 1) * a_dim1], &
  773. c__1, &a[top + 1 + (j + i__) * a_dim1], &c__1,
  774. &a[j + 1 + i__ + j * a_dim1], &r__1);
  775. }
  776. }
  777. /* Update (J+1)th column of A by transformations from left. */
  778. if (j < jcol + nnb - 1) {
  779. len = j + 1 - jcol;
  780. /* Multiply with the trailing accumulated orthogonal */
  781. /* matrix, which takes the form */
  782. /* [ U11 U12 ] */
  783. /* U = [ ], */
  784. /* [ U21 U22 ] */
  785. /* where U21 is a LEN-by-LEN matrix and U12 is lower */
  786. /* triangular. */
  787. jrow = *ihi - nblst + 1;
  788. sgemv_("Transpose", &nblst, &len, &c_b15, &work[1], &
  789. nblst, &a[jrow + (j + 1) * a_dim1], &c__1, &c_b14,
  790. &work[pw], &c__1);
  791. ppw = pw + len;
  792. i__5 = jrow + nblst - len - 1;
  793. for (i__ = jrow; i__ <= i__5; ++i__) {
  794. work[ppw] = a[i__ + (j + 1) * a_dim1];
  795. ++ppw;
  796. }
  797. i__5 = nblst - len;
  798. strmv_("Lower", "Transpose", "Non-unit", &i__5, &work[len
  799. * nblst + 1], &nblst, &work[pw + len], &c__1);
  800. i__5 = nblst - len;
  801. sgemv_("Transpose", &len, &i__5, &c_b15, &work[(len + 1) *
  802. nblst - len + 1], &nblst, &a[jrow + nblst - len
  803. + (j + 1) * a_dim1], &c__1, &c_b15, &work[pw +
  804. len], &c__1);
  805. ppw = pw;
  806. i__5 = jrow + nblst - 1;
  807. for (i__ = jrow; i__ <= i__5; ++i__) {
  808. a[i__ + (j + 1) * a_dim1] = work[ppw];
  809. ++ppw;
  810. }
  811. /* Multiply with the other accumulated orthogonal */
  812. /* matrices, which take the form */
  813. /* [ U11 U12 0 ] */
  814. /* [ ] */
  815. /* U = [ U21 U22 0 ], */
  816. /* [ ] */
  817. /* [ 0 0 I ] */
  818. /* where I denotes the (NNB-LEN)-by-(NNB-LEN) identity */
  819. /* matrix, U21 is a LEN-by-LEN upper triangular matrix */
  820. /* and U12 is an NNB-by-NNB lower triangular matrix. */
  821. ppwo = nblst * nblst + 1;
  822. j0 = jrow - nnb;
  823. i__5 = jcol + 1;
  824. i__6 = -nnb;
  825. for (jrow = j0; i__6 < 0 ? jrow >= i__5 : jrow <= i__5;
  826. jrow += i__6) {
  827. ppw = pw + len;
  828. i__4 = jrow + nnb - 1;
  829. for (i__ = jrow; i__ <= i__4; ++i__) {
  830. work[ppw] = a[i__ + (j + 1) * a_dim1];
  831. ++ppw;
  832. }
  833. ppw = pw;
  834. i__4 = jrow + nnb + len - 1;
  835. for (i__ = jrow + nnb; i__ <= i__4; ++i__) {
  836. work[ppw] = a[i__ + (j + 1) * a_dim1];
  837. ++ppw;
  838. }
  839. i__4 = nnb << 1;
  840. strmv_("Upper", "Transpose", "Non-unit", &len, &work[
  841. ppwo + nnb], &i__4, &work[pw], &c__1);
  842. i__4 = nnb << 1;
  843. strmv_("Lower", "Transpose", "Non-unit", &nnb, &work[
  844. ppwo + (len << 1) * nnb], &i__4, &work[pw +
  845. len], &c__1);
  846. i__4 = nnb << 1;
  847. sgemv_("Transpose", &nnb, &len, &c_b15, &work[ppwo], &
  848. i__4, &a[jrow + (j + 1) * a_dim1], &c__1, &
  849. c_b15, &work[pw], &c__1);
  850. i__4 = nnb << 1;
  851. sgemv_("Transpose", &len, &nnb, &c_b15, &work[ppwo + (
  852. len << 1) * nnb + nnb], &i__4, &a[jrow + nnb
  853. + (j + 1) * a_dim1], &c__1, &c_b15, &work[pw
  854. + len], &c__1);
  855. ppw = pw;
  856. i__4 = jrow + len + nnb - 1;
  857. for (i__ = jrow; i__ <= i__4; ++i__) {
  858. a[i__ + (j + 1) * a_dim1] = work[ppw];
  859. ++ppw;
  860. }
  861. ppwo += (nnb << 2) * nnb;
  862. }
  863. }
  864. }
  865. /* Apply accumulated orthogonal matrices to A. */
  866. cola = *n - jcol - nnb + 1;
  867. j = *ihi - nblst + 1;
  868. sgemm_("Transpose", "No Transpose", &nblst, &cola, &nblst, &c_b15,
  869. &work[1], &nblst, &a[j + (jcol + nnb) * a_dim1], lda, &
  870. c_b14, &work[pw], &nblst);
  871. slacpy_("All", &nblst, &cola, &work[pw], &nblst, &a[j + (jcol +
  872. nnb) * a_dim1], lda);
  873. ppwo = nblst * nblst + 1;
  874. j0 = j - nnb;
  875. i__3 = jcol + 1;
  876. i__6 = -nnb;
  877. for (j = j0; i__6 < 0 ? j >= i__3 : j <= i__3; j += i__6) {
  878. if (blk22) {
  879. /* Exploit the structure of */
  880. /* [ U11 U12 ] */
  881. /* U = [ ] */
  882. /* [ U21 U22 ], */
  883. /* where all blocks are NNB-by-NNB, U21 is upper */
  884. /* triangular and U12 is lower triangular. */
  885. i__5 = nnb << 1;
  886. i__4 = nnb << 1;
  887. i__7 = *lwork - pw + 1;
  888. sorm22_("Left", "Transpose", &i__5, &cola, &nnb, &nnb, &
  889. work[ppwo], &i__4, &a[j + (jcol + nnb) * a_dim1],
  890. lda, &work[pw], &i__7, &ierr);
  891. } else {
  892. /* Ignore the structure of U. */
  893. i__5 = nnb << 1;
  894. i__4 = nnb << 1;
  895. i__7 = nnb << 1;
  896. i__8 = nnb << 1;
  897. sgemm_("Transpose", "No Transpose", &i__5, &cola, &i__4, &
  898. c_b15, &work[ppwo], &i__7, &a[j + (jcol + nnb) *
  899. a_dim1], lda, &c_b14, &work[pw], &i__8);
  900. i__5 = nnb << 1;
  901. i__4 = nnb << 1;
  902. slacpy_("All", &i__5, &cola, &work[pw], &i__4, &a[j + (
  903. jcol + nnb) * a_dim1], lda);
  904. }
  905. ppwo += (nnb << 2) * nnb;
  906. }
  907. /* Apply accumulated orthogonal matrices to Q. */
  908. if (wantq) {
  909. j = *ihi - nblst + 1;
  910. if (initq) {
  911. /* Computing MAX */
  912. i__6 = 2, i__3 = j - jcol + 1;
  913. topq = f2cmax(i__6,i__3);
  914. nh = *ihi - topq + 1;
  915. } else {
  916. topq = 1;
  917. nh = *n;
  918. }
  919. sgemm_("No Transpose", "No Transpose", &nh, &nblst, &nblst, &
  920. c_b15, &q[topq + j * q_dim1], ldq, &work[1], &nblst, &
  921. c_b14, &work[pw], &nh);
  922. slacpy_("All", &nh, &nblst, &work[pw], &nh, &q[topq + j *
  923. q_dim1], ldq);
  924. ppwo = nblst * nblst + 1;
  925. j0 = j - nnb;
  926. i__6 = jcol + 1;
  927. i__3 = -nnb;
  928. for (j = j0; i__3 < 0 ? j >= i__6 : j <= i__6; j += i__3) {
  929. if (initq) {
  930. /* Computing MAX */
  931. i__5 = 2, i__4 = j - jcol + 1;
  932. topq = f2cmax(i__5,i__4);
  933. nh = *ihi - topq + 1;
  934. }
  935. if (blk22) {
  936. /* Exploit the structure of U. */
  937. i__5 = nnb << 1;
  938. i__4 = nnb << 1;
  939. i__7 = *lwork - pw + 1;
  940. sorm22_("Right", "No Transpose", &nh, &i__5, &nnb, &
  941. nnb, &work[ppwo], &i__4, &q[topq + j * q_dim1]
  942. , ldq, &work[pw], &i__7, &ierr);
  943. } else {
  944. /* Ignore the structure of U. */
  945. i__5 = nnb << 1;
  946. i__4 = nnb << 1;
  947. i__7 = nnb << 1;
  948. sgemm_("No Transpose", "No Transpose", &nh, &i__5, &
  949. i__4, &c_b15, &q[topq + j * q_dim1], ldq, &
  950. work[ppwo], &i__7, &c_b14, &work[pw], &nh);
  951. i__5 = nnb << 1;
  952. slacpy_("All", &nh, &i__5, &work[pw], &nh, &q[topq +
  953. j * q_dim1], ldq);
  954. }
  955. ppwo += (nnb << 2) * nnb;
  956. }
  957. }
  958. /* Accumulate right Givens rotations if required. */
  959. if (wantz || top > 0) {
  960. /* Initialize small orthogonal factors that will hold the */
  961. /* accumulated Givens rotations in workspace. */
  962. slaset_("All", &nblst, &nblst, &c_b14, &c_b15, &work[1], &
  963. nblst);
  964. pw = nblst * nblst + 1;
  965. i__3 = n2nb;
  966. for (i__ = 1; i__ <= i__3; ++i__) {
  967. i__6 = nnb << 1;
  968. i__5 = nnb << 1;
  969. i__4 = nnb << 1;
  970. slaset_("All", &i__6, &i__5, &c_b14, &c_b15, &work[pw], &
  971. i__4);
  972. pw += (nnb << 2) * nnb;
  973. }
  974. /* Accumulate Givens rotations into workspace array. */
  975. i__3 = jcol + nnb - 1;
  976. for (j = jcol; j <= i__3; ++j) {
  977. ppw = (nblst + 1) * (nblst - 2) - j + jcol + 1;
  978. len = j + 2 - jcol;
  979. jrow = j + n2nb * nnb + 2;
  980. i__6 = jrow;
  981. for (i__ = *ihi; i__ >= i__6; --i__) {
  982. c__ = a[i__ + j * a_dim1];
  983. a[i__ + j * a_dim1] = 0.f;
  984. s = b[i__ + j * b_dim1];
  985. b[i__ + j * b_dim1] = 0.f;
  986. i__5 = ppw + len - 1;
  987. for (jj = ppw; jj <= i__5; ++jj) {
  988. temp = work[jj + nblst];
  989. work[jj + nblst] = c__ * temp - s * work[jj];
  990. work[jj] = s * temp + c__ * work[jj];
  991. }
  992. ++len;
  993. ppw = ppw - nblst - 1;
  994. }
  995. ppwo = nblst * nblst + (nnb + j - jcol - 1 << 1) * nnb +
  996. nnb;
  997. j0 = jrow - nnb;
  998. i__6 = j + 2;
  999. i__5 = -nnb;
  1000. for (jrow = j0; i__5 < 0 ? jrow >= i__6 : jrow <= i__6;
  1001. jrow += i__5) {
  1002. ppw = ppwo;
  1003. len = j + 2 - jcol;
  1004. i__4 = jrow;
  1005. for (i__ = jrow + nnb - 1; i__ >= i__4; --i__) {
  1006. c__ = a[i__ + j * a_dim1];
  1007. a[i__ + j * a_dim1] = 0.f;
  1008. s = b[i__ + j * b_dim1];
  1009. b[i__ + j * b_dim1] = 0.f;
  1010. i__7 = ppw + len - 1;
  1011. for (jj = ppw; jj <= i__7; ++jj) {
  1012. temp = work[jj + (nnb << 1)];
  1013. work[jj + (nnb << 1)] = c__ * temp - s * work[
  1014. jj];
  1015. work[jj] = s * temp + c__ * work[jj];
  1016. }
  1017. ++len;
  1018. ppw = ppw - (nnb << 1) - 1;
  1019. }
  1020. ppwo += (nnb << 2) * nnb;
  1021. }
  1022. }
  1023. } else {
  1024. i__3 = *ihi - jcol - 1;
  1025. slaset_("Lower", &i__3, &nnb, &c_b14, &c_b14, &a[jcol + 2 +
  1026. jcol * a_dim1], lda);
  1027. i__3 = *ihi - jcol - 1;
  1028. slaset_("Lower", &i__3, &nnb, &c_b14, &c_b14, &b[jcol + 2 +
  1029. jcol * b_dim1], ldb);
  1030. }
  1031. /* Apply accumulated orthogonal matrices to A and B. */
  1032. if (top > 0) {
  1033. j = *ihi - nblst + 1;
  1034. sgemm_("No Transpose", "No Transpose", &top, &nblst, &nblst, &
  1035. c_b15, &a[j * a_dim1 + 1], lda, &work[1], &nblst, &
  1036. c_b14, &work[pw], &top);
  1037. slacpy_("All", &top, &nblst, &work[pw], &top, &a[j * a_dim1 +
  1038. 1], lda);
  1039. ppwo = nblst * nblst + 1;
  1040. j0 = j - nnb;
  1041. i__3 = jcol + 1;
  1042. i__5 = -nnb;
  1043. for (j = j0; i__5 < 0 ? j >= i__3 : j <= i__3; j += i__5) {
  1044. if (blk22) {
  1045. /* Exploit the structure of U. */
  1046. i__6 = nnb << 1;
  1047. i__4 = nnb << 1;
  1048. i__7 = *lwork - pw + 1;
  1049. sorm22_("Right", "No Transpose", &top, &i__6, &nnb, &
  1050. nnb, &work[ppwo], &i__4, &a[j * a_dim1 + 1],
  1051. lda, &work[pw], &i__7, &ierr);
  1052. } else {
  1053. /* Ignore the structure of U. */
  1054. i__6 = nnb << 1;
  1055. i__4 = nnb << 1;
  1056. i__7 = nnb << 1;
  1057. sgemm_("No Transpose", "No Transpose", &top, &i__6, &
  1058. i__4, &c_b15, &a[j * a_dim1 + 1], lda, &work[
  1059. ppwo], &i__7, &c_b14, &work[pw], &top);
  1060. i__6 = nnb << 1;
  1061. slacpy_("All", &top, &i__6, &work[pw], &top, &a[j *
  1062. a_dim1 + 1], lda);
  1063. }
  1064. ppwo += (nnb << 2) * nnb;
  1065. }
  1066. j = *ihi - nblst + 1;
  1067. sgemm_("No Transpose", "No Transpose", &top, &nblst, &nblst, &
  1068. c_b15, &b[j * b_dim1 + 1], ldb, &work[1], &nblst, &
  1069. c_b14, &work[pw], &top);
  1070. slacpy_("All", &top, &nblst, &work[pw], &top, &b[j * b_dim1 +
  1071. 1], ldb);
  1072. ppwo = nblst * nblst + 1;
  1073. j0 = j - nnb;
  1074. i__5 = jcol + 1;
  1075. i__3 = -nnb;
  1076. for (j = j0; i__3 < 0 ? j >= i__5 : j <= i__5; j += i__3) {
  1077. if (blk22) {
  1078. /* Exploit the structure of U. */
  1079. i__6 = nnb << 1;
  1080. i__4 = nnb << 1;
  1081. i__7 = *lwork - pw + 1;
  1082. sorm22_("Right", "No Transpose", &top, &i__6, &nnb, &
  1083. nnb, &work[ppwo], &i__4, &b[j * b_dim1 + 1],
  1084. ldb, &work[pw], &i__7, &ierr);
  1085. } else {
  1086. /* Ignore the structure of U. */
  1087. i__6 = nnb << 1;
  1088. i__4 = nnb << 1;
  1089. i__7 = nnb << 1;
  1090. sgemm_("No Transpose", "No Transpose", &top, &i__6, &
  1091. i__4, &c_b15, &b[j * b_dim1 + 1], ldb, &work[
  1092. ppwo], &i__7, &c_b14, &work[pw], &top);
  1093. i__6 = nnb << 1;
  1094. slacpy_("All", &top, &i__6, &work[pw], &top, &b[j *
  1095. b_dim1 + 1], ldb);
  1096. }
  1097. ppwo += (nnb << 2) * nnb;
  1098. }
  1099. }
  1100. /* Apply accumulated orthogonal matrices to Z. */
  1101. if (wantz) {
  1102. j = *ihi - nblst + 1;
  1103. if (initq) {
  1104. /* Computing MAX */
  1105. i__3 = 2, i__5 = j - jcol + 1;
  1106. topq = f2cmax(i__3,i__5);
  1107. nh = *ihi - topq + 1;
  1108. } else {
  1109. topq = 1;
  1110. nh = *n;
  1111. }
  1112. sgemm_("No Transpose", "No Transpose", &nh, &nblst, &nblst, &
  1113. c_b15, &z__[topq + j * z_dim1], ldz, &work[1], &nblst,
  1114. &c_b14, &work[pw], &nh);
  1115. slacpy_("All", &nh, &nblst, &work[pw], &nh, &z__[topq + j *
  1116. z_dim1], ldz);
  1117. ppwo = nblst * nblst + 1;
  1118. j0 = j - nnb;
  1119. i__3 = jcol + 1;
  1120. i__5 = -nnb;
  1121. for (j = j0; i__5 < 0 ? j >= i__3 : j <= i__3; j += i__5) {
  1122. if (initq) {
  1123. /* Computing MAX */
  1124. i__6 = 2, i__4 = j - jcol + 1;
  1125. topq = f2cmax(i__6,i__4);
  1126. nh = *ihi - topq + 1;
  1127. }
  1128. if (blk22) {
  1129. /* Exploit the structure of U. */
  1130. i__6 = nnb << 1;
  1131. i__4 = nnb << 1;
  1132. i__7 = *lwork - pw + 1;
  1133. sorm22_("Right", "No Transpose", &nh, &i__6, &nnb, &
  1134. nnb, &work[ppwo], &i__4, &z__[topq + j *
  1135. z_dim1], ldz, &work[pw], &i__7, &ierr);
  1136. } else {
  1137. /* Ignore the structure of U. */
  1138. i__6 = nnb << 1;
  1139. i__4 = nnb << 1;
  1140. i__7 = nnb << 1;
  1141. sgemm_("No Transpose", "No Transpose", &nh, &i__6, &
  1142. i__4, &c_b15, &z__[topq + j * z_dim1], ldz, &
  1143. work[ppwo], &i__7, &c_b14, &work[pw], &nh);
  1144. i__6 = nnb << 1;
  1145. slacpy_("All", &nh, &i__6, &work[pw], &nh, &z__[topq
  1146. + j * z_dim1], ldz);
  1147. }
  1148. ppwo += (nnb << 2) * nnb;
  1149. }
  1150. }
  1151. }
  1152. }
  1153. /* Use unblocked code to reduce the rest of the matrix */
  1154. /* Avoid re-initialization of modified Q and Z. */
  1155. *(unsigned char *)compq2 = *(unsigned char *)compq;
  1156. *(unsigned char *)compz2 = *(unsigned char *)compz;
  1157. if (jcol != *ilo) {
  1158. if (wantq) {
  1159. *(unsigned char *)compq2 = 'V';
  1160. }
  1161. if (wantz) {
  1162. *(unsigned char *)compz2 = 'V';
  1163. }
  1164. }
  1165. if (jcol < *ihi) {
  1166. sgghrd_(compq2, compz2, n, &jcol, ihi, &a[a_offset], lda, &b[b_offset]
  1167. , ldb, &q[q_offset], ldq, &z__[z_offset], ldz, &ierr);
  1168. }
  1169. work[1] = (real) lwkopt;
  1170. return;
  1171. /* End of SGGHD3 */
  1172. } /* sgghd3_ */
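
/* --------------------------------------------------------------------- */
/* Illustrative usage sketch, not part of the f2c output above: one way a */
/* C caller might drive sgghd3_, first issuing the LWORK = -1 workspace   */
/* query described in the header comment and then performing the actual   */
/* reduction.  The function name, the choice COMPQ = COMPZ = 'I', and the */
/* leading dimensions equal to N are assumptions made for this example;   */
/* A, B, Q and Z are column-major N-by-N arrays and B is expected to be   */
/* upper triangular on entry (e.g. from a prior QR factorization).        */
static integer sgghd3_example_call(integer n, real *a, real *b, real *q,
	real *z)
{
    integer ilo = 1, ihi = n, info = 0;
    integer lda = n, ldb = n, ldq = n, ldz = n;
    integer lwork = -1;
    real wkopt, *work;

    /* Workspace query: the optimal LWORK is returned in WORK(1). */
    sgghd3_("I", "I", &n, &ilo, &ihi, a, &lda, b, &ldb,
	    q, &ldq, z, &ldz, &wkopt, &lwork, &info);
    if (info != 0) {
	return info;
    }

    /* Allocate the reported workspace and perform the reduction: on      */
    /* return A holds H, B holds T, and Q, Z hold the accumulated          */
    /* orthogonal factors (COMPQ = COMPZ = 'I' initializes them).          */
    lwork = (integer) wkopt;
    work = (real *) malloc((size_t) lwork * sizeof(real));
    if (work == NULL) {
	return -1;
    }
    sgghd3_("I", "I", &n, &ilo, &ihi, a, &lda, b, &ldb,
	    q, &ldq, z, &ldz, work, &lwork, &info);
    free(work);
    return info;
}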