You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

ztrsyl3.c 57 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900
  1. /* f2c.h -- Standard Fortran to C header file */
  2. /** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed."
  3. - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */
  4. #ifndef F2C_INCLUDE
  5. #define F2C_INCLUDE
  6. #include <math.h>
  7. #include <stdlib.h>
  8. #include <string.h>
  9. #include <stdio.h>
  10. #include <complex.h>
  11. #ifdef complex
  12. #undef complex
  13. #endif
  14. #ifdef I
  15. #undef I
  16. #endif
  17. typedef int integer;
  18. typedef unsigned int uinteger;
  19. typedef char *address;
  20. typedef short int shortint;
  21. typedef float real;
  22. typedef double doublereal;
  23. typedef struct { real r, i; } complex;
  24. typedef struct { doublereal r, i; } doublecomplex;
  25. static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
  26. static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
  27. static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
  28. static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
  29. #define pCf(z) (*_pCf(z))
  30. #define pCd(z) (*_pCd(z))
  31. typedef int logical;
  32. typedef short int shortlogical;
  33. typedef char logical1;
  34. typedef char integer1;
  35. #define TRUE_ (1)
  36. #define FALSE_ (0)
  37. /* Extern is for use with -E */
  38. #ifndef Extern
  39. #define Extern extern
  40. #endif
  41. /* I/O stuff */
  42. typedef int flag;
  43. typedef int ftnlen;
  44. typedef int ftnint;
  45. /*external read, write*/
  46. typedef struct
  47. { flag cierr;
  48. ftnint ciunit;
  49. flag ciend;
  50. char *cifmt;
  51. ftnint cirec;
  52. } cilist;
  53. /*internal read, write*/
  54. typedef struct
  55. { flag icierr;
  56. char *iciunit;
  57. flag iciend;
  58. char *icifmt;
  59. ftnint icirlen;
  60. ftnint icirnum;
  61. } icilist;
  62. /*open*/
  63. typedef struct
  64. { flag oerr;
  65. ftnint ounit;
  66. char *ofnm;
  67. ftnlen ofnmlen;
  68. char *osta;
  69. char *oacc;
  70. char *ofm;
  71. ftnint orl;
  72. char *oblnk;
  73. } olist;
  74. /*close*/
  75. typedef struct
  76. { flag cerr;
  77. ftnint cunit;
  78. char *csta;
  79. } cllist;
  80. /*rewind, backspace, endfile*/
  81. typedef struct
  82. { flag aerr;
  83. ftnint aunit;
  84. } alist;
  85. /* inquire */
  86. typedef struct
  87. { flag inerr;
  88. ftnint inunit;
  89. char *infile;
  90. ftnlen infilen;
  91. ftnint *inex; /*parameters in standard's order*/
  92. ftnint *inopen;
  93. ftnint *innum;
  94. ftnint *innamed;
  95. char *inname;
  96. ftnlen innamlen;
  97. char *inacc;
  98. ftnlen inacclen;
  99. char *inseq;
  100. ftnlen inseqlen;
  101. char *indir;
  102. ftnlen indirlen;
  103. char *infmt;
  104. ftnlen infmtlen;
  105. char *inform;
  106. ftnint informlen;
  107. char *inunf;
  108. ftnlen inunflen;
  109. ftnint *inrecl;
  110. ftnint *innrec;
  111. char *inblank;
  112. ftnlen inblanklen;
  113. } inlist;
  114. #define VOID void
  115. union Multitype { /* for multiple entry points */
  116. integer1 g;
  117. shortint h;
  118. integer i;
  119. /* longint j; */
  120. real r;
  121. doublereal d;
  122. complex c;
  123. doublecomplex z;
  124. };
  125. typedef union Multitype Multitype;
  126. struct Vardesc { /* for Namelist */
  127. char *name;
  128. char *addr;
  129. ftnlen *dims;
  130. int type;
  131. };
  132. typedef struct Vardesc Vardesc;
  133. struct Namelist {
  134. char *name;
  135. Vardesc **vars;
  136. int nvars;
  137. };
  138. typedef struct Namelist Namelist;
  139. #define exponent(x)
  140. #define abs(x) ((x) >= 0 ? (x) : -(x))
  141. #define dabs(x) (fabs(x))
  142. #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
  143. #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
  144. #define dmin(a,b) (f2cmin(a,b))
  145. #define dmax(a,b) (f2cmax(a,b))
  146. #define bit_test(a,b) ((a) >> (b) & 1)
  147. #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
  148. #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
  149. #define abort_() { sig_die("Fortran abort routine called", 1); }
  150. #define c_abs(z) (cabsf(Cf(z)))
  151. #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
  152. #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
  153. #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
  154. #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
  155. #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
  156. #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
  157. //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
  158. #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
  159. #define d_abs(x) (fabs(*(x)))
  160. #define d_acos(x) (acos(*(x)))
  161. #define d_asin(x) (asin(*(x)))
  162. #define d_atan(x) (atan(*(x)))
  163. #define d_atn2(x, y) (atan2(*(x),*(y)))
  164. #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
  165. #define r_cnjg(R, Z) { pCf(R) = conj(Cf(Z)); }
  166. #define d_cos(x) (cos(*(x)))
  167. #define d_cosh(x) (cosh(*(x)))
  168. #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
  169. #define d_exp(x) (exp(*(x)))
  170. #define d_imag(z) (cimag(Cd(z)))
  171. #define r_imag(z) (cimag(Cf(z)))
  172. #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  173. #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
  174. #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  175. #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
  176. #define d_log(x) (log(*(x)))
  177. #define d_mod(x, y) (fmod(*(x), *(y)))
  178. #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
  179. #define d_nint(x) u_nint(*(x))
  180. #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
  181. #define d_sign(a,b) u_sign(*(a),*(b))
  182. #define r_sign(a,b) u_sign(*(a),*(b))
  183. #define d_sin(x) (sin(*(x)))
  184. #define d_sinh(x) (sinh(*(x)))
  185. #define d_sqrt(x) (sqrt(*(x)))
  186. #define d_tan(x) (tan(*(x)))
  187. #define d_tanh(x) (tanh(*(x)))
  188. #define i_abs(x) abs(*(x))
  189. #define i_dnnt(x) ((integer)u_nint(*(x)))
  190. #define i_len(s, n) (n)
  191. #define i_nint(x) ((integer)u_nint(*(x)))
  192. #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
  193. #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
  194. #define pow_si(B,E) spow_ui(*(B),*(E))
  195. #define pow_ri(B,E) spow_ui(*(B),*(E))
  196. #define pow_di(B,E) dpow_ui(*(B),*(E))
  197. #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
  198. #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
  199. #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
  200. #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
  201. #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
  202. #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
  203. #define sig_die(s, kill) { exit(1); }
  204. #define s_stop(s, n) {exit(0);}
  205. static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
  206. #define z_abs(z) (cabs(Cd(z)))
  207. #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
  208. #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
  209. #define myexit_() break;
  210. #define mycycle_() continue;
  211. #define myceiling_(w) ceil(w)
  212. #define myhuge_(w) HUGE_VAL
  213. //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
  214. #define mymaxloc_(w,s,e,n) dmaxloc_(w,*(s),*(e),n)
  215. #define myexp_(w) my_expfunc(w)
  216. static int my_expfunc(double *x) {int e; (void)frexp(*x,&e); return e;}
  217. /* procedure parameter types for -A and -C++ */
  218. #define F2C_proc_par_types 1
  219. #ifdef __cplusplus
  220. typedef logical (*L_fp)(...);
  221. #else
  222. typedef logical (*L_fp)();
  223. #endif
  224. static float spow_ui(float x, integer n) {
  225. float pow=1.0; unsigned long int u;
  226. if(n != 0) {
  227. if(n < 0) n = -n, x = 1/x;
  228. for(u = n; ; ) {
  229. if(u & 01) pow *= x;
  230. if(u >>= 1) x *= x;
  231. else break;
  232. }
  233. }
  234. return pow;
  235. }
  236. static double dpow_ui(double x, integer n) {
  237. double pow=1.0; unsigned long int u;
  238. if(n != 0) {
  239. if(n < 0) n = -n, x = 1/x;
  240. for(u = n; ; ) {
  241. if(u & 01) pow *= x;
  242. if(u >>= 1) x *= x;
  243. else break;
  244. }
  245. }
  246. return pow;
  247. }
  248. static _Complex float cpow_ui(_Complex float x, integer n) {
  249. _Complex float pow=1.0; unsigned long int u;
  250. if(n != 0) {
  251. if(n < 0) n = -n, x = 1/x;
  252. for(u = n; ; ) {
  253. if(u & 01) pow *= x;
  254. if(u >>= 1) x *= x;
  255. else break;
  256. }
  257. }
  258. return pow;
  259. }
  260. static _Complex double zpow_ui(_Complex double x, integer n) {
  261. _Complex double pow=1.0; unsigned long int u;
  262. if(n != 0) {
  263. if(n < 0) n = -n, x = 1/x;
  264. for(u = n; ; ) {
  265. if(u & 01) pow *= x;
  266. if(u >>= 1) x *= x;
  267. else break;
  268. }
  269. }
  270. return pow;
  271. }
  272. static integer pow_ii(integer x, integer n) {
  273. integer pow; unsigned long int u;
  274. if (n <= 0) {
  275. if (n == 0 || x == 1) pow = 1;
  276. else if (x != -1) pow = x == 0 ? 1/x : 0;
  277. else n = -n;
  278. }
  279. if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
  280. u = n;
  281. for(pow = 1; ; ) {
  282. if(u & 01) pow *= x;
  283. if(u >>= 1) x *= x;
  284. else break;
  285. }
  286. }
  287. return pow;
  288. }
  289. static integer dmaxloc_(double *w, integer s, integer e, integer *n)
  290. {
  291. double m; integer i, mi;
  292. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  293. if (w[i-1]>m) mi=i ,m=w[i-1];
  294. return mi-s+1;
  295. }
  296. static integer smaxloc_(float *w, integer s, integer e, integer *n)
  297. {
  298. float m; integer i, mi;
  299. for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
  300. if (w[i-1]>m) mi=i ,m=w[i-1];
  301. return mi-s+1;
  302. }
  303. static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  304. integer n = *n_, incx = *incx_, incy = *incy_, i;
  305. _Complex float zdotc = 0.0;
  306. if (incx == 1 && incy == 1) {
  307. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  308. zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
  309. }
  310. } else {
  311. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  312. zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
  313. }
  314. }
  315. pCf(z) = zdotc;
  316. }
  317. static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  318. integer n = *n_, incx = *incx_, incy = *incy_, i;
  319. _Complex double zdotc = 0.0;
  320. if (incx == 1 && incy == 1) {
  321. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  322. zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
  323. }
  324. } else {
  325. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  326. zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
  327. }
  328. }
  329. pCd(z) = zdotc;
  330. }
  331. static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
  332. integer n = *n_, incx = *incx_, incy = *incy_, i;
  333. _Complex float zdotc = 0.0;
  334. if (incx == 1 && incy == 1) {
  335. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  336. zdotc += Cf(&x[i]) * Cf(&y[i]);
  337. }
  338. } else {
  339. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  340. zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
  341. }
  342. }
  343. pCf(z) = zdotc;
  344. }
  345. static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
  346. integer n = *n_, incx = *incx_, incy = *incy_, i;
  347. _Complex double zdotc = 0.0;
  348. if (incx == 1 && incy == 1) {
  349. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  350. zdotc += Cd(&x[i]) * Cd(&y[i]);
  351. }
  352. } else {
  353. for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
  354. zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
  355. }
  356. }
  357. pCd(z) = zdotc;
  358. }
  359. #endif
  360. /* -- translated by f2c (version 20000121).
  361. You must link the resulting object file with the libraries:
  362. -lf2c -lm (in that order)
  363. */
  364. /* Table of constant values */
  365. static doublecomplex c_b1 = {1.,0.};
  366. static integer c__1 = 1;
  367. static integer c_n1 = -1;
  368. static doublereal c_b18 = 2.;
  369. static doublereal c_b106 = 1.;
  370. /* > \brief \b ZTRSYL3 */
  371. /* Definition: */
  372. /* =========== */
  373. /* > \par Purpose */
  374. /* ============= */
  375. /* > */
  376. /* > \verbatim */
  377. /* > */
  378. /* > ZTRSYL3 solves the complex Sylvester matrix equation: */
  379. /* > */
  380. /* > op(A)*X + X*op(B) = scale*C or */
  381. /* > op(A)*X - X*op(B) = scale*C, */
  382. /* > */
  383. /* > where op(A) = A or A**H, and A and B are both upper triangular. A is */
  384. /* > M-by-M and B is N-by-N; the right hand side C and the solution X are */
  385. /* > M-by-N; and scale is an output scale factor, set <= 1 to avoid */
  386. /* > overflow in X. */
  387. /* > */
  388. /* > This is the block version of the algorithm. */
  389. /* > \endverbatim */
  390. /* Arguments */
  391. /* ========= */
  392. /* > \param[in] TRANA */
  393. /* > \verbatim */
  394. /* > TRANA is CHARACTER*1 */
  395. /* > Specifies the option op(A): */
  396. /* > = 'N': op(A) = A (No transpose) */
  397. /* > = 'C': op(A) = A**H (Conjugate transpose) */
  398. /* > \endverbatim */
  399. /* > */
  400. /* > \param[in] TRANB */
  401. /* > \verbatim */
  402. /* > TRANB is CHARACTER*1 */
  403. /* > Specifies the option op(B): */
  404. /* > = 'N': op(B) = B (No transpose) */
  405. /* > = 'C': op(B) = B**H (Conjugate transpose) */
  406. /* > \endverbatim */
  407. /* > */
  408. /* > \param[in] ISGN */
  409. /* > \verbatim */
  410. /* > ISGN is INTEGER */
  411. /* > Specifies the sign in the equation: */
  412. /* > = +1: solve op(A)*X + X*op(B) = scale*C */
  413. /* > = -1: solve op(A)*X - X*op(B) = scale*C */
  414. /* > \endverbatim */
  415. /* > */
  416. /* > \param[in] M */
  417. /* > \verbatim */
  418. /* > M is INTEGER */
  419. /* > The order of the matrix A, and the number of rows in the */
  420. /* > matrices X and C. M >= 0. */
  421. /* > \endverbatim */
  422. /* > */
  423. /* > \param[in] N */
  424. /* > \verbatim */
  425. /* > N is INTEGER */
  426. /* > The order of the matrix B, and the number of columns in the */
  427. /* > matrices X and C. N >= 0. */
  428. /* > \endverbatim */
  429. /* > */
  430. /* > \param[in] A */
  431. /* > \verbatim */
  432. /* > A is COMPLEX*16 array, dimension (LDA,M) */
  433. /* > The upper triangular matrix A. */
  434. /* > \endverbatim */
  435. /* > */
  436. /* > \param[in] LDA */
  437. /* > \verbatim */
  438. /* > LDA is INTEGER */
  439. /* > The leading dimension of the array A. LDA >= f2cmax(1,M). */
  440. /* > \endverbatim */
  441. /* > */
  442. /* > \param[in] B */
  443. /* > \verbatim */
  444. /* > B is COMPLEX*16 array, dimension (LDB,N) */
  445. /* > The upper triangular matrix B. */
  446. /* > \endverbatim */
  447. /* > */
  448. /* > \param[in] LDB */
  449. /* > \verbatim */
  450. /* > LDB is INTEGER */
  451. /* > The leading dimension of the array B. LDB >= f2cmax(1,N). */
  452. /* > \endverbatim */
  453. /* > */
  454. /* > \param[in,out] C */
  455. /* > \verbatim */
  456. /* > C is COMPLEX*16 array, dimension (LDC,N) */
  457. /* > On entry, the M-by-N right hand side matrix C. */
  458. /* > On exit, C is overwritten by the solution matrix X. */
  459. /* > \endverbatim */
  460. /* > */
  461. /* > \param[in] LDC */
  462. /* > \verbatim */
  463. /* > LDC is INTEGER */
  464. /* > The leading dimension of the array C. LDC >= f2cmax(1,M) */
  465. /* > \endverbatim */
  466. /* > */
  467. /* > \param[out] SCALE */
  468. /* > \verbatim */
  469. /* > SCALE is DOUBLE PRECISION */
  470. /* > The scale factor, scale, set <= 1 to avoid overflow in X. */
  471. /* > \endverbatim */
  472. /* > */
  473. /* > \param[out] SWORK */
  474. /* > \verbatim */
  475. /* > SWORK is DOUBLE PRECISION array, dimension (MAX(2, ROWS), */
  476. /* > MAX(1,COLS)). */
  477. /* > On exit, if INFO = 0, SWORK(1) returns the optimal value ROWS */
  478. /* > and SWORK(2) returns the optimal COLS. */
  479. /* > \endverbatim */
  480. /* > */
  481. /* > \param[in] LDSWORK */
  482. /* > \verbatim */
  483. /* > LDSWORK is INTEGER */
  484. /* > LDSWORK >= MAX(2,ROWS), where ROWS = ((M + NB - 1) / NB + 1) */
  485. /* > and NB is the optimal block size. */
  486. /* > */
  487. /* > If LDSWORK = -1, then a workspace query is assumed; the routine */
  488. /* > only calculates the optimal dimensions of the SWORK matrix, */
  489. /* > returns these values as the first and second entry of the SWORK */
  490. /* > matrix, and no error message related LWORK is issued by XERBLA. */
  491. /* > \endverbatim */
  492. /* > */
  493. /* > \param[out] INFO */
  494. /* > \verbatim */
  495. /* > INFO is INTEGER */
  496. /* > = 0: successful exit */
  497. /* > < 0: if INFO = -i, the i-th argument had an illegal value */
  498. /* > = 1: A and B have common or very close eigenvalues; perturbed */
  499. /* > values were used to solve the equation (but the matrices */
  500. /* > A and B are unchanged). */
  501. /* > \endverbatim */
  502. /* > \ingroup complex16SYcomputational */
  503. /* ===================================================================== */
  504. /* References: */
  505. /* E. S. Quintana-Orti and R. A. Van De Geijn (2003). Formal derivation of */
  506. /* algorithms: The triangular Sylvester equation, ACM Transactions */
  507. /* on Mathematical Software (TOMS), volume 29, pages 218--243. */
  508. /* A. Schwarz and C. C. Kjelgaard Mikkelsen (2020). Robust Task-Parallel */
  509. /* Solution of the Triangular Sylvester Equation. Lecture Notes in */
  510. /* Computer Science, vol 12043, pages 82--92, Springer. */
  511. /* Contributor: */
  512. /* Angelika Schwarz, Umea University, Sweden. */
  513. /* ===================================================================== */
  514. /* Subroutine */ int ztrsyl3_(char *trana, char *tranb, integer *isgn,
  515. integer *m, integer *n, doublecomplex *a, integer *lda, doublecomplex
  516. *b, integer *ldb, doublecomplex *c__, integer *ldc, doublereal *scale,
  517. doublereal *swork, integer *ldswork, integer *info)
  518. {
  519. /* System generated locals */
  520. integer a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset, swork_dim1,
  521. swork_offset, i__1, i__2, i__3, i__4, i__5, i__6;
  522. doublereal d__1, d__2, d__3, d__4;
  523. doublecomplex z__1;
  524. /* Local variables */
  525. doublereal scal;
  526. doublecomplex csgn;
  527. doublereal anrm, bnrm, cnrm;
  528. integer awrk, bwrk;
  529. doublereal *wnrm, xnrm;
  530. integer i__, j, k, l;
  531. extern logical lsame_(char *, char *);
  532. integer iinfo;
  533. extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
  534. integer *, doublecomplex *, doublecomplex *, integer *,
  535. doublecomplex *, integer *, doublecomplex *, doublecomplex *,
  536. integer *);
  537. integer i1, i2, j1, j2, k1, k2, l1, l2;
  538. // extern integer myexp_(doublereal *);
  539. integer nb, jj, ll;
  540. extern doublereal dlamch_(char *);
  541. doublereal scaloc, scamin;
  542. extern doublereal dlarmm_(doublereal *, doublereal *, doublereal *);
  543. extern /* Subroutine */ int xerbla_(char *, integer *);
  544. extern integer ilaenv_(integer *, char *, char *, integer *, integer *,
  545. integer *, integer *, ftnlen, ftnlen);
  546. extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
  547. integer *, doublereal *);
  548. doublereal bignum;
  549. extern /* Subroutine */ int zdscal_(integer *, doublereal *,
  550. doublecomplex *, integer *), zlascl_(char *, integer *, integer *,
  551. doublereal *, doublereal *, integer *, integer *, doublecomplex *
  552. , integer *, integer *);
  553. logical notrna, notrnb;
  554. doublereal smlnum;
  555. logical lquery;
  556. extern /* Subroutine */ int ztrsyl_(char *, char *, integer *, integer *,
  557. integer *, doublecomplex *, integer *, doublecomplex *, integer *,
  558. doublecomplex *, integer *, doublereal *, integer *);
  559. integer nba, nbb;
  560. doublereal buf, sgn;
  561. /* Decode and Test input parameters */
  562. /* Parameter adjustments */
  563. a_dim1 = *lda;
  564. a_offset = 1 + a_dim1 * 1;
  565. a -= a_offset;
  566. b_dim1 = *ldb;
  567. b_offset = 1 + b_dim1 * 1;
  568. b -= b_offset;
  569. c_dim1 = *ldc;
  570. c_offset = 1 + c_dim1 * 1;
  571. c__ -= c_offset;
  572. swork_dim1 = *ldswork;
  573. swork_offset = 1 + swork_dim1 * 1;
  574. swork -= swork_offset;
  575. /* Function Body */
  576. notrna = lsame_(trana, "N");
  577. notrnb = lsame_(tranb, "N");
  578. /* Use the same block size for all matrices. */
  579. /* Computing MAX */
  580. i__1 = 8, i__2 = ilaenv_(&c__1, "ZTRSYL", "", m, n, &c_n1, &c_n1, (ftnlen)
  581. 6, (ftnlen)0);
  582. nb = f2cmax(i__1,i__2);
  583. /* Compute number of blocks in A and B */
  584. /* Computing MAX */
  585. i__1 = 1, i__2 = (*m + nb - 1) / nb;
  586. nba = f2cmax(i__1,i__2);
  587. /* Computing MAX */
  588. i__1 = 1, i__2 = (*n + nb - 1) / nb;
  589. nbb = f2cmax(i__1,i__2);
  590. /* Compute workspace */
  591. *info = 0;
  592. lquery = *ldswork == -1;
  593. if (lquery) {
  594. *ldswork = 2;
  595. swork[swork_dim1 + 1] = (doublereal) f2cmax(nba,nbb);
  596. swork[swork_dim1 + 2] = (doublereal) ((nbb << 1) + nba);
  597. }
  598. /* Test the input arguments */
  599. if (! notrna && ! lsame_(trana, "C")) {
  600. *info = -1;
  601. } else if (! notrnb && ! lsame_(tranb, "C")) {
  602. *info = -2;
  603. } else if (*isgn != 1 && *isgn != -1) {
  604. *info = -3;
  605. } else if (*m < 0) {
  606. *info = -4;
  607. } else if (*n < 0) {
  608. *info = -5;
  609. } else if (*lda < f2cmax(1,*m)) {
  610. *info = -7;
  611. } else if (*ldb < f2cmax(1,*n)) {
  612. *info = -9;
  613. } else if (*ldc < f2cmax(1,*m)) {
  614. *info = -11;
  615. }
  616. if (*info != 0) {
  617. i__1 = -(*info);
  618. xerbla_("ZTRSYL3", &i__1);
  619. return 0;
  620. } else if (lquery) {
  621. return 0;
  622. }
  623. /* Quick return if possible */
  624. *scale = 1.;
  625. if (*m == 0 || *n == 0) {
  626. return 0;
  627. }
  628. wnrm = (doublereal*)malloc(f2cmax(*m,*n)*sizeof(doublereal));
  629. /* Use unblocked code for small problems or if insufficient */
  630. /* workspace is provided */
  631. if (f2cmin(nba,nbb) == 1 || *ldswork < f2cmax(nba,nbb)) {
  632. ztrsyl_(trana, tranb, isgn, m, n, &a[a_offset], lda, &b[b_offset],
  633. ldb, &c__[c_offset], ldc, scale, info);
  634. return 0;
  635. }
  636. /* Set constants to control overflow */
  637. smlnum = dlamch_("S");
  638. bignum = 1. / smlnum;
  639. /* Set local scaling factors. */
  640. i__1 = nbb;
  641. for (l = 1; l <= i__1; ++l) {
  642. i__2 = nba;
  643. for (k = 1; k <= i__2; ++k) {
  644. swork[k + l * swork_dim1] = 1.;
  645. }
  646. }
  647. /* Fallback scaling factor to prevent flushing of SWORK( K, L ) to zero. */
  648. /* This scaling is to ensure compatibility with TRSYL and may get flushed. */
  649. buf = 1.;
  650. /* Compute upper bounds of blocks of A and B */
  651. awrk = nbb;
  652. i__1 = nba;
  653. for (k = 1; k <= i__1; ++k) {
  654. k1 = (k - 1) * nb + 1;
  655. /* Computing MIN */
  656. i__2 = k * nb;
  657. k2 = f2cmin(i__2,*m) + 1;
  658. i__2 = nba;
  659. for (l = k; l <= i__2; ++l) {
  660. l1 = (l - 1) * nb + 1;
  661. /* Computing MIN */
  662. i__3 = l * nb;
  663. l2 = f2cmin(i__3,*m) + 1;
  664. if (notrna) {
  665. i__3 = k2 - k1;
  666. i__4 = l2 - l1;
  667. swork[k + (awrk + l) * swork_dim1] = zlange_("I", &i__3, &
  668. i__4, &a[k1 + l1 * a_dim1], lda, wnrm);
  669. } else {
  670. i__3 = k2 - k1;
  671. i__4 = l2 - l1;
  672. swork[l + (awrk + k) * swork_dim1] = zlange_("1", &i__3, &
  673. i__4, &a[k1 + l1 * a_dim1], lda, wnrm);
  674. }
  675. }
  676. }
  677. bwrk = nbb + nba;
  678. i__1 = nbb;
  679. for (k = 1; k <= i__1; ++k) {
  680. k1 = (k - 1) * nb + 1;
  681. /* Computing MIN */
  682. i__2 = k * nb;
  683. k2 = f2cmin(i__2,*n) + 1;
  684. i__2 = nbb;
  685. for (l = k; l <= i__2; ++l) {
  686. l1 = (l - 1) * nb + 1;
  687. /* Computing MIN */
  688. i__3 = l * nb;
  689. l2 = f2cmin(i__3,*n) + 1;
  690. if (notrnb) {
  691. i__3 = k2 - k1;
  692. i__4 = l2 - l1;
  693. swork[k + (bwrk + l) * swork_dim1] = zlange_("I", &i__3, &
  694. i__4, &b[k1 + l1 * b_dim1], ldb, wnrm);
  695. } else {
  696. i__3 = k2 - k1;
  697. i__4 = l2 - l1;
  698. swork[l + (bwrk + k) * swork_dim1] = zlange_("1", &i__3, &
  699. i__4, &b[k1 + l1 * b_dim1], ldb, wnrm);
  700. }
  701. }
  702. }
  703. sgn = (doublereal) (*isgn);
  704. z__1.r = sgn, z__1.i = 0.;
  705. csgn.r = z__1.r, csgn.i = z__1.i;
  706. if (notrna && notrnb) {
  707. /* Solve A*X + ISGN*X*B = scale*C. */
  708. /* The (K,L)th block of X is determined starting from */
  709. /* bottom-left corner column by column by */
  710. /* A(K,K)*X(K,L) + ISGN*X(K,L)*B(L,L) = C(K,L) - R(K,L) */
  711. /* Where */
  712. /* M L-1 */
  713. /* R(K,L) = SUM [A(K,I)*X(I,L)] + ISGN*SUM [X(K,J)*B(J,L)]. */
  714. /* I=K+1 J=1 */
  715. /* Start loop over block rows (index = K) and block columns (index = L) */
  716. for (k = nba; k >= 1; --k) {
  717. /* K1: row index of the first row in X( K, L ) */
  718. /* K2: row index of the first row in X( K+1, L ) */
719. /* so that K2 - K1 is the row count of the block X( K, L ) */
  720. k1 = (k - 1) * nb + 1;
  721. /* Computing MIN */
  722. i__1 = k * nb;
  723. k2 = f2cmin(i__1,*m) + 1;
  724. i__1 = nbb;
  725. for (l = 1; l <= i__1; ++l) {
  726. /* L1: column index of the first column in X( K, L ) */
  727. /* L2: column index of the first column in X( K, L + 1) */
728. /* so that L2 - L1 is the column count of the block X( K, L ) */
  729. l1 = (l - 1) * nb + 1;
  730. /* Computing MIN */
  731. i__2 = l * nb;
  732. l2 = f2cmin(i__2,*n) + 1;
  733. i__2 = k2 - k1;
  734. i__3 = l2 - l1;
  735. ztrsyl_(trana, tranb, isgn, &i__2, &i__3, &a[k1 + k1 * a_dim1]
  736. , lda, &b[l1 + l1 * b_dim1], ldb, &c__[k1 + l1 *
  737. c_dim1], ldc, &scaloc, &iinfo);
  738. *info = f2cmax(*info,iinfo);
  739. if (scaloc * swork[k + l * swork_dim1] == 0.) {
  740. if (scaloc == 0.) {
  741. /* The magnitude of the largest entry of X(K1:K2-1, L1:L2-1) */
  742. /* is larger than the product of BIGNUM**2 and cannot be */
  743. /* represented in the form (1/SCALE)*X(K1:K2-1, L1:L2-1). */
  744. /* Mark the computation as pointless. */
  745. buf = 0.;
  746. } else {
  747. i__2 = myexp_(&scaloc);
  748. buf *= pow_di(&c_b18, &i__2);
  749. }
  750. i__2 = nbb;
  751. for (jj = 1; jj <= i__2; ++jj) {
  752. i__3 = nba;
  753. for (ll = 1; ll <= i__3; ++ll) {
  754. /* Bound by BIGNUM to not introduce Inf. The value */
  755. /* is irrelevant; corresponding entries of the */
  756. /* solution will be flushed in consistency scaling. */
  757. /* Computing MIN */
  758. i__4 = myexp_(&scaloc);
  759. d__1 = bignum, d__2 = swork[ll + jj * swork_dim1]
  760. / pow_di(&c_b18, &i__4);
  761. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  762. }
  763. }
  764. }
  765. swork[k + l * swork_dim1] = scaloc * swork[k + l * swork_dim1]
  766. ;
  767. i__2 = k2 - k1;
  768. i__3 = l2 - l1;
  769. xnrm = zlange_("I", &i__2, &i__3, &c__[k1 + l1 * c_dim1], ldc,
  770. wnrm);
  771. for (i__ = k - 1; i__ >= 1; --i__) {
  772. /* C( I, L ) := C( I, L ) - A( I, K ) * C( K, L ) */
  773. i1 = (i__ - 1) * nb + 1;
  774. /* Computing MIN */
  775. i__2 = i__ * nb;
  776. i2 = f2cmin(i__2,*m) + 1;
  777. /* Compute scaling factor to survive the linear update */
  778. /* simulating consistent scaling. */
  779. i__2 = i2 - i1;
  780. i__3 = l2 - l1;
  781. cnrm = zlange_("I", &i__2, &i__3, &c__[i1 + l1 * c_dim1],
  782. ldc, wnrm);
  783. /* Computing MIN */
  784. d__1 = swork[i__ + l * swork_dim1], d__2 = swork[k + l *
  785. swork_dim1];
  786. scamin = f2cmin(d__1,d__2);
  787. cnrm *= scamin / swork[i__ + l * swork_dim1];
  788. xnrm *= scamin / swork[k + l * swork_dim1];
  789. anrm = swork[i__ + (awrk + k) * swork_dim1];
  790. scaloc = dlarmm_(&anrm, &xnrm, &cnrm);
  791. if (scaloc * scamin == 0.) {
  792. /* Use second scaling factor to prevent flushing to zero. */
  793. i__2 = myexp_(&scaloc);
  794. buf *= pow_di(&c_b18, &i__2);
  795. i__2 = nbb;
  796. for (jj = 1; jj <= i__2; ++jj) {
  797. i__3 = nba;
  798. for (ll = 1; ll <= i__3; ++ll) {
  799. /* Computing MIN */
  800. i__4 = myexp_(&scaloc);
  801. d__1 = bignum, d__2 = swork[ll + jj *
  802. swork_dim1] / pow_di(&c_b18, &i__4);
  803. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  804. }
  805. }
  806. i__2 = myexp_(&scaloc);
  807. scamin /= pow_di(&c_b18, &i__2);
  808. i__2 = myexp_(&scaloc);
  809. scaloc /= pow_di(&c_b18, &i__2);
  810. }
  811. cnrm *= scaloc;
  812. xnrm *= scaloc;
  813. /* Simultaneously apply the robust update factor and the */
  814. /* consistency scaling factor to C( I, L ) and C( K, L ). */
  815. scal = scamin / swork[k + l * swork_dim1] * scaloc;
  816. if (scal != 1.) {
  817. i__2 = l2 - 1;
  818. for (jj = l1; jj <= i__2; ++jj) {
  819. i__3 = k2 - k1;
  820. zdscal_(&i__3, &scal, &c__[k1 + jj * c_dim1], &
  821. c__1);
  822. }
  823. }
  824. scal = scamin / swork[i__ + l * swork_dim1] * scaloc;
  825. if (scal != 1.) {
  826. i__2 = l2 - 1;
  827. for (ll = l1; ll <= i__2; ++ll) {
  828. i__3 = i2 - i1;
  829. zdscal_(&i__3, &scal, &c__[i1 + ll * c_dim1], &
  830. c__1);
  831. }
  832. }
  833. /* Record current scaling factor */
  834. swork[k + l * swork_dim1] = scamin * scaloc;
  835. swork[i__ + l * swork_dim1] = scamin * scaloc;
  836. i__2 = i2 - i1;
  837. i__3 = l2 - l1;
  838. i__4 = k2 - k1;
  839. z__1.r = -1., z__1.i = 0.;
  840. zgemm_("N", "N", &i__2, &i__3, &i__4, &z__1, &a[i1 + k1 *
  841. a_dim1], lda, &c__[k1 + l1 * c_dim1], ldc, &c_b1,
  842. &c__[i1 + l1 * c_dim1], ldc)
  843. ;
  844. }
  845. i__2 = nbb;
  846. for (j = l + 1; j <= i__2; ++j) {
  847. /* C( K, J ) := C( K, J ) - SGN * C( K, L ) * B( L, J ) */
  848. j1 = (j - 1) * nb + 1;
  849. /* Computing MIN */
  850. i__3 = j * nb;
  851. j2 = f2cmin(i__3,*n) + 1;
  852. /* Compute scaling factor to survive the linear update */
  853. /* simulating consistent scaling. */
  854. i__3 = k2 - k1;
  855. i__4 = j2 - j1;
  856. cnrm = zlange_("I", &i__3, &i__4, &c__[k1 + j1 * c_dim1],
  857. ldc, wnrm);
  858. /* Computing MIN */
  859. d__1 = swork[k + j * swork_dim1], d__2 = swork[k + l *
  860. swork_dim1];
  861. scamin = f2cmin(d__1,d__2);
  862. cnrm *= scamin / swork[k + j * swork_dim1];
  863. xnrm *= scamin / swork[k + l * swork_dim1];
  864. bnrm = swork[l + (bwrk + j) * swork_dim1];
  865. scaloc = dlarmm_(&bnrm, &xnrm, &cnrm);
  866. if (scaloc * scamin == 0.) {
  867. /* Use second scaling factor to prevent flushing to zero. */
  868. i__3 = myexp_(&scaloc);
  869. buf *= pow_di(&c_b18, &i__3);
  870. i__3 = nbb;
  871. for (jj = 1; jj <= i__3; ++jj) {
  872. i__4 = nba;
  873. for (ll = 1; ll <= i__4; ++ll) {
  874. /* Computing MIN */
  875. i__5 = myexp_(&scaloc);
  876. d__1 = bignum, d__2 = swork[ll + jj *
  877. swork_dim1] / pow_di(&c_b18, &i__5);
  878. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  879. }
  880. }
  881. i__3 = myexp_(&scaloc);
  882. scamin /= pow_di(&c_b18, &i__3);
  883. i__3 = myexp_(&scaloc);
  884. scaloc /= pow_di(&c_b18, &i__3);
  885. }
  886. cnrm *= scaloc;
  887. xnrm *= scaloc;
  888. /* Simultaneously apply the robust update factor and the */
  889. /* consistency scaling factor to C( K, J ) and C( K, L). */
  890. scal = scamin / swork[k + l * swork_dim1] * scaloc;
  891. if (scal != 1.) {
  892. i__3 = l2 - 1;
  893. for (ll = l1; ll <= i__3; ++ll) {
  894. i__4 = k2 - k1;
  895. zdscal_(&i__4, &scal, &c__[k1 + ll * c_dim1], &
  896. c__1);
  897. }
  898. }
  899. scal = scamin / swork[k + j * swork_dim1] * scaloc;
  900. if (scal != 1.) {
  901. i__3 = j2 - 1;
  902. for (jj = j1; jj <= i__3; ++jj) {
  903. i__4 = k2 - k1;
  904. zdscal_(&i__4, &scal, &c__[k1 + jj * c_dim1], &
  905. c__1);
  906. }
  907. }
  908. /* Record current scaling factor */
  909. swork[k + l * swork_dim1] = scamin * scaloc;
  910. swork[k + j * swork_dim1] = scamin * scaloc;
  911. i__3 = k2 - k1;
  912. i__4 = j2 - j1;
  913. i__5 = l2 - l1;
  914. z__1.r = -csgn.r, z__1.i = -csgn.i;
  915. zgemm_("N", "N", &i__3, &i__4, &i__5, &z__1, &c__[k1 + l1
  916. * c_dim1], ldc, &b[l1 + j1 * b_dim1], ldb, &c_b1,
  917. &c__[k1 + j1 * c_dim1], ldc)
  918. ;
  919. }
  920. }
  921. }
  922. } else if (! notrna && notrnb) {
  923. /* Solve A**H *X + ISGN*X*B = scale*C. */
  924. /* The (K,L)th block of X is determined starting from */
  925. /* upper-left corner column by column by */
  926. /* A(K,K)**H*X(K,L) + ISGN*X(K,L)*B(L,L) = C(K,L) - R(K,L) */
  927. /* Where */
  928. /* K-1 L-1 */
  929. /* R(K,L) = SUM [A(I,K)**H*X(I,L)] +ISGN*SUM [X(K,J)*B(J,L)] */
  930. /* I=1 J=1 */
  931. /* Start loop over block rows (index = K) and block columns (index = L) */
  932. i__1 = nba;
  933. for (k = 1; k <= i__1; ++k) {
  934. /* K1: row index of the first row in X( K, L ) */
  935. /* K2: row index of the first row in X( K+1, L ) */
936. /* so that K2 - K1 is the row count of the block X( K, L ) */
  937. k1 = (k - 1) * nb + 1;
  938. /* Computing MIN */
  939. i__2 = k * nb;
  940. k2 = f2cmin(i__2,*m) + 1;
  941. i__2 = nbb;
  942. for (l = 1; l <= i__2; ++l) {
  943. /* L1: column index of the first column in X( K, L ) */
  944. /* L2: column index of the first column in X( K, L + 1) */
945. /* so that L2 - L1 is the column count of the block X( K, L ) */
  946. l1 = (l - 1) * nb + 1;
  947. /* Computing MIN */
  948. i__3 = l * nb;
  949. l2 = f2cmin(i__3,*n) + 1;
  950. i__3 = k2 - k1;
  951. i__4 = l2 - l1;
  952. ztrsyl_(trana, tranb, isgn, &i__3, &i__4, &a[k1 + k1 * a_dim1]
  953. , lda, &b[l1 + l1 * b_dim1], ldb, &c__[k1 + l1 *
  954. c_dim1], ldc, &scaloc, &iinfo);
  955. *info = f2cmax(*info,iinfo);
  956. if (scaloc * swork[k + l * swork_dim1] == 0.) {
  957. if (scaloc == 0.) {
  958. /* The magnitude of the largest entry of X(K1:K2-1, L1:L2-1) */
  959. /* is larger than the product of BIGNUM**2 and cannot be */
  960. /* represented in the form (1/SCALE)*X(K1:K2-1, L1:L2-1). */
  961. /* Mark the computation as pointless. */
  962. buf = 0.;
  963. } else {
  964. /* Use second scaling factor to prevent flushing to zero. */
  965. i__3 = myexp_(&scaloc);
  966. buf *= pow_di(&c_b18, &i__3);
  967. }
  968. i__3 = nbb;
  969. for (jj = 1; jj <= i__3; ++jj) {
  970. i__4 = nba;
  971. for (ll = 1; ll <= i__4; ++ll) {
  972. /* Bound by BIGNUM to not introduce Inf. The value */
  973. /* is irrelevant; corresponding entries of the */
  974. /* solution will be flushed in consistency scaling. */
  975. /* Computing MIN */
  976. i__5 = myexp_(&scaloc);
  977. d__1 = bignum, d__2 = swork[ll + jj * swork_dim1]
  978. / pow_di(&c_b18, &i__5);
  979. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  980. }
  981. }
  982. }
  983. swork[k + l * swork_dim1] = scaloc * swork[k + l * swork_dim1]
  984. ;
  985. i__3 = k2 - k1;
  986. i__4 = l2 - l1;
  987. xnrm = zlange_("I", &i__3, &i__4, &c__[k1 + l1 * c_dim1], ldc,
  988. wnrm);
  989. i__3 = nba;
  990. for (i__ = k + 1; i__ <= i__3; ++i__) {
  991. /* C( I, L ) := C( I, L ) - A( K, I )**H * C( K, L ) */
  992. i1 = (i__ - 1) * nb + 1;
  993. /* Computing MIN */
  994. i__4 = i__ * nb;
  995. i2 = f2cmin(i__4,*m) + 1;
  996. /* Compute scaling factor to survive the linear update */
  997. /* simulating consistent scaling. */
  998. i__4 = i2 - i1;
  999. i__5 = l2 - l1;
  1000. cnrm = zlange_("I", &i__4, &i__5, &c__[i1 + l1 * c_dim1],
  1001. ldc, wnrm);
  1002. /* Computing MIN */
  1003. d__1 = swork[i__ + l * swork_dim1], d__2 = swork[k + l *
  1004. swork_dim1];
  1005. scamin = f2cmin(d__1,d__2);
  1006. cnrm *= scamin / swork[i__ + l * swork_dim1];
  1007. xnrm *= scamin / swork[k + l * swork_dim1];
  1008. anrm = swork[i__ + (awrk + k) * swork_dim1];
  1009. scaloc = dlarmm_(&anrm, &xnrm, &cnrm);
  1010. if (scaloc * scamin == 0.) {
  1011. /* Use second scaling factor to prevent flushing to zero. */
  1012. i__4 = myexp_(&scaloc);
  1013. buf *= pow_di(&c_b18, &i__4);
  1014. i__4 = nbb;
  1015. for (jj = 1; jj <= i__4; ++jj) {
  1016. i__5 = nba;
  1017. for (ll = 1; ll <= i__5; ++ll) {
  1018. /* Computing MIN */
  1019. i__6 = myexp_(&scaloc);
  1020. d__1 = bignum, d__2 = swork[ll + jj *
  1021. swork_dim1] / pow_di(&c_b18, &i__6);
  1022. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  1023. }
  1024. }
  1025. i__4 = myexp_(&scaloc);
  1026. scamin /= pow_di(&c_b18, &i__4);
  1027. i__4 = myexp_(&scaloc);
  1028. scaloc /= pow_di(&c_b18, &i__4);
  1029. }
  1030. cnrm *= scaloc;
  1031. xnrm *= scaloc;
  1032. /* Simultaneously apply the robust update factor and the */
1033. /* consistency scaling factor to C( I, L ) and C( K, L). */
  1034. scal = scamin / swork[k + l * swork_dim1] * scaloc;
  1035. if (scal != 1.) {
  1036. i__4 = l2 - 1;
  1037. for (ll = l1; ll <= i__4; ++ll) {
  1038. i__5 = k2 - k1;
  1039. zdscal_(&i__5, &scal, &c__[k1 + ll * c_dim1], &
  1040. c__1);
  1041. }
  1042. }
  1043. scal = scamin / swork[i__ + l * swork_dim1] * scaloc;
  1044. if (scal != 1.) {
  1045. i__4 = l2 - 1;
  1046. for (ll = l1; ll <= i__4; ++ll) {
  1047. i__5 = i2 - i1;
  1048. zdscal_(&i__5, &scal, &c__[i1 + ll * c_dim1], &
  1049. c__1);
  1050. }
  1051. }
  1052. /* Record current scaling factor */
  1053. swork[k + l * swork_dim1] = scamin * scaloc;
  1054. swork[i__ + l * swork_dim1] = scamin * scaloc;
  1055. i__4 = i2 - i1;
  1056. i__5 = l2 - l1;
  1057. i__6 = k2 - k1;
  1058. z__1.r = -1., z__1.i = 0.;
  1059. zgemm_("C", "N", &i__4, &i__5, &i__6, &z__1, &a[k1 + i1 *
  1060. a_dim1], lda, &c__[k1 + l1 * c_dim1], ldc, &c_b1,
  1061. &c__[i1 + l1 * c_dim1], ldc)
  1062. ;
  1063. }
  1064. i__3 = nbb;
  1065. for (j = l + 1; j <= i__3; ++j) {
  1066. /* C( K, J ) := C( K, J ) - SGN * C( K, L ) * B( L, J ) */
  1067. j1 = (j - 1) * nb + 1;
  1068. /* Computing MIN */
  1069. i__4 = j * nb;
  1070. j2 = f2cmin(i__4,*n) + 1;
  1071. /* Compute scaling factor to survive the linear update */
  1072. /* simulating consistent scaling. */
  1073. i__4 = k2 - k1;
  1074. i__5 = j2 - j1;
  1075. cnrm = zlange_("I", &i__4, &i__5, &c__[k1 + j1 * c_dim1],
  1076. ldc, wnrm);
  1077. /* Computing MIN */
  1078. d__1 = swork[k + j * swork_dim1], d__2 = swork[k + l *
  1079. swork_dim1];
  1080. scamin = f2cmin(d__1,d__2);
  1081. cnrm *= scamin / swork[k + j * swork_dim1];
  1082. xnrm *= scamin / swork[k + l * swork_dim1];
  1083. bnrm = swork[l + (bwrk + j) * swork_dim1];
  1084. scaloc = dlarmm_(&bnrm, &xnrm, &cnrm);
  1085. if (scaloc * scamin == 0.) {
  1086. /* Use second scaling factor to prevent flushing to zero. */
  1087. i__4 = myexp_(&scaloc);
  1088. buf *= pow_di(&c_b18, &i__4);
  1089. i__4 = nbb;
  1090. for (jj = 1; jj <= i__4; ++jj) {
  1091. i__5 = nba;
  1092. for (ll = 1; ll <= i__5; ++ll) {
  1093. /* Computing MIN */
  1094. i__6 = myexp_(&scaloc);
  1095. d__1 = bignum, d__2 = swork[ll + jj *
  1096. swork_dim1] / pow_di(&c_b18, &i__6);
  1097. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  1098. }
  1099. }
  1100. i__4 = myexp_(&scaloc);
  1101. scamin /= pow_di(&c_b18, &i__4);
  1102. i__4 = myexp_(&scaloc);
  1103. scaloc /= pow_di(&c_b18, &i__4);
  1104. }
  1105. cnrm *= scaloc;
  1106. xnrm *= scaloc;
  1107. /* Simultaneously apply the robust update factor and the */
1108. /* consistency scaling factor to C( K, J ) and C( K, L). */
  1109. scal = scamin / swork[k + l * swork_dim1] * scaloc;
  1110. if (scal != 1.) {
  1111. i__4 = l2 - 1;
  1112. for (ll = l1; ll <= i__4; ++ll) {
  1113. i__5 = k2 - k1;
  1114. zdscal_(&i__5, &scal, &c__[k1 + ll * c_dim1], &
  1115. c__1);
  1116. }
  1117. }
  1118. scal = scamin / swork[k + j * swork_dim1] * scaloc;
  1119. if (scal != 1.) {
  1120. i__4 = j2 - 1;
  1121. for (jj = j1; jj <= i__4; ++jj) {
  1122. i__5 = k2 - k1;
  1123. zdscal_(&i__5, &scal, &c__[k1 + jj * c_dim1], &
  1124. c__1);
  1125. }
  1126. }
  1127. /* Record current scaling factor */
  1128. swork[k + l * swork_dim1] = scamin * scaloc;
  1129. swork[k + j * swork_dim1] = scamin * scaloc;
  1130. i__4 = k2 - k1;
  1131. i__5 = j2 - j1;
  1132. i__6 = l2 - l1;
  1133. z__1.r = -csgn.r, z__1.i = -csgn.i;
  1134. zgemm_("N", "N", &i__4, &i__5, &i__6, &z__1, &c__[k1 + l1
  1135. * c_dim1], ldc, &b[l1 + j1 * b_dim1], ldb, &c_b1,
  1136. &c__[k1 + j1 * c_dim1], ldc)
  1137. ;
  1138. }
  1139. }
  1140. }
  1141. } else if (! notrna && ! notrnb) {
  1142. /* Solve A**H *X + ISGN*X*B**H = scale*C. */
  1143. /* The (K,L)th block of X is determined starting from */
  1144. /* top-right corner column by column by */
  1145. /* A(K,K)**H*X(K,L) + ISGN*X(K,L)*B(L,L)**H = C(K,L) - R(K,L) */
  1146. /* Where */
  1147. /* K-1 N */
  1148. /* R(K,L) = SUM [A(I,K)**H*X(I,L)] + ISGN*SUM [X(K,J)*B(L,J)**H]. */
  1149. /* I=1 J=L+1 */
  1150. /* Start loop over block rows (index = K) and block columns (index = L) */
  1151. i__1 = nba;
  1152. for (k = 1; k <= i__1; ++k) {
  1153. /* K1: row index of the first row in X( K, L ) */
  1154. /* K2: row index of the first row in X( K+1, L ) */
1155. /* so that K2 - K1 is the row count of the block X( K, L ) */
  1156. k1 = (k - 1) * nb + 1;
  1157. /* Computing MIN */
  1158. i__2 = k * nb;
  1159. k2 = f2cmin(i__2,*m) + 1;
  1160. for (l = nbb; l >= 1; --l) {
  1161. /* L1: column index of the first column in X( K, L ) */
  1162. /* L2: column index of the first column in X( K, L + 1) */
1163. /* so that L2 - L1 is the column count of the block X( K, L ) */
  1164. l1 = (l - 1) * nb + 1;
  1165. /* Computing MIN */
  1166. i__2 = l * nb;
  1167. l2 = f2cmin(i__2,*n) + 1;
  1168. i__2 = k2 - k1;
  1169. i__3 = l2 - l1;
  1170. ztrsyl_(trana, tranb, isgn, &i__2, &i__3, &a[k1 + k1 * a_dim1]
  1171. , lda, &b[l1 + l1 * b_dim1], ldb, &c__[k1 + l1 *
  1172. c_dim1], ldc, &scaloc, &iinfo);
  1173. *info = f2cmax(*info,iinfo);
  1174. if (scaloc * swork[k + l * swork_dim1] == 0.) {
  1175. if (scaloc == 0.) {
  1176. /* The magnitude of the largest entry of X(K1:K2-1, L1:L2-1) */
  1177. /* is larger than the product of BIGNUM**2 and cannot be */
  1178. /* represented in the form (1/SCALE)*X(K1:K2-1, L1:L2-1). */
  1179. /* Mark the computation as pointless. */
  1180. buf = 0.;
  1181. } else {
  1182. /* Use second scaling factor to prevent flushing to zero. */
  1183. i__2 = myexp_(&scaloc);
  1184. buf *= pow_di(&c_b18, &i__2);
  1185. }
  1186. i__2 = nbb;
  1187. for (jj = 1; jj <= i__2; ++jj) {
  1188. i__3 = nba;
  1189. for (ll = 1; ll <= i__3; ++ll) {
  1190. /* Bound by BIGNUM to not introduce Inf. The value */
  1191. /* is irrelevant; corresponding entries of the */
  1192. /* solution will be flushed in consistency scaling. */
  1193. /* Computing MIN */
  1194. i__4 = myexp_(&scaloc);
  1195. d__1 = bignum, d__2 = swork[ll + jj * swork_dim1]
  1196. / pow_di(&c_b18, &i__4);
  1197. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  1198. }
  1199. }
  1200. }
  1201. swork[k + l * swork_dim1] = scaloc * swork[k + l * swork_dim1]
  1202. ;
  1203. i__2 = k2 - k1;
  1204. i__3 = l2 - l1;
  1205. xnrm = zlange_("I", &i__2, &i__3, &c__[k1 + l1 * c_dim1], ldc,
  1206. wnrm);
  1207. i__2 = nba;
  1208. for (i__ = k + 1; i__ <= i__2; ++i__) {
  1209. /* C( I, L ) := C( I, L ) - A( K, I )**H * C( K, L ) */
  1210. i1 = (i__ - 1) * nb + 1;
  1211. /* Computing MIN */
  1212. i__3 = i__ * nb;
  1213. i2 = f2cmin(i__3,*m) + 1;
  1214. /* Compute scaling factor to survive the linear update */
  1215. /* simulating consistent scaling. */
  1216. i__3 = i2 - i1;
  1217. i__4 = l2 - l1;
  1218. cnrm = zlange_("I", &i__3, &i__4, &c__[i1 + l1 * c_dim1],
  1219. ldc, wnrm);
  1220. /* Computing MIN */
  1221. d__1 = swork[i__ + l * swork_dim1], d__2 = swork[k + l *
  1222. swork_dim1];
  1223. scamin = f2cmin(d__1,d__2);
  1224. cnrm *= scamin / swork[i__ + l * swork_dim1];
  1225. xnrm *= scamin / swork[k + l * swork_dim1];
  1226. anrm = swork[i__ + (awrk + k) * swork_dim1];
  1227. scaloc = dlarmm_(&anrm, &xnrm, &cnrm);
  1228. if (scaloc * scamin == 0.) {
  1229. /* Use second scaling factor to prevent flushing to zero. */
  1230. i__3 = myexp_(&scaloc);
  1231. buf *= pow_di(&c_b18, &i__3);
  1232. i__3 = nbb;
  1233. for (jj = 1; jj <= i__3; ++jj) {
  1234. i__4 = nba;
  1235. for (ll = 1; ll <= i__4; ++ll) {
  1236. /* Computing MIN */
  1237. i__5 = myexp_(&scaloc);
  1238. d__1 = bignum, d__2 = swork[ll + jj *
  1239. swork_dim1] / pow_di(&c_b18, &i__5);
  1240. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  1241. }
  1242. }
  1243. i__3 = myexp_(&scaloc);
  1244. scamin /= pow_di(&c_b18, &i__3);
  1245. i__3 = myexp_(&scaloc);
  1246. scaloc /= pow_di(&c_b18, &i__3);
  1247. }
  1248. cnrm *= scaloc;
  1249. xnrm *= scaloc;
  1250. /* Simultaneously apply the robust update factor and the */
  1251. /* consistency scaling factor to C( I, L ) and C( K, L). */
  1252. scal = scamin / swork[k + l * swork_dim1] * scaloc;
  1253. if (scal != 1.) {
  1254. i__3 = l2 - 1;
  1255. for (ll = l1; ll <= i__3; ++ll) {
  1256. i__4 = k2 - k1;
  1257. zdscal_(&i__4, &scal, &c__[k1 + ll * c_dim1], &
  1258. c__1);
  1259. }
  1260. }
  1261. scal = scamin / swork[i__ + l * swork_dim1] * scaloc;
  1262. if (scal != 1.) {
  1263. i__3 = l2 - 1;
  1264. for (ll = l1; ll <= i__3; ++ll) {
  1265. i__4 = i2 - i1;
  1266. zdscal_(&i__4, &scal, &c__[i1 + ll * c_dim1], &
  1267. c__1);
  1268. }
  1269. }
  1270. /* Record current scaling factor */
  1271. swork[k + l * swork_dim1] = scamin * scaloc;
  1272. swork[i__ + l * swork_dim1] = scamin * scaloc;
  1273. i__3 = i2 - i1;
  1274. i__4 = l2 - l1;
  1275. i__5 = k2 - k1;
  1276. z__1.r = -1., z__1.i = 0.;
  1277. zgemm_("C", "N", &i__3, &i__4, &i__5, &z__1, &a[k1 + i1 *
  1278. a_dim1], lda, &c__[k1 + l1 * c_dim1], ldc, &c_b1,
  1279. &c__[i1 + l1 * c_dim1], ldc)
  1280. ;
  1281. }
  1282. i__2 = l - 1;
  1283. for (j = 1; j <= i__2; ++j) {
  1284. /* C( K, J ) := C( K, J ) - SGN * C( K, L ) * B( J, L )**H */
  1285. j1 = (j - 1) * nb + 1;
  1286. /* Computing MIN */
  1287. i__3 = j * nb;
  1288. j2 = f2cmin(i__3,*n) + 1;
  1289. /* Compute scaling factor to survive the linear update */
  1290. /* simulating consistent scaling. */
  1291. i__3 = k2 - k1;
  1292. i__4 = j2 - j1;
  1293. cnrm = zlange_("I", &i__3, &i__4, &c__[k1 + j1 * c_dim1],
  1294. ldc, wnrm);
  1295. /* Computing MIN */
  1296. d__1 = swork[k + j * swork_dim1], d__2 = swork[k + l *
  1297. swork_dim1];
  1298. scamin = f2cmin(d__1,d__2);
  1299. cnrm *= scamin / swork[k + j * swork_dim1];
  1300. xnrm *= scamin / swork[k + l * swork_dim1];
  1301. bnrm = swork[l + (bwrk + j) * swork_dim1];
  1302. scaloc = dlarmm_(&bnrm, &xnrm, &cnrm);
  1303. if (scaloc * scamin == 0.) {
  1304. /* Use second scaling factor to prevent flushing to zero. */
  1305. i__3 = myexp_(&scaloc);
  1306. buf *= pow_di(&c_b18, &i__3);
  1307. i__3 = nbb;
  1308. for (jj = 1; jj <= i__3; ++jj) {
  1309. i__4 = nba;
  1310. for (ll = 1; ll <= i__4; ++ll) {
  1311. /* Computing MIN */
  1312. i__5 = myexp_(&scaloc);
  1313. d__1 = bignum, d__2 = swork[ll + jj *
  1314. swork_dim1] / pow_di(&c_b18, &i__5);
  1315. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  1316. }
  1317. }
  1318. i__3 = myexp_(&scaloc);
  1319. scamin /= pow_di(&c_b18, &i__3);
  1320. i__3 = myexp_(&scaloc);
  1321. scaloc /= pow_di(&c_b18, &i__3);
  1322. }
  1323. cnrm *= scaloc;
  1324. xnrm *= scaloc;
  1325. /* Simultaneously apply the robust update factor and the */
  1326. /* consistency scaling factor to C( K, J ) and C( K, L). */
  1327. scal = scamin / swork[k + l * swork_dim1] * scaloc;
  1328. if (scal != 1.) {
  1329. i__3 = l2 - 1;
  1330. for (ll = l1; ll <= i__3; ++ll) {
  1331. i__4 = k2 - k1;
  1332. zdscal_(&i__4, &scal, &c__[k1 + ll * c_dim1], &
  1333. c__1);
  1334. }
  1335. }
  1336. scal = scamin / swork[k + j * swork_dim1] * scaloc;
  1337. if (scal != 1.) {
  1338. i__3 = j2 - 1;
  1339. for (jj = j1; jj <= i__3; ++jj) {
  1340. i__4 = k2 - k1;
  1341. zdscal_(&i__4, &scal, &c__[k1 + jj * c_dim1], &
  1342. c__1);
  1343. }
  1344. }
  1345. /* Record current scaling factor */
  1346. swork[k + l * swork_dim1] = scamin * scaloc;
  1347. swork[k + j * swork_dim1] = scamin * scaloc;
  1348. i__3 = k2 - k1;
  1349. i__4 = j2 - j1;
  1350. i__5 = l2 - l1;
  1351. z__1.r = -csgn.r, z__1.i = -csgn.i;
  1352. zgemm_("N", "C", &i__3, &i__4, &i__5, &z__1, &c__[k1 + l1
  1353. * c_dim1], ldc, &b[j1 + l1 * b_dim1], ldb, &c_b1,
  1354. &c__[k1 + j1 * c_dim1], ldc)
  1355. ;
  1356. }
  1357. }
  1358. }
  1359. } else if (notrna && ! notrnb) {
  1360. /* Solve A*X + ISGN*X*B**H = scale*C. */
  1361. /* The (K,L)th block of X is determined starting from */
  1362. /* bottom-right corner column by column by */
  1363. /* A(K,K)*X(K,L) + ISGN*X(K,L)*B(L,L)**H = C(K,L) - R(K,L) */
  1364. /* Where */
  1365. /* M N */
  1366. /* R(K,L) = SUM [A(K,I)*X(I,L)] + ISGN*SUM [X(K,J)*B(L,J)**H]. */
  1367. /* I=K+1 J=L+1 */
  1368. /* Start loop over block rows (index = K) and block columns (index = L) */
  1369. for (k = nba; k >= 1; --k) {
  1370. /* K1: row index of the first row in X( K, L ) */
  1371. /* K2: row index of the first row in X( K+1, L ) */
1372. /* so that K2 - K1 is the row count of the block X( K, L ) */
  1373. k1 = (k - 1) * nb + 1;
  1374. /* Computing MIN */
  1375. i__1 = k * nb;
  1376. k2 = f2cmin(i__1,*m) + 1;
  1377. for (l = nbb; l >= 1; --l) {
  1378. /* L1: column index of the first column in X( K, L ) */
  1379. /* L2: column index of the first column in X( K, L + 1) */
1380. /* so that L2 - L1 is the column count of the block X( K, L ) */
  1381. l1 = (l - 1) * nb + 1;
  1382. /* Computing MIN */
  1383. i__1 = l * nb;
  1384. l2 = f2cmin(i__1,*n) + 1;
  1385. i__1 = k2 - k1;
  1386. i__2 = l2 - l1;
  1387. ztrsyl_(trana, tranb, isgn, &i__1, &i__2, &a[k1 + k1 * a_dim1]
  1388. , lda, &b[l1 + l1 * b_dim1], ldb, &c__[k1 + l1 *
  1389. c_dim1], ldc, &scaloc, &iinfo);
  1390. *info = f2cmax(*info,iinfo);
  1391. if (scaloc * swork[k + l * swork_dim1] == 0.) {
  1392. if (scaloc == 0.) {
  1393. /* The magnitude of the largest entry of X(K1:K2-1, L1:L2-1) */
  1394. /* is larger than the product of BIGNUM**2 and cannot be */
  1395. /* represented in the form (1/SCALE)*X(K1:K2-1, L1:L2-1). */
  1396. /* Mark the computation as pointless. */
  1397. buf = 0.;
  1398. } else {
  1399. /* Use second scaling factor to prevent flushing to zero. */
  1400. i__1 = myexp_(&scaloc);
  1401. buf *= pow_di(&c_b18, &i__1);
  1402. }
  1403. i__1 = nbb;
  1404. for (jj = 1; jj <= i__1; ++jj) {
  1405. i__2 = nba;
  1406. for (ll = 1; ll <= i__2; ++ll) {
  1407. /* Bound by BIGNUM to not introduce Inf. The value */
  1408. /* is irrelevant; corresponding entries of the */
  1409. /* solution will be flushed in consistency scaling. */
  1410. /* Computing MIN */
  1411. i__3 = myexp_(&scaloc);
  1412. d__1 = bignum, d__2 = swork[ll + jj * swork_dim1]
  1413. / pow_di(&c_b18, &i__3);
  1414. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  1415. }
  1416. }
  1417. }
  1418. swork[k + l * swork_dim1] = scaloc * swork[k + l * swork_dim1]
  1419. ;
  1420. i__1 = k2 - k1;
  1421. i__2 = l2 - l1;
  1422. xnrm = zlange_("I", &i__1, &i__2, &c__[k1 + l1 * c_dim1], ldc,
  1423. wnrm);
  1424. i__1 = k - 1;
  1425. for (i__ = 1; i__ <= i__1; ++i__) {
  1426. /* C( I, L ) := C( I, L ) - A( I, K ) * C( K, L ) */
  1427. i1 = (i__ - 1) * nb + 1;
  1428. /* Computing MIN */
  1429. i__2 = i__ * nb;
  1430. i2 = f2cmin(i__2,*m) + 1;
  1431. /* Compute scaling factor to survive the linear update */
  1432. /* simulating consistent scaling. */
  1433. i__2 = i2 - i1;
  1434. i__3 = l2 - l1;
  1435. cnrm = zlange_("I", &i__2, &i__3, &c__[i1 + l1 * c_dim1],
  1436. ldc, wnrm);
  1437. /* Computing MIN */
  1438. d__1 = swork[i__ + l * swork_dim1], d__2 = swork[k + l *
  1439. swork_dim1];
  1440. scamin = f2cmin(d__1,d__2);
  1441. cnrm *= scamin / swork[i__ + l * swork_dim1];
  1442. xnrm *= scamin / swork[k + l * swork_dim1];
  1443. anrm = swork[i__ + (awrk + k) * swork_dim1];
  1444. scaloc = dlarmm_(&anrm, &xnrm, &cnrm);
  1445. if (scaloc * scamin == 0.) {
  1446. /* Use second scaling factor to prevent flushing to zero. */
  1447. i__2 = myexp_(&scaloc);
  1448. buf *= pow_di(&c_b18, &i__2);
  1449. i__2 = nbb;
  1450. for (jj = 1; jj <= i__2; ++jj) {
  1451. i__3 = nba;
  1452. for (ll = 1; ll <= i__3; ++ll) {
  1453. /* Computing MIN */
  1454. i__4 = myexp_(&scaloc);
  1455. d__1 = bignum, d__2 = swork[ll + jj *
  1456. swork_dim1] / pow_di(&c_b18, &i__4);
  1457. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  1458. }
  1459. }
  1460. i__2 = myexp_(&scaloc);
  1461. scamin /= pow_di(&c_b18, &i__2);
  1462. i__2 = myexp_(&scaloc);
  1463. scaloc /= pow_di(&c_b18, &i__2);
  1464. }
  1465. cnrm *= scaloc;
  1466. xnrm *= scaloc;
  1467. /* Simultaneously apply the robust update factor and the */
  1468. /* consistency scaling factor to C( I, L ) and C( K, L). */
  1469. scal = scamin / swork[k + l * swork_dim1] * scaloc;
  1470. if (scal != 1.) {
  1471. i__2 = l2 - 1;
  1472. for (ll = l1; ll <= i__2; ++ll) {
  1473. i__3 = k2 - k1;
  1474. zdscal_(&i__3, &scal, &c__[k1 + ll * c_dim1], &
  1475. c__1);
  1476. }
  1477. }
  1478. scal = scamin / swork[i__ + l * swork_dim1] * scaloc;
  1479. if (scal != 1.) {
  1480. i__2 = l2 - 1;
  1481. for (ll = l1; ll <= i__2; ++ll) {
  1482. i__3 = i2 - i1;
  1483. zdscal_(&i__3, &scal, &c__[i1 + ll * c_dim1], &
  1484. c__1);
  1485. }
  1486. }
  1487. /* Record current scaling factor */
  1488. swork[k + l * swork_dim1] = scamin * scaloc;
  1489. swork[i__ + l * swork_dim1] = scamin * scaloc;
  1490. i__2 = i2 - i1;
  1491. i__3 = l2 - l1;
  1492. i__4 = k2 - k1;
  1493. z__1.r = -1., z__1.i = 0.;
  1494. zgemm_("N", "N", &i__2, &i__3, &i__4, &z__1, &a[i1 + k1 *
  1495. a_dim1], lda, &c__[k1 + l1 * c_dim1], ldc, &c_b1,
  1496. &c__[i1 + l1 * c_dim1], ldc)
  1497. ;
  1498. }
  1499. i__1 = l - 1;
  1500. for (j = 1; j <= i__1; ++j) {
  1501. /* C( K, J ) := C( K, J ) - SGN * C( K, L ) * B( J, L )**H */
  1502. j1 = (j - 1) * nb + 1;
  1503. /* Computing MIN */
  1504. i__2 = j * nb;
  1505. j2 = f2cmin(i__2,*n) + 1;
  1506. /* Compute scaling factor to survive the linear update */
  1507. /* simulating consistent scaling. */
  1508. i__2 = k2 - k1;
  1509. i__3 = j2 - j1;
  /* NOTE(review): this is the tail of ztrsyl3_ (blocked ZTRSYL with robust */
  /* scaling); the loop headers over block indices K, L, J and the setup of */
  /* i__2/i__3, xnrm, bwrk, csgn, buf, nb, nba, nbb occur before this excerpt. */
  /* Infinity-norm of the current C(K,J) block (i__2 x i__3 presumably the */
  /* block dimensions, set above -- TODO confirm against the full file). */
  1510. cnrm = zlange_("I", &i__2, &i__3, &c__[k1 + j1 * c_dim1],
  1511. ldc, wnrm);
  /* Common (smallest) scale factor of the C(K,J) and C(K,L) blocks, used to */
  /* put both blocks on a consistent scaling before the GEMM update below. */
  1512. /* Computing MIN */
  1513. d__1 = swork[k + j * swork_dim1], d__2 = swork[k + l *
  1514. swork_dim1];
  1515. scamin = f2cmin(d__1,d__2);
  /* Rescale the two norms to the common scaling SCAMIN. */
  1516. cnrm *= scamin / swork[k + j * swork_dim1];
  1517. xnrm *= scamin / swork[k + l * swork_dim1];
  /* Cached norm of the B(J,L) block; column offset BWRK marks where these */
  /* norms are stored in SWORK (set earlier, outside this excerpt). */
  1518. bnrm = swork[l + (bwrk + j) * swork_dim1];
  /* DLARMM returns a scale factor in (0,1] such that the update */
  /* bnrm * xnrm cannot overflow cnrm when applied after scaling. */
  1519. scaloc = dlarmm_(&bnrm, &xnrm, &cnrm);
  1520. if (scaloc * scamin == 0.) {
  /* SCALOC*SCAMIN underflowed to zero: absorb a power of two into the */
  /* global buffer factor BUF instead, and shrink all recorded per-block */
  /* scale factors by the same power of two (capped at BIGNUM). */
  1521. /* Use second scaling factor to prevent flushing to zero. */
  1522. i__2 = myexp_(&scaloc);
  1523. buf *= pow_di(&c_b18, &i__2);
  1524. i__2 = nbb;
  1525. for (jj = 1; jj <= i__2; ++jj) {
  1526. i__3 = nba;
  1527. for (ll = 1; ll <= i__3; ++ll) {
  1528. /* Computing MIN */
  1529. i__4 = myexp_(&scaloc);
  1530. d__1 = bignum, d__2 = swork[ll + jj *
  1531. swork_dim1] / pow_di(&c_b18, &i__4);
  1532. swork[ll + jj * swork_dim1] = f2cmin(d__1,d__2);
  1533. }
  1534. }
  /* Remove the absorbed power of two from SCAMIN and SCALOC so the */
  /* bookkeeping below stays consistent with the updated SWORK entries. */
  1535. i__2 = myexp_(&scaloc);
  1536. scamin /= pow_di(&c_b18, &i__2);
  1537. i__2 = myexp_(&scaloc);
  1538. scaloc /= pow_di(&c_b18, &i__2);
  1539. }
  /* Apply the robust update factor to both norms. */
  1540. cnrm *= scaloc;
  1541. xnrm *= scaloc;
  1542. /* Simultaneously apply the robust update factor and the */
  1543. /* consistency scaling factor to C( K, J ) and C( K, L). */
  /* Scale the columns of the solution block C(K,L)... */
  1544. scal = scamin / swork[k + l * swork_dim1] * scaloc;
  1545. if (scal != 1.) {
  1546. i__2 = l2 - 1;
  1547. for (jj = l1; jj <= i__2; ++jj) {
  1548. i__3 = k2 - k1;
  1549. zdscal_(&i__3, &scal, &c__[k1 + jj * c_dim1], &
  1550. c__1);
  1551. }
  1552. }
  /* ...and, with its own factor, the columns of the target block C(K,J). */
  1553. scal = scamin / swork[k + j * swork_dim1] * scaloc;
  1554. if (scal != 1.) {
  1555. i__2 = j2 - 1;
  1556. for (jj = j1; jj <= i__2; ++jj) {
  1557. i__3 = k2 - k1;
  1558. zdscal_(&i__3, &scal, &c__[k1 + jj * c_dim1], &
  1559. c__1);
  1560. }
  1561. }
  1562. /* Record current scaling factor */
  /* Both blocks now share the scale factor SCAMIN*SCALOC. */
  1563. swork[k + l * swork_dim1] = scamin * scaloc;
  1564. swork[k + j * swork_dim1] = scamin * scaloc;
  /* GEMM update C(K,J) := C(K,J) - CSGN * C(K,L) * B(J,L)**H */
  /* (op "C" on B, i.e. conjugate transpose; c_b1 is presumably the */
  /* complex constant 1 declared at file top -- confirm). */
  1565. i__2 = k2 - k1;
  1566. i__3 = j2 - j1;
  1567. i__4 = l2 - l1;
  1568. z__1.r = -csgn.r, z__1.i = -csgn.i;
  1569. zgemm_("N", "C", &i__2, &i__3, &i__4, &z__1, &c__[k1 + l1
  1570. * c_dim1], ldc, &b[j1 + l1 * b_dim1], ldb, &c_b1,
  1571. &c__[k1 + j1 * c_dim1], ldc)
  1572. ;
  /* End of the block-update loops opened before this excerpt. */
  1573. }
  1574. }
  1575. }
  1576. }
  /* WNRM was heap-allocated earlier (workspace for ZLANGE); release it. */
  1577. free(wnrm);
  1578. /* Reduce local scaling factors */
  /* Global SCALE is the minimum of all per-block scale factors. */
  1579. *scale = swork[swork_dim1 + 1];
  1580. i__1 = nba;
  1581. for (k = 1; k <= i__1; ++k) {
  1582. i__2 = nbb;
  1583. for (l = 1; l <= i__2; ++l) {
  1584. /* Computing MIN */
  1585. d__1 = *scale, d__2 = swork[k + l * swork_dim1];
  1586. *scale = f2cmin(d__1,d__2);
  1587. }
  1588. }
  1589. if (*scale == 0.) {
  1590. /* The magnitude of the largest entry of the solution is larger */
  1591. /* than the product of BIGNUM**2 and cannot be represented in the */
  1592. /* form (1/SCALE)*X if SCALE is DOUBLE PRECISION. Set SCALE to */
  1593. /* zero and give up. */
  /* SWORK(1,1)/SWORK(2,1) report the workspace block dimensions on exit, */
  /* same as the normal return path at the bottom of this routine. */
  1594. swork[swork_dim1 + 1] = (doublereal) f2cmax(nba,nbb);
  1595. swork[swork_dim1 + 2] = (doublereal) ((nbb << 1) + nba);
  1596. return 0;
  1597. }
  1598. /* Realize consistent scaling */
  /* Bring every block of C down to the single global scale factor SCALE */
  /* by scaling each block's columns by SCALE / (its recorded factor). */
  1599. i__1 = nba;
  1600. for (k = 1; k <= i__1; ++k) {
  1601. k1 = (k - 1) * nb + 1;
  1602. /* Computing MIN */
  1603. i__2 = k * nb;
  1604. k2 = f2cmin(i__2,*m) + 1;
  1605. i__2 = nbb;
  1606. for (l = 1; l <= i__2; ++l) {
  1607. l1 = (l - 1) * nb + 1;
  1608. /* Computing MIN */
  1609. i__3 = l * nb;
  1610. l2 = f2cmin(i__3,*n) + 1;
  1611. scal = *scale / swork[k + l * swork_dim1];
  1612. if (scal != 1.) {
  1613. i__3 = l2 - 1;
  1614. for (ll = l1; ll <= i__3; ++ll) {
  1615. i__4 = k2 - k1;
  1616. zdscal_(&i__4, &scal, &c__[k1 + ll * c_dim1], &c__1);
  1617. }
  1618. }
  1619. }
  1620. }
  /* BUF != 1 means the secondary (power-of-two) scaling was engaged; fold */
  /* as much of it as possible into SCALE without underflowing SCALE. */
  1621. if (buf != 1. && buf > 0.) {
  1622. /* Decrease SCALE as much as possible. */
  1623. /* Computing MIN */
  1624. d__1 = *scale / smlnum, d__2 = 1. / buf;
  1625. scaloc = f2cmin(d__1,d__2);
  1626. buf *= scaloc;
  1627. *scale /= scaloc;
  1628. }
  1629. if (buf != 1. && buf > 0.) {
  1630. /* In case of overly aggressive scaling during the computation, */
  1631. /* flushing of the global scale factor may be prevented by */
  1632. /* undoing some of the scaling. This step is to ensure that */
  1633. /* this routine flushes only scale factors that TRSYL also */
  1634. /* flushes and be usable as a drop-in replacement. */
  1635. /* How much can the normwise largest entry be upscaled? */
  /* Find the largest |Re| / |Im| over all entries of C, starting from */
  /* C(1,1); this bounds how far the solution can be scaled back up. */
  1636. /* Computing MAX */
  1637. i__1 = c_dim1 + 1;
  1638. d__3 = (d__1 = c__[i__1].r, abs(d__1)), d__4 = (d__2 = d_imag(&c__[
  1639. c_dim1 + 1]), abs(d__2));
  1640. scal = f2cmax(d__3,d__4);
  1641. i__1 = *m;
  1642. for (k = 1; k <= i__1; ++k) {
  1643. i__2 = *n;
  1644. for (l = 1; l <= i__2; ++l) {
  1645. /* Computing MAX */
  1646. i__3 = k + l * c_dim1;
  1647. d__3 = scal, d__4 = (d__1 = c__[i__3].r, abs(d__1)), d__3 =
  1648. f2cmax(d__3,d__4), d__4 = (d__2 = d_imag(&c__[k + l *
  1649. c_dim1]), abs(d__2));
  1650. scal = f2cmax(d__3,d__4);
  1651. }
  1652. }
  1653. /* Increase BUF as close to 1 as possible and apply scaling. */
  /* ZLASCL with type "G" rescales the full M x N matrix C by */
  /* SCALOC / c_b106 (c_b106 is presumably 1.0, declared at file top). */
  1654. /* Computing MIN */
  1655. d__1 = bignum / scal, d__2 = 1. / buf;
  1656. scaloc = f2cmin(d__1,d__2);
  1657. buf *= scaloc;
  1658. zlascl_("G", &c_n1, &c_n1, &c_b106, &scaloc, m, n, &c__[c_offset],
  1659. ldc, &iinfo);
  1660. }
  1661. /* Combine with buffer scaling factor. SCALE will be flushed if */
  1662. /* BUF is less than one here. */
  1663. *scale *= buf;
  1664. /* Restore workspace dimensions */
  /* On exit SWORK(1,1) and SWORK(2,1) return the block-grid sizes, */
  /* matching the workspace-query convention documented for ZTRSYL3. */
  1665. swork[swork_dim1 + 1] = (doublereal) f2cmax(nba,nbb);
  1666. swork[swork_dim1 + 2] = (doublereal) ((nbb << 1) + nba);
  1667. return 0;
  1668. /* End of ZTRSYL3 */
  1669. } /* ztrsyl3_ */