
sgesvd.f 135 kB

  1. *> \brief <b> SGESVD computes the singular value decomposition (SVD) for GE matrices</b>
  2. *
  3. * =========== DOCUMENTATION ===========
  4. *
  5. * Online html documentation available at
  6. * http://www.netlib.org/lapack/explore-html/
  7. *
  8. *> \htmlonly
  9. *> Download SGESVD + dependencies
  10. *> <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/sgesvd.f">
  11. *> [TGZ]</a>
  12. *> <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/sgesvd.f">
  13. *> [ZIP]</a>
  14. *> <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/sgesvd.f">
  15. *> [TXT]</a>
  16. *> \endhtmlonly
  17. *
  18. * Definition:
  19. * ===========
  20. *
  21. * SUBROUTINE SGESVD( JOBU, JOBVT, M, N, A, LDA, S, U, LDU, VT, LDVT,
  22. * WORK, LWORK, INFO )
  23. *
  24. * .. Scalar Arguments ..
  25. * CHARACTER JOBU, JOBVT
  26. * INTEGER INFO, LDA, LDU, LDVT, LWORK, M, N
  27. * ..
  28. * .. Array Arguments ..
  29. * REAL A( LDA, * ), S( * ), U( LDU, * ),
  30. * $ VT( LDVT, * ), WORK( * )
  31. * ..
  32. *
  33. *
  34. *> \par Purpose:
  35. * =============
  36. *>
  37. *> \verbatim
  38. *>
  39. *> SGESVD computes the singular value decomposition (SVD) of a real
  40. *> M-by-N matrix A, optionally computing the left and/or right singular
  41. *> vectors. The SVD is written
  42. *>
  43. *> A = U * SIGMA * transpose(V)
  44. *>
  45. *> where SIGMA is an M-by-N matrix which is zero except for its
  46. *> min(m,n) diagonal elements, U is an M-by-M orthogonal matrix, and
  47. *> V is an N-by-N orthogonal matrix. The diagonal elements of SIGMA
  48. *> are the singular values of A; they are real and non-negative, and
  49. *> are returned in descending order. The first min(m,n) columns of
  50. *> U and V are the left and right singular vectors of A.
  51. *>
  52. *> Note that the routine returns V**T, not V.
  53. *> \endverbatim
  54. *
  55. * Arguments:
  56. * ==========
  57. *
  58. *> \param[in] JOBU
  59. *> \verbatim
  60. *> JOBU is CHARACTER*1
  61. *> Specifies options for computing all or part of the matrix U:
  62. *> = 'A': all M columns of U are returned in array U:
  63. *> = 'S': the first min(m,n) columns of U (the left singular
  64. *> vectors) are returned in the array U;
  65. *> = 'O': the first min(m,n) columns of U (the left singular
  66. *> vectors) are overwritten on the array A;
  67. *> = 'N': no columns of U (no left singular vectors) are
  68. *> computed.
  69. *> \endverbatim
  70. *>
  71. *> \param[in] JOBVT
  72. *> \verbatim
  73. *> JOBVT is CHARACTER*1
  74. *> Specifies options for computing all or part of the matrix
  75. *> V**T:
  76. *> = 'A': all N rows of V**T are returned in the array VT;
  77. *> = 'S': the first min(m,n) rows of V**T (the right singular
  78. *> vectors) are returned in the array VT;
  79. *> = 'O': the first min(m,n) rows of V**T (the right singular
  80. *> vectors) are overwritten on the array A;
  81. *> = 'N': no rows of V**T (no right singular vectors) are
  82. *> computed.
  83. *>
  84. *> JOBVT and JOBU cannot both be 'O'.
  85. *> \endverbatim
  86. *>
  87. *> \param[in] M
  88. *> \verbatim
  89. *> M is INTEGER
  90. *> The number of rows of the input matrix A. M >= 0.
  91. *> \endverbatim
  92. *>
  93. *> \param[in] N
  94. *> \verbatim
  95. *> N is INTEGER
  96. *> The number of columns of the input matrix A. N >= 0.
  97. *> \endverbatim
  98. *>
  99. *> \param[in,out] A
  100. *> \verbatim
  101. *> A is REAL array, dimension (LDA,N)
  102. *> On entry, the M-by-N matrix A.
  103. *> On exit,
  104. *> if JOBU = 'O', A is overwritten with the first min(m,n)
  105. *> columns of U (the left singular vectors,
  106. *> stored columnwise);
  107. *> if JOBVT = 'O', A is overwritten with the first min(m,n)
  108. *> rows of V**T (the right singular vectors,
  109. *> stored rowwise);
  110. *> if JOBU .ne. 'O' and JOBVT .ne. 'O', the contents of A
  111. *> are destroyed.
  112. *> \endverbatim
  113. *>
  114. *> \param[in] LDA
  115. *> \verbatim
  116. *> LDA is INTEGER
  117. *> The leading dimension of the array A. LDA >= max(1,M).
  118. *> \endverbatim
  119. *>
  120. *> \param[out] S
  121. *> \verbatim
  122. *> S is REAL array, dimension (min(M,N))
  123. *> The singular values of A, sorted so that S(i) >= S(i+1).
  124. *> \endverbatim
  125. *>
  126. *> \param[out] U
  127. *> \verbatim
  128. *> U is REAL array, dimension (LDU,UCOL)
  129. *> (LDU,M) if JOBU = 'A' or (LDU,min(M,N)) if JOBU = 'S'.
  130. *> If JOBU = 'A', U contains the M-by-M orthogonal matrix U;
  131. *> if JOBU = 'S', U contains the first min(m,n) columns of U
  132. *> (the left singular vectors, stored columnwise);
  133. *> if JOBU = 'N' or 'O', U is not referenced.
  134. *> \endverbatim
  135. *>
  136. *> \param[in] LDU
  137. *> \verbatim
  138. *> LDU is INTEGER
  139. *> The leading dimension of the array U. LDU >= 1; if
  140. *> JOBU = 'S' or 'A', LDU >= M.
  141. *> \endverbatim
  142. *>
  143. *> \param[out] VT
  144. *> \verbatim
  145. *> VT is REAL array, dimension (LDVT,N)
  146. *> If JOBVT = 'A', VT contains the N-by-N orthogonal matrix
  147. *> V**T;
  148. *> if JOBVT = 'S', VT contains the first min(m,n) rows of
  149. *> V**T (the right singular vectors, stored rowwise);
  150. *> if JOBVT = 'N' or 'O', VT is not referenced.
  151. *> \endverbatim
  152. *>
  153. *> \param[in] LDVT
  154. *> \verbatim
  155. *> LDVT is INTEGER
  156. *> The leading dimension of the array VT. LDVT >= 1; if
  157. *> JOBVT = 'A', LDVT >= N; if JOBVT = 'S', LDVT >= min(M,N).
  158. *> \endverbatim
  159. *>
  160. *> \param[out] WORK
  161. *> \verbatim
  162. *> WORK is REAL array, dimension (MAX(1,LWORK))
  163. *> On exit, if INFO = 0, WORK(1) returns the optimal LWORK;
  164. *> if INFO > 0, WORK(2:MIN(M,N)) contains the unconverged
  165. *> superdiagonal elements of an upper bidiagonal matrix B
  166. *> whose diagonal is in S (not necessarily sorted). B
  167. *> satisfies A = U * B * VT, so it has the same singular values
  168. *> as A, and singular vectors related by U and VT.
  169. *> \endverbatim
  170. *>
  171. *> \param[in] LWORK
  172. *> \verbatim
  173. *> LWORK is INTEGER
  174. *> The dimension of the array WORK.
  175. *> LWORK >= MAX(1,5*MIN(M,N)) for the paths (see comments inside code):
  176. *> - PATH 1 (M much larger than N, JOBU='N')
  177. *> - PATH 1t (N much larger than M, JOBVT='N')
  178. *> LWORK >= MAX(1,3*MIN(M,N)+MAX(M,N),5*MIN(M,N)) for the other paths
  179. *> For good performance, LWORK should generally be larger.
  180. *>
  181. *> If LWORK = -1, then a workspace query is assumed; the routine
  182. *> only calculates the optimal size of the WORK array, returns
  183. *> this value as the first entry of the WORK array, and no error
  184. *> message related to LWORK is issued by XERBLA.
  185. *> \endverbatim
  186. *>
  187. *> \param[out] INFO
  188. *> \verbatim
  189. *> INFO is INTEGER
  190. *> = 0: successful exit.
  191. *> < 0: if INFO = -i, the i-th argument had an illegal value.
  192. *> > 0: if SBDSQR did not converge, INFO specifies how many
  193. *> superdiagonals of an intermediate bidiagonal form B
  194. *> did not converge to zero. See the description of WORK
  195. *> above for details.
  196. *> \endverbatim
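*>
*> \par Example:
*> =============
*>
*> \verbatim
*>  The following calling sketch is illustrative only and is not part
*>  of the original LAPACK source.  It assumes a caller-supplied
*>  4-by-3 single precision matrix A, requests all singular vectors
*>  (JOBU = 'A', JOBVT = 'A'), and uses the LWORK = -1 workspace
*>  query described under LWORK above; the name WQRY and the
*>  Fortran 90 style ALLOCATE are local to this example.
*>
*>        INTEGER            M, N
*>        PARAMETER          ( M = 4, N = 3 )
*>        INTEGER            INFO, LWORK
*>        REAL               A( M, N ), S( N ), U( M, M ), VT( N, N )
*>        REAL               WQRY( 1 )
*>        REAL, ALLOCATABLE :: WORK( : )
*>  *     ... fill A with the matrix to be decomposed ...
*>  *     Workspace query: LWORK = -1 returns the optimal size in WQRY(1)
*>        CALL SGESVD( 'A', 'A', M, N, A, M, S, U, M, VT, N,
*>       $             WQRY, -1, INFO )
*>        LWORK = INT( WQRY( 1 ) )
*>        ALLOCATE( WORK( LWORK ) )
*>  *     Compute A = U * SIGMA * V**T; singular values are returned
*>  *     in S in descending order, and V**T (not V) in VT
*>        CALL SGESVD( 'A', 'A', M, N, A, M, S, U, M, VT, N,
*>       $             WORK, LWORK, INFO )
*> \endverbatim
*>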
  197. *
  198. * Authors:
  199. * ========
  200. *
  201. *> \author Univ. of Tennessee
  202. *> \author Univ. of California Berkeley
  203. *> \author Univ. of Colorado Denver
  204. *> \author NAG Ltd.
  205. *
  206. *> \ingroup realGEsing
  207. *
  208. * =====================================================================
  209. SUBROUTINE SGESVD( JOBU, JOBVT, M, N, A, LDA, S, U, LDU, VT, LDVT,
  210. $ WORK, LWORK, INFO )
  211. *
  212. * -- LAPACK driver routine --
  213. * -- LAPACK is a software package provided by Univ. of Tennessee, --
  214. * -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
  215. *
  216. * .. Scalar Arguments ..
  217. CHARACTER JOBU, JOBVT
  218. INTEGER INFO, LDA, LDU, LDVT, LWORK, M, N
  219. * ..
  220. * .. Array Arguments ..
  221. REAL A( LDA, * ), S( * ), U( LDU, * ),
  222. $ VT( LDVT, * ), WORK( * )
  223. * ..
  224. *
  225. * =====================================================================
  226. *
  227. * .. Parameters ..
  228. REAL ZERO, ONE
  229. PARAMETER ( ZERO = 0.0E0, ONE = 1.0E0 )
  230. * ..
  231. * .. Local Scalars ..
  232. LOGICAL LQUERY, WNTUA, WNTUAS, WNTUN, WNTUO, WNTUS,
  233. $ WNTVA, WNTVAS, WNTVN, WNTVO, WNTVS
  234. INTEGER BDSPAC, BLK, CHUNK, I, IE, IERR, IR, ISCL,
  235. $ ITAU, ITAUP, ITAUQ, IU, IWORK, LDWRKR, LDWRKU,
  236. $ MAXWRK, MINMN, MINWRK, MNTHR, NCU, NCVT, NRU,
  237. $ NRVT, WRKBL
  238. INTEGER LWORK_SGEQRF, LWORK_SORGQR_N, LWORK_SORGQR_M,
  239. $ LWORK_SGEBRD, LWORK_SORGBR_P, LWORK_SORGBR_Q,
  240. $ LWORK_SGELQF, LWORK_SORGLQ_N, LWORK_SORGLQ_M
  241. REAL ANRM, BIGNUM, EPS, SMLNUM
  242. * ..
  243. * .. Local Arrays ..
  244. REAL DUM( 1 )
  245. * ..
  246. * .. External Subroutines ..
  247. EXTERNAL SBDSQR, SGEBRD, SGELQF, SGEMM, SGEQRF, SLACPY,
  248. $ SLASCL, SLASET, SORGBR, SORGLQ, SORGQR, SORMBR,
  249. $ XERBLA
  250. * ..
  251. * .. External Functions ..
  252. LOGICAL LSAME
  253. INTEGER ILAENV
  254. REAL SLAMCH, SLANGE
  255. EXTERNAL LSAME, ILAENV, SLAMCH, SLANGE
  256. * ..
  257. * .. Intrinsic Functions ..
  258. INTRINSIC MAX, MIN, SQRT
  259. * ..
  260. * .. Executable Statements ..
  261. *
  262. * Test the input arguments
  263. *
  264. INFO = 0
  265. MINMN = MIN( M, N )
  266. WNTUA = LSAME( JOBU, 'A' )
  267. WNTUS = LSAME( JOBU, 'S' )
  268. WNTUAS = WNTUA .OR. WNTUS
  269. WNTUO = LSAME( JOBU, 'O' )
  270. WNTUN = LSAME( JOBU, 'N' )
  271. WNTVA = LSAME( JOBVT, 'A' )
  272. WNTVS = LSAME( JOBVT, 'S' )
  273. WNTVAS = WNTVA .OR. WNTVS
  274. WNTVO = LSAME( JOBVT, 'O' )
  275. WNTVN = LSAME( JOBVT, 'N' )
  276. LQUERY = ( LWORK.EQ.-1 )
  277. *
  278. IF( .NOT.( WNTUA .OR. WNTUS .OR. WNTUO .OR. WNTUN ) ) THEN
  279. INFO = -1
  280. ELSE IF( .NOT.( WNTVA .OR. WNTVS .OR. WNTVO .OR. WNTVN ) .OR.
  281. $ ( WNTVO .AND. WNTUO ) ) THEN
  282. INFO = -2
  283. ELSE IF( M.LT.0 ) THEN
  284. INFO = -3
  285. ELSE IF( N.LT.0 ) THEN
  286. INFO = -4
  287. ELSE IF( LDA.LT.MAX( 1, M ) ) THEN
  288. INFO = -6
  289. ELSE IF( LDU.LT.1 .OR. ( WNTUAS .AND. LDU.LT.M ) ) THEN
  290. INFO = -9
  291. ELSE IF( LDVT.LT.1 .OR. ( WNTVA .AND. LDVT.LT.N ) .OR.
  292. $ ( WNTVS .AND. LDVT.LT.MINMN ) ) THEN
  293. INFO = -11
  294. END IF
  295. *
  296. * Compute workspace
  297. * (Note: Comments in the code beginning "Workspace:" describe the
  298. * minimal amount of workspace needed at that point in the code,
  299. * as well as the preferred amount for good performance.
  300. * NB refers to the optimal block size for the immediately
  301. * following subroutine, as returned by ILAENV.)
  302. *
  303. IF( INFO.EQ.0 ) THEN
  304. MINWRK = 1
  305. MAXWRK = 1
  306. IF( M.GE.N .AND. MINMN.GT.0 ) THEN
  307. *
  308. * Compute space needed for SBDSQR
  309. *
  310. MNTHR = ILAENV( 6, 'SGESVD', JOBU // JOBVT, M, N, 0, 0 )
  311. BDSPAC = 5*N
  312. * Compute space needed for SGEQRF
  313. CALL SGEQRF( M, N, A, LDA, DUM(1), DUM(1), -1, IERR )
  314. LWORK_SGEQRF = INT( DUM(1) )
  315. * Compute space needed for SORGQR
  316. CALL SORGQR( M, N, N, A, LDA, DUM(1), DUM(1), -1, IERR )
  317. LWORK_SORGQR_N = INT( DUM(1) )
  318. CALL SORGQR( M, M, N, A, LDA, DUM(1), DUM(1), -1, IERR )
  319. LWORK_SORGQR_M = INT( DUM(1) )
  320. * Compute space needed for SGEBRD
  321. CALL SGEBRD( N, N, A, LDA, S, DUM(1), DUM(1),
  322. $ DUM(1), DUM(1), -1, IERR )
  323. LWORK_SGEBRD = INT( DUM(1) )
  324. * Compute space needed for SORGBR P
  325. CALL SORGBR( 'P', N, N, N, A, LDA, DUM(1),
  326. $ DUM(1), -1, IERR )
  327. LWORK_SORGBR_P = INT( DUM(1) )
  328. * Compute space needed for SORGBR Q
  329. CALL SORGBR( 'Q', N, N, N, A, LDA, DUM(1),
  330. $ DUM(1), -1, IERR )
  331. LWORK_SORGBR_Q = INT( DUM(1) )
  332. *
  333. IF( M.GE.MNTHR ) THEN
  334. IF( WNTUN ) THEN
  335. *
  336. * Path 1 (M much larger than N, JOBU='N')
  337. *
  338. MAXWRK = N + LWORK_SGEQRF
  339. MAXWRK = MAX( MAXWRK, 3*N+LWORK_SGEBRD )
  340. IF( WNTVO .OR. WNTVAS )
  341. $ MAXWRK = MAX( MAXWRK, 3*N+LWORK_SORGBR_P )
  342. MAXWRK = MAX( MAXWRK, BDSPAC )
  343. MINWRK = MAX( 4*N, BDSPAC )
  344. ELSE IF( WNTUO .AND. WNTVN ) THEN
  345. *
  346. * Path 2 (M much larger than N, JOBU='O', JOBVT='N')
  347. *
  348. WRKBL = N + LWORK_SGEQRF
  349. WRKBL = MAX( WRKBL, N+LWORK_SORGQR_N )
  350. WRKBL = MAX( WRKBL, 3*N+LWORK_SGEBRD )
  351. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_Q )
  352. WRKBL = MAX( WRKBL, BDSPAC )
  353. MAXWRK = MAX( N*N+WRKBL, N*N+M*N+N )
  354. MINWRK = MAX( 3*N+M, BDSPAC )
  355. ELSE IF( WNTUO .AND. WNTVAS ) THEN
  356. *
  357. * Path 3 (M much larger than N, JOBU='O', JOBVT='S' or
  358. * 'A')
  359. *
  360. WRKBL = N + LWORK_SGEQRF
  361. WRKBL = MAX( WRKBL, N+LWORK_SORGQR_N )
  362. WRKBL = MAX( WRKBL, 3*N+LWORK_SGEBRD )
  363. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_Q )
  364. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_P )
  365. WRKBL = MAX( WRKBL, BDSPAC )
  366. MAXWRK = MAX( N*N+WRKBL, N*N+M*N+N )
  367. MINWRK = MAX( 3*N+M, BDSPAC )
  368. ELSE IF( WNTUS .AND. WNTVN ) THEN
  369. *
  370. * Path 4 (M much larger than N, JOBU='S', JOBVT='N')
  371. *
  372. WRKBL = N + LWORK_SGEQRF
  373. WRKBL = MAX( WRKBL, N+LWORK_SORGQR_N )
  374. WRKBL = MAX( WRKBL, 3*N+LWORK_SGEBRD )
  375. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_Q )
  376. WRKBL = MAX( WRKBL, BDSPAC )
  377. MAXWRK = N*N + WRKBL
  378. MINWRK = MAX( 3*N+M, BDSPAC )
  379. ELSE IF( WNTUS .AND. WNTVO ) THEN
  380. *
  381. * Path 5 (M much larger than N, JOBU='S', JOBVT='O')
  382. *
  383. WRKBL = N + LWORK_SGEQRF
  384. WRKBL = MAX( WRKBL, N+LWORK_SORGQR_N )
  385. WRKBL = MAX( WRKBL, 3*N+LWORK_SGEBRD )
  386. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_Q )
  387. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_P )
  388. WRKBL = MAX( WRKBL, BDSPAC )
  389. MAXWRK = 2*N*N + WRKBL
  390. MINWRK = MAX( 3*N+M, BDSPAC )
  391. ELSE IF( WNTUS .AND. WNTVAS ) THEN
  392. *
  393. * Path 6 (M much larger than N, JOBU='S', JOBVT='S' or
  394. * 'A')
  395. *
  396. WRKBL = N + LWORK_SGEQRF
  397. WRKBL = MAX( WRKBL, N+LWORK_SORGQR_N )
  398. WRKBL = MAX( WRKBL, 3*N+LWORK_SGEBRD )
  399. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_Q )
  400. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_P )
  401. WRKBL = MAX( WRKBL, BDSPAC )
  402. MAXWRK = N*N + WRKBL
  403. MINWRK = MAX( 3*N+M, BDSPAC )
  404. ELSE IF( WNTUA .AND. WNTVN ) THEN
  405. *
  406. * Path 7 (M much larger than N, JOBU='A', JOBVT='N')
  407. *
  408. WRKBL = N + LWORK_SGEQRF
  409. WRKBL = MAX( WRKBL, N+LWORK_SORGQR_M )
  410. WRKBL = MAX( WRKBL, 3*N+LWORK_SGEBRD )
  411. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_Q )
  412. WRKBL = MAX( WRKBL, BDSPAC )
  413. MAXWRK = N*N + WRKBL
  414. MINWRK = MAX( 3*N+M, BDSPAC )
  415. ELSE IF( WNTUA .AND. WNTVO ) THEN
  416. *
  417. * Path 8 (M much larger than N, JOBU='A', JOBVT='O')
  418. *
  419. WRKBL = N + LWORK_SGEQRF
  420. WRKBL = MAX( WRKBL, N+LWORK_SORGQR_M )
  421. WRKBL = MAX( WRKBL, 3*N+LWORK_SGEBRD )
  422. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_Q )
  423. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_P )
  424. WRKBL = MAX( WRKBL, BDSPAC )
  425. MAXWRK = 2*N*N + WRKBL
  426. MINWRK = MAX( 3*N+M, BDSPAC )
  427. ELSE IF( WNTUA .AND. WNTVAS ) THEN
  428. *
  429. * Path 9 (M much larger than N, JOBU='A', JOBVT='S' or
  430. * 'A')
  431. *
  432. WRKBL = N + LWORK_SGEQRF
  433. WRKBL = MAX( WRKBL, N+LWORK_SORGQR_M )
  434. WRKBL = MAX( WRKBL, 3*N+LWORK_SGEBRD )
  435. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_Q )
  436. WRKBL = MAX( WRKBL, 3*N+LWORK_SORGBR_P )
  437. WRKBL = MAX( WRKBL, BDSPAC )
  438. MAXWRK = N*N + WRKBL
  439. MINWRK = MAX( 3*N+M, BDSPAC )
  440. END IF
  441. ELSE
  442. *
  443. * Path 10 (M at least N, but not much larger)
  444. *
  445. CALL SGEBRD( M, N, A, LDA, S, DUM(1), DUM(1),
  446. $ DUM(1), DUM(1), -1, IERR )
  447. LWORK_SGEBRD = INT( DUM(1) )
  448. MAXWRK = 3*N + LWORK_SGEBRD
  449. IF( WNTUS .OR. WNTUO ) THEN
  450. CALL SORGBR( 'Q', M, N, N, A, LDA, DUM(1),
  451. $ DUM(1), -1, IERR )
  452. LWORK_SORGBR_Q = INT( DUM(1) )
  453. MAXWRK = MAX( MAXWRK, 3*N+LWORK_SORGBR_Q )
  454. END IF
  455. IF( WNTUA ) THEN
  456. CALL SORGBR( 'Q', M, M, N, A, LDA, DUM(1),
  457. $ DUM(1), -1, IERR )
  458. LWORK_SORGBR_Q = INT( DUM(1) )
  459. MAXWRK = MAX( MAXWRK, 3*N+LWORK_SORGBR_Q )
  460. END IF
  461. IF( .NOT.WNTVN ) THEN
  462. MAXWRK = MAX( MAXWRK, 3*N+LWORK_SORGBR_P )
  463. END IF
  464. MAXWRK = MAX( MAXWRK, BDSPAC )
  465. MINWRK = MAX( 3*N+M, BDSPAC )
  466. END IF
  467. ELSE IF( MINMN.GT.0 ) THEN
  468. *
  469. * Compute space needed for SBDSQR
  470. *
  471. MNTHR = ILAENV( 6, 'SGESVD', JOBU // JOBVT, M, N, 0, 0 )
  472. BDSPAC = 5*M
  473. * Compute space needed for SGELQF
  474. CALL SGELQF( M, N, A, LDA, DUM(1), DUM(1), -1, IERR )
  475. LWORK_SGELQF = INT( DUM(1) )
  476. * Compute space needed for SORGLQ
  477. CALL SORGLQ( N, N, M, DUM(1), N, DUM(1), DUM(1), -1, IERR )
  478. LWORK_SORGLQ_N = INT( DUM(1) )
  479. CALL SORGLQ( M, N, M, A, LDA, DUM(1), DUM(1), -1, IERR )
  480. LWORK_SORGLQ_M = INT( DUM(1) )
  481. * Compute space needed for SGEBRD
  482. CALL SGEBRD( M, M, A, LDA, S, DUM(1), DUM(1),
  483. $ DUM(1), DUM(1), -1, IERR )
  484. LWORK_SGEBRD = INT( DUM(1) )
  485. * Compute space needed for SORGBR P
  486. CALL SORGBR( 'P', M, M, M, A, N, DUM(1),
  487. $ DUM(1), -1, IERR )
  488. LWORK_SORGBR_P = INT( DUM(1) )
  489. * Compute space needed for SORGBR Q
  490. CALL SORGBR( 'Q', M, M, M, A, N, DUM(1),
  491. $ DUM(1), -1, IERR )
  492. LWORK_SORGBR_Q = INT( DUM(1) )
  493. IF( N.GE.MNTHR ) THEN
  494. IF( WNTVN ) THEN
  495. *
  496. * Path 1t(N much larger than M, JOBVT='N')
  497. *
  498. MAXWRK = M + LWORK_SGELQF
  499. MAXWRK = MAX( MAXWRK, 3*M+LWORK_SGEBRD )
  500. IF( WNTUO .OR. WNTUAS )
  501. $ MAXWRK = MAX( MAXWRK, 3*M+LWORK_SORGBR_Q )
  502. MAXWRK = MAX( MAXWRK, BDSPAC )
  503. MINWRK = MAX( 4*M, BDSPAC )
  504. ELSE IF( WNTVO .AND. WNTUN ) THEN
  505. *
  506. * Path 2t(N much larger than M, JOBU='N', JOBVT='O')
  507. *
  508. WRKBL = M + LWORK_SGELQF
  509. WRKBL = MAX( WRKBL, M+LWORK_SORGLQ_M )
  510. WRKBL = MAX( WRKBL, 3*M+LWORK_SGEBRD )
  511. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_P )
  512. WRKBL = MAX( WRKBL, BDSPAC )
  513. MAXWRK = MAX( M*M+WRKBL, M*M+M*N+M )
  514. MINWRK = MAX( 3*M+N, BDSPAC )
  515. ELSE IF( WNTVO .AND. WNTUAS ) THEN
  516. *
  517. * Path 3t(N much larger than M, JOBU='S' or 'A',
  518. * JOBVT='O')
  519. *
  520. WRKBL = M + LWORK_SGELQF
  521. WRKBL = MAX( WRKBL, M+LWORK_SORGLQ_M )
  522. WRKBL = MAX( WRKBL, 3*M+LWORK_SGEBRD )
  523. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_P )
  524. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_Q )
  525. WRKBL = MAX( WRKBL, BDSPAC )
  526. MAXWRK = MAX( M*M+WRKBL, M*M+M*N+M )
  527. MINWRK = MAX( 3*M+N, BDSPAC )
  528. ELSE IF( WNTVS .AND. WNTUN ) THEN
  529. *
  530. * Path 4t(N much larger than M, JOBU='N', JOBVT='S')
  531. *
  532. WRKBL = M + LWORK_SGELQF
  533. WRKBL = MAX( WRKBL, M+LWORK_SORGLQ_M )
  534. WRKBL = MAX( WRKBL, 3*M+LWORK_SGEBRD )
  535. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_P )
  536. WRKBL = MAX( WRKBL, BDSPAC )
  537. MAXWRK = M*M + WRKBL
  538. MINWRK = MAX( 3*M+N, BDSPAC )
  539. ELSE IF( WNTVS .AND. WNTUO ) THEN
  540. *
  541. * Path 5t(N much larger than M, JOBU='O', JOBVT='S')
  542. *
  543. WRKBL = M + LWORK_SGELQF
  544. WRKBL = MAX( WRKBL, M+LWORK_SORGLQ_M )
  545. WRKBL = MAX( WRKBL, 3*M+LWORK_SGEBRD )
  546. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_P )
  547. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_Q )
  548. WRKBL = MAX( WRKBL, BDSPAC )
  549. MAXWRK = 2*M*M + WRKBL
  550. MINWRK = MAX( 3*M+N, BDSPAC )
  551. MAXWRK = MAX( MAXWRK, MINWRK )
  552. ELSE IF( WNTVS .AND. WNTUAS ) THEN
  553. *
  554. * Path 6t(N much larger than M, JOBU='S' or 'A',
  555. * JOBVT='S')
  556. *
  557. WRKBL = M + LWORK_SGELQF
  558. WRKBL = MAX( WRKBL, M+LWORK_SORGLQ_M )
  559. WRKBL = MAX( WRKBL, 3*M+LWORK_SGEBRD )
  560. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_P )
  561. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_Q )
  562. WRKBL = MAX( WRKBL, BDSPAC )
  563. MAXWRK = M*M + WRKBL
  564. MINWRK = MAX( 3*M+N, BDSPAC )
  565. ELSE IF( WNTVA .AND. WNTUN ) THEN
  566. *
  567. * Path 7t(N much larger than M, JOBU='N', JOBVT='A')
  568. *
  569. WRKBL = M + LWORK_SGELQF
  570. WRKBL = MAX( WRKBL, M+LWORK_SORGLQ_N )
  571. WRKBL = MAX( WRKBL, 3*M+LWORK_SGEBRD )
  572. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_P )
  573. WRKBL = MAX( WRKBL, BDSPAC )
  574. MAXWRK = M*M + WRKBL
  575. MINWRK = MAX( 3*M+N, BDSPAC )
  576. ELSE IF( WNTVA .AND. WNTUO ) THEN
  577. *
  578. * Path 8t(N much larger than M, JOBU='O', JOBVT='A')
  579. *
  580. WRKBL = M + LWORK_SGELQF
  581. WRKBL = MAX( WRKBL, M+LWORK_SORGLQ_N )
  582. WRKBL = MAX( WRKBL, 3*M+LWORK_SGEBRD )
  583. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_P )
  584. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_Q )
  585. WRKBL = MAX( WRKBL, BDSPAC )
  586. MAXWRK = 2*M*M + WRKBL
  587. MINWRK = MAX( 3*M+N, BDSPAC )
  588. ELSE IF( WNTVA .AND. WNTUAS ) THEN
  589. *
  590. * Path 9t(N much larger than M, JOBU='S' or 'A',
  591. * JOBVT='A')
  592. *
  593. WRKBL = M + LWORK_SGELQF
  594. WRKBL = MAX( WRKBL, M+LWORK_SORGLQ_N )
  595. WRKBL = MAX( WRKBL, 3*M+LWORK_SGEBRD )
  596. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_P )
  597. WRKBL = MAX( WRKBL, 3*M+LWORK_SORGBR_Q )
  598. WRKBL = MAX( WRKBL, BDSPAC )
  599. MAXWRK = M*M + WRKBL
  600. MINWRK = MAX( 3*M+N, BDSPAC )
  601. END IF
  602. ELSE
  603. *
  604. * Path 10t(N greater than M, but not much larger)
  605. *
  606. CALL SGEBRD( M, N, A, LDA, S, DUM(1), DUM(1),
  607. $ DUM(1), DUM(1), -1, IERR )
  608. LWORK_SGEBRD = INT( DUM(1) )
  609. MAXWRK = 3*M + LWORK_SGEBRD
  610. IF( WNTVS .OR. WNTVO ) THEN
  611. * Compute space needed for SORGBR P
  612. CALL SORGBR( 'P', M, N, M, A, N, DUM(1),
  613. $ DUM(1), -1, IERR )
  614. LWORK_SORGBR_P = INT( DUM(1) )
  615. MAXWRK = MAX( MAXWRK, 3*M+LWORK_SORGBR_P )
  616. END IF
  617. IF( WNTVA ) THEN
  618. CALL SORGBR( 'P', N, N, M, A, N, DUM(1),
  619. $ DUM(1), -1, IERR )
  620. LWORK_SORGBR_P = INT( DUM(1) )
  621. MAXWRK = MAX( MAXWRK, 3*M+LWORK_SORGBR_P )
  622. END IF
  623. IF( .NOT.WNTUN ) THEN
  624. MAXWRK = MAX( MAXWRK, 3*M+LWORK_SORGBR_Q )
  625. END IF
  626. MAXWRK = MAX( MAXWRK, BDSPAC )
  627. MINWRK = MAX( 3*M+N, BDSPAC )
  628. END IF
  629. END IF
  630. MAXWRK = MAX( MAXWRK, MINWRK )
  631. WORK( 1 ) = MAXWRK
  632. *
  633. IF( LWORK.LT.MINWRK .AND. .NOT.LQUERY ) THEN
  634. INFO = -13
  635. END IF
  636. END IF
  637. *
  638. IF( INFO.NE.0 ) THEN
  639. CALL XERBLA( 'SGESVD', -INFO )
  640. RETURN
  641. ELSE IF( LQUERY ) THEN
  642. RETURN
  643. END IF
  644. *
  645. * Quick return if possible
  646. *
  647. IF( M.EQ.0 .OR. N.EQ.0 ) THEN
  648. RETURN
  649. END IF
  650. *
  651. * Get machine constants
  652. *
  653. EPS = SLAMCH( 'P' )
  654. SMLNUM = SQRT( SLAMCH( 'S' ) ) / EPS
  655. BIGNUM = ONE / SMLNUM
  656. *
  657. * Scale A if max element outside range [SMLNUM,BIGNUM]
  658. *
  659. ANRM = SLANGE( 'M', M, N, A, LDA, DUM )
  660. ISCL = 0
  661. IF( ANRM.GT.ZERO .AND. ANRM.LT.SMLNUM ) THEN
  662. ISCL = 1
  663. CALL SLASCL( 'G', 0, 0, ANRM, SMLNUM, M, N, A, LDA, IERR )
  664. ELSE IF( ANRM.GT.BIGNUM ) THEN
  665. ISCL = 1
  666. CALL SLASCL( 'G', 0, 0, ANRM, BIGNUM, M, N, A, LDA, IERR )
  667. END IF
  668. *
  669. IF( M.GE.N ) THEN
  670. *
  671. * A has at least as many rows as columns. If A has sufficiently
  672. * more rows than columns, first reduce using the QR
  673. * decomposition (if sufficient workspace available)
  674. *
  675. IF( M.GE.MNTHR ) THEN
  676. *
  677. IF( WNTUN ) THEN
  678. *
  679. * Path 1 (M much larger than N, JOBU='N')
  680. * No left singular vectors to be computed
  681. *
  682. ITAU = 1
  683. IWORK = ITAU + N
  684. *
  685. * Compute A=Q*R
  686. * (Workspace: need 2*N, prefer N+N*NB)
  687. *
  688. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ), WORK( IWORK ),
  689. $ LWORK-IWORK+1, IERR )
  690. *
  691. * Zero out below R
  692. *
  693. IF( N .GT. 1 ) THEN
  694. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO, A( 2, 1 ),
  695. $ LDA )
  696. END IF
  697. IE = 1
  698. ITAUQ = IE + N
  699. ITAUP = ITAUQ + N
  700. IWORK = ITAUP + N
  701. *
  702. * Bidiagonalize R in A
  703. * (Workspace: need 4*N, prefer 3*N+2*N*NB)
  704. *
  705. CALL SGEBRD( N, N, A, LDA, S, WORK( IE ), WORK( ITAUQ ),
  706. $ WORK( ITAUP ), WORK( IWORK ), LWORK-IWORK+1,
  707. $ IERR )
  708. NCVT = 0
  709. IF( WNTVO .OR. WNTVAS ) THEN
  710. *
  711. * If right singular vectors desired, generate P'.
  712. * (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB)
  713. *
  714. CALL SORGBR( 'P', N, N, N, A, LDA, WORK( ITAUP ),
  715. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  716. NCVT = N
  717. END IF
  718. IWORK = IE + N
  719. *
  720. * Perform bidiagonal QR iteration, computing right
  721. * singular vectors of A in A if desired
  722. * (Workspace: need BDSPAC)
  723. *
  724. CALL SBDSQR( 'U', N, NCVT, 0, 0, S, WORK( IE ), A, LDA,
  725. $ DUM, 1, DUM, 1, WORK( IWORK ), INFO )
  726. *
  727. * If right singular vectors desired in VT, copy them there
  728. *
  729. IF( WNTVAS )
  730. $ CALL SLACPY( 'F', N, N, A, LDA, VT, LDVT )
  731. *
  732. ELSE IF( WNTUO .AND. WNTVN ) THEN
  733. *
  734. * Path 2 (M much larger than N, JOBU='O', JOBVT='N')
  735. * N left singular vectors to be overwritten on A and
  736. * no right singular vectors to be computed
  737. *
  738. IF( LWORK.GE.N*N+MAX( 4*N, BDSPAC ) ) THEN
  739. *
  740. * Sufficient workspace for a fast algorithm
  741. *
  742. IR = 1
  743. IF( LWORK.GE.MAX( WRKBL, LDA*N+N )+LDA*N ) THEN
  744. *
  745. * WORK(IU) is LDA by N, WORK(IR) is LDA by N
  746. *
  747. LDWRKU = LDA
  748. LDWRKR = LDA
  749. ELSE IF( LWORK.GE.MAX( WRKBL, LDA*N+N )+N*N ) THEN
  750. *
  751. * WORK(IU) is LDA by N, WORK(IR) is N by N
  752. *
  753. LDWRKU = LDA
  754. LDWRKR = N
  755. ELSE
  756. *
  757. * WORK(IU) is LDWRKU by N, WORK(IR) is N by N
  758. *
  759. LDWRKU = ( LWORK-N*N-N ) / N
  760. LDWRKR = N
  761. END IF
  762. ITAU = IR + LDWRKR*N
  763. IWORK = ITAU + N
  764. *
  765. * Compute A=Q*R
  766. * (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
  767. *
  768. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  769. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  770. *
  771. * Copy R to WORK(IR) and zero out below it
  772. *
  773. CALL SLACPY( 'U', N, N, A, LDA, WORK( IR ), LDWRKR )
  774. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO, WORK( IR+1 ),
  775. $ LDWRKR )
  776. *
  777. * Generate Q in A
  778. * (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
  779. *
  780. CALL SORGQR( M, N, N, A, LDA, WORK( ITAU ),
  781. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  782. IE = ITAU
  783. ITAUQ = IE + N
  784. ITAUP = ITAUQ + N
  785. IWORK = ITAUP + N
  786. *
  787. * Bidiagonalize R in WORK(IR)
  788. * (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
  789. *
  790. CALL SGEBRD( N, N, WORK( IR ), LDWRKR, S, WORK( IE ),
  791. $ WORK( ITAUQ ), WORK( ITAUP ),
  792. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  793. *
  794. * Generate left vectors bidiagonalizing R
  795. * (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB)
  796. *
  797. CALL SORGBR( 'Q', N, N, N, WORK( IR ), LDWRKR,
  798. $ WORK( ITAUQ ), WORK( IWORK ),
  799. $ LWORK-IWORK+1, IERR )
  800. IWORK = IE + N
  801. *
  802. * Perform bidiagonal QR iteration, computing left
  803. * singular vectors of R in WORK(IR)
  804. * (Workspace: need N*N+BDSPAC)
  805. *
  806. CALL SBDSQR( 'U', N, 0, N, 0, S, WORK( IE ), DUM, 1,
  807. $ WORK( IR ), LDWRKR, DUM, 1,
  808. $ WORK( IWORK ), INFO )
  809. IU = IE + N
  810. *
  811. * Multiply Q in A by left singular vectors of R in
  812. * WORK(IR), storing result in WORK(IU) and copying to A
  813. * (Workspace: need N*N+2*N, prefer N*N+M*N+N)
  814. *
  815. DO 10 I = 1, M, LDWRKU
  816. CHUNK = MIN( M-I+1, LDWRKU )
  817. CALL SGEMM( 'N', 'N', CHUNK, N, N, ONE, A( I, 1 ),
  818. $ LDA, WORK( IR ), LDWRKR, ZERO,
  819. $ WORK( IU ), LDWRKU )
  820. CALL SLACPY( 'F', CHUNK, N, WORK( IU ), LDWRKU,
  821. $ A( I, 1 ), LDA )
  822. 10 CONTINUE
  823. *
  824. ELSE
  825. *
  826. * Insufficient workspace for a fast algorithm
  827. *
  828. IE = 1
  829. ITAUQ = IE + N
  830. ITAUP = ITAUQ + N
  831. IWORK = ITAUP + N
  832. *
  833. * Bidiagonalize A
  834. * (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB)
  835. *
  836. CALL SGEBRD( M, N, A, LDA, S, WORK( IE ),
  837. $ WORK( ITAUQ ), WORK( ITAUP ),
  838. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  839. *
  840. * Generate left vectors bidiagonalizing A
  841. * (Workspace: need 4*N, prefer 3*N+N*NB)
  842. *
  843. CALL SORGBR( 'Q', M, N, N, A, LDA, WORK( ITAUQ ),
  844. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  845. IWORK = IE + N
  846. *
  847. * Perform bidiagonal QR iteration, computing left
  848. * singular vectors of A in A
  849. * (Workspace: need BDSPAC)
  850. *
  851. CALL SBDSQR( 'U', N, 0, M, 0, S, WORK( IE ), DUM, 1,
  852. $ A, LDA, DUM, 1, WORK( IWORK ), INFO )
  853. *
  854. END IF
  855. *
  856. ELSE IF( WNTUO .AND. WNTVAS ) THEN
  857. *
  858. * Path 3 (M much larger than N, JOBU='O', JOBVT='S' or 'A')
  859. * N left singular vectors to be overwritten on A and
  860. * N right singular vectors to be computed in VT
  861. *
  862. IF( LWORK.GE.N*N+MAX( 4*N, BDSPAC ) ) THEN
  863. *
  864. * Sufficient workspace for a fast algorithm
  865. *
  866. IR = 1
  867. IF( LWORK.GE.MAX( WRKBL, LDA*N+N )+LDA*N ) THEN
  868. *
  869. * WORK(IU) is LDA by N and WORK(IR) is LDA by N
  870. *
  871. LDWRKU = LDA
  872. LDWRKR = LDA
  873. ELSE IF( LWORK.GE.MAX( WRKBL, LDA*N+N )+N*N ) THEN
  874. *
  875. * WORK(IU) is LDA by N and WORK(IR) is N by N
  876. *
  877. LDWRKU = LDA
  878. LDWRKR = N
  879. ELSE
  880. *
  881. * WORK(IU) is LDWRKU by N and WORK(IR) is N by N
  882. *
  883. LDWRKU = ( LWORK-N*N-N ) / N
  884. LDWRKR = N
  885. END IF
  886. ITAU = IR + LDWRKR*N
  887. IWORK = ITAU + N
  888. *
  889. * Compute A=Q*R
  890. * (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
  891. *
  892. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  893. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  894. *
  895. * Copy R to VT, zeroing out below it
  896. *
  897. CALL SLACPY( 'U', N, N, A, LDA, VT, LDVT )
  898. IF( N.GT.1 )
  899. $ CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  900. $ VT( 2, 1 ), LDVT )
  901. *
  902. * Generate Q in A
  903. * (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
  904. *
  905. CALL SORGQR( M, N, N, A, LDA, WORK( ITAU ),
  906. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  907. IE = ITAU
  908. ITAUQ = IE + N
  909. ITAUP = ITAUQ + N
  910. IWORK = ITAUP + N
  911. *
  912. * Bidiagonalize R in VT, copying result to WORK(IR)
  913. * (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
  914. *
  915. CALL SGEBRD( N, N, VT, LDVT, S, WORK( IE ),
  916. $ WORK( ITAUQ ), WORK( ITAUP ),
  917. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  918. CALL SLACPY( 'L', N, N, VT, LDVT, WORK( IR ), LDWRKR )
  919. *
  920. * Generate left vectors bidiagonalizing R in WORK(IR)
  921. * (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB)
  922. *
  923. CALL SORGBR( 'Q', N, N, N, WORK( IR ), LDWRKR,
  924. $ WORK( ITAUQ ), WORK( IWORK ),
  925. $ LWORK-IWORK+1, IERR )
  926. *
  927. * Generate right vectors bidiagonalizing R in VT
  928. * (Workspace: need N*N+4*N-1, prefer N*N+3*N+(N-1)*NB)
  929. *
  930. CALL SORGBR( 'P', N, N, N, VT, LDVT, WORK( ITAUP ),
  931. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  932. IWORK = IE + N
  933. *
  934. * Perform bidiagonal QR iteration, computing left
  935. * singular vectors of R in WORK(IR) and computing right
  936. * singular vectors of R in VT
  937. * (Workspace: need N*N+BDSPAC)
  938. *
  939. CALL SBDSQR( 'U', N, N, N, 0, S, WORK( IE ), VT, LDVT,
  940. $ WORK( IR ), LDWRKR, DUM, 1,
  941. $ WORK( IWORK ), INFO )
  942. IU = IE + N
  943. *
  944. * Multiply Q in A by left singular vectors of R in
  945. * WORK(IR), storing result in WORK(IU) and copying to A
  946. * (Workspace: need N*N+2*N, prefer N*N+M*N+N)
  947. *
  948. DO 20 I = 1, M, LDWRKU
  949. CHUNK = MIN( M-I+1, LDWRKU )
  950. CALL SGEMM( 'N', 'N', CHUNK, N, N, ONE, A( I, 1 ),
  951. $ LDA, WORK( IR ), LDWRKR, ZERO,
  952. $ WORK( IU ), LDWRKU )
  953. CALL SLACPY( 'F', CHUNK, N, WORK( IU ), LDWRKU,
  954. $ A( I, 1 ), LDA )
  955. 20 CONTINUE
  956. *
  957. ELSE
  958. *
  959. * Insufficient workspace for a fast algorithm
  960. *
  961. ITAU = 1
  962. IWORK = ITAU + N
  963. *
  964. * Compute A=Q*R
  965. * (Workspace: need 2*N, prefer N+N*NB)
  966. *
  967. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  968. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  969. *
  970. * Copy R to VT, zeroing out below it
  971. *
  972. CALL SLACPY( 'U', N, N, A, LDA, VT, LDVT )
  973. IF( N.GT.1 )
  974. $ CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  975. $ VT( 2, 1 ), LDVT )
  976. *
  977. * Generate Q in A
  978. * (Workspace: need 2*N, prefer N+N*NB)
  979. *
  980. CALL SORGQR( M, N, N, A, LDA, WORK( ITAU ),
  981. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  982. IE = ITAU
  983. ITAUQ = IE + N
  984. ITAUP = ITAUQ + N
  985. IWORK = ITAUP + N
  986. *
  987. * Bidiagonalize R in VT
  988. * (Workspace: need 4*N, prefer 3*N+2*N*NB)
  989. *
  990. CALL SGEBRD( N, N, VT, LDVT, S, WORK( IE ),
  991. $ WORK( ITAUQ ), WORK( ITAUP ),
  992. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  993. *
  994. * Multiply Q in A by left vectors bidiagonalizing R
  995. * (Workspace: need 3*N+M, prefer 3*N+M*NB)
  996. *
  997. CALL SORMBR( 'Q', 'R', 'N', M, N, N, VT, LDVT,
  998. $ WORK( ITAUQ ), A, LDA, WORK( IWORK ),
  999. $ LWORK-IWORK+1, IERR )
  1000. *
  1001. * Generate right vectors bidiagonalizing R in VT
  1002. * (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB)
  1003. *
  1004. CALL SORGBR( 'P', N, N, N, VT, LDVT, WORK( ITAUP ),
  1005. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1006. IWORK = IE + N
  1007. *
  1008. * Perform bidiagonal QR iteration, computing left
  1009. * singular vectors of A in A and computing right
  1010. * singular vectors of A in VT
  1011. * (Workspace: need BDSPAC)
  1012. *
  1013. CALL SBDSQR( 'U', N, N, M, 0, S, WORK( IE ), VT, LDVT,
  1014. $ A, LDA, DUM, 1, WORK( IWORK ), INFO )
  1015. *
  1016. END IF
  1017. *
  1018. ELSE IF( WNTUS ) THEN
  1019. *
  1020. IF( WNTVN ) THEN
  1021. *
  1022. * Path 4 (M much larger than N, JOBU='S', JOBVT='N')
  1023. * N left singular vectors to be computed in U and
  1024. * no right singular vectors to be computed
  1025. *
  1026. IF( LWORK.GE.N*N+MAX( 4*N, BDSPAC ) ) THEN
  1027. *
  1028. * Sufficient workspace for a fast algorithm
  1029. *
  1030. IR = 1
  1031. IF( LWORK.GE.WRKBL+LDA*N ) THEN
  1032. *
  1033. * WORK(IR) is LDA by N
  1034. *
  1035. LDWRKR = LDA
  1036. ELSE
  1037. *
  1038. * WORK(IR) is N by N
  1039. *
  1040. LDWRKR = N
  1041. END IF
  1042. ITAU = IR + LDWRKR*N
  1043. IWORK = ITAU + N
  1044. *
  1045. * Compute A=Q*R
  1046. * (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
  1047. *
  1048. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1049. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1050. *
  1051. * Copy R to WORK(IR), zeroing out below it
  1052. *
  1053. CALL SLACPY( 'U', N, N, A, LDA, WORK( IR ),
  1054. $ LDWRKR )
  1055. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1056. $ WORK( IR+1 ), LDWRKR )
  1057. *
  1058. * Generate Q in A
  1059. * (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
  1060. *
  1061. CALL SORGQR( M, N, N, A, LDA, WORK( ITAU ),
  1062. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1063. IE = ITAU
  1064. ITAUQ = IE + N
  1065. ITAUP = ITAUQ + N
  1066. IWORK = ITAUP + N
  1067. *
  1068. * Bidiagonalize R in WORK(IR)
  1069. * (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
  1070. *
  1071. CALL SGEBRD( N, N, WORK( IR ), LDWRKR, S,
  1072. $ WORK( IE ), WORK( ITAUQ ),
  1073. $ WORK( ITAUP ), WORK( IWORK ),
  1074. $ LWORK-IWORK+1, IERR )
  1075. *
  1076. * Generate left vectors bidiagonalizing R in WORK(IR)
  1077. * (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB)
  1078. *
  1079. CALL SORGBR( 'Q', N, N, N, WORK( IR ), LDWRKR,
  1080. $ WORK( ITAUQ ), WORK( IWORK ),
  1081. $ LWORK-IWORK+1, IERR )
  1082. IWORK = IE + N
  1083. *
  1084. * Perform bidiagonal QR iteration, computing left
  1085. * singular vectors of R in WORK(IR)
  1086. * (Workspace: need N*N+BDSPAC)
  1087. *
  1088. CALL SBDSQR( 'U', N, 0, N, 0, S, WORK( IE ), DUM,
  1089. $ 1, WORK( IR ), LDWRKR, DUM, 1,
  1090. $ WORK( IWORK ), INFO )
  1091. *
  1092. * Multiply Q in A by left singular vectors of R in
  1093. * WORK(IR), storing result in U
  1094. * (Workspace: need N*N)
  1095. *
  1096. CALL SGEMM( 'N', 'N', M, N, N, ONE, A, LDA,
  1097. $ WORK( IR ), LDWRKR, ZERO, U, LDU )
  1098. *
  1099. ELSE
  1100. *
  1101. * Insufficient workspace for a fast algorithm
  1102. *
  1103. ITAU = 1
  1104. IWORK = ITAU + N
  1105. *
  1106. * Compute A=Q*R, copying result to U
  1107. * (Workspace: need 2*N, prefer N+N*NB)
  1108. *
  1109. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1110. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1111. CALL SLACPY( 'L', M, N, A, LDA, U, LDU )
  1112. *
  1113. * Generate Q in U
  1114. * (Workspace: need 2*N, prefer N+N*NB)
  1115. *
  1116. CALL SORGQR( M, N, N, U, LDU, WORK( ITAU ),
  1117. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1118. IE = ITAU
  1119. ITAUQ = IE + N
  1120. ITAUP = ITAUQ + N
  1121. IWORK = ITAUP + N
  1122. *
  1123. * Zero out below R in A
  1124. *
  1125. IF( N .GT. 1 ) THEN
  1126. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1127. $ A( 2, 1 ), LDA )
  1128. END IF
  1129. *
  1130. * Bidiagonalize R in A
  1131. * (Workspace: need 4*N, prefer 3*N+2*N*NB)
  1132. *
  1133. CALL SGEBRD( N, N, A, LDA, S, WORK( IE ),
  1134. $ WORK( ITAUQ ), WORK( ITAUP ),
  1135. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1136. *
  1137. * Multiply Q in U by left vectors bidiagonalizing R
  1138. * (Workspace: need 3*N+M, prefer 3*N+M*NB)
  1139. *
  1140. CALL SORMBR( 'Q', 'R', 'N', M, N, N, A, LDA,
  1141. $ WORK( ITAUQ ), U, LDU, WORK( IWORK ),
  1142. $ LWORK-IWORK+1, IERR )
  1143. IWORK = IE + N
  1144. *
  1145. * Perform bidiagonal QR iteration, computing left
  1146. * singular vectors of A in U
  1147. * (Workspace: need BDSPAC)
  1148. *
  1149. CALL SBDSQR( 'U', N, 0, M, 0, S, WORK( IE ), DUM,
  1150. $ 1, U, LDU, DUM, 1, WORK( IWORK ),
  1151. $ INFO )
  1152. *
  1153. END IF
  1154. *
  1155. ELSE IF( WNTVO ) THEN
  1156. *
  1157. * Path 5 (M much larger than N, JOBU='S', JOBVT='O')
  1158. * N left singular vectors to be computed in U and
  1159. * N right singular vectors to be overwritten on A
  1160. *
  1161. IF( LWORK.GE.2*N*N+MAX( 4*N, BDSPAC ) ) THEN
  1162. *
  1163. * Sufficient workspace for a fast algorithm
  1164. *
  1165. IU = 1
  1166. IF( LWORK.GE.WRKBL+2*LDA*N ) THEN
  1167. *
  1168. * WORK(IU) is LDA by N and WORK(IR) is LDA by N
  1169. *
  1170. LDWRKU = LDA
  1171. IR = IU + LDWRKU*N
  1172. LDWRKR = LDA
  1173. ELSE IF( LWORK.GE.WRKBL+( LDA+N )*N ) THEN
  1174. *
  1175. * WORK(IU) is LDA by N and WORK(IR) is N by N
  1176. *
  1177. LDWRKU = LDA
  1178. IR = IU + LDWRKU*N
  1179. LDWRKR = N
  1180. ELSE
  1181. *
  1182. * WORK(IU) is N by N and WORK(IR) is N by N
  1183. *
  1184. LDWRKU = N
  1185. IR = IU + LDWRKU*N
  1186. LDWRKR = N
  1187. END IF
  1188. ITAU = IR + LDWRKR*N
  1189. IWORK = ITAU + N
  1190. *
  1191. * Compute A=Q*R
  1192. * (Workspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB)
  1193. *
  1194. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1195. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1196. *
  1197. * Copy R to WORK(IU), zeroing out below it
  1198. *
  1199. CALL SLACPY( 'U', N, N, A, LDA, WORK( IU ),
  1200. $ LDWRKU )
  1201. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1202. $ WORK( IU+1 ), LDWRKU )
  1203. *
  1204. * Generate Q in A
  1205. * (Workspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB)
  1206. *
  1207. CALL SORGQR( M, N, N, A, LDA, WORK( ITAU ),
  1208. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1209. IE = ITAU
  1210. ITAUQ = IE + N
  1211. ITAUP = ITAUQ + N
  1212. IWORK = ITAUP + N
  1213. *
  1214. * Bidiagonalize R in WORK(IU), copying result to
  1215. * WORK(IR)
  1216. * (Workspace: need 2*N*N+4*N,
  1217. * prefer 2*N*N+3*N+2*N*NB)
  1218. *
  1219. CALL SGEBRD( N, N, WORK( IU ), LDWRKU, S,
  1220. $ WORK( IE ), WORK( ITAUQ ),
  1221. $ WORK( ITAUP ), WORK( IWORK ),
  1222. $ LWORK-IWORK+1, IERR )
  1223. CALL SLACPY( 'U', N, N, WORK( IU ), LDWRKU,
  1224. $ WORK( IR ), LDWRKR )
  1225. *
  1226. * Generate left bidiagonalizing vectors in WORK(IU)
  1227. * (Workspace: need 2*N*N+4*N, prefer 2*N*N+3*N+N*NB)
  1228. *
  1229. CALL SORGBR( 'Q', N, N, N, WORK( IU ), LDWRKU,
  1230. $ WORK( ITAUQ ), WORK( IWORK ),
  1231. $ LWORK-IWORK+1, IERR )
  1232. *
  1233. * Generate right bidiagonalizing vectors in WORK(IR)
  1234. * (Workspace: need 2*N*N+4*N-1,
  1235. * prefer 2*N*N+3*N+(N-1)*NB)
  1236. *
  1237. CALL SORGBR( 'P', N, N, N, WORK( IR ), LDWRKR,
  1238. $ WORK( ITAUP ), WORK( IWORK ),
  1239. $ LWORK-IWORK+1, IERR )
  1240. IWORK = IE + N
  1241. *
  1242. * Perform bidiagonal QR iteration, computing left
  1243. * singular vectors of R in WORK(IU) and computing
  1244. * right singular vectors of R in WORK(IR)
  1245. * (Workspace: need 2*N*N+BDSPAC)
  1246. *
  1247. CALL SBDSQR( 'U', N, N, N, 0, S, WORK( IE ),
  1248. $ WORK( IR ), LDWRKR, WORK( IU ),
  1249. $ LDWRKU, DUM, 1, WORK( IWORK ), INFO )
  1250. *
  1251. * Multiply Q in A by left singular vectors of R in
  1252. * WORK(IU), storing result in U
  1253. * (Workspace: need N*N)
  1254. *
  1255. CALL SGEMM( 'N', 'N', M, N, N, ONE, A, LDA,
  1256. $ WORK( IU ), LDWRKU, ZERO, U, LDU )
  1257. *
  1258. * Copy right singular vectors of R to A
  1259. * (Workspace: need N*N)
  1260. *
  1261. CALL SLACPY( 'F', N, N, WORK( IR ), LDWRKR, A,
  1262. $ LDA )
  1263. *
  1264. ELSE
  1265. *
  1266. * Insufficient workspace for a fast algorithm
  1267. *
  1268. ITAU = 1
  1269. IWORK = ITAU + N
  1270. *
  1271. * Compute A=Q*R, copying result to U
  1272. * (Workspace: need 2*N, prefer N+N*NB)
  1273. *
  1274. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1275. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1276. CALL SLACPY( 'L', M, N, A, LDA, U, LDU )
  1277. *
  1278. * Generate Q in U
  1279. * (Workspace: need 2*N, prefer N+N*NB)
  1280. *
  1281. CALL SORGQR( M, N, N, U, LDU, WORK( ITAU ),
  1282. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1283. IE = ITAU
  1284. ITAUQ = IE + N
  1285. ITAUP = ITAUQ + N
  1286. IWORK = ITAUP + N
  1287. *
  1288. * Zero out below R in A
  1289. *
  1290. IF( N .GT. 1 ) THEN
  1291. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1292. $ A( 2, 1 ), LDA )
  1293. END IF
  1294. *
  1295. * Bidiagonalize R in A
  1296. * (Workspace: need 4*N, prefer 3*N+2*N*NB)
  1297. *
  1298. CALL SGEBRD( N, N, A, LDA, S, WORK( IE ),
  1299. $ WORK( ITAUQ ), WORK( ITAUP ),
  1300. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1301. *
  1302. * Multiply Q in U by left vectors bidiagonalizing R
  1303. * (Workspace: need 3*N+M, prefer 3*N+M*NB)
  1304. *
  1305. CALL SORMBR( 'Q', 'R', 'N', M, N, N, A, LDA,
  1306. $ WORK( ITAUQ ), U, LDU, WORK( IWORK ),
  1307. $ LWORK-IWORK+1, IERR )
  1308. *
  1309. * Generate right vectors bidiagonalizing R in A
  1310. * (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB)
  1311. *
  1312. CALL SORGBR( 'P', N, N, N, A, LDA, WORK( ITAUP ),
  1313. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1314. IWORK = IE + N
  1315. *
  1316. * Perform bidiagonal QR iteration, computing left
  1317. * singular vectors of A in U and computing right
  1318. * singular vectors of A in A
  1319. * (Workspace: need BDSPAC)
  1320. *
  1321. CALL SBDSQR( 'U', N, N, M, 0, S, WORK( IE ), A,
  1322. $ LDA, U, LDU, DUM, 1, WORK( IWORK ),
  1323. $ INFO )
  1324. *
  1325. END IF
  1326. *
  1327. ELSE IF( WNTVAS ) THEN
  1328. *
  1329. * Path 6 (M much larger than N, JOBU='S', JOBVT='S'
  1330. * or 'A')
  1331. * N left singular vectors to be computed in U and
  1332. * N right singular vectors to be computed in VT
  1333. *
  1334. IF( LWORK.GE.N*N+MAX( 4*N, BDSPAC ) ) THEN
  1335. *
  1336. * Sufficient workspace for a fast algorithm
  1337. *
  1338. IU = 1
  1339. IF( LWORK.GE.WRKBL+LDA*N ) THEN
  1340. *
  1341. * WORK(IU) is LDA by N
  1342. *
  1343. LDWRKU = LDA
  1344. ELSE
  1345. *
  1346. * WORK(IU) is N by N
  1347. *
  1348. LDWRKU = N
  1349. END IF
  1350. ITAU = IU + LDWRKU*N
  1351. IWORK = ITAU + N
  1352. *
  1353. * Compute A=Q*R
  1354. * (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
  1355. *
  1356. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1357. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1358. *
  1359. * Copy R to WORK(IU), zeroing out below it
  1360. *
  1361. CALL SLACPY( 'U', N, N, A, LDA, WORK( IU ),
  1362. $ LDWRKU )
  1363. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1364. $ WORK( IU+1 ), LDWRKU )
  1365. *
  1366. * Generate Q in A
  1367. * (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
  1368. *
  1369. CALL SORGQR( M, N, N, A, LDA, WORK( ITAU ),
  1370. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1371. IE = ITAU
  1372. ITAUQ = IE + N
  1373. ITAUP = ITAUQ + N
  1374. IWORK = ITAUP + N
  1375. *
  1376. * Bidiagonalize R in WORK(IU), copying result to VT
  1377. * (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
  1378. *
  1379. CALL SGEBRD( N, N, WORK( IU ), LDWRKU, S,
  1380. $ WORK( IE ), WORK( ITAUQ ),
  1381. $ WORK( ITAUP ), WORK( IWORK ),
  1382. $ LWORK-IWORK+1, IERR )
  1383. CALL SLACPY( 'U', N, N, WORK( IU ), LDWRKU, VT,
  1384. $ LDVT )
  1385. *
  1386. * Generate left bidiagonalizing vectors in WORK(IU)
  1387. * (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB)
  1388. *
  1389. CALL SORGBR( 'Q', N, N, N, WORK( IU ), LDWRKU,
  1390. $ WORK( ITAUQ ), WORK( IWORK ),
  1391. $ LWORK-IWORK+1, IERR )
  1392. *
  1393. * Generate right bidiagonalizing vectors in VT
  1394. * (Workspace: need N*N+4*N-1,
  1395. * prefer N*N+3*N+(N-1)*NB)
  1396. *
  1397. CALL SORGBR( 'P', N, N, N, VT, LDVT, WORK( ITAUP ),
  1398. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1399. IWORK = IE + N
  1400. *
  1401. * Perform bidiagonal QR iteration, computing left
  1402. * singular vectors of R in WORK(IU) and computing
  1403. * right singular vectors of R in VT
  1404. * (Workspace: need N*N+BDSPAC)
  1405. *
  1406. CALL SBDSQR( 'U', N, N, N, 0, S, WORK( IE ), VT,
  1407. $ LDVT, WORK( IU ), LDWRKU, DUM, 1,
  1408. $ WORK( IWORK ), INFO )
  1409. *
  1410. * Multiply Q in A by left singular vectors of R in
  1411. * WORK(IU), storing result in U
  1412. * (Workspace: need N*N)
  1413. *
  1414. CALL SGEMM( 'N', 'N', M, N, N, ONE, A, LDA,
  1415. $ WORK( IU ), LDWRKU, ZERO, U, LDU )
  1416. *
  1417. ELSE
  1418. *
  1419. * Insufficient workspace for a fast algorithm
  1420. *
  1421. ITAU = 1
  1422. IWORK = ITAU + N
  1423. *
  1424. * Compute A=Q*R, copying result to U
  1425. * (Workspace: need 2*N, prefer N+N*NB)
  1426. *
  1427. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1428. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1429. CALL SLACPY( 'L', M, N, A, LDA, U, LDU )
  1430. *
  1431. * Generate Q in U
  1432. * (Workspace: need 2*N, prefer N+N*NB)
  1433. *
  1434. CALL SORGQR( M, N, N, U, LDU, WORK( ITAU ),
  1435. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1436. *
  1437. * Copy R to VT, zeroing out below it
  1438. *
  1439. CALL SLACPY( 'U', N, N, A, LDA, VT, LDVT )
  1440. IF( N.GT.1 )
  1441. $ CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1442. $ VT( 2, 1 ), LDVT )
  1443. IE = ITAU
  1444. ITAUQ = IE + N
  1445. ITAUP = ITAUQ + N
  1446. IWORK = ITAUP + N
  1447. *
  1448. * Bidiagonalize R in VT
  1449. * (Workspace: need 4*N, prefer 3*N+2*N*NB)
  1450. *
  1451. CALL SGEBRD( N, N, VT, LDVT, S, WORK( IE ),
  1452. $ WORK( ITAUQ ), WORK( ITAUP ),
  1453. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1454. *
  1455. * Multiply Q in U by left bidiagonalizing vectors
  1456. * in VT
  1457. * (Workspace: need 3*N+M, prefer 3*N+M*NB)
  1458. *
  1459. CALL SORMBR( 'Q', 'R', 'N', M, N, N, VT, LDVT,
  1460. $ WORK( ITAUQ ), U, LDU, WORK( IWORK ),
  1461. $ LWORK-IWORK+1, IERR )
  1462. *
  1463. * Generate right bidiagonalizing vectors in VT
  1464. * (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB)
  1465. *
  1466. CALL SORGBR( 'P', N, N, N, VT, LDVT, WORK( ITAUP ),
  1467. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1468. IWORK = IE + N
  1469. *
  1470. * Perform bidiagonal QR iteration, computing left
  1471. * singular vectors of A in U and computing right
  1472. * singular vectors of A in VT
  1473. * (Workspace: need BDSPAC)
  1474. *
  1475. CALL SBDSQR( 'U', N, N, M, 0, S, WORK( IE ), VT,
  1476. $ LDVT, U, LDU, DUM, 1, WORK( IWORK ),
  1477. $ INFO )
  1478. *
  1479. END IF
  1480. *
  1481. END IF
  1482. *
  1483. ELSE IF( WNTUA ) THEN
  1484. *
  1485. IF( WNTVN ) THEN
  1486. *
  1487. * Path 7 (M much larger than N, JOBU='A', JOBVT='N')
  1488. * M left singular vectors to be computed in U and
  1489. * no right singular vectors to be computed
  1490. *
  1491. IF( LWORK.GE.N*N+MAX( N+M, 4*N, BDSPAC ) ) THEN
  1492. *
  1493. * Sufficient workspace for a fast algorithm
  1494. *
  1495. IR = 1
  1496. IF( LWORK.GE.WRKBL+LDA*N ) THEN
  1497. *
  1498. * WORK(IR) is LDA by N
  1499. *
  1500. LDWRKR = LDA
  1501. ELSE
  1502. *
  1503. * WORK(IR) is N by N
  1504. *
  1505. LDWRKR = N
  1506. END IF
  1507. ITAU = IR + LDWRKR*N
  1508. IWORK = ITAU + N
  1509. *
  1510. * Compute A=Q*R, copying result to U
  1511. * (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
  1512. *
  1513. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1514. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1515. CALL SLACPY( 'L', M, N, A, LDA, U, LDU )
  1516. *
  1517. * Copy R to WORK(IR), zeroing out below it
  1518. *
  1519. CALL SLACPY( 'U', N, N, A, LDA, WORK( IR ),
  1520. $ LDWRKR )
  1521. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1522. $ WORK( IR+1 ), LDWRKR )
  1523. *
  1524. * Generate Q in U
  1525. * (Workspace: need N*N+N+M, prefer N*N+N+M*NB)
  1526. *
  1527. CALL SORGQR( M, M, N, U, LDU, WORK( ITAU ),
  1528. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1529. IE = ITAU
  1530. ITAUQ = IE + N
  1531. ITAUP = ITAUQ + N
  1532. IWORK = ITAUP + N
  1533. *
  1534. * Bidiagonalize R in WORK(IR)
  1535. * (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
  1536. *
  1537. CALL SGEBRD( N, N, WORK( IR ), LDWRKR, S,
  1538. $ WORK( IE ), WORK( ITAUQ ),
  1539. $ WORK( ITAUP ), WORK( IWORK ),
  1540. $ LWORK-IWORK+1, IERR )
  1541. *
  1542. * Generate left bidiagonalizing vectors in WORK(IR)
  1543. * (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB)
  1544. *
  1545. CALL SORGBR( 'Q', N, N, N, WORK( IR ), LDWRKR,
  1546. $ WORK( ITAUQ ), WORK( IWORK ),
  1547. $ LWORK-IWORK+1, IERR )
  1548. IWORK = IE + N
  1549. *
  1550. * Perform bidiagonal QR iteration, computing left
  1551. * singular vectors of R in WORK(IR)
  1552. * (Workspace: need N*N+BDSPAC)
  1553. *
  1554. CALL SBDSQR( 'U', N, 0, N, 0, S, WORK( IE ), DUM,
  1555. $ 1, WORK( IR ), LDWRKR, DUM, 1,
  1556. $ WORK( IWORK ), INFO )
  1557. *
  1558. * Multiply Q in U by left singular vectors of R in
  1559. * WORK(IR), storing result in A
  1560. * (Workspace: need N*N)
  1561. *
  1562. CALL SGEMM( 'N', 'N', M, N, N, ONE, U, LDU,
  1563. $ WORK( IR ), LDWRKR, ZERO, A, LDA )
  1564. *
  1565. * Copy left singular vectors of A from A to U
  1566. *
  1567. CALL SLACPY( 'F', M, N, A, LDA, U, LDU )
  1568. *
  1569. ELSE
  1570. *
  1571. * Insufficient workspace for a fast algorithm
  1572. *
  1573. ITAU = 1
  1574. IWORK = ITAU + N
  1575. *
  1576. * Compute A=Q*R, copying result to U
  1577. * (Workspace: need 2*N, prefer N+N*NB)
  1578. *
  1579. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1580. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1581. CALL SLACPY( 'L', M, N, A, LDA, U, LDU )
  1582. *
  1583. * Generate Q in U
  1584. * (Workspace: need N+M, prefer N+M*NB)
  1585. *
  1586. CALL SORGQR( M, M, N, U, LDU, WORK( ITAU ),
  1587. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1588. IE = ITAU
  1589. ITAUQ = IE + N
  1590. ITAUP = ITAUQ + N
  1591. IWORK = ITAUP + N
  1592. *
  1593. * Zero out below R in A
  1594. *
  1595. IF( N .GT. 1 ) THEN
  1596. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1597. $ A( 2, 1 ), LDA )
  1598. END IF
  1599. *
  1600. * Bidiagonalize R in A
  1601. * (Workspace: need 4*N, prefer 3*N+2*N*NB)
  1602. *
  1603. CALL SGEBRD( N, N, A, LDA, S, WORK( IE ),
  1604. $ WORK( ITAUQ ), WORK( ITAUP ),
  1605. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1606. *
  1607. * Multiply Q in U by left bidiagonalizing vectors
  1608. * in A
  1609. * (Workspace: need 3*N+M, prefer 3*N+M*NB)
  1610. *
  1611. CALL SORMBR( 'Q', 'R', 'N', M, N, N, A, LDA,
  1612. $ WORK( ITAUQ ), U, LDU, WORK( IWORK ),
  1613. $ LWORK-IWORK+1, IERR )
  1614. IWORK = IE + N
  1615. *
  1616. * Perform bidiagonal QR iteration, computing left
  1617. * singular vectors of A in U
  1618. * (Workspace: need BDSPAC)
  1619. *
  1620. CALL SBDSQR( 'U', N, 0, M, 0, S, WORK( IE ), DUM,
  1621. $ 1, U, LDU, DUM, 1, WORK( IWORK ),
  1622. $ INFO )
  1623. *
  1624. END IF
  1625. *
  1626. ELSE IF( WNTVO ) THEN
  1627. *
  1628. * Path 8 (M much larger than N, JOBU='A', JOBVT='O')
  1629. * M left singular vectors to be computed in U and
  1630. * N right singular vectors to be overwritten on A
  1631. *
  1632. IF( LWORK.GE.2*N*N+MAX( N+M, 4*N, BDSPAC ) ) THEN
  1633. *
  1634. * Sufficient workspace for a fast algorithm
  1635. *
  1636. IU = 1
  1637. IF( LWORK.GE.WRKBL+2*LDA*N ) THEN
  1638. *
  1639. * WORK(IU) is LDA by N and WORK(IR) is LDA by N
  1640. *
  1641. LDWRKU = LDA
  1642. IR = IU + LDWRKU*N
  1643. LDWRKR = LDA
  1644. ELSE IF( LWORK.GE.WRKBL+( LDA+N )*N ) THEN
  1645. *
  1646. * WORK(IU) is LDA by N and WORK(IR) is N by N
  1647. *
  1648. LDWRKU = LDA
  1649. IR = IU + LDWRKU*N
  1650. LDWRKR = N
  1651. ELSE
  1652. *
  1653. * WORK(IU) is N by N and WORK(IR) is N by N
  1654. *
  1655. LDWRKU = N
  1656. IR = IU + LDWRKU*N
  1657. LDWRKR = N
  1658. END IF
  1659. ITAU = IR + LDWRKR*N
  1660. IWORK = ITAU + N
  1661. *
  1662. * Compute A=Q*R, copying result to U
  1663. * (Workspace: need 2*N*N+2*N, prefer 2*N*N+N+N*NB)
  1664. *
  1665. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1666. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1667. CALL SLACPY( 'L', M, N, A, LDA, U, LDU )
  1668. *
  1669. * Generate Q in U
  1670. * (Workspace: need 2*N*N+N+M, prefer 2*N*N+N+M*NB)
  1671. *
  1672. CALL SORGQR( M, M, N, U, LDU, WORK( ITAU ),
  1673. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1674. *
  1675. * Copy R to WORK(IU), zeroing out below it
  1676. *
  1677. CALL SLACPY( 'U', N, N, A, LDA, WORK( IU ),
  1678. $ LDWRKU )
  1679. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1680. $ WORK( IU+1 ), LDWRKU )
  1681. IE = ITAU
  1682. ITAUQ = IE + N
  1683. ITAUP = ITAUQ + N
  1684. IWORK = ITAUP + N
  1685. *
  1686. * Bidiagonalize R in WORK(IU), copying result to
  1687. * WORK(IR)
  1688. * (Workspace: need 2*N*N+4*N,
  1689. * prefer 2*N*N+3*N+2*N*NB)
  1690. *
  1691. CALL SGEBRD( N, N, WORK( IU ), LDWRKU, S,
  1692. $ WORK( IE ), WORK( ITAUQ ),
  1693. $ WORK( ITAUP ), WORK( IWORK ),
  1694. $ LWORK-IWORK+1, IERR )
  1695. CALL SLACPY( 'U', N, N, WORK( IU ), LDWRKU,
  1696. $ WORK( IR ), LDWRKR )
  1697. *
  1698. * Generate left bidiagonalizing vectors in WORK(IU)
  1699. * (Workspace: need 2*N*N+4*N, prefer 2*N*N+3*N+N*NB)
  1700. *
  1701. CALL SORGBR( 'Q', N, N, N, WORK( IU ), LDWRKU,
  1702. $ WORK( ITAUQ ), WORK( IWORK ),
  1703. $ LWORK-IWORK+1, IERR )
  1704. *
  1705. * Generate right bidiagonalizing vectors in WORK(IR)
  1706. * (Workspace: need 2*N*N+4*N-1,
  1707. * prefer 2*N*N+3*N+(N-1)*NB)
  1708. *
  1709. CALL SORGBR( 'P', N, N, N, WORK( IR ), LDWRKR,
  1710. $ WORK( ITAUP ), WORK( IWORK ),
  1711. $ LWORK-IWORK+1, IERR )
  1712. IWORK = IE + N
  1713. *
  1714. * Perform bidiagonal QR iteration, computing left
  1715. * singular vectors of R in WORK(IU) and computing
  1716. * right singular vectors of R in WORK(IR)
  1717. * (Workspace: need 2*N*N+BDSPAC)
  1718. *
  1719. CALL SBDSQR( 'U', N, N, N, 0, S, WORK( IE ),
  1720. $ WORK( IR ), LDWRKR, WORK( IU ),
  1721. $ LDWRKU, DUM, 1, WORK( IWORK ), INFO )
  1722. *
  1723. * Multiply Q in U by left singular vectors of R in
  1724. * WORK(IU), storing result in A
  1725. * (Workspace: need N*N)
  1726. *
  1727. CALL SGEMM( 'N', 'N', M, N, N, ONE, U, LDU,
  1728. $ WORK( IU ), LDWRKU, ZERO, A, LDA )
  1729. *
  1730. * Copy left singular vectors of A from A to U
  1731. *
  1732. CALL SLACPY( 'F', M, N, A, LDA, U, LDU )
  1733. *
  1734. * Copy right singular vectors of R from WORK(IR) to A
  1735. *
  1736. CALL SLACPY( 'F', N, N, WORK( IR ), LDWRKR, A,
  1737. $ LDA )
  1738. *
  1739. ELSE
  1740. *
  1741. * Insufficient workspace for a fast algorithm
  1742. *
  1743. ITAU = 1
  1744. IWORK = ITAU + N
  1745. *
  1746. * Compute A=Q*R, copying result to U
  1747. * (Workspace: need 2*N, prefer N+N*NB)
  1748. *
  1749. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1750. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1751. CALL SLACPY( 'L', M, N, A, LDA, U, LDU )
  1752. *
  1753. * Generate Q in U
  1754. * (Workspace: need N+M, prefer N+M*NB)
  1755. *
  1756. CALL SORGQR( M, M, N, U, LDU, WORK( ITAU ),
  1757. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1758. IE = ITAU
  1759. ITAUQ = IE + N
  1760. ITAUP = ITAUQ + N
  1761. IWORK = ITAUP + N
  1762. *
  1763. * Zero out below R in A
  1764. *
  1765. IF( N .GT. 1 ) THEN
  1766. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1767. $ A( 2, 1 ), LDA )
  1768. END IF
  1769. *
  1770. * Bidiagonalize R in A
  1771. * (Workspace: need 4*N, prefer 3*N+2*N*NB)
  1772. *
  1773. CALL SGEBRD( N, N, A, LDA, S, WORK( IE ),
  1774. $ WORK( ITAUQ ), WORK( ITAUP ),
  1775. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1776. *
  1777. * Multiply Q in U by left bidiagonalizing vectors
  1778. * in A
  1779. * (Workspace: need 3*N+M, prefer 3*N+M*NB)
  1780. *
  1781. CALL SORMBR( 'Q', 'R', 'N', M, N, N, A, LDA,
  1782. $ WORK( ITAUQ ), U, LDU, WORK( IWORK ),
  1783. $ LWORK-IWORK+1, IERR )
  1784. *
  1785. * Generate right bidiagonalizing vectors in A
  1786. * (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB)
  1787. *
  1788. CALL SORGBR( 'P', N, N, N, A, LDA, WORK( ITAUP ),
  1789. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1790. IWORK = IE + N
  1791. *
  1792. * Perform bidiagonal QR iteration, computing left
  1793. * singular vectors of A in U and computing right
  1794. * singular vectors of A in A
  1795. * (Workspace: need BDSPAC)
  1796. *
  1797. CALL SBDSQR( 'U', N, N, M, 0, S, WORK( IE ), A,
  1798. $ LDA, U, LDU, DUM, 1, WORK( IWORK ),
  1799. $ INFO )
  1800. *
  1801. END IF
  1802. *
  1803. ELSE IF( WNTVAS ) THEN
  1804. *
  1805. * Path 9 (M much larger than N, JOBU='A', JOBVT='S'
  1806. * or 'A')
  1807. * M left singular vectors to be computed in U and
  1808. * N right singular vectors to be computed in VT
  1809. *
  1810. IF( LWORK.GE.N*N+MAX( N+M, 4*N, BDSPAC ) ) THEN
  1811. *
  1812. * Sufficient workspace for a fast algorithm
  1813. *
  1814. IU = 1
  1815. IF( LWORK.GE.WRKBL+LDA*N ) THEN
  1816. *
  1817. * WORK(IU) is LDA by N
  1818. *
  1819. LDWRKU = LDA
  1820. ELSE
  1821. *
  1822. * WORK(IU) is N by N
  1823. *
  1824. LDWRKU = N
  1825. END IF
  1826. ITAU = IU + LDWRKU*N
  1827. IWORK = ITAU + N
  1828. *
  1829. * Compute A=Q*R, copying result to U
  1830. * (Workspace: need N*N+2*N, prefer N*N+N+N*NB)
  1831. *
  1832. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1833. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1834. CALL SLACPY( 'L', M, N, A, LDA, U, LDU )
  1835. *
  1836. * Generate Q in U
  1837. * (Workspace: need N*N+N+M, prefer N*N+N+M*NB)
  1838. *
  1839. CALL SORGQR( M, M, N, U, LDU, WORK( ITAU ),
  1840. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1841. *
  1842. * Copy R to WORK(IU), zeroing out below it
  1843. *
  1844. CALL SLACPY( 'U', N, N, A, LDA, WORK( IU ),
  1845. $ LDWRKU )
  1846. CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1847. $ WORK( IU+1 ), LDWRKU )
  1848. IE = ITAU
  1849. ITAUQ = IE + N
  1850. ITAUP = ITAUQ + N
  1851. IWORK = ITAUP + N
  1852. *
  1853. * Bidiagonalize R in WORK(IU), copying result to VT
  1854. * (Workspace: need N*N+4*N, prefer N*N+3*N+2*N*NB)
  1855. *
  1856. CALL SGEBRD( N, N, WORK( IU ), LDWRKU, S,
  1857. $ WORK( IE ), WORK( ITAUQ ),
  1858. $ WORK( ITAUP ), WORK( IWORK ),
  1859. $ LWORK-IWORK+1, IERR )
  1860. CALL SLACPY( 'U', N, N, WORK( IU ), LDWRKU, VT,
  1861. $ LDVT )
  1862. *
  1863. * Generate left bidiagonalizing vectors in WORK(IU)
  1864. * (Workspace: need N*N+4*N, prefer N*N+3*N+N*NB)
  1865. *
  1866. CALL SORGBR( 'Q', N, N, N, WORK( IU ), LDWRKU,
  1867. $ WORK( ITAUQ ), WORK( IWORK ),
  1868. $ LWORK-IWORK+1, IERR )
  1869. *
  1870. * Generate right bidiagonalizing vectors in VT
  1871. * (Workspace: need N*N+4*N-1,
  1872. * prefer N*N+3*N+(N-1)*NB)
  1873. *
  1874. CALL SORGBR( 'P', N, N, N, VT, LDVT, WORK( ITAUP ),
  1875. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1876. IWORK = IE + N
  1877. *
  1878. * Perform bidiagonal QR iteration, computing left
  1879. * singular vectors of R in WORK(IU) and computing
  1880. * right singular vectors of R in VT
  1881. * (Workspace: need N*N+BDSPAC)
  1882. *
  1883. CALL SBDSQR( 'U', N, N, N, 0, S, WORK( IE ), VT,
  1884. $ LDVT, WORK( IU ), LDWRKU, DUM, 1,
  1885. $ WORK( IWORK ), INFO )
  1886. *
  1887. * Multiply Q in U by left singular vectors of R in
  1888. * WORK(IU), storing result in A
  1889. * (Workspace: need N*N)
  1890. *
  1891. CALL SGEMM( 'N', 'N', M, N, N, ONE, U, LDU,
  1892. $ WORK( IU ), LDWRKU, ZERO, A, LDA )
  1893. *
  1894. * Copy left singular vectors of A from A to U
  1895. *
  1896. CALL SLACPY( 'F', M, N, A, LDA, U, LDU )
  1897. *
  1898. ELSE
  1899. *
  1900. * Insufficient workspace for a fast algorithm
  1901. *
  1902. ITAU = 1
  1903. IWORK = ITAU + N
  1904. *
  1905. * Compute A=Q*R, copying result to U
  1906. * (Workspace: need 2*N, prefer N+N*NB)
  1907. *
  1908. CALL SGEQRF( M, N, A, LDA, WORK( ITAU ),
  1909. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1910. CALL SLACPY( 'L', M, N, A, LDA, U, LDU )
  1911. *
  1912. * Generate Q in U
  1913. * (Workspace: need N+M, prefer N+M*NB)
  1914. *
  1915. CALL SORGQR( M, M, N, U, LDU, WORK( ITAU ),
  1916. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1917. *
  1918. * Copy R from A to VT, zeroing out below it
  1919. *
  1920. CALL SLACPY( 'U', N, N, A, LDA, VT, LDVT )
  1921. IF( N.GT.1 )
  1922. $ CALL SLASET( 'L', N-1, N-1, ZERO, ZERO,
  1923. $ VT( 2, 1 ), LDVT )
  1924. IE = ITAU
  1925. ITAUQ = IE + N
  1926. ITAUP = ITAUQ + N
  1927. IWORK = ITAUP + N
  1928. *
  1929. * Bidiagonalize R in VT
  1930. * (Workspace: need 4*N, prefer 3*N+2*N*NB)
  1931. *
  1932. CALL SGEBRD( N, N, VT, LDVT, S, WORK( IE ),
  1933. $ WORK( ITAUQ ), WORK( ITAUP ),
  1934. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1935. *
  1936. * Multiply Q in U by left bidiagonalizing vectors
  1937. * in VT
  1938. * (Workspace: need 3*N+M, prefer 3*N+M*NB)
  1939. *
  1940. CALL SORMBR( 'Q', 'R', 'N', M, N, N, VT, LDVT,
  1941. $ WORK( ITAUQ ), U, LDU, WORK( IWORK ),
  1942. $ LWORK-IWORK+1, IERR )
  1943. *
  1944. * Generate right bidiagonalizing vectors in VT
  1945. * (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB)
  1946. *
  1947. CALL SORGBR( 'P', N, N, N, VT, LDVT, WORK( ITAUP ),
  1948. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1949. IWORK = IE + N
  1950. *
  1951. * Perform bidiagonal QR iteration, computing left
  1952. * singular vectors of A in U and computing right
  1953. * singular vectors of A in VT
  1954. * (Workspace: need BDSPAC)
  1955. *
  1956. CALL SBDSQR( 'U', N, N, M, 0, S, WORK( IE ), VT,
  1957. $ LDVT, U, LDU, DUM, 1, WORK( IWORK ),
  1958. $ INFO )
  1959. *
  1960. END IF
  1961. *
  1962. END IF
  1963. *
  1964. END IF
  1965. *
  1966. ELSE
  1967. *
  1968. * M .LT. MNTHR
  1969. *
  1970. * Path 10 (M at least N, but not much larger)
  1971. * Reduce to bidiagonal form without QR decomposition
  1972. *
  1973. IE = 1
  1974. ITAUQ = IE + N
  1975. ITAUP = ITAUQ + N
  1976. IWORK = ITAUP + N
  1977. *
  1978. * Bidiagonalize A
  1979. * (Workspace: need 3*N+M, prefer 3*N+(M+N)*NB)
  1980. *
  1981. CALL SGEBRD( M, N, A, LDA, S, WORK( IE ), WORK( ITAUQ ),
  1982. $ WORK( ITAUP ), WORK( IWORK ), LWORK-IWORK+1,
  1983. $ IERR )
  1984. IF( WNTUAS ) THEN
  1985. *
  1986. * If left singular vectors desired in U, copy result to U
  1987. * and generate left bidiagonalizing vectors in U
  1988. * (Workspace: need 3*N+NCU, prefer 3*N+NCU*NB)
  1989. *
  1990. CALL SLACPY( 'L', M, N, A, LDA, U, LDU )
  1991. IF( WNTUS )
  1992. $ NCU = N
  1993. IF( WNTUA )
  1994. $ NCU = M
  1995. CALL SORGBR( 'Q', M, NCU, N, U, LDU, WORK( ITAUQ ),
  1996. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  1997. END IF
  1998. IF( WNTVAS ) THEN
  1999. *
  2000. * If right singular vectors desired in VT, copy result to
  2001. * VT and generate right bidiagonalizing vectors in VT
  2002. * (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB)
  2003. *
  2004. CALL SLACPY( 'U', N, N, A, LDA, VT, LDVT )
  2005. CALL SORGBR( 'P', N, N, N, VT, LDVT, WORK( ITAUP ),
  2006. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2007. END IF
  2008. IF( WNTUO ) THEN
  2009. *
  2010. * If left singular vectors desired in A, generate left
  2011. * bidiagonalizing vectors in A
  2012. * (Workspace: need 4*N, prefer 3*N+N*NB)
  2013. *
  2014. CALL SORGBR( 'Q', M, N, N, A, LDA, WORK( ITAUQ ),
  2015. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2016. END IF
  2017. IF( WNTVO ) THEN
  2018. *
  2019. * If right singular vectors desired in A, generate right
  2020. * bidiagonalizing vectors in A
  2021. * (Workspace: need 4*N-1, prefer 3*N+(N-1)*NB)
  2022. *
  2023. CALL SORGBR( 'P', N, N, N, A, LDA, WORK( ITAUP ),
  2024. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2025. END IF
  2026. IWORK = IE + N
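*
*        Set the number of rows of U (NRU) and the number of columns
*        of V**T (NCVT) that SBDSQR must update, according to which
*        singular vectors were requested and where they are stored
*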
  2027. IF( WNTUAS .OR. WNTUO )
  2028. $ NRU = M
  2029. IF( WNTUN )
  2030. $ NRU = 0
  2031. IF( WNTVAS .OR. WNTVO )
  2032. $ NCVT = N
  2033. IF( WNTVN )
  2034. $ NCVT = 0
  2035. IF( ( .NOT.WNTUO ) .AND. ( .NOT.WNTVO ) ) THEN
  2036. *
  2037. * Perform bidiagonal QR iteration, if desired, computing
  2038. * left singular vectors in U and computing right singular
  2039. * vectors in VT
  2040. * (Workspace: need BDSPAC)
  2041. *
  2042. CALL SBDSQR( 'U', N, NCVT, NRU, 0, S, WORK( IE ), VT,
  2043. $ LDVT, U, LDU, DUM, 1, WORK( IWORK ), INFO )
  2044. ELSE IF( ( .NOT.WNTUO ) .AND. WNTVO ) THEN
  2045. *
  2046. * Perform bidiagonal QR iteration, if desired, computing
  2047. * left singular vectors in U and computing right singular
  2048. * vectors in A
  2049. * (Workspace: need BDSPAC)
  2050. *
  2051. CALL SBDSQR( 'U', N, NCVT, NRU, 0, S, WORK( IE ), A, LDA,
  2052. $ U, LDU, DUM, 1, WORK( IWORK ), INFO )
  2053. ELSE
  2054. *
  2055. * Perform bidiagonal QR iteration, if desired, computing
  2056. * left singular vectors in A and computing right singular
  2057. * vectors in VT
  2058. * (Workspace: need BDSPAC)
  2059. *
  2060. CALL SBDSQR( 'U', N, NCVT, NRU, 0, S, WORK( IE ), VT,
  2061. $ LDVT, A, LDA, DUM, 1, WORK( IWORK ), INFO )
  2062. END IF
  2063. *
  2064. END IF
  2065. *
  2066. ELSE
  2067. *
  2068. * A has more columns than rows. If A has sufficiently more
  2069. * columns than rows, first reduce using the LQ decomposition (if
  2070. * sufficient workspace available)
  2071. *
  2072. IF( N.GE.MNTHR ) THEN
  2073. *
  2074. IF( WNTVN ) THEN
  2075. *
  2076. * Path 1t(N much larger than M, JOBVT='N')
  2077. * No right singular vectors to be computed
  2078. *
  2079. ITAU = 1
  2080. IWORK = ITAU + M
  2081. *
  2082. * Compute A=L*Q
  2083. * (Workspace: need 2*M, prefer M+M*NB)
  2084. *
  2085. CALL SGELQF( M, N, A, LDA, WORK( ITAU ), WORK( IWORK ),
  2086. $ LWORK-IWORK+1, IERR )
  2087. *
  2088. * Zero out above L
  2089. *
  2090. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO, A( 1, 2 ), LDA )
  2091. IE = 1
  2092. ITAUQ = IE + M
  2093. ITAUP = ITAUQ + M
  2094. IWORK = ITAUP + M
  2095. *
  2096. * Bidiagonalize L in A
  2097. * (Workspace: need 4*M, prefer 3*M+2*M*NB)
  2098. *
  2099. CALL SGEBRD( M, M, A, LDA, S, WORK( IE ), WORK( ITAUQ ),
  2100. $ WORK( ITAUP ), WORK( IWORK ), LWORK-IWORK+1,
  2101. $ IERR )
  2102. IF( WNTUO .OR. WNTUAS ) THEN
  2103. *
  2104. * If left singular vectors desired, generate Q
  2105. * (Workspace: need 4*M, prefer 3*M+M*NB)
  2106. *
  2107. CALL SORGBR( 'Q', M, M, M, A, LDA, WORK( ITAUQ ),
  2108. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2109. END IF
  2110. IWORK = IE + M
  2111. NRU = 0
  2112. IF( WNTUO .OR. WNTUAS )
  2113. $ NRU = M
  2114. *
  2115. * Perform bidiagonal QR iteration, computing left singular
  2116. * vectors of A in A if desired
  2117. * (Workspace: need BDSPAC)
  2118. *
  2119. CALL SBDSQR( 'U', M, 0, NRU, 0, S, WORK( IE ), DUM, 1, A,
  2120. $ LDA, DUM, 1, WORK( IWORK ), INFO )
  2121. *
  2122. * If left singular vectors desired in U, copy them there
  2123. *
  2124. IF( WNTUAS )
  2125. $ CALL SLACPY( 'F', M, M, A, LDA, U, LDU )
  2126. *
  2127. ELSE IF( WNTVO .AND. WNTUN ) THEN
  2128. *
  2129. * Path 2t(N much larger than M, JOBU='N', JOBVT='O')
  2130. * M right singular vectors to be overwritten on A and
  2131. * no left singular vectors to be computed
  2132. *
  2133. IF( LWORK.GE.M*M+MAX( 4*M, BDSPAC ) ) THEN
  2134. *
  2135. * Sufficient workspace for a fast algorithm
  2136. *
  2137. IR = 1
  2138. IF( LWORK.GE.MAX( WRKBL, LDA*N+M )+LDA*M ) THEN
  2139. *
  2140. * WORK(IU) is LDA by N and WORK(IR) is LDA by M
  2141. *
  2142. LDWRKU = LDA
  2143. CHUNK = N
  2144. LDWRKR = LDA
  2145. ELSE IF( LWORK.GE.MAX( WRKBL, LDA*N+M )+M*M ) THEN
  2146. *
  2147. * WORK(IU) is LDA by N and WORK(IR) is M by M
  2148. *
  2149. LDWRKU = LDA
  2150. CHUNK = N
  2151. LDWRKR = M
  2152. ELSE
  2153. *
  2154. * WORK(IU) is M by CHUNK and WORK(IR) is M by M
  2155. *
  2156. LDWRKU = M
  2157. CHUNK = ( LWORK-M*M-M ) / M
  2158. LDWRKR = M
  2159. END IF
  2160. ITAU = IR + LDWRKR*M
  2161. IWORK = ITAU + M
  2162. *
  2163. * Compute A=L*Q
  2164. * (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
  2165. *
  2166. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2167. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2168. *
  2169. * Copy L to WORK(IR) and zero out above it
  2170. *
  2171. CALL SLACPY( 'L', M, M, A, LDA, WORK( IR ), LDWRKR )
  2172. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO,
  2173. $ WORK( IR+LDWRKR ), LDWRKR )
  2174. *
  2175. * Generate Q in A
  2176. * (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
  2177. *
  2178. CALL SORGLQ( M, N, M, A, LDA, WORK( ITAU ),
  2179. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
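*
* The LQ scalar factors in WORK(ITAU) are not needed once Q has
* been generated, so WORK(IE) reuses that space for the
* superdiagonal of the bidiagonal form
*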
  2180. IE = ITAU
  2181. ITAUQ = IE + M
  2182. ITAUP = ITAUQ + M
  2183. IWORK = ITAUP + M
  2184. *
  2185. * Bidiagonalize L in WORK(IR)
  2186. * (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
  2187. *
  2188. CALL SGEBRD( M, M, WORK( IR ), LDWRKR, S, WORK( IE ),
  2189. $ WORK( ITAUQ ), WORK( ITAUP ),
  2190. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2191. *
  2192. * Generate right vectors bidiagonalizing L
  2193. * (Workspace: need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB)
  2194. *
  2195. CALL SORGBR( 'P', M, M, M, WORK( IR ), LDWRKR,
  2196. $ WORK( ITAUP ), WORK( IWORK ),
  2197. $ LWORK-IWORK+1, IERR )
  2198. IWORK = IE + M
  2199. *
  2200. * Perform bidiagonal QR iteration, computing right
  2201. * singular vectors of L in WORK(IR)
  2202. * (Workspace: need M*M+BDSPAC)
  2203. *
  2204. CALL SBDSQR( 'U', M, M, 0, 0, S, WORK( IE ),
  2205. $ WORK( IR ), LDWRKR, DUM, 1, DUM, 1,
  2206. $ WORK( IWORK ), INFO )
  2207. IU = IE + M
  2208. *
  2209. * Multiply right singular vectors of L in WORK(IR) by Q
  2210. * in A, storing result in WORK(IU) and copying to A
  2211. * (Workspace: need M*M+2*M, prefer M*M+M*N+M)
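* Each pass forms BLK = MIN( N-I+1, CHUNK ) columns of the
* product in WORK(IU) and copies them back into A, so at most an
* M by CHUNK buffer is required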
  2212. *
  2213. DO 30 I = 1, N, CHUNK
  2214. BLK = MIN( N-I+1, CHUNK )
  2215. CALL SGEMM( 'N', 'N', M, BLK, M, ONE, WORK( IR ),
  2216. $ LDWRKR, A( 1, I ), LDA, ZERO,
  2217. $ WORK( IU ), LDWRKU )
  2218. CALL SLACPY( 'F', M, BLK, WORK( IU ), LDWRKU,
  2219. $ A( 1, I ), LDA )
  2220. 30 CONTINUE
  2221. *
  2222. ELSE
  2223. *
  2224. * Insufficient workspace for a fast algorithm
  2225. *
  2226. IE = 1
  2227. ITAUQ = IE + M
  2228. ITAUP = ITAUQ + M
  2229. IWORK = ITAUP + M
  2230. *
  2231. * Bidiagonalize A
  2232. * (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
  2233. *
  2234. CALL SGEBRD( M, N, A, LDA, S, WORK( IE ),
  2235. $ WORK( ITAUQ ), WORK( ITAUP ),
  2236. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2237. *
  2238. * Generate right vectors bidiagonalizing A
  2239. * (Workspace: need 4*M, prefer 3*M+M*NB)
  2240. *
  2241. CALL SORGBR( 'P', M, N, M, A, LDA, WORK( ITAUP ),
  2242. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2243. IWORK = IE + M
  2244. *
  2245. * Perform bidiagonal QR iteration, computing right
  2246. * singular vectors of A in A
  2247. * (Workspace: need BDSPAC)
  2248. *
  2249. CALL SBDSQR( 'L', M, N, 0, 0, S, WORK( IE ), A, LDA,
  2250. $ DUM, 1, DUM, 1, WORK( IWORK ), INFO )
  2251. *
  2252. END IF
  2253. *
  2254. ELSE IF( WNTVO .AND. WNTUAS ) THEN
  2255. *
  2256. * Path 3t(N much larger than M, JOBU='S' or 'A', JOBVT='O')
  2257. * M right singular vectors to be overwritten on A and
  2258. * M left singular vectors to be computed in U
  2259. *
  2260. IF( LWORK.GE.M*M+MAX( 4*M, BDSPAC ) ) THEN
  2261. *
  2262. * Sufficient workspace for a fast algorithm
  2263. *
  2264. IR = 1
  2265. IF( LWORK.GE.MAX( WRKBL, LDA*N+M )+LDA*M ) THEN
  2266. *
  2267. * WORK(IU) is LDA by N and WORK(IR) is LDA by M
  2268. *
  2269. LDWRKU = LDA
  2270. CHUNK = N
  2271. LDWRKR = LDA
  2272. ELSE IF( LWORK.GE.MAX( WRKBL, LDA*N+M )+M*M ) THEN
  2273. *
  2274. * WORK(IU) is LDA by N and WORK(IR) is M by M
  2275. *
  2276. LDWRKU = LDA
  2277. CHUNK = N
  2278. LDWRKR = M
  2279. ELSE
  2280. *
  2281. * WORK(IU) is M by CHUNK and WORK(IR) is M by M
  2282. *
  2283. LDWRKU = M
  2284. CHUNK = ( LWORK-M*M-M ) / M
  2285. LDWRKR = M
  2286. END IF
  2287. ITAU = IR + LDWRKR*M
  2288. IWORK = ITAU + M
  2289. *
  2290. * Compute A=L*Q
  2291. * (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
  2292. *
  2293. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2294. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2295. *
2296. * Copy L to U, zeroing out above it
  2297. *
  2298. CALL SLACPY( 'L', M, M, A, LDA, U, LDU )
  2299. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO, U( 1, 2 ),
  2300. $ LDU )
  2301. *
  2302. * Generate Q in A
  2303. * (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
  2304. *
  2305. CALL SORGLQ( M, N, M, A, LDA, WORK( ITAU ),
  2306. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2307. IE = ITAU
  2308. ITAUQ = IE + M
  2309. ITAUP = ITAUQ + M
  2310. IWORK = ITAUP + M
  2311. *
  2312. * Bidiagonalize L in U, copying result to WORK(IR)
  2313. * (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
  2314. *
  2315. CALL SGEBRD( M, M, U, LDU, S, WORK( IE ),
  2316. $ WORK( ITAUQ ), WORK( ITAUP ),
  2317. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2318. CALL SLACPY( 'U', M, M, U, LDU, WORK( IR ), LDWRKR )
  2319. *
  2320. * Generate right vectors bidiagonalizing L in WORK(IR)
  2321. * (Workspace: need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB)
  2322. *
  2323. CALL SORGBR( 'P', M, M, M, WORK( IR ), LDWRKR,
  2324. $ WORK( ITAUP ), WORK( IWORK ),
  2325. $ LWORK-IWORK+1, IERR )
  2326. *
  2327. * Generate left vectors bidiagonalizing L in U
  2328. * (Workspace: need M*M+4*M, prefer M*M+3*M+M*NB)
  2329. *
  2330. CALL SORGBR( 'Q', M, M, M, U, LDU, WORK( ITAUQ ),
  2331. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2332. IWORK = IE + M
  2333. *
  2334. * Perform bidiagonal QR iteration, computing left
  2335. * singular vectors of L in U, and computing right
  2336. * singular vectors of L in WORK(IR)
  2337. * (Workspace: need M*M+BDSPAC)
  2338. *
  2339. CALL SBDSQR( 'U', M, M, M, 0, S, WORK( IE ),
  2340. $ WORK( IR ), LDWRKR, U, LDU, DUM, 1,
  2341. $ WORK( IWORK ), INFO )
  2342. IU = IE + M
  2343. *
  2344. * Multiply right singular vectors of L in WORK(IR) by Q
  2345. * in A, storing result in WORK(IU) and copying to A
2346. * (Workspace: need M*M+2*M, prefer M*M+M*N+M)
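* As in Path 2t above, the product is formed BLK columns at a
* time so the buffer WORK(IU) never exceeds M by CHUNK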
  2347. *
  2348. DO 40 I = 1, N, CHUNK
  2349. BLK = MIN( N-I+1, CHUNK )
  2350. CALL SGEMM( 'N', 'N', M, BLK, M, ONE, WORK( IR ),
  2351. $ LDWRKR, A( 1, I ), LDA, ZERO,
  2352. $ WORK( IU ), LDWRKU )
  2353. CALL SLACPY( 'F', M, BLK, WORK( IU ), LDWRKU,
  2354. $ A( 1, I ), LDA )
  2355. 40 CONTINUE
  2356. *
  2357. ELSE
  2358. *
  2359. * Insufficient workspace for a fast algorithm
  2360. *
  2361. ITAU = 1
  2362. IWORK = ITAU + M
  2363. *
  2364. * Compute A=L*Q
  2365. * (Workspace: need 2*M, prefer M+M*NB)
  2366. *
  2367. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2368. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2369. *
  2370. * Copy L to U, zeroing out above it
  2371. *
  2372. CALL SLACPY( 'L', M, M, A, LDA, U, LDU )
  2373. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO, U( 1, 2 ),
  2374. $ LDU )
  2375. *
  2376. * Generate Q in A
  2377. * (Workspace: need 2*M, prefer M+M*NB)
  2378. *
  2379. CALL SORGLQ( M, N, M, A, LDA, WORK( ITAU ),
  2380. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2381. IE = ITAU
  2382. ITAUQ = IE + M
  2383. ITAUP = ITAUQ + M
  2384. IWORK = ITAUP + M
  2385. *
  2386. * Bidiagonalize L in U
  2387. * (Workspace: need 4*M, prefer 3*M+2*M*NB)
  2388. *
  2389. CALL SGEBRD( M, M, U, LDU, S, WORK( IE ),
  2390. $ WORK( ITAUQ ), WORK( ITAUP ),
  2391. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2392. *
  2393. * Multiply right vectors bidiagonalizing L by Q in A
  2394. * (Workspace: need 3*M+N, prefer 3*M+N*NB)
  2395. *
  2396. CALL SORMBR( 'P', 'L', 'T', M, N, M, U, LDU,
  2397. $ WORK( ITAUP ), A, LDA, WORK( IWORK ),
  2398. $ LWORK-IWORK+1, IERR )
  2399. *
  2400. * Generate left vectors bidiagonalizing L in U
  2401. * (Workspace: need 4*M, prefer 3*M+M*NB)
  2402. *
  2403. CALL SORGBR( 'Q', M, M, M, U, LDU, WORK( ITAUQ ),
  2404. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2405. IWORK = IE + M
  2406. *
  2407. * Perform bidiagonal QR iteration, computing left
  2408. * singular vectors of A in U and computing right
  2409. * singular vectors of A in A
  2410. * (Workspace: need BDSPAC)
  2411. *
  2412. CALL SBDSQR( 'U', M, N, M, 0, S, WORK( IE ), A, LDA,
  2413. $ U, LDU, DUM, 1, WORK( IWORK ), INFO )
  2414. *
  2415. END IF
  2416. *
  2417. ELSE IF( WNTVS ) THEN
  2418. *
  2419. IF( WNTUN ) THEN
  2420. *
  2421. * Path 4t(N much larger than M, JOBU='N', JOBVT='S')
  2422. * M right singular vectors to be computed in VT and
  2423. * no left singular vectors to be computed
  2424. *
  2425. IF( LWORK.GE.M*M+MAX( 4*M, BDSPAC ) ) THEN
  2426. *
  2427. * Sufficient workspace for a fast algorithm
  2428. *
  2429. IR = 1
  2430. IF( LWORK.GE.WRKBL+LDA*M ) THEN
  2431. *
  2432. * WORK(IR) is LDA by M
  2433. *
  2434. LDWRKR = LDA
  2435. ELSE
  2436. *
  2437. * WORK(IR) is M by M
  2438. *
  2439. LDWRKR = M
  2440. END IF
  2441. ITAU = IR + LDWRKR*M
  2442. IWORK = ITAU + M
  2443. *
  2444. * Compute A=L*Q
  2445. * (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
  2446. *
  2447. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2448. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2449. *
  2450. * Copy L to WORK(IR), zeroing out above it
  2451. *
  2452. CALL SLACPY( 'L', M, M, A, LDA, WORK( IR ),
  2453. $ LDWRKR )
  2454. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO,
  2455. $ WORK( IR+LDWRKR ), LDWRKR )
  2456. *
  2457. * Generate Q in A
  2458. * (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
  2459. *
  2460. CALL SORGLQ( M, N, M, A, LDA, WORK( ITAU ),
  2461. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2462. IE = ITAU
  2463. ITAUQ = IE + M
  2464. ITAUP = ITAUQ + M
  2465. IWORK = ITAUP + M
  2466. *
  2467. * Bidiagonalize L in WORK(IR)
  2468. * (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
  2469. *
  2470. CALL SGEBRD( M, M, WORK( IR ), LDWRKR, S,
  2471. $ WORK( IE ), WORK( ITAUQ ),
  2472. $ WORK( ITAUP ), WORK( IWORK ),
  2473. $ LWORK-IWORK+1, IERR )
  2474. *
  2475. * Generate right vectors bidiagonalizing L in
  2476. * WORK(IR)
2477. * (Workspace: need M*M+4*M-1, prefer M*M+3*M+(M-1)*NB)
  2478. *
  2479. CALL SORGBR( 'P', M, M, M, WORK( IR ), LDWRKR,
  2480. $ WORK( ITAUP ), WORK( IWORK ),
  2481. $ LWORK-IWORK+1, IERR )
  2482. IWORK = IE + M
  2483. *
  2484. * Perform bidiagonal QR iteration, computing right
  2485. * singular vectors of L in WORK(IR)
  2486. * (Workspace: need M*M+BDSPAC)
  2487. *
  2488. CALL SBDSQR( 'U', M, M, 0, 0, S, WORK( IE ),
  2489. $ WORK( IR ), LDWRKR, DUM, 1, DUM, 1,
  2490. $ WORK( IWORK ), INFO )
  2491. *
  2492. * Multiply right singular vectors of L in WORK(IR) by
  2493. * Q in A, storing result in VT
  2494. * (Workspace: need M*M)
  2495. *
  2496. CALL SGEMM( 'N', 'N', M, N, M, ONE, WORK( IR ),
  2497. $ LDWRKR, A, LDA, ZERO, VT, LDVT )
  2498. *
  2499. ELSE
  2500. *
  2501. * Insufficient workspace for a fast algorithm
  2502. *
  2503. ITAU = 1
  2504. IWORK = ITAU + M
  2505. *
  2506. * Compute A=L*Q
  2507. * (Workspace: need 2*M, prefer M+M*NB)
  2508. *
  2509. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2510. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2511. *
  2512. * Copy result to VT
  2513. *
  2514. CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT )
  2515. *
  2516. * Generate Q in VT
  2517. * (Workspace: need 2*M, prefer M+M*NB)
  2518. *
  2519. CALL SORGLQ( M, N, M, VT, LDVT, WORK( ITAU ),
  2520. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2521. IE = ITAU
  2522. ITAUQ = IE + M
  2523. ITAUP = ITAUQ + M
  2524. IWORK = ITAUP + M
  2525. *
  2526. * Zero out above L in A
  2527. *
  2528. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO, A( 1, 2 ),
  2529. $ LDA )
  2530. *
  2531. * Bidiagonalize L in A
  2532. * (Workspace: need 4*M, prefer 3*M+2*M*NB)
  2533. *
  2534. CALL SGEBRD( M, M, A, LDA, S, WORK( IE ),
  2535. $ WORK( ITAUQ ), WORK( ITAUP ),
  2536. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2537. *
  2538. * Multiply right vectors bidiagonalizing L by Q in VT
  2539. * (Workspace: need 3*M+N, prefer 3*M+N*NB)
  2540. *
  2541. CALL SORMBR( 'P', 'L', 'T', M, N, M, A, LDA,
  2542. $ WORK( ITAUP ), VT, LDVT,
  2543. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2544. IWORK = IE + M
  2545. *
  2546. * Perform bidiagonal QR iteration, computing right
  2547. * singular vectors of A in VT
  2548. * (Workspace: need BDSPAC)
  2549. *
  2550. CALL SBDSQR( 'U', M, N, 0, 0, S, WORK( IE ), VT,
  2551. $ LDVT, DUM, 1, DUM, 1, WORK( IWORK ),
  2552. $ INFO )
  2553. *
  2554. END IF
  2555. *
  2556. ELSE IF( WNTUO ) THEN
  2557. *
  2558. * Path 5t(N much larger than M, JOBU='O', JOBVT='S')
  2559. * M right singular vectors to be computed in VT and
  2560. * M left singular vectors to be overwritten on A
  2561. *
  2562. IF( LWORK.GE.2*M*M+MAX( 4*M, BDSPAC ) ) THEN
  2563. *
  2564. * Sufficient workspace for a fast algorithm
  2565. *
  2566. IU = 1
  2567. IF( LWORK.GE.WRKBL+2*LDA*M ) THEN
  2568. *
  2569. * WORK(IU) is LDA by M and WORK(IR) is LDA by M
  2570. *
  2571. LDWRKU = LDA
  2572. IR = IU + LDWRKU*M
  2573. LDWRKR = LDA
  2574. ELSE IF( LWORK.GE.WRKBL+( LDA+M )*M ) THEN
  2575. *
  2576. * WORK(IU) is LDA by M and WORK(IR) is M by M
  2577. *
  2578. LDWRKU = LDA
  2579. IR = IU + LDWRKU*M
  2580. LDWRKR = M
  2581. ELSE
  2582. *
  2583. * WORK(IU) is M by M and WORK(IR) is M by M
  2584. *
  2585. LDWRKU = M
  2586. IR = IU + LDWRKU*M
  2587. LDWRKR = M
  2588. END IF
  2589. ITAU = IR + LDWRKR*M
  2590. IWORK = ITAU + M
  2591. *
  2592. * Compute A=L*Q
  2593. * (Workspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB)
  2594. *
  2595. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2596. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2597. *
2598. * Copy L to WORK(IU), zeroing out above it
  2599. *
  2600. CALL SLACPY( 'L', M, M, A, LDA, WORK( IU ),
  2601. $ LDWRKU )
  2602. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO,
  2603. $ WORK( IU+LDWRKU ), LDWRKU )
  2604. *
  2605. * Generate Q in A
  2606. * (Workspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB)
  2607. *
  2608. CALL SORGLQ( M, N, M, A, LDA, WORK( ITAU ),
  2609. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2610. IE = ITAU
  2611. ITAUQ = IE + M
  2612. ITAUP = ITAUQ + M
  2613. IWORK = ITAUP + M
  2614. *
  2615. * Bidiagonalize L in WORK(IU), copying result to
  2616. * WORK(IR)
  2617. * (Workspace: need 2*M*M+4*M,
  2618. * prefer 2*M*M+3*M+2*M*NB)
  2619. *
  2620. CALL SGEBRD( M, M, WORK( IU ), LDWRKU, S,
  2621. $ WORK( IE ), WORK( ITAUQ ),
  2622. $ WORK( ITAUP ), WORK( IWORK ),
  2623. $ LWORK-IWORK+1, IERR )
  2624. CALL SLACPY( 'L', M, M, WORK( IU ), LDWRKU,
  2625. $ WORK( IR ), LDWRKR )
  2626. *
  2627. * Generate right bidiagonalizing vectors in WORK(IU)
  2628. * (Workspace: need 2*M*M+4*M-1,
  2629. * prefer 2*M*M+3*M+(M-1)*NB)
  2630. *
  2631. CALL SORGBR( 'P', M, M, M, WORK( IU ), LDWRKU,
  2632. $ WORK( ITAUP ), WORK( IWORK ),
  2633. $ LWORK-IWORK+1, IERR )
  2634. *
  2635. * Generate left bidiagonalizing vectors in WORK(IR)
  2636. * (Workspace: need 2*M*M+4*M, prefer 2*M*M+3*M+M*NB)
  2637. *
  2638. CALL SORGBR( 'Q', M, M, M, WORK( IR ), LDWRKR,
  2639. $ WORK( ITAUQ ), WORK( IWORK ),
  2640. $ LWORK-IWORK+1, IERR )
  2641. IWORK = IE + M
  2642. *
  2643. * Perform bidiagonal QR iteration, computing left
  2644. * singular vectors of L in WORK(IR) and computing
  2645. * right singular vectors of L in WORK(IU)
  2646. * (Workspace: need 2*M*M+BDSPAC)
  2647. *
  2648. CALL SBDSQR( 'U', M, M, M, 0, S, WORK( IE ),
  2649. $ WORK( IU ), LDWRKU, WORK( IR ),
  2650. $ LDWRKR, DUM, 1, WORK( IWORK ), INFO )
  2651. *
  2652. * Multiply right singular vectors of L in WORK(IU) by
  2653. * Q in A, storing result in VT
  2654. * (Workspace: need M*M)
  2655. *
  2656. CALL SGEMM( 'N', 'N', M, N, M, ONE, WORK( IU ),
  2657. $ LDWRKU, A, LDA, ZERO, VT, LDVT )
  2658. *
  2659. * Copy left singular vectors of L to A
  2660. * (Workspace: need M*M)
  2661. *
  2662. CALL SLACPY( 'F', M, M, WORK( IR ), LDWRKR, A,
  2663. $ LDA )
  2664. *
  2665. ELSE
  2666. *
  2667. * Insufficient workspace for a fast algorithm
  2668. *
  2669. ITAU = 1
  2670. IWORK = ITAU + M
  2671. *
  2672. * Compute A=L*Q, copying result to VT
  2673. * (Workspace: need 2*M, prefer M+M*NB)
  2674. *
  2675. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2676. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2677. CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT )
  2678. *
  2679. * Generate Q in VT
  2680. * (Workspace: need 2*M, prefer M+M*NB)
  2681. *
  2682. CALL SORGLQ( M, N, M, VT, LDVT, WORK( ITAU ),
  2683. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2684. IE = ITAU
  2685. ITAUQ = IE + M
  2686. ITAUP = ITAUQ + M
  2687. IWORK = ITAUP + M
  2688. *
  2689. * Zero out above L in A
  2690. *
  2691. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO, A( 1, 2 ),
  2692. $ LDA )
  2693. *
  2694. * Bidiagonalize L in A
  2695. * (Workspace: need 4*M, prefer 3*M+2*M*NB)
  2696. *
  2697. CALL SGEBRD( M, M, A, LDA, S, WORK( IE ),
  2698. $ WORK( ITAUQ ), WORK( ITAUP ),
  2699. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2700. *
  2701. * Multiply right vectors bidiagonalizing L by Q in VT
  2702. * (Workspace: need 3*M+N, prefer 3*M+N*NB)
  2703. *
  2704. CALL SORMBR( 'P', 'L', 'T', M, N, M, A, LDA,
  2705. $ WORK( ITAUP ), VT, LDVT,
  2706. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2707. *
  2708. * Generate left bidiagonalizing vectors of L in A
  2709. * (Workspace: need 4*M, prefer 3*M+M*NB)
  2710. *
  2711. CALL SORGBR( 'Q', M, M, M, A, LDA, WORK( ITAUQ ),
  2712. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2713. IWORK = IE + M
  2714. *
2715. * Perform bidiagonal QR iteration, computing left
2716. * singular vectors of A in A and computing right
  2717. * singular vectors of A in VT
  2718. * (Workspace: need BDSPAC)
  2719. *
  2720. CALL SBDSQR( 'U', M, N, M, 0, S, WORK( IE ), VT,
  2721. $ LDVT, A, LDA, DUM, 1, WORK( IWORK ),
  2722. $ INFO )
  2723. *
  2724. END IF
  2725. *
  2726. ELSE IF( WNTUAS ) THEN
  2727. *
  2728. * Path 6t(N much larger than M, JOBU='S' or 'A',
  2729. * JOBVT='S')
  2730. * M right singular vectors to be computed in VT and
  2731. * M left singular vectors to be computed in U
  2732. *
  2733. IF( LWORK.GE.M*M+MAX( 4*M, BDSPAC ) ) THEN
  2734. *
  2735. * Sufficient workspace for a fast algorithm
  2736. *
  2737. IU = 1
  2738. IF( LWORK.GE.WRKBL+LDA*M ) THEN
  2739. *
2740. * WORK(IU) is LDA by M
  2741. *
  2742. LDWRKU = LDA
  2743. ELSE
  2744. *
2745. * WORK(IU) is M by M
  2746. *
  2747. LDWRKU = M
  2748. END IF
  2749. ITAU = IU + LDWRKU*M
  2750. IWORK = ITAU + M
  2751. *
  2752. * Compute A=L*Q
  2753. * (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
  2754. *
  2755. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2756. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2757. *
  2758. * Copy L to WORK(IU), zeroing out above it
  2759. *
  2760. CALL SLACPY( 'L', M, M, A, LDA, WORK( IU ),
  2761. $ LDWRKU )
  2762. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO,
  2763. $ WORK( IU+LDWRKU ), LDWRKU )
  2764. *
  2765. * Generate Q in A
  2766. * (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
  2767. *
  2768. CALL SORGLQ( M, N, M, A, LDA, WORK( ITAU ),
  2769. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2770. IE = ITAU
  2771. ITAUQ = IE + M
  2772. ITAUP = ITAUQ + M
  2773. IWORK = ITAUP + M
  2774. *
  2775. * Bidiagonalize L in WORK(IU), copying result to U
  2776. * (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
  2777. *
  2778. CALL SGEBRD( M, M, WORK( IU ), LDWRKU, S,
  2779. $ WORK( IE ), WORK( ITAUQ ),
  2780. $ WORK( ITAUP ), WORK( IWORK ),
  2781. $ LWORK-IWORK+1, IERR )
  2782. CALL SLACPY( 'L', M, M, WORK( IU ), LDWRKU, U,
  2783. $ LDU )
  2784. *
  2785. * Generate right bidiagonalizing vectors in WORK(IU)
  2786. * (Workspace: need M*M+4*M-1,
  2787. * prefer M*M+3*M+(M-1)*NB)
  2788. *
  2789. CALL SORGBR( 'P', M, M, M, WORK( IU ), LDWRKU,
  2790. $ WORK( ITAUP ), WORK( IWORK ),
  2791. $ LWORK-IWORK+1, IERR )
  2792. *
  2793. * Generate left bidiagonalizing vectors in U
  2794. * (Workspace: need M*M+4*M, prefer M*M+3*M+M*NB)
  2795. *
  2796. CALL SORGBR( 'Q', M, M, M, U, LDU, WORK( ITAUQ ),
  2797. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2798. IWORK = IE + M
  2799. *
  2800. * Perform bidiagonal QR iteration, computing left
  2801. * singular vectors of L in U and computing right
  2802. * singular vectors of L in WORK(IU)
  2803. * (Workspace: need M*M+BDSPAC)
  2804. *
  2805. CALL SBDSQR( 'U', M, M, M, 0, S, WORK( IE ),
  2806. $ WORK( IU ), LDWRKU, U, LDU, DUM, 1,
  2807. $ WORK( IWORK ), INFO )
  2808. *
  2809. * Multiply right singular vectors of L in WORK(IU) by
  2810. * Q in A, storing result in VT
  2811. * (Workspace: need M*M)
  2812. *
  2813. CALL SGEMM( 'N', 'N', M, N, M, ONE, WORK( IU ),
  2814. $ LDWRKU, A, LDA, ZERO, VT, LDVT )
  2815. *
  2816. ELSE
  2817. *
  2818. * Insufficient workspace for a fast algorithm
  2819. *
  2820. ITAU = 1
  2821. IWORK = ITAU + M
  2822. *
  2823. * Compute A=L*Q, copying result to VT
  2824. * (Workspace: need 2*M, prefer M+M*NB)
  2825. *
  2826. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2827. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2828. CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT )
  2829. *
  2830. * Generate Q in VT
  2831. * (Workspace: need 2*M, prefer M+M*NB)
  2832. *
  2833. CALL SORGLQ( M, N, M, VT, LDVT, WORK( ITAU ),
  2834. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2835. *
  2836. * Copy L to U, zeroing out above it
  2837. *
  2838. CALL SLACPY( 'L', M, M, A, LDA, U, LDU )
  2839. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO, U( 1, 2 ),
  2840. $ LDU )
  2841. IE = ITAU
  2842. ITAUQ = IE + M
  2843. ITAUP = ITAUQ + M
  2844. IWORK = ITAUP + M
  2845. *
  2846. * Bidiagonalize L in U
  2847. * (Workspace: need 4*M, prefer 3*M+2*M*NB)
  2848. *
  2849. CALL SGEBRD( M, M, U, LDU, S, WORK( IE ),
  2850. $ WORK( ITAUQ ), WORK( ITAUP ),
  2851. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2852. *
  2853. * Multiply right bidiagonalizing vectors in U by Q
  2854. * in VT
  2855. * (Workspace: need 3*M+N, prefer 3*M+N*NB)
  2856. *
  2857. CALL SORMBR( 'P', 'L', 'T', M, N, M, U, LDU,
  2858. $ WORK( ITAUP ), VT, LDVT,
  2859. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2860. *
  2861. * Generate left bidiagonalizing vectors in U
  2862. * (Workspace: need 4*M, prefer 3*M+M*NB)
  2863. *
  2864. CALL SORGBR( 'Q', M, M, M, U, LDU, WORK( ITAUQ ),
  2865. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2866. IWORK = IE + M
  2867. *
  2868. * Perform bidiagonal QR iteration, computing left
  2869. * singular vectors of A in U and computing right
  2870. * singular vectors of A in VT
  2871. * (Workspace: need BDSPAC)
  2872. *
  2873. CALL SBDSQR( 'U', M, N, M, 0, S, WORK( IE ), VT,
  2874. $ LDVT, U, LDU, DUM, 1, WORK( IWORK ),
  2875. $ INFO )
  2876. *
  2877. END IF
  2878. *
  2879. END IF
  2880. *
  2881. ELSE IF( WNTVA ) THEN
  2882. *
  2883. IF( WNTUN ) THEN
  2884. *
  2885. * Path 7t(N much larger than M, JOBU='N', JOBVT='A')
  2886. * N right singular vectors to be computed in VT and
  2887. * no left singular vectors to be computed
  2888. *
  2889. IF( LWORK.GE.M*M+MAX( N+M, 4*M, BDSPAC ) ) THEN
  2890. *
  2891. * Sufficient workspace for a fast algorithm
  2892. *
  2893. IR = 1
  2894. IF( LWORK.GE.WRKBL+LDA*M ) THEN
  2895. *
  2896. * WORK(IR) is LDA by M
  2897. *
  2898. LDWRKR = LDA
  2899. ELSE
  2900. *
  2901. * WORK(IR) is M by M
  2902. *
  2903. LDWRKR = M
  2904. END IF
  2905. ITAU = IR + LDWRKR*M
  2906. IWORK = ITAU + M
  2907. *
  2908. * Compute A=L*Q, copying result to VT
  2909. * (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
  2910. *
  2911. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2912. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2913. CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT )
  2914. *
  2915. * Copy L to WORK(IR), zeroing out above it
  2916. *
  2917. CALL SLACPY( 'L', M, M, A, LDA, WORK( IR ),
  2918. $ LDWRKR )
  2919. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO,
  2920. $ WORK( IR+LDWRKR ), LDWRKR )
  2921. *
  2922. * Generate Q in VT
  2923. * (Workspace: need M*M+M+N, prefer M*M+M+N*NB)
  2924. *
  2925. CALL SORGLQ( N, N, M, VT, LDVT, WORK( ITAU ),
  2926. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2927. IE = ITAU
  2928. ITAUQ = IE + M
  2929. ITAUP = ITAUQ + M
  2930. IWORK = ITAUP + M
  2931. *
  2932. * Bidiagonalize L in WORK(IR)
  2933. * (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
  2934. *
  2935. CALL SGEBRD( M, M, WORK( IR ), LDWRKR, S,
  2936. $ WORK( IE ), WORK( ITAUQ ),
  2937. $ WORK( ITAUP ), WORK( IWORK ),
  2938. $ LWORK-IWORK+1, IERR )
  2939. *
  2940. * Generate right bidiagonalizing vectors in WORK(IR)
  2941. * (Workspace: need M*M+4*M-1,
  2942. * prefer M*M+3*M+(M-1)*NB)
  2943. *
  2944. CALL SORGBR( 'P', M, M, M, WORK( IR ), LDWRKR,
  2945. $ WORK( ITAUP ), WORK( IWORK ),
  2946. $ LWORK-IWORK+1, IERR )
  2947. IWORK = IE + M
  2948. *
  2949. * Perform bidiagonal QR iteration, computing right
  2950. * singular vectors of L in WORK(IR)
  2951. * (Workspace: need M*M+BDSPAC)
  2952. *
  2953. CALL SBDSQR( 'U', M, M, 0, 0, S, WORK( IE ),
  2954. $ WORK( IR ), LDWRKR, DUM, 1, DUM, 1,
  2955. $ WORK( IWORK ), INFO )
  2956. *
  2957. * Multiply right singular vectors of L in WORK(IR) by
  2958. * Q in VT, storing result in A
  2959. * (Workspace: need M*M)
  2960. *
  2961. CALL SGEMM( 'N', 'N', M, N, M, ONE, WORK( IR ),
  2962. $ LDWRKR, VT, LDVT, ZERO, A, LDA )
  2963. *
  2964. * Copy right singular vectors of A from A to VT
  2965. *
  2966. CALL SLACPY( 'F', M, N, A, LDA, VT, LDVT )
  2967. *
  2968. ELSE
  2969. *
  2970. * Insufficient workspace for a fast algorithm
  2971. *
  2972. ITAU = 1
  2973. IWORK = ITAU + M
  2974. *
  2975. * Compute A=L*Q, copying result to VT
  2976. * (Workspace: need 2*M, prefer M+M*NB)
  2977. *
  2978. CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
  2979. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2980. CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT )
  2981. *
  2982. * Generate Q in VT
  2983. * (Workspace: need M+N, prefer M+N*NB)
  2984. *
  2985. CALL SORGLQ( N, N, M, VT, LDVT, WORK( ITAU ),
  2986. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  2987. IE = ITAU
  2988. ITAUQ = IE + M
  2989. ITAUP = ITAUQ + M
  2990. IWORK = ITAUP + M
  2991. *
  2992. * Zero out above L in A
  2993. *
  2994. CALL SLASET( 'U', M-1, M-1, ZERO, ZERO, A( 1, 2 ),
  2995. $ LDA )
  2996. *
  2997. * Bidiagonalize L in A
  2998. * (Workspace: need 4*M, prefer 3*M+2*M*NB)
  2999. *
  3000. CALL SGEBRD( M, M, A, LDA, S, WORK( IE ),
  3001. $ WORK( ITAUQ ), WORK( ITAUP ),
  3002. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  3003. *
  3004. * Multiply right bidiagonalizing vectors in A by Q
  3005. * in VT
  3006. * (Workspace: need 3*M+N, prefer 3*M+N*NB)
  3007. *
  3008. CALL SORMBR( 'P', 'L', 'T', M, N, M, A, LDA,
  3009. $ WORK( ITAUP ), VT, LDVT,
  3010. $ WORK( IWORK ), LWORK-IWORK+1, IERR )
  3011. IWORK = IE + M
  3012. *
  3013. * Perform bidiagonal QR iteration, computing right
  3014. * singular vectors of A in VT
  3015. * (Workspace: need BDSPAC)
  3016. *
  3017. CALL SBDSQR( 'U', M, N, 0, 0, S, WORK( IE ), VT,
  3018. $ LDVT, DUM, 1, DUM, 1, WORK( IWORK ),
  3019. $ INFO )
  3020. *
  3021. END IF
  3022. *
  3023. ELSE IF( WNTUO ) THEN
  3024. *
  3025. * Path 8t(N much larger than M, JOBU='O', JOBVT='A')
  3026. * N right singular vectors to be computed in VT and
  3027. * M left singular vectors to be overwritten on A
  3028. *
  3029. IF( LWORK.GE.2*M*M+MAX( N+M, 4*M, BDSPAC ) ) THEN
  3030. *
  3031. * Sufficient workspace for a fast algorithm
  3032. *
  3033. IU = 1
  3034. IF( LWORK.GE.WRKBL+2*LDA*M ) THEN
  3035. *
  3036. * WORK(IU) is LDA by M and WORK(IR) is LDA by M
  3037. *
  3038. LDWRKU = LDA
  3039. IR = IU + LDWRKU*M
  3040. LDWRKR = LDA
  3041. ELSE IF( LWORK.GE.WRKBL+( LDA+M )*M ) THEN
  3042. *
  3043. * WORK(IU) is LDA by M and WORK(IR) is M by M
  3044. *
  3045. LDWRKU = LDA
  3046. IR = IU + LDWRKU*M
  3047. LDWRKR = M
  3048. ELSE
  3049. *
  3050. * WORK(IU) is M by M and WORK(IR) is M by M
  3051. *
  3052. LDWRKU = M
  3053. IR = IU + LDWRKU*M
  3054. LDWRKR = M
  3055. END IF
                     ITAU = IR + LDWRKR*M
                     IWORK = ITAU + M
*
*                    Compute A=L*Q, copying result to VT
*                    (Workspace: need 2*M*M+2*M, prefer 2*M*M+M+M*NB)
*
                     CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
                     CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT )
*
*                    Generate Q in VT
*                    (Workspace: need 2*M*M+M+N, prefer 2*M*M+M+N*NB)
*
                     CALL SORGLQ( N, N, M, VT, LDVT, WORK( ITAU ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
*
*                    Copy L to WORK(IU), zeroing out above it
*
                     CALL SLACPY( 'L', M, M, A, LDA, WORK( IU ),
     $                            LDWRKU )
                     CALL SLASET( 'U', M-1, M-1, ZERO, ZERO,
     $                            WORK( IU+LDWRKU ), LDWRKU )
                     IE = ITAU
                     ITAUQ = IE + M
                     ITAUP = ITAUQ + M
                     IWORK = ITAUP + M
*
*                    Bidiagonalize L in WORK(IU), copying result to
*                    WORK(IR)
*                    (Workspace: need 2*M*M+4*M,
*                                prefer 2*M*M+3*M+2*M*NB)
*
                     CALL SGEBRD( M, M, WORK( IU ), LDWRKU, S,
     $                            WORK( IE ), WORK( ITAUQ ),
     $                            WORK( ITAUP ), WORK( IWORK ),
     $                            LWORK-IWORK+1, IERR )
                     CALL SLACPY( 'L', M, M, WORK( IU ), LDWRKU,
     $                            WORK( IR ), LDWRKR )
*
*                    Generate right bidiagonalizing vectors in WORK(IU)
*                    (Workspace: need 2*M*M+4*M-1,
*                                prefer 2*M*M+3*M+(M-1)*NB)
*
                     CALL SORGBR( 'P', M, M, M, WORK( IU ), LDWRKU,
     $                            WORK( ITAUP ), WORK( IWORK ),
     $                            LWORK-IWORK+1, IERR )
*
*                    Generate left bidiagonalizing vectors in WORK(IR)
*                    (Workspace: need 2*M*M+4*M, prefer 2*M*M+3*M+M*NB)
*
                     CALL SORGBR( 'Q', M, M, M, WORK( IR ), LDWRKR,
     $                            WORK( ITAUQ ), WORK( IWORK ),
     $                            LWORK-IWORK+1, IERR )
                     IWORK = IE + M
*
*                    Perform bidiagonal QR iteration, computing left
*                    singular vectors of L in WORK(IR) and computing
*                    right singular vectors of L in WORK(IU)
*                    (Workspace: need 2*M*M+BDSPAC)
*
                     CALL SBDSQR( 'U', M, M, M, 0, S, WORK( IE ),
     $                            WORK( IU ), LDWRKU, WORK( IR ),
     $                            LDWRKR, DUM, 1, WORK( IWORK ), INFO )
*
*                    Multiply right singular vectors of L in WORK(IU)
*                    by Q in VT, storing result in A
*                    (Workspace: need M*M)
*
                     CALL SGEMM( 'N', 'N', M, N, M, ONE, WORK( IU ),
     $                           LDWRKU, VT, LDVT, ZERO, A, LDA )
*
*                    Copy right singular vectors of A from A to VT
*
                     CALL SLACPY( 'F', M, N, A, LDA, VT, LDVT )
*
*                    Copy left singular vectors of A from WORK(IR) to A
*
                     CALL SLACPY( 'F', M, M, WORK( IR ), LDWRKR, A,
     $                            LDA )
*
                  ELSE
*
*                    Insufficient workspace for a fast algorithm
*
                     ITAU = 1
                     IWORK = ITAU + M
*
*                    Compute A=L*Q, copying result to VT
*                    (Workspace: need 2*M, prefer M+M*NB)
*
                     CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
                     CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT )
*
*                    Generate Q in VT
*                    (Workspace: need M+N, prefer M+N*NB)
*
                     CALL SORGLQ( N, N, M, VT, LDVT, WORK( ITAU ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
                     IE = ITAU
                     ITAUQ = IE + M
                     ITAUP = ITAUQ + M
                     IWORK = ITAUP + M
*
*                    Zero out above L in A
*
                     CALL SLASET( 'U', M-1, M-1, ZERO, ZERO, A( 1, 2 ),
     $                            LDA )
*
*                    Bidiagonalize L in A
*                    (Workspace: need 4*M, prefer 3*M+2*M*NB)
*
                     CALL SGEBRD( M, M, A, LDA, S, WORK( IE ),
     $                            WORK( ITAUQ ), WORK( ITAUP ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
*
*                    Multiply right bidiagonalizing vectors in A by Q
*                    in VT
*                    (Workspace: need 3*M+N, prefer 3*M+N*NB)
*
                     CALL SORMBR( 'P', 'L', 'T', M, N, M, A, LDA,
     $                            WORK( ITAUP ), VT, LDVT,
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
*
*                    Generate left bidiagonalizing vectors in A
*                    (Workspace: need 4*M, prefer 3*M+M*NB)
*
                     CALL SORGBR( 'Q', M, M, M, A, LDA, WORK( ITAUQ ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
                     IWORK = IE + M
*
*                    Perform bidiagonal QR iteration, computing left
*                    singular vectors of A in A and computing right
*                    singular vectors of A in VT
*                    (Workspace: need BDSPAC)
*
                     CALL SBDSQR( 'U', M, N, M, 0, S, WORK( IE ), VT,
     $                            LDVT, A, LDA, DUM, 1, WORK( IWORK ),
     $                            INFO )
*
                  END IF
*
               ELSE IF( WNTUAS ) THEN
*
*                 Path 9t(N much larger than M, JOBU='S' or 'A',
*                         JOBVT='A')
*                 N right singular vectors to be computed in VT and
*                 M left singular vectors to be computed in U
*
                  IF( LWORK.GE.M*M+MAX( N+M, 4*M, BDSPAC ) ) THEN
*
*                    Sufficient workspace for a fast algorithm
*
                     IU = 1
                     IF( LWORK.GE.WRKBL+LDA*M ) THEN
*
*                       WORK(IU) is LDA by M
*
                        LDWRKU = LDA
                     ELSE
*
*                       WORK(IU) is M by M
*
                        LDWRKU = M
                     END IF
                     ITAU = IU + LDWRKU*M
                     IWORK = ITAU + M
*
*                    Compute A=L*Q, copying result to VT
*                    (Workspace: need M*M+2*M, prefer M*M+M+M*NB)
*
                     CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
                     CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT )
*
*                    Generate Q in VT
*                    (Workspace: need M*M+M+N, prefer M*M+M+N*NB)
*
                     CALL SORGLQ( N, N, M, VT, LDVT, WORK( ITAU ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
*
*                    Copy L to WORK(IU), zeroing out above it
*
                     CALL SLACPY( 'L', M, M, A, LDA, WORK( IU ),
     $                            LDWRKU )
                     CALL SLASET( 'U', M-1, M-1, ZERO, ZERO,
     $                            WORK( IU+LDWRKU ), LDWRKU )
                     IE = ITAU
                     ITAUQ = IE + M
                     ITAUP = ITAUQ + M
                     IWORK = ITAUP + M
*
*                    Bidiagonalize L in WORK(IU), copying result to U
*                    (Workspace: need M*M+4*M, prefer M*M+3*M+2*M*NB)
*
                     CALL SGEBRD( M, M, WORK( IU ), LDWRKU, S,
     $                            WORK( IE ), WORK( ITAUQ ),
     $                            WORK( ITAUP ), WORK( IWORK ),
     $                            LWORK-IWORK+1, IERR )
                     CALL SLACPY( 'L', M, M, WORK( IU ), LDWRKU, U,
     $                            LDU )
*
*                    Generate right bidiagonalizing vectors in WORK(IU)
*                    (Workspace: need M*M+4*M, prefer M*M+3*M+(M-1)*NB)
*
                     CALL SORGBR( 'P', M, M, M, WORK( IU ), LDWRKU,
     $                            WORK( ITAUP ), WORK( IWORK ),
     $                            LWORK-IWORK+1, IERR )
*
*                    Generate left bidiagonalizing vectors in U
*                    (Workspace: need M*M+4*M, prefer M*M+3*M+M*NB)
*
                     CALL SORGBR( 'Q', M, M, M, U, LDU, WORK( ITAUQ ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
                     IWORK = IE + M
*
*                    Perform bidiagonal QR iteration, computing left
*                    singular vectors of L in U and computing right
*                    singular vectors of L in WORK(IU)
*                    (Workspace: need M*M+BDSPAC)
*
                     CALL SBDSQR( 'U', M, M, M, 0, S, WORK( IE ),
     $                            WORK( IU ), LDWRKU, U, LDU, DUM, 1,
     $                            WORK( IWORK ), INFO )
*
*                    Multiply right singular vectors of L in WORK(IU)
*                    by Q in VT, storing result in A
*                    (Workspace: need M*M)
*
                     CALL SGEMM( 'N', 'N', M, N, M, ONE, WORK( IU ),
     $                           LDWRKU, VT, LDVT, ZERO, A, LDA )
*
*                    Copy right singular vectors of A from A to VT
*
                     CALL SLACPY( 'F', M, N, A, LDA, VT, LDVT )
*
                  ELSE
*
*                    Insufficient workspace for a fast algorithm
*
                     ITAU = 1
                     IWORK = ITAU + M
*
*                    Compute A=L*Q, copying result to VT
*                    (Workspace: need 2*M, prefer M+M*NB)
*
                     CALL SGELQF( M, N, A, LDA, WORK( ITAU ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
                     CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT )
*
*                    Generate Q in VT
*                    (Workspace: need M+N, prefer M+N*NB)
*
                     CALL SORGLQ( N, N, M, VT, LDVT, WORK( ITAU ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
*
*                    Copy L to U, zeroing out above it
*
                     CALL SLACPY( 'L', M, M, A, LDA, U, LDU )
                     CALL SLASET( 'U', M-1, M-1, ZERO, ZERO, U( 1, 2 ),
     $                            LDU )
                     IE = ITAU
                     ITAUQ = IE + M
                     ITAUP = ITAUQ + M
                     IWORK = ITAUP + M
*
*                    Bidiagonalize L in U
*                    (Workspace: need 4*M, prefer 3*M+2*M*NB)
*
                     CALL SGEBRD( M, M, U, LDU, S, WORK( IE ),
     $                            WORK( ITAUQ ), WORK( ITAUP ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
*
*                    Multiply right bidiagonalizing vectors in U by Q
*                    in VT
*                    (Workspace: need 3*M+N, prefer 3*M+N*NB)
*
                     CALL SORMBR( 'P', 'L', 'T', M, N, M, U, LDU,
     $                            WORK( ITAUP ), VT, LDVT,
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
*
*                    Generate left bidiagonalizing vectors in U
*                    (Workspace: need 4*M, prefer 3*M+M*NB)
*
                     CALL SORGBR( 'Q', M, M, M, U, LDU, WORK( ITAUQ ),
     $                            WORK( IWORK ), LWORK-IWORK+1, IERR )
                     IWORK = IE + M
*
*                    Perform bidiagonal QR iteration, computing left
*                    singular vectors of A in U and computing right
*                    singular vectors of A in VT
*                    (Workspace: need BDSPAC)
*
                     CALL SBDSQR( 'U', M, N, M, 0, S, WORK( IE ), VT,
     $                            LDVT, U, LDU, DUM, 1, WORK( IWORK ),
     $                            INFO )
*
                  END IF
*
               END IF
*
            END IF
*
         ELSE
*
*           N .LT. MNTHR
*
*           Path 10t(N greater than M, but not much larger)
*           Reduce to bidiagonal form without LQ decomposition
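*           (Since M .LT. N in this branch, SGEBRD produces a lower
*           bidiagonal matrix; hence the SBDSQR calls below use
*           UPLO = 'L'.)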
*
            IE = 1
            ITAUQ = IE + M
            ITAUP = ITAUQ + M
            IWORK = ITAUP + M
*
*           Bidiagonalize A
*           (Workspace: need 3*M+N, prefer 3*M+(M+N)*NB)
*
            CALL SGEBRD( M, N, A, LDA, S, WORK( IE ), WORK( ITAUQ ),
     $                   WORK( ITAUP ), WORK( IWORK ), LWORK-IWORK+1,
     $                   IERR )
            IF( WNTUAS ) THEN
*
*              If left singular vectors desired in U, copy result to U
*              and generate left bidiagonalizing vectors in U
*              (Workspace: need 4*M-1, prefer 3*M+(M-1)*NB)
*
               CALL SLACPY( 'L', M, M, A, LDA, U, LDU )
               CALL SORGBR( 'Q', M, M, N, U, LDU, WORK( ITAUQ ),
     $                      WORK( IWORK ), LWORK-IWORK+1, IERR )
            END IF
            IF( WNTVAS ) THEN
*
*              If right singular vectors desired in VT, copy result to
*              VT and generate right bidiagonalizing vectors in VT
*              (Workspace: need 3*M+NRVT, prefer 3*M+NRVT*NB)
*
               CALL SLACPY( 'U', M, N, A, LDA, VT, LDVT )
               IF( WNTVA )
     $            NRVT = N
               IF( WNTVS )
     $            NRVT = M
               CALL SORGBR( 'P', NRVT, N, M, VT, LDVT, WORK( ITAUP ),
     $                      WORK( IWORK ), LWORK-IWORK+1, IERR )
            END IF
            IF( WNTUO ) THEN
*
*              If left singular vectors desired in A, generate left
*              bidiagonalizing vectors in A
*              (Workspace: need 4*M-1, prefer 3*M+(M-1)*NB)
*
               CALL SORGBR( 'Q', M, M, N, A, LDA, WORK( ITAUQ ),
     $                      WORK( IWORK ), LWORK-IWORK+1, IERR )
            END IF
            IF( WNTVO ) THEN
*
*              If right singular vectors desired in A, generate right
*              bidiagonalizing vectors in A
*              (Workspace: need 4*M, prefer 3*M+M*NB)
*
               CALL SORGBR( 'P', M, N, M, A, LDA, WORK( ITAUP ),
     $                      WORK( IWORK ), LWORK-IWORK+1, IERR )
            END IF
            IWORK = IE + M
            IF( WNTUAS .OR. WNTUO )
     $         NRU = M
            IF( WNTUN )
     $         NRU = 0
            IF( WNTVAS .OR. WNTVO )
     $         NCVT = N
            IF( WNTVN )
     $         NCVT = 0
            IF( ( .NOT.WNTUO ) .AND. ( .NOT.WNTVO ) ) THEN
*
*              Perform bidiagonal QR iteration, if desired, computing
*              left singular vectors in U and computing right singular
*              vectors in VT
*              (Workspace: need BDSPAC)
*
               CALL SBDSQR( 'L', M, NCVT, NRU, 0, S, WORK( IE ), VT,
     $                      LDVT, U, LDU, DUM, 1, WORK( IWORK ), INFO )
            ELSE IF( ( .NOT.WNTUO ) .AND. WNTVO ) THEN
*
*              Perform bidiagonal QR iteration, if desired, computing
*              left singular vectors in U and computing right singular
*              vectors in A
*              (Workspace: need BDSPAC)
*
               CALL SBDSQR( 'L', M, NCVT, NRU, 0, S, WORK( IE ), A, LDA,
     $                      U, LDU, DUM, 1, WORK( IWORK ), INFO )
            ELSE
*
*              Perform bidiagonal QR iteration, if desired, computing
*              left singular vectors in A and computing right singular
*              vectors in VT
*              (Workspace: need BDSPAC)
*
               CALL SBDSQR( 'L', M, NCVT, NRU, 0, S, WORK( IE ), VT,
     $                      LDVT, A, LDA, DUM, 1, WORK( IWORK ), INFO )
            END IF
*
         END IF
*
      END IF
*
*     If SBDSQR failed to converge, copy unconverged superdiagonals
*     to WORK( 2:MINMN )
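*     (The loop direction depends on IE so the in-place shift never
*     overwrites entries that have not yet been moved.)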
*
      IF( INFO.NE.0 ) THEN
         IF( IE.GT.2 ) THEN
            DO 50 I = 1, MINMN - 1
               WORK( I+1 ) = WORK( I+IE-1 )
   50       CONTINUE
         END IF
         IF( IE.LT.2 ) THEN
            DO 60 I = MINMN - 1, 1, -1
               WORK( I+1 ) = WORK( I+IE-1 )
   60       CONTINUE
         END IF
      END IF
*
*     Undo scaling if necessary
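*     (When ISCL = 1, A was scaled before the reduction; here S and
*     any superdiagonals saved in WORK( 2:MINMN ) are rescaled by
*     ANRM/BIGNUM or ANRM/SMLNUM to refer to the original matrix.)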
*
      IF( ISCL.EQ.1 ) THEN
         IF( ANRM.GT.BIGNUM )
     $      CALL SLASCL( 'G', 0, 0, BIGNUM, ANRM, MINMN, 1, S, MINMN,
     $                   IERR )
         IF( INFO.NE.0 .AND. ANRM.GT.BIGNUM )
     $      CALL SLASCL( 'G', 0, 0, BIGNUM, ANRM, MINMN-1, 1, WORK( 2 ),
     $                   MINMN, IERR )
         IF( ANRM.LT.SMLNUM )
     $      CALL SLASCL( 'G', 0, 0, SMLNUM, ANRM, MINMN, 1, S, MINMN,
     $                   IERR )
         IF( INFO.NE.0 .AND. ANRM.LT.SMLNUM )
     $      CALL SLASCL( 'G', 0, 0, SMLNUM, ANRM, MINMN-1, 1, WORK( 2 ),
     $                   MINMN, IERR )
      END IF
*
*     Return optimal workspace in WORK(1)
*
      WORK( 1 ) = MAXWRK
*
      RETURN
*
*     End of SGESVD
*
      END
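*
*     ============================================================
*     Illustrative driver sketch (not part of the LAPACK library
*     source): it shows one plausible calling sequence for SGESVD,
*     using the LWORK = -1 workspace query before the actual
*     factorization.  The dimensions, the sample matrix, and the
*     LWMAX bound below are arbitrary placeholders.
*
      PROGRAM SVDDRV
      INTEGER            M, N, LDA, LDU, LDVT, LWMAX
      PARAMETER          ( M = 4, N = 6, LDA = M, LDU = M, LDVT = M,
     $                     LWMAX = 1000 )
      INTEGER            I, J, INFO, LWORK
      REAL               A( LDA, N ), S( M ), U( LDU, M ),
     $                   VT( LDVT, N ), WORK( LWMAX )
      EXTERNAL           SGESVD
      INTRINSIC          INT, MIN, REAL
*
*     Fill A with arbitrary sample data.
*
      DO 20 J = 1, N
         DO 10 I = 1, M
            A( I, J ) = REAL( I*J ) + REAL( I ) / REAL( J )
   10    CONTINUE
   20 CONTINUE
*
*     Workspace query: with LWORK = -1, SGESVD only reports the
*     optimal workspace size in WORK( 1 ).
*
      LWORK = -1
      CALL SGESVD( 'S', 'S', M, N, A, LDA, S, U, LDU, VT, LDVT,
     $             WORK, LWORK, INFO )
      LWORK = MIN( LWMAX, INT( WORK( 1 ) ) )
*
*     Compute the economy-size SVD: A = U * diag(S) * VT.
*
      CALL SGESVD( 'S', 'S', M, N, A, LDA, S, U, LDU, VT, LDVT,
     $             WORK, LWORK, INFO )
      IF( INFO.EQ.0 ) THEN
         WRITE( *, * ) 'Singular values:', ( S( I ), I = 1, M )
      ELSE IF( INFO.GT.0 ) THEN
         WRITE( *, * ) 'SBDSQR did not converge, INFO =', INFO
      ELSE
         WRITE( *, * ) 'Illegal argument, INFO =', INFO
      END IF
      END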