
davinci_model.cc 168 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "graph/load/new_model_manager/davinci_model.h"
#include <graph/utils/node_utils.h>
#include <algorithm>
#include <map>
#include <utility>
#include "common/debug/log.h"
#include "common/formats/formats.h"
#include "common/formats/utils/formats_trans_utils.h"
#include "common/math/math_util.h"
#include "common/op/ge_op_utils.h"
#include "common/profiling/profiling_manager.h"
#include "common/properties_manager.h"
#include "common/scope_guard.h"
#include "common/thread_pool.h"
#include "framework/common/debug/ge_log.h"
#include "graph/common/ge_call_wrapper.h"
#include "graph/common/local_context.h"
#include "graph/compute_graph.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/ge_context.h"
#include "graph/graph.h"
#include "graph/load/new_model_manager/cpu_queue_schedule.h"
#include "graph/load/new_model_manager/model_manager.h"
#include "graph/load/new_model_manager/tbe_handle_store.h"
#include "graph/manager/graph_mem_allocator.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/manager/trans_var_data_utils.h"
#include "graph/manager/util/debug.h"
#include "graph/model_serialize.h"
#include "graph/node.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/type_utils.h"
#include "init/gelib.h"
#include "mmpa/mmpa_api.h"
#include "omm/csa_interact.h"
#include "runtime/base.h"
#include "runtime/dev.h"
#include "runtime/event.h"
#include "runtime/mem.h"
#include "runtime/rt_model.h"
#include "runtime/stream.h"
#include "securec.h"
// Create a std::thread, catching construction failures with try/catch.
#define CREATE_STD_THREAD(thread_id, func, args)                                                   \
  do {                                                                                             \
    try {                                                                                          \
      thread_id = std::thread(func, args);                                                         \
    } catch (const std::system_error &e) {                                                         \
      GELOGE(FAILED, "Caught system_error with code:%d, meaning:%s", e.code().value(), e.what());  \
      GELOGE(FAILED, "Thread creation failed. Please check available system resources!");          \
      return FAILED;                                                                               \
    }                                                                                              \
  } while (0)
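
// A minimal usage sketch (the caller shown here is illustrative, not taken
// from this file):
//   Status DavinciModel::ModelRunStart() {
//     CREATE_STD_THREAD(thread_id_, &DavinciModel::Run, this);  // returns FAILED on throw
//     return SUCCESS;
//   }
// Because the macro contains `return FAILED;`, it may only be used inside
// functions that return Status.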
namespace ge {
namespace {
const uint32_t kDataIndex = 0;
const uint32_t kOutputNum = 1;
const uint32_t kTrueBranchStreamNum = 1;
const uint32_t kGetDynamicDimsCount = 1;
const uint32_t kThreadNum = 16;
const uint32_t kAddrLen = sizeof(void *);
const int kDecimal = 10;
const int kBytes = 8;
const uint32_t kDataMemAlignSizeCompare = 64;
const uint32_t kDumpL1FusionOpMByteSize = 2097152;  // 2 * 1024 * 1024
const uint32_t kDumpFlagOfL1Fusion = 0;
const char *const kDefaultBatchLable = "Batch_default";
const char *const kGetDynamicDimsName = "ascend_mbatch_get_dynamic_dims_node";
const int32_t kInvalidStream = -1;
const uint32_t kEndOfSequence = 0x0704000a;
const uint32_t kEndOfSequenceNew = 507005;
const int32_t kModelAbortNormal = 0x0704000e;
const int32_t kModelAbortNormalNew = 507024;

inline bool IsDataOp(const std::string &node_type) {
  return node_type == DATA_TYPE || node_type == AIPP_DATA_TYPE || node_type == ANN_DATA_TYPE;
}

inline bool IsNoTaskAndDumpNeeded(const OpDescPtr &op_desc) {
  bool save_dump_info = false;
  (void)ge::AttrUtils::GetBool(op_desc, ATTR_NO_TASK_AND_DUMP_NEEDED, save_dump_info);
  return save_dump_info;
}
}  // namespace
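
// Note: kEndOfSequence/kModelAbortNormal and their *New counterparts appear to
// be the legacy and the renumbered runtime status codes for the same two
// conditions; both are kept so either reporting scheme is recognized.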
std::mutex DavinciModel::tvm_bin_mutex_;

DavinciModel::DavinciModel(int32_t priority, const std::shared_ptr<ModelListener> &listener)
    : weights_mem_base_(nullptr),
      var_mem_base_(nullptr),
      fixed_mem_base_(0),
      mem_base_(nullptr),
      is_inner_mem_base_(false),
      is_inner_weight_base_(false),
      is_inner_p2p_mem_base_(false),
      data_inputer_(nullptr),
      load_begin_time_(0),
      load_end_time_(0),
      time_info_(),
      dataInputTid(0),
      is_weight_mem_has_inited_(false),
      is_feature_map_mem_has_inited_(false),
      model_id_(0),
      runtime_model_id_(0),
      version_(0),
      ge_model_(nullptr),
      thread_id_(),
      listener_(listener),
      run_flg_(false),
      priority_(priority),
      rt_model_handle_(nullptr),
      rt_model_stream_(nullptr),
      is_inner_model_stream_(false),
      is_async_mode_(false),
      last_execute_mode_(INITIALIZATION),
      session_id_(0),
      device_id_(0),
      maxDumpOpNum_(0),
      data_dumper_(runtime_param_),
      iterator_count_(0),
      is_l1_fusion_enable_(false),
      is_first_execute_(true) {
  op_list_.clear();
  skt_info_ = {0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, -1, 0, nullptr};
}
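
// The destructor below releases resources in roughly the reverse order of
// Init(): dump info, saved task buffers, runtime labels/streams/events, then
// the weight/feature-map/P2P memory and the rtModel handle itself. Everything
// is wrapped in try/catch because a destructor must not throw.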
DavinciModel::~DavinciModel() {
  try {
    Status ret = data_dumper_.UnloadDumpInfo();
    if (ret != SUCCESS) {
      GELOGW("UnloadDumpInfo failed, ret: %u.", ret);
    }

    for (const auto &op_and_addr : saved_task_addrs_) {
      auto addr = op_and_addr.second;
      if (addr != nullptr) {
        GE_CHK_RT(rtFree(addr));
      }
      addr = nullptr;
    }
    saved_task_addrs_.clear();

    GE_CHK_STATUS(ModelRunStop());

    op_list_.clear();
    data_op_list_.clear();
    tensor_name_to_fixed_addr_size_.clear();
    tensor_name_to_peer_output_index_.clear();
    GE_DELETE_NEW_SINGLE(data_inputer_);

    // Check that an rt context exists: rt API calls log errors when it does not.
    rtContext_t ctx = nullptr;
    rtError_t rt_ret = rtCtxGetCurrent(&ctx);
    if (rt_ret == RT_ERROR_NONE) {
      UnbindTaskSinkStream();
      for (size_t i = 0; i < label_list_.size(); ++i) {
        if (label_list_[i] != nullptr) {
          GE_LOGW_IF(rtLabelDestroy(label_list_[i]) != RT_ERROR_NONE, "Destroy label failed, index: %zu", i);
        }
      }
      for (size_t i = 0; i < stream_list_.size(); ++i) {
        GE_LOGW_IF(rtStreamDestroy(stream_list_[i]) != RT_ERROR_NONE, "Destroy stream failed, index: %zu", i);
      }
      for (size_t i = 0; i < event_list_.size(); ++i) {
        GE_LOGW_IF(rtEventDestroy(event_list_[i]) != RT_ERROR_NONE, "Destroy event failed, index: %zu", i);
      }
      FreeWeightsMem();
      FreeFeatureMapMem();
      FreeP2PMem();
      if (l1_fusion_addr_ != nullptr) {
        GE_CHK_RT(rtFree(l1_fusion_addr_));
      }
      if (rt_model_handle_ != nullptr) {
        GE_CHK_RT(rtModelDestroy(rt_model_handle_));
        rt_model_handle_ = nullptr;
      }
    }

    OpDebugUnRegister();
    ReleaseTask();
    CleanTbeHandle();
    var_mem_base_ = nullptr;
    if (known_node_) {
      if (args_ != nullptr) {
        GE_CHK_RT(rtFree(args_));
      }
      total_io_addrs_.clear();
      if (fixed_addrs_ != nullptr) {
        GE_CHK_RT(rtFree(fixed_addrs_));
      }
    }
  } catch (...) {
    GELOGW("DavinciModel::~DavinciModel: clear op_list catch exception.");
  }
}
void DavinciModel::UnbindHcomStream() {
  if (!all_hccl_stream_list_.empty()) {
    for (size_t i = 0; i < all_hccl_stream_list_.size(); i++) {
      GE_LOGW_IF(rtModelUnbindStream(rt_model_handle_, all_hccl_stream_list_[i]) != RT_ERROR_NONE,
                 "Unbind hccl stream from model failed! Index: %zu", i);
      GE_LOGW_IF(rtStreamDestroy(all_hccl_stream_list_[i]) != RT_ERROR_NONE, "Destroy hccl stream for rt_model failed!")
    }
  }
  return;
}
void DavinciModel::ReleaseTask() {
  for (const auto &task : cpu_task_list_) {
    if (task != nullptr) {
      GE_CHK_STATUS(task->Release(), "Release task failed.");
    }
  }
  cpu_task_list_.clear();

  for (const auto &task : task_list_) {
    if (task != nullptr) {
      GE_CHK_STATUS(task->Release(), "Release task failed.");
    }
  }
}
Status DavinciModel::Assign(const GeModelPtr &ge_model) {
  if (ge_model == nullptr) {
    GELOGI("Can't assign null ge_model.");
    return FAILED;
  }
  ge_model_ = ge_model;
  return SUCCESS;
}

///
/// @ingroup ge
/// @brief Reduce memory usage after task sink.
/// @return: void
///
void DavinciModel::Shrink() {
  skt_info_ = {0, 0, 0, 0, nullptr, nullptr, {}, {}, {}, {}, {}, RT_KERNEL_DEFAULT, -1, 0, nullptr};
  ge_model_.reset();  // delete object.
}
Status DavinciModel::InitWeightMem(void *dev_ptr, void *weight_ptr, size_t weight_size) {
  if (is_weight_mem_has_inited_) {
    GELOGE(FAILED, "Call InitWeightMem more than once.");
    return FAILED;
  }
  is_weight_mem_has_inited_ = true;

  const Buffer &weights = ge_model_->GetWeight();
  std::size_t weights_size = weights.GetSize();
  GE_CHECK_LE(weights_size, ALLOC_MEMORY_MAX_SIZE);

  if ((weight_ptr != nullptr) && (weight_size < weights_size)) {
    GELOGE(FAILED, "Invalid mem param: weight_size=%zu totalsize=%zu.", weight_size, weights_size);
    return FAILED;
  }

  weights_mem_base_ = static_cast<uint8_t *>(dev_ptr);
  is_inner_weight_base_ = false;

  if (weights_size != 0) {
    weights_mem_base_ = static_cast<uint8_t *>(weight_ptr);
    is_inner_weight_base_ = false;
    if (weight_ptr == nullptr) {
      weights_mem_base_ = MallocWeightsMem(weights_size);
      if (weights_mem_base_ == nullptr) {
        GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc weight memory failed. size: %zu", weights_size);
        return ACL_ERROR_GE_MEMORY_ALLOCATION;
      }
      is_inner_weight_base_ = true;
    }
    GELOGI("[IMAS]InitWeightMem graph_%u MallocMemory type[W] memaddr[%p] mem_size[%zu]", runtime_param_.graph_id,
           weights_mem_base_, weights_size);
    GE_CHK_RT_RET(rtMemcpy(weights_mem_base_, weights_size, weights.GetData(), weights_size, RT_MEMCPY_HOST_TO_DEVICE));
    GELOGI("Copy weights data to device.");
  }

  runtime_param_.weight_base = weights_mem_base_;
  return SUCCESS;
}
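
// Ownership summary for the weight base (a reading of the branches above, not
// an authoritative contract): when the caller passes weight_ptr, the memory
// stays caller-owned (is_inner_weight_base_ == false); when weight_ptr is null
// and the model has weights, MallocWeightsMem() allocates an inner base that
// FreeWeightsMem() releases in the destructor.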
Status DavinciModel::InitFeatureMapAndP2PMem(void *dev_ptr, size_t mem_size) {
  if (is_feature_map_mem_has_inited_) {
    GELOGE(PARAM_INVALID, "Call InitFeatureMapMem more than once.");
    return PARAM_INVALID;
  }
  is_feature_map_mem_has_inited_ = true;

  std::size_t data_size = TotalMemSize();
  std::size_t p2p_data_size = P2PMemInfos().at(RT_MEMORY_P2P_DDR).memory_size;
  if ((dev_ptr != nullptr) && (mem_size < TotalMemSize())) {
    GELOGE(PARAM_INVALID, "Invalid mem param: mem_size=%zu totalsize=%zu.", mem_size, TotalMemSize());
    return PARAM_INVALID;
  }

  mem_base_ = static_cast<uint8_t *>(dev_ptr);
  p2p_mem_base_ = static_cast<uint8_t *>(dev_ptr);
  is_inner_mem_base_ = false;

  if (TotalMemSize() && mem_base_ == nullptr) {
    mem_base_ = MallocFeatureMapMem(data_size);
    if (mem_base_ == nullptr) {
      GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc feature map memory failed. size: %zu", data_size);
      return ACL_ERROR_GE_MEMORY_ALLOCATION;
    }
    GEEVENT("[IMAS]InitFeatureMapAndP2PMem graph_%u MallocMemory type[F] memaddr[%p] mem_size[%zu]",
            runtime_param_.graph_id, mem_base_, data_size);
    if (!is_inner_weight_base_) {
      weights_mem_base_ = mem_base_;
      is_inner_weight_base_ = true;
    }
    is_inner_mem_base_ = true;
  }

  if (p2p_data_size != 0) {
    p2p_mem_base_ = MallocP2PMem(p2p_data_size);
    if (p2p_mem_base_ == nullptr) {
      GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "Alloc p2p memory failed, size: %zu", p2p_data_size);
      return ACL_ERROR_GE_MEMORY_ALLOCATION;
    }
    GELOGI("InitFeatureMapAndP2PMem graph_%u MallocMemory type[F] memaddr[%p] mem_size[%zu]", runtime_param_.graph_id,
           p2p_mem_base_, p2p_data_size);
    is_inner_p2p_mem_base_ = true;
  }

  GE_CHK_STATUS_RET(InitVariableMem(), "Init variable memory failed.");
  runtime_param_.mem_base = mem_base_;
  runtime_param_.weight_base = weights_mem_base_;
  runtime_param_.memory_infos[RT_MEMORY_P2P_DDR].memory_base = p2p_mem_base_;
  return SUCCESS;
}
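
// Note the coupling with InitWeightMem(): when the feature map is allocated
// internally and no separate weight base was established, the weight base is
// aliased onto mem_base_, so the weights live inside the feature-map
// allocation rather than in a buffer of their own.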
Status DavinciModel::InitVariableMem() {
  // Malloc variable memory base.
  var_mem_base_ = VarManager::Instance(session_id_)->GetVarMemoryBase(RT_MEMORY_HBM);
  if (TotalVarMemSize() && var_mem_base_ == nullptr) {
    Status ret = VarManager::Instance(session_id_)->MallocVarMemory(TotalVarMemSize());
    if (ret != SUCCESS) {
      GELOGE(ret, "Malloc variable memory failed.");
      return ret;
    }
    var_mem_base_ = VarManager::Instance(session_id_)->GetVarMemoryBase(RT_MEMORY_HBM);
    GEEVENT("[IMAS]InitVariableMem graph_%u MallocMemory type[V] memaddr[%p] mem_size[%zu]", runtime_param_.graph_id,
            var_mem_base_, TotalVarMemSize());
  }
  runtime_param_.var_base = var_mem_base_;
  return SUCCESS;
}

void DavinciModel::InitRuntimeParams() {
  int64_t value = 0;
  bool ret;
  MemInfo p2p_mem_info;

  ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_MEMORY_SIZE, value);
  runtime_param_.mem_size = ret ? (uint64_t)value : 0;
  ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_WEIGHT_SIZE, value);
  runtime_param_.weight_size = ret ? (uint64_t)value : 0;
  ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_STREAM_NUM, value);
  runtime_param_.stream_num = ret ? (uint32_t)value : 0;
  ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_EVENT_NUM, value);
  runtime_param_.event_num = ret ? (uint32_t)value : 0;
  ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_LABEL_NUM, value);
  runtime_param_.label_num = ret ? (uint32_t)value : 0;
  ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_BATCH_NUM, value);
  runtime_param_.batch_num = ret ? (uint32_t)value : 0;
  ret = ge::AttrUtils::GetInt(ge_model_, MODEL_ATTR_TASK_GEN_BASE_ADDR, value);
  runtime_param_.logic_mem_base = ret ? (uint64_t)value : 0;
  ret = ge::AttrUtils::GetInt(ge_model_, MODEL_ATTR_TASK_GEN_WEIGHT_ADDR, value);
  runtime_param_.logic_weight_base = ret ? (uint64_t)value : 0;
  ret = ge::AttrUtils::GetInt(ge_model_, ge::MODEL_ATTR_SESSION_ID, value);
  runtime_param_.session_id = ret ? (uint64_t)value : 0;
  ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_TASK_GEN_VAR_ADDR, value);
  runtime_param_.logic_var_base = ret ? (uint64_t)value : 0;
  ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_VAR_SIZE, value);
  runtime_param_.var_size = ret ? (uint64_t)value : 0;
  session_id_ = runtime_param_.session_id;
  ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_P2P_MEMORY_SIZE, value);
  p2p_mem_info.memory_size = ret ? (uint64_t)value : 0;
  runtime_param_.memory_infos[RT_MEMORY_P2P_DDR] = std::move(p2p_mem_info);

  GELOGI(
      "InitRuntimeParams(), session_id:%lu, stream_num:%u, event_num:%u, label_num:%u, "
      "logic_mem_base:0x%lx, logic_weight_base:0x%lx, logic_var_base:0x%lx, "
      "memory_size:%lu, weight_size:%lu, var_size:%lu",
      runtime_param_.session_id, runtime_param_.stream_num, runtime_param_.event_num, runtime_param_.label_num,
      runtime_param_.logic_mem_base, runtime_param_.logic_weight_base, runtime_param_.logic_var_base,
      runtime_param_.mem_size, runtime_param_.weight_size, runtime_param_.var_size);
}
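
// Every runtime parameter above follows the same idiom: read the model
// attribute and fall back to 0 when it is absent. A condensed sketch of that
// idiom (hypothetical helper, not part of this file):
//   uint64_t GetU64AttrOrZero(const GeModelPtr &m, const std::string &key) {
//     int64_t v = 0;
//     return ge::AttrUtils::GetInt(m, key, v) ? static_cast<uint64_t>(v) : 0;
//   }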
void DavinciModel::CheckHasHcomOp() {
  Graph graph = ge_model_->GetGraph();
  auto compute_graph = GraphUtils::GetComputeGraph(graph);
  if (compute_graph == nullptr) {
    return;
  }
  for (const auto &node : compute_graph->GetAllNodes()) {
    OpDescPtr op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(op_desc == nullptr, GELOGW("Node OpDesc is nullptr"); continue);
    GE_IF_BOOL_EXEC(((op_desc->GetType() == HCOMBROADCAST) || (op_desc->GetType() == HCOMALLGATHER) ||
                     (op_desc->GetType() == HCOMALLREDUCE) || (op_desc->GetType() == HCOMSEND) ||
                     (op_desc->GetType() == HCOMRECEIVE) || (op_desc->GetType() == HCOMREDUCESCATTER) ||
                     (op_desc->GetType() == HVDCALLBACKALLREDUCE) || (op_desc->GetType() == HVDCALLBACKALLGATHER) ||
                     (op_desc->GetType() == HVDCALLBACKBROADCAST) || (op_desc->GetType() == HVDWAIT) ||
                     (op_desc->GetType() == HCOMREDUCE)),
                    uint32_t stream_id = static_cast<uint32_t>(op_desc->GetStreamId());
                    (void)hcom_streams_.emplace(stream_id); GELOGD("hcom stream: %u.", stream_id); continue);
  }
}
///
/// @ingroup ge
/// @brief Make active stream list and bind to model.
/// @return: 0 for success / others for fail
///
Status DavinciModel::BindModelStream() {
  // Streams not in active_stream_indication_ are active streams.
  is_stream_list_bind_ = false;
  if ((!input_queue_ids_.empty() || !output_queue_ids_.empty()) || (deploy_type_ == AICPU_DEPLOY_CROSS_THREAD)) {
    for (size_t i = 0; i < stream_list_.size(); ++i) {
      if (active_stream_indication_.count(i) == 0) {
        active_stream_list_.push_back(stream_list_[i]);
        active_stream_indication_.insert(i);  // deactivate all model streams.
      }
    }
  }

  for (size_t i = 0; i < stream_list_.size(); ++i) {
    if (active_stream_indication_.count(i) > 0) {
      GELOGI("rtModelBindStream[%zu]", i);
      GE_CHK_RT_RET(rtModelBindStream(rt_model_handle_, stream_list_[i], RT_INVALID_FLAG));
    } else {
      // Bind rt_model_handle_ to all streams that relate to ops.
      GE_CHK_RT_RET(rtModelBindStream(rt_model_handle_, stream_list_[i], RT_HEAD_STREAM));
    }
  }
  is_stream_list_bind_ = true;
  return SUCCESS;
}
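
// A reading of the flags above: with queue-based input/output (or cross-thread
// AICPU deployment) every stream is added to active_stream_indication_ first,
// so all streams end up bound with RT_INVALID_FLAG and execution is driven
// externally; otherwise the streams outside the indication set are bound as
// RT_HEAD_STREAM for the runtime to launch directly.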
Status DavinciModel::DoTaskSink() {
  // Task sink is supported only when model_task_def is set.
  const auto &model_task_def = ge_model_->GetModelTaskDefPtr();
  if (model_task_def == nullptr) {
    return SUCCESS;
  }

  GE_CHK_RT_RET(rtGetAicpuDeploy(&deploy_type_));
  GELOGI("Do task sink. AiCpu deploy type is: %x.", deploy_type_);

  GE_CHK_STATUS_RET(BindModelStream(), "Bind model stream failed.");

  if (known_node_) {
    GE_CHK_STATUS_RET(MallocKnownArgs(), "Malloc known node args failed.");
  }

  GE_CHK_STATUS_RET(InitTaskInfo(*model_task_def.get()), "InitTaskInfo failed.");
  GE_CHK_STATUS_RET(ModelManager::GetInstance()->LaunchCustAicpuSo(), "Launch cust aicpu so failed.");
  GE_CHK_STATUS_RET(ModelManager::GetInstance()->CheckAicpuOpList(ge_model_), "Check aicpu op type failed.");
  GE_CHK_STATUS_RET(InitEntryTask(), "InitEntryTask failed.");
  GE_CHK_STATUS_RET(DistributeTask(), "Distribute failed.");
  GE_CHK_RT_RET(rtModelLoadComplete(rt_model_handle_));
  SetCopyOnlyOutput();
  return SUCCESS;
}
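
// Sink order matters here: streams are bound before task-info structures are
// built against them, tasks are distributed only after the entry task exists,
// and rtModelLoadComplete() is the final handshake telling the runtime the
// sunk model is ready to execute.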
// Set the device to use aicore (0) or vectorcore (1).
Status DavinciModel::SetTSDevice() {
  int64_t value = 0;
  bool ret = ge::AttrUtils::GetInt(ge_model_, ATTR_MODEL_CORE_TYPE, value);
  uint32_t core_type = ret ? static_cast<uint32_t>(value) : 0;
  GELOGD("SetTSDevice: %u", core_type);
  rtError_t rt_ret = rtSetTSDevice(core_type);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "SetTSDevice failed, ret: 0x%X", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  return SUCCESS;
}
Status DavinciModel::OpDebugRegister() {
  bool is_op_debug = false;
  (void)ge::AttrUtils::GetBool(ge_model_, ATTR_OP_DEBUG_FLAG, is_op_debug);
  GELOGD("The value of op_debug in ge_model_ is %d.", is_op_debug);
  if (is_op_debug) {
    debug_reg_mutex_.lock();
    rtError_t rt_ret = rtMalloc(&op_debug_addr_, kOpDebugMemorySize, RT_MEMORY_DDR);
    if (rt_ret != RT_ERROR_NONE) {
      GELOGE(RT_FAILED, "rtMalloc error, ret: 0x%X", rt_ret);
      return RT_ERROR_TO_GE_STATUS(rt_ret);
    }

    uint64_t debug_addrs_tmp = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(op_debug_addr_));
    // For data dump, aicpu needs the pointer-to-pointer that saves the real debug address.
    rt_ret = rtMalloc(&p2p_debug_addr_, kDebugP2pSize, RT_MEMORY_HBM);
    if (rt_ret != RT_ERROR_NONE) {
      GELOGE(RT_FAILED, "rtMalloc error, ret: 0x%X", rt_ret);
      return RT_ERROR_TO_GE_STATUS(rt_ret);
    }
    rt_ret = rtMemcpy(p2p_debug_addr_, sizeof(uint64_t), &debug_addrs_tmp, sizeof(uint64_t), RT_MEMCPY_HOST_TO_DEVICE);
    if (rt_ret != RT_ERROR_NONE) {
      GELOGE(RT_FAILED, "rtMemcpy to p2p_addr error: 0x%X", rt_ret);
      return RT_ERROR_TO_GE_STATUS(rt_ret);
    }

    uint32_t op_debug_mode = 0;
    (void)ge::AttrUtils::GetInt(ge_model_, ATTR_OP_DEBUG_MODE, op_debug_mode);
    GELOGD("The value of op_debug_mode in ge_model_ is %u.", op_debug_mode);
    uint32_t debug_task_id = 0;
    uint32_t debug_stream_id = 0;
    rt_ret = rtDebugRegister(rt_model_handle_, op_debug_mode, op_debug_addr_, &debug_stream_id, &debug_task_id);
    if (rt_ret != RT_ERROR_NONE) {
      GELOGE(RT_FAILED, "rtDebugRegister error, ret: 0x%X", rt_ret);
      return RT_ERROR_TO_GE_STATUS(rt_ret);
    }
    GELOGI("debug_task_id:%u, debug_stream_id:%u", debug_task_id, debug_stream_id);
    is_op_debug_reg_ = true;
    data_dumper_.SaveOpDebugId(debug_task_id, debug_stream_id, p2p_debug_addr_, is_op_debug);
  }
  return SUCCESS;
}
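
// Lifetime note: debug_reg_mutex_ is locked here and released only in
// OpDebugUnRegister() below, so the op-debug registration window stays guarded
// for the whole time the debug buffers are live.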
void DavinciModel::OpDebugUnRegister() {
  if (is_op_debug_reg_) {
    debug_reg_mutex_.unlock();
    rtError_t rt_ret = RT_ERROR_NONE;
    if (rt_model_handle_ != nullptr) {
      GELOGD("Start call debug_unregister.");
      rt_ret = rtDebugUnRegister(rt_model_handle_);
      if (rt_ret != RT_ERROR_NONE) {
        GELOGW("rtDebugUnRegister failed, ret: 0x%X", rt_ret);
      }
    }
    if (op_debug_addr_ != nullptr) {
      rt_ret = rtFree(op_debug_addr_);
      if (rt_ret != RT_ERROR_NONE) {
        GELOGW("rtFree failed, ret: 0x%X", rt_ret);
      }
      op_debug_addr_ = nullptr;
    }
    if (p2p_debug_addr_ != nullptr) {
      rt_ret = rtFree(p2p_debug_addr_);
      if (rt_ret != RT_ERROR_NONE) {
        GELOGW("rtFree failed, ret: 0x%X", rt_ret);
      }
      p2p_debug_addr_ = nullptr;
    }
    is_op_debug_reg_ = false;
  }
  return;
}
// Initialize the op sequence and call each op's initialization function.
Status DavinciModel::Init(void *dev_ptr, size_t mem_size, void *weight_ptr, size_t weight_size) {
  // Validate params.
  GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(priority_ < 0 || priority_ > 7, return PARAM_INVALID,
                                 "Priority must be between 0 and 7, now is %d", priority_);
  GE_CHK_BOOL_RET_STATUS(ge_model_ != nullptr, PARAM_INVALID, "GeModel is null.");
  Graph graph = ge_model_->GetGraph();
  ComputeGraphPtr compute_graph = GraphUtils::GetComputeGraph(graph);
  GE_CHK_BOOL_RET_STATUS(compute_graph != nullptr, INTERNAL_ERROR, "Get compute graph is nullptr.");

  // Initialize runtime_param_.
  InitRuntimeParams();

  // RTS set aicore or vectorcore.
  GE_CHK_STATUS_RET(SetTSDevice(), "SetTSDevice failed");

  version_ = ge_model_->GetVersion();
  name_ = ge_model_->GetName();
  (void)ge::AttrUtils::GetBool(ge_model_, ATTR_NAME_SWITCH_FOR_L1_FUSION, is_l1_fusion_enable_);
  GELOGD("The value of ge.l1Fusion in ge_model_ is %d.", is_l1_fusion_enable_);
  CheckHasHcomOp();

  vector<int64_t> huge_stream_list;
  (void)ge::AttrUtils::GetListInt(ge_model_, ATTR_MODEL_HUGE_STREAM_LIST, huge_stream_list);
  std::set<int64_t> huge_streams(huge_stream_list.begin(), huge_stream_list.end());

  for (uint32_t i = 0; i < StreamNum(); i++) {
    rtStream_t stream = nullptr;
    GE_MAKE_GUARD_RTSTREAM(stream);

    uint32_t stream_flags = RT_STREAM_PERSISTENT;
    if (huge_streams.find(i) != huge_streams.end()) {
      GELOGI("Stream %u is huge stream.", i);
      stream_flags |= RT_STREAM_HUGE;
    }

    if (hcom_streams_.find(i) != hcom_streams_.end()) {
      GE_CHK_RT_RET(rtStreamCreateWithFlags(&stream, priority_, stream_flags | RT_STREAM_FORCE_COPY));
    } else {
      GE_CHK_RT_RET(rtStreamCreateWithFlags(&stream, priority_, stream_flags));
    }

    GE_DISMISS_GUARD(stream);
    stream_list_.push_back(stream);
    int32_t rt_stream_id = kInvalidStream;
    (void)rtGetStreamId(stream, &rt_stream_id);
    GELOGI("Logical stream index:%u, stream:%p, rtstream: %d.", i, stream, rt_stream_id);
  }

  for (uint32_t i = 0; i < EventNum(); i++) {
    rtEvent_t rt_event;
    GE_CHK_RT_RET(rtEventCreate(&rt_event));
    event_list_.push_back(rt_event);
  }

  label_list_.resize(LabelNum(), nullptr);

  // Create model_handle to load model.
  GE_CHK_RT_RET(rtModelCreate(&rt_model_handle_, 0));
  GE_CHK_RT_RET(rtModelGetId(rt_model_handle_, &runtime_model_id_));

  // Inference will use default graph_id 0.
  runtime_param_.graph_id = compute_graph->GetGraphID();

  // Op debug register.
  GE_CHK_STATUS_RET(OpDebugRegister(), "OpDebugRegister failed");

  GE_TIMESTAMP_START(TransAllVarData);
  GE_CHK_STATUS_RET(TransAllVarData(compute_graph, runtime_param_.graph_id), "TransAllVarData failed.");
  GE_TIMESTAMP_END(TransAllVarData, "GraphLoader::TransAllVarData");
  GE_CHK_STATUS_RET(TransVarDataUtils::CopyVarData(compute_graph, session_id_, device_id_), "Copy var data failed.");

  GE_TIMESTAMP_START(InitModelMem);
  GELOGD("Known node is %d", known_node_);
  GE_CHK_STATUS_RET_NOLOG(InitWeightMem(dev_ptr, weight_ptr, weight_size));
  if (!known_node_) {
    GE_CHK_STATUS_RET_NOLOG(InitFeatureMapAndP2PMem(dev_ptr, mem_size));
    data_inputer_ = new (std::nothrow) DataInputer();
    GE_CHK_BOOL_RET_STATUS(data_inputer_ != nullptr, MEMALLOC_FAILED, "data_inputer_ is nullptr.");
  }
  fixed_mem_base_ = reinterpret_cast<uintptr_t>(mem_base_);
  GE_TIMESTAMP_END(InitModelMem, "GraphLoader::InitModelMem");

  for (const ge::NodePtr &node : compute_graph->GetDirectNode()) {
    auto op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(op_desc == nullptr, continue);
    GE_IF_BOOL_EXEC(op_desc->GetType() != VARIABLE, continue);
    GE_IF_BOOL_EXEC(IsBroadCastOpData(node),
                    (void)ge::AttrUtils::SetStr(op_desc, VAR_ATTR_VAR_IS_BROADCAST, "var_is_restore"););
  }

  GE_CHK_STATUS_RET(InitNodes(compute_graph), "Init nodes failed");

  SetDataDumperArgs(compute_graph);
  GE_TIMESTAMP_START(DoTaskSink);
  GE_CHK_STATUS_RET(DoTaskSink(), "Task sink failed");
  GE_TIMESTAMP_END(DoTaskSink, "GraphLoader::DoTaskSink");

  auto all_dump_model = GetDumpProperties().GetAllDumpModel();
  bool findByOmName = all_dump_model.find(om_name_) != all_dump_model.end();
  bool findByModelName = all_dump_model.find(name_) != all_dump_model.end();
  bool dump_l1fusion_op =
      (all_dump_model.find(ge::DUMP_ALL_MODEL) != all_dump_model.end()) || findByOmName || findByModelName;
  if (dump_l1fusion_op) {
    // Malloc 2M for dumping l1fusion ops.
    GE_CHK_RT_RET(rtMalloc(&l1_fusion_addr_, kDumpL1FusionOpMByteSize, RT_MEMORY_DDR));
    // Send the l1fusion dump addr to rts.
    GE_CHK_RT_RET(rtDumpAddrSet(rt_model_handle_, l1_fusion_addr_, kDumpL1FusionOpMByteSize, kDumpFlagOfL1Fusion));
  }

  /// In a zero copy model, if an aicpu operator is connected to the first or last layer, then before model execution
  /// the aicpu operator needs to destroy its history record and update the operator memory address.
  /// The model with specified aicpu operators is only marked here; destruction is done in ModelManager::ExecuteModel().
  need_destroy_aicpu_kernel_ = IsAicpuKernelConnectSpecifiedLayer();
  (void)ge::AttrUtils::GetListStr(ge_model_, ATTR_MODEL_OUT_NODES_NAME, out_node_name_);

  string fp_ceiling_mode;
  if (ge::AttrUtils::GetStr(ge_model_, ATTR_FP_CEILING_MODE, fp_ceiling_mode)) {
    GELOGI("Get attr ATTR_FP_CEILING_MODE from model, value is %s.", fp_ceiling_mode.c_str());
    // Mode 0: do not perform saturation processing; IEEE754 is used by default.
    GE_CHK_RT_RET(rtSetCtxINFMode((fp_ceiling_mode != "0")));
  }

  // Collect profiling for ge.
  GE_CHK_STATUS_RET(InitModelProfile(), "Init model profile failed");
  auto &profiling_manager = ProfilingManager::Instance();
  if (profiling_manager.ProfilingModelLoadOn()) {
    Status p_ret = ReportProfilingData();
    if (p_ret != SUCCESS) {
      GELOGE(p_ret, "Report profiling data failed.");
      return p_ret;
    }
  }

  Shrink();
  return SUCCESS;
}
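
// Load sequence at a glance: validate -> runtime params -> streams/events/
// labels -> rtModel handle -> op-debug -> variable transfer -> memory bases ->
// per-node init -> task sink -> dump/profiling setup -> Shrink() to drop the
// GeModel once everything has been sunk to the device.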
Status DavinciModel::ReportProfilingData() {
  std::vector<ComputeGraphDescInfo> compute_graph_desc_info;
  Status ret = GetComputeGraphInfo(compute_graph_desc_info);
  if (ret != SUCCESS) {
    GELOGE(ret, "GetComputeGraphInfo failed.");
    return ret;
  }
  ProfilingManager::Instance().ReportProfilingData(model_id_, GetTaskDescInfo(), compute_graph_desc_info);
  GE_CHK_STATUS(SinkModelProfile(), "Sink model profiler failed.");
  op_list_.clear();
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief Traverse all nodes and determine if destruction is required.
/// @return bool
///
bool DavinciModel::IsAicpuKernelConnectSpecifiedLayer() {
  Graph graph = ge_model_->GetGraph();
  ComputeGraphPtr compute_graph = GraphUtils::GetComputeGraph(graph);
  auto all_nodes = compute_graph->GetAllNodes();
  for (auto &node : all_nodes) {
    GE_IF_BOOL_EXEC(node == nullptr, continue);
    OpDescPtr op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(op_desc == nullptr, continue);

    int64_t imply_type = -1;
    (void)ge::AttrUtils::GetInt(op_desc, ATTR_NAME_IMPLY_TYPE, imply_type);
    if (imply_type != static_cast<int64_t>(domi::ImplyType::AI_CPU)) {
      continue;
    }
    GELOGD("Current operator imply type is %ld, name is %s.", imply_type, op_desc->GetName().c_str());

    for (auto &in_data_anchor : node->GetAllInDataAnchors()) {
      GE_IF_BOOL_EXEC(in_data_anchor == nullptr, continue);
      auto peer_out_data_anchor = in_data_anchor->GetPeerOutAnchor();
      GE_IF_BOOL_EXEC(peer_out_data_anchor == nullptr, continue);
      auto peer_node = peer_out_data_anchor->GetOwnerNode();
      GE_IF_BOOL_EXEC(peer_node == nullptr, continue);
      auto peer_op_desc = peer_node->GetOpDesc();
      GE_IF_BOOL_EXEC(peer_op_desc == nullptr, continue);
      if (IsDataOp(peer_op_desc->GetType())) {
        GELOGI("Mark specified aicpu operator connected to data.");
        return true;
      }
    }
    for (auto &out_data_anchor : node->GetAllOutDataAnchors()) {
      GE_IF_BOOL_EXEC(out_data_anchor == nullptr, continue);
      auto peer_in_data_anchors = out_data_anchor->GetPeerInDataAnchors();
      for (auto &peer_in_data_anchor : peer_in_data_anchors) {
        GE_IF_BOOL_EXEC(peer_in_data_anchor == nullptr, continue);
        auto peer_node = peer_in_data_anchor->GetOwnerNode();
        GE_IF_BOOL_EXEC(peer_node == nullptr, continue);
        auto peer_op_desc = peer_node->GetOpDesc();
        GE_IF_BOOL_EXEC(peer_op_desc == nullptr, continue);
        if (peer_op_desc->GetType() == NETOUTPUT) {
          GELOGI("Mark specified aicpu operator connected to netoutput.");
          return true;
        }
      }
    }
  }
  return false;
}
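
// The check answers one question: does any AI_CPU-implemented node read
// directly from a Data op or feed directly into NETOUTPUT? Either makes the
// model one whose aicpu kernels need their history destroyed before each
// execution (see the zero-copy comment in Init() above).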
Status DavinciModel::UpdateSessionId(uint64_t session_id) {
  GE_CHECK_NOTNULL(ge_model_);
  if (!AttrUtils::SetInt(ge_model_, MODEL_ATTR_SESSION_ID, static_cast<int64_t>(session_id))) {
    GELOGW("Set attr[%s] failed in updating session_id.", MODEL_ATTR_SESSION_ID.c_str());
  }
  GELOGD("Update session id: %lu.", session_id);
  return SUCCESS;
}
  707. ///
  708. /// @ingroup ge
  709. /// @brief Travel all nodes and do some init.
  710. /// @param [in] compute_graph: ComputeGraph to load.
  711. /// @return Status
  712. ///
  713. Status DavinciModel::InitNodes(const ComputeGraphPtr &compute_graph) {
  714. uint32_t data_op_index = 0;
  715. GE_TIMESTAMP_CALLNUM_START(LoadTBEKernelBinToOpDesc);
  716. GE_TIMESTAMP_CALLNUM_START(InitTbeHandle);
  717. typedef Status (DavinciModel::*OpDescCall)(const OpDescPtr &);
  718. static std::map<std::string, OpDescCall> op_desc_handle = {
  719. {VARIABLE, &DavinciModel::InitVariable},
  720. {CONSTANTOP, &DavinciModel::InitConstant},
  721. {STREAMACTIVE, &DavinciModel::InitStreamActive},
  722. {STREAMSWITCH, &DavinciModel::InitStreamSwitch},
  723. {STREAMSWITCHN, &DavinciModel::InitStreamSwitchN},
  724. {LABELSET, &DavinciModel::InitLabelSet},
  725. {CASE, &DavinciModel::InitCase},
  726. };
  727. vector<OpDescPtr> output_op_list;
  728. map<uint32_t, OpDescPtr> data_by_index;
  729. auto nodes = compute_graph->GetAllNodes();
  730. const CustAICPUKernelStore &aicpu_kernel_store = ge_model_->GetCustAICPUKernelStore();
  731. for (size_t i = 0; i < nodes.size(); ++i) {
  732. auto node = nodes.at(i);
  733. auto op_desc = node->GetOpDesc();
  734. if (op_desc == nullptr) {
  735. GELOGE(PARAM_INVALID, "op_desc is null.");
  736. return PARAM_INVALID;
  737. }
  738. op_list_[op_desc->GetId()] = op_desc;
  739. GE_TIMESTAMP_RESTART(LoadTBEKernelBinToOpDesc);
  740. aicpu_kernel_store.LoadCustAICPUKernelBinToOpDesc(op_desc);
  741. GE_TIMESTAMP_ADD(LoadTBEKernelBinToOpDesc);
  742. if (IsDataOp(op_desc->GetType())) {
  743. if (InitDataOp(compute_graph, node, data_op_index, data_by_index) != SUCCESS) {
  744. GELOGE(PARAM_INVALID, "Data init failed, Name: %s", op_desc->GetName().c_str());
  745. return PARAM_INVALID;
  746. }
  747. data_dumper_.SaveDumpInput(node);
  748. continue;
  749. }
  750. if (op_desc->GetType() == NETOUTPUT) {
  751. if (InitNetOutput(compute_graph, node, output_op_list) != SUCCESS) {
  752. GELOGE(PARAM_INVALID, "NetOutput init failed, Name: %s", op_desc->GetName().c_str());
  753. return PARAM_INVALID;
  754. }
  755. continue;
  756. }
  757. auto it = op_desc_handle.find(op_desc->GetType());
  758. if (it != op_desc_handle.end()) {
  759. if ((this->*it->second)(op_desc) != SUCCESS) {
  760. GELOGE(PARAM_INVALID, "NetOutput init failed, Name: %s", op_desc->GetName().c_str());
  761. return PARAM_INVALID;
  762. }
  763. continue;
  764. }
  765. // for dynamic shape with control flow
  766. SetLabelForDynamic(node);
  767. if (IsNoTaskAndDumpNeeded(op_desc)) {
  768. GELOGD("node[%s] without task, and save op_desc and addr for dump", op_desc->GetName().c_str());
  769. const RuntimeParam &rts_param = GetRuntimeParam();
  770. const vector<void *> input_data_addrs = ModelUtils::GetInputDataAddrs(rts_param, op_desc);
  771. const vector<void *> output_data_addrs = ModelUtils::GetOutputDataAddrs(rts_param, op_desc);
  772. const vector<void *> workspace_data_addrs = ModelUtils::GetWorkspaceDataAddrs(rts_param, op_desc);
  773. vector<void *> tensor_device_addrs;
  774. tensor_device_addrs.insert(tensor_device_addrs.end(), input_data_addrs.begin(), input_data_addrs.end());
  775. tensor_device_addrs.insert(tensor_device_addrs.end(), output_data_addrs.begin(), output_data_addrs.end());
  776. tensor_device_addrs.insert(tensor_device_addrs.end(), workspace_data_addrs.begin(), workspace_data_addrs.end());
  777. void *addr = nullptr;
  778. auto size = kAddrLen * tensor_device_addrs.size();
  779. GE_CHK_RT_RET(rtMalloc(&addr, size, RT_MEMORY_HBM));
  780. rtError_t rt_ret = rtMemcpy(addr, size, tensor_device_addrs.data(), size, RT_MEMCPY_HOST_TO_DEVICE);
  781. if (rt_ret != RT_ERROR_NONE) {
  782. GELOGE(RT_FAILED, "rtMemcpy error, ret: 0x%X", rt_ret);
  783. GE_CHK_RT(rtFree(addr));
  784. return RT_ERROR_TO_GE_STATUS(rt_ret);
  785. }
  786. saved_task_addrs_.emplace(op_desc, addr);
  787. }
  788. GE_TIMESTAMP_RESTART(InitTbeHandle);
  789. uint32_t run_mode = static_cast<uint32_t>(domi::ImplyType::INVALID);
  790. if (AttrUtils::GetInt(op_desc, ATTR_NAME_IMPLY_TYPE, run_mode) &&
  791. run_mode == static_cast<uint32_t>(domi::ImplyType::TVM)) {
  792. // Skip no_task operator, such as concat and split.
  793. bool attr_notask = false;
  794. bool get_attr_notask_flag = ge::AttrUtils::GetBool(op_desc, ATTR_NAME_NOTASK, attr_notask);
  795. GE_IF_BOOL_EXEC(get_attr_notask_flag && attr_notask,
  796. GELOGI("Node[name:%s, type:%s] does not generate task, skip initialization.",
  797. op_desc->GetName().c_str(), op_desc->GetType().c_str());
  798. continue;);
  799. Status status = InitTbeHandle(op_desc);
  800. if (status != SUCCESS) {
  801. GELOGE(status, "TBE init failed. %s", op_desc->GetName().c_str());
  802. return status;
  803. }
  804. }
  805. GE_TIMESTAMP_ADD(InitTbeHandle);
  806. }
  807. GE_TIMESTAMP_CALLNUM_END(LoadTBEKernelBinToOpDesc, "GraphLoader::LoadTBEKernelBinToOpDesc.");
  808. GE_TIMESTAMP_CALLNUM_END(InitTbeHandle, "GraphLoader::InitTbeHandle.");
  809. return OptInputOutputInfo(data_by_index, output_op_list);
  810. }
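// Note: the op_desc_handle lookup above dispatches per-type initializers (e.g. InitLabelSet,
// InitVariable) through pointers to member functions. A minimal standalone sketch of the
// idiom, with purely illustrative names:
//
//   class Model {
//    public:
//     Status InitNode(const OpDescPtr &op) {
//       auto it = handlers_.find(op->GetType());
//       return (it == handlers_.end()) ? SUCCESS : (this->*it->second)(op);
//     }
//    private:
//     Status InitVariable(const OpDescPtr &op) { return SUCCESS; }
//     std::map<std::string, Status (Model::*)(const OpDescPtr &)> handlers_ = {
//         {"Variable", &Model::InitVariable}};
//   };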
  811. void DavinciModel::SetLabelForDynamic(const NodePtr &node) {
  812. if (known_node_ && node->GetOpDesc()->GetType() == LABELSWITCHBYINDEX) {
  813. for (auto &in_data_anchor : node->GetAllInDataAnchors()) {
  814. auto peer_out_data_anchor = in_data_anchor->GetPeerOutAnchor();
  815. if (peer_out_data_anchor != nullptr) {
  816. string tensor_name = node->GetName();
  817. auto peer_node = peer_out_data_anchor->GetOwnerNode();
  818. (void)AttrUtils::SetStr(peer_node->GetOpDesc(), ATTR_DYNAMIC_SHAPE_FIXED_ADDR, tensor_name);
  819. (void)AttrUtils::SetInt(peer_node->GetOpDesc(), ATTR_DYNAMIC_SHAPE_FIXED_ADDR_INDEX, 0);
  820. tensor_name_to_peer_output_index_[tensor_name] = 0;
  821. }
  822. }
  823. }
  824. }
  825. ///
  826. /// @ingroup ge
  827. /// @brief Data Op Initialize.
  828. /// @param [in] ComputeGraphPtr: root graph of the model.
  829. /// @param [in] NodePtr: Data Op.
830. /// @param [in/out] data_op_index: index of current Data count.
  831. /// @param [in/out] data_by_index: Data ordered by index.
  832. /// @return Status
  833. ///
  834. Status DavinciModel::InitDataOp(const ComputeGraphPtr &graph, const NodePtr &node, uint32_t &data_op_index,
  835. map<uint32_t, OpDescPtr> &data_by_index) {
836. // op_desc checked by Init: Data op, guaranteed valid.
  837. auto op_desc = node->GetOpDesc();
  838. if (node->GetOwnerComputeGraph() != graph) {
  839. GELOGI("Skip subgraph Data node: %s.", op_desc->GetName().c_str());
  840. return SUCCESS;
  841. }
  842. GELOGI("Init Data node: %s.", op_desc->GetName().c_str());
  843. auto data_index = data_op_index++;
  844. if (AttrUtils::GetInt(op_desc, ATTR_NAME_INDEX, data_index)) {
  845. GELOGD("Get new index %u, old %u", data_index, data_op_index - 1);
  846. }
  847. data_by_index[data_index] = op_desc;
  848. data_op_list_.push_back(op_desc);
  849. if (known_node_) {
  850. return SUCCESS;
  851. }
  852. // Make information for copy input data.
  853. const vector<int64_t> output_size_list = ModelUtils::GetOutputSize(op_desc);
  854. const vector<void *> virtual_addr_list = ModelUtils::GetOutputDataAddrs(runtime_param_, op_desc);
  855. const vector<int64_t> output_offset_list = op_desc->GetOutputOffset();
  856. if (output_size_list.empty() || virtual_addr_list.empty() || (output_size_list.size() != virtual_addr_list.size()) ||
  857. (output_offset_list.size() != virtual_addr_list.size())) {
  858. GELOGE(PARAM_INVALID, "Data[%s] init failed: output size is %zu, virtual_addr size is %zu, offset size is %zu.",
  859. op_desc->GetName().c_str(), output_size_list.size(), virtual_addr_list.size(), output_offset_list.size());
  860. return PARAM_INVALID;
  861. }
  862. bool fusion_flag = false;
  863. ZeroCopyOffset zero_copy_offset;
  864. int64_t data_size = output_size_list[kDataIndex];
  865. void *virtual_addr = virtual_addr_list[kDataIndex];
  866. Status ret = zero_copy_offset.InitInputDataInfo(data_size, virtual_addr, op_desc, fusion_flag);
  867. if (ret != SUCCESS) {
  868. GELOGE(PARAM_INVALID, "InitDataInfo of input_info %s failed.", op_desc->GetName().c_str());
  869. return PARAM_INVALID;
  870. }
  871. new_input_data_info_[data_index] = zero_copy_offset;
  872. for (size_t index = 0; index < virtual_addr_list.size(); ++index) {
  873. void *addr = virtual_addr_list.at(index);
  874. if (new_input_outside_addrs_.find(addr) != new_input_outside_addrs_.end()) {
  875. continue;
  876. }
  877. zero_copy_offset.SetInputOutsideAddrs(output_offset_list, addr, index, fusion_flag, real_virtual_addrs_);
  878. new_input_outside_addrs_[addr] = zero_copy_offset;
  879. }
  880. return SUCCESS;
  881. }
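// Note: the Data index defaults to encounter order (data_op_index) but is overridden by
// ATTR_NAME_INDEX when present, so data_by_index ends up keyed by the user-visible input
// index. A small worked example with hypothetical nodes:
//
//   // visit data_a (ATTR_NAME_INDEX = 1), then data_b (ATTR_NAME_INDEX = 0)
//   // -> data_by_index = { {0, data_b}, {1, data_a} }
//   // OptInputOutputInfo later rebuilds data_op_list_ in key order, so data_b becomes input 0.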
  882. ///
  883. /// @ingroup ge
884. /// @brief Sort Data op list by index and collect input/output address info.
  885. /// @param [in] data_by_index: map of Data Op.
  886. /// @param [in] output_op_list: list of NetOutput op.
  887. /// @return Status
  888. ///
  889. Status DavinciModel::OptInputOutputInfo(const map<uint32_t, OpDescPtr> &data_by_index,
  890. const vector<OpDescPtr> &output_op_list) {
  891. GELOGD("Data node size: %zu, NetOutput node size: %zu", data_op_list_.size(), output_op_list.size());
  892. if (data_by_index.size() != data_op_list_.size()) {
  893. GELOGE(INTERNAL_ERROR, "Data map size: %zu, Data list size: %zu.", data_by_index.size(), data_op_list_.size());
  894. return INTERNAL_ERROR;
  895. }
  896. data_op_list_.clear();
  897. for (auto &item : data_by_index) {
  898. data_op_list_.emplace_back(item.second);
  899. auto output_addrs = ModelUtils::GetOutputDataAddrs(runtime_param_, item.second);
  900. GELOGD("Data node: %s, output addr size: %zu", item.second->GetName().c_str(), output_addrs.size());
  901. input_addrs_list_.emplace_back(output_addrs);
  902. if (item.second->GetType() == AIPP_DATA_TYPE) {
  903. GELOGI("This is dynamic aipp model, Node: %s", item.second->GetName().c_str());
  904. is_dynamic_aipp_ = true;
  905. }
  906. }
  907. for (const auto &op_desc : output_op_list) {
  908. auto input_addrs = ModelUtils::GetInputDataAddrs(runtime_param_, op_desc);
  909. GELOGD("NetOutput node: %s, input addr size: %zu", op_desc->GetName().c_str(), input_addrs.size());
  910. output_addrs_list_.emplace_back(input_addrs);
  911. bool getnext_sink_dynamic = false;
  912. if (AttrUtils::GetBool(op_desc, ATTR_GETNEXT_SINK_DYNMAIC, getnext_sink_dynamic) && getnext_sink_dynamic) {
  913. GELOGI("ATTR_GETNEXT_SINK_DYNMAIC has been set and is true, node: %s", op_desc->GetName().c_str());
  914. is_getnext_sink_dynamic_ = true;
  915. }
  916. vector<string> shape_info;
  917. if (AttrUtils::GetListStr(op_desc, ATTR_NAME_DYNAMIC_OUTPUT_DIMS, shape_info)) {
  918. dynamic_output_shape_info_.insert(dynamic_output_shape_info_.end(), shape_info.begin(), shape_info.end());
  919. }
  920. if (InitOutputTensorInfo(op_desc) != SUCCESS) {
  921. return INTERNAL_ERROR;
  922. }
  923. }
  924. return InitOutputDescInfo(output_op_list, output_descs_, output_formats_);
  925. }
  926. bool DavinciModel::IsGetNextSinkDynamic(const OpDescPtr &op_desc) {
  927. bool getnext_sink_dynamic = false;
  928. if (ge::AttrUtils::GetBool(op_desc, ATTR_GETNEXT_SINK_DYNMAIC, getnext_sink_dynamic) && getnext_sink_dynamic) {
  929. GELOGI("ATTR_GETNEXT_SINK_DYNMAIC has been set and is true.");
  930. return true;
  931. }
  932. return false;
  933. }
  934. /// @ingroup ge
  935. /// @brief NetOutput Op Initialize.
  936. /// @param [in] ComputeGraphPtr: root graph of the model.
  937. /// @param [in] NodePtr: NetOutput Op.
938. /// @param [in/out] vector<OpDescPtr>: All NetOutput nodes in model.
  939. /// @return Status
  940. Status DavinciModel::InitNetOutput(const ComputeGraphPtr &graph, const NodePtr &node,
  941. vector<OpDescPtr> &output_op_list) {
  942. // node->GetOpDesc Checked by Init: NetOutput, valid.
  943. auto op_desc = node->GetOpDesc();
944. // exclude nodes inside function-op subgraphs, e.g. Case/If
  945. if (node->GetOwnerComputeGraph() != graph) {
  946. GELOGI("Skip subgraph NetOutput node: %s.", op_desc->GetName().c_str());
  947. op_list_.erase(op_desc->GetId());
  948. return SUCCESS;
  949. }
  950. GELOGI("Init NetOutput node: %s.", op_desc->GetName().c_str());
  951. output_op_list.push_back(op_desc);
  952. if (known_node_) {
  953. return SUCCESS;
  954. }
  955. // Make information for copy output data.
  956. const vector<int64_t> input_size_list = ModelUtils::GetInputSize(op_desc);
  957. const vector<void *> virtual_addr_list = ModelUtils::GetInputDataAddrs(runtime_param_, op_desc);
  958. const vector<int64_t> input_offset_list = op_desc->GetInputOffset();
  959. GE_IF_BOOL_EXEC(input_offset_list.size() != virtual_addr_list.size(),
  960. GELOGE(PARAM_INVALID, "virtual_addr size should be equal to offset size."); return PARAM_INVALID;);
  961. if (input_size_list.empty() && virtual_addr_list.empty()) {
  962. GELOGI("NetOutput[%s] is empty.", op_desc->GetName().c_str());
  963. return SUCCESS;
  964. }
  965. if (input_size_list.empty() || input_size_list.size() != virtual_addr_list.size()) {
  966. GELOGE(PARAM_INVALID, "NetOutput[%s] init failed: Input size is %zu, Input addr is %zu", op_desc->GetName().c_str(),
  967. input_size_list.size(), virtual_addr_list.size());
  968. return PARAM_INVALID;
  969. }
  970. size_t num = new_output_data_info_.size();
  971. bool fusion_flag = false;
  972. size_t input_count = input_size_list.size();
  973. is_getnext_sink_dynamic_ = false;
  974. if (IsGetNextSinkDynamic(op_desc)) {
  975. input_count = input_size_list.size() - kGetDynamicDimsCount;
  976. is_getnext_sink_dynamic_ = true;
  977. }
  978. for (size_t idx = 0; idx < input_count; ++idx) {
  979. ZeroCopyOffset zero_copy_offset;
  980. Status ret = zero_copy_offset.InitOutputDataInfo(input_size_list, virtual_addr_list, op_desc, idx, fusion_flag);
  981. GE_IF_BOOL_EXEC(ret != SUCCESS, GELOGE(PARAM_INVALID, "InitDataInfo of input_info %s failed.",
  982. op_desc->GetName().c_str()); return PARAM_INVALID;);
  983. new_output_data_info_[num + idx] = zero_copy_offset;
  984. void *addr = virtual_addr_list.at(idx);
  985. int64_t input_offset = input_offset_list.at(idx);
  986. vector<void *> tensor_addrs;
  987. zero_copy_offset.SetOutputOutsideAddrs(input_offset, fusion_flag, addr, tensor_addrs);
  988. auto rslt = new_output_outside_addrs_.insert(std::pair<void *, ZeroCopyOffset>(addr, zero_copy_offset));
  989. if (!rslt.second) {
  990. GELOGI("same output_tensor_addr %p to different input_tensor of %s", addr, op_desc->GetName().c_str());
  991. DisableZeroCopy(addr);
  992. }
  993. for (size_t i = 0; i < tensor_addrs.size(); ++i) {
  994. void *real_addr = tensor_addrs.at(i);
  995. DisableZeroCopy(real_addr);
  996. real_virtual_addrs_.insert(real_addr);
  997. }
  998. }
  999. GetAllGearsInfo(node);
  1000. if (is_getnext_sink_dynamic_) {
  1001. GE_IF_BOOL_EXEC(GetGetDynamicDimsNodeInfo(node) != SUCCESS,
  1002. GELOGE(PARAM_INVALID, "Failed to get info of getdynamicdims node."); return PARAM_INVALID;);
  1003. }
  1004. if (is_online_infer_dynamic_) {
  1005. GE_IF_BOOL_EXEC(GetGearAndRealOutSizeInfo(input_count, node) != SUCCESS,
  1006. GELOGE(PARAM_INVALID, "Failed to get gear and real out size info."); return PARAM_INVALID;);
  1007. GE_IF_BOOL_EXEC(GetGearAndRealOutShapeInfo(input_count, op_desc) != SUCCESS,
  1008. GELOGE(PARAM_INVALID, "Failed to get gear and real out shape info."); return PARAM_INVALID;);
  1009. }
  1010. return SUCCESS;
  1011. }
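// Note: when the model is getnext-sink dynamic, the last NetOutput input (reserved by
// kGetDynamicDimsCount, presumably one tensor) carries the current dynamic dims rather
// than user output data; it is therefore excluded from input_count above and handled
// separately in GetGetDynamicDimsNodeInfo.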
  1012. void DavinciModel::GetAllGearsInfo(const NodePtr &node) {
  1013. is_online_infer_dynamic_ = false;
  1014. all_gears_info_.clear();
  1015. std::string shapes;
  1016. (void) AttrUtils::GetStr(node->GetOpDesc(), ATTR_ALL_GEARS_INFO, shapes);
  1017. if (!shapes.empty()) {
  1018. is_online_infer_dynamic_ = true;
  1019. std::vector<std::string> shape_strs = ge::StringUtils::Split(shapes, ';');
  1020. for (const auto &shape_str : shape_strs) {
  1021. if (shape_str.empty()) {
  1022. continue;
  1023. }
  1024. std::vector<int64_t> gear_info;
  1025. std::vector<std::string> dims = ge::StringUtils::Split(shape_str, ',');
  1026. for (const auto &dim : dims) {
  1027. if (dim.empty()) {
  1028. continue;
  1029. }
  1030. gear_info.emplace_back(std::strtol(dim.c_str(), nullptr, kDecimal));
  1031. }
  1032. if (!gear_info.empty()) {
  1033. all_gears_info_.emplace_back(gear_info);
  1034. GELOGD("Init all gears info from %s, gaer info is %s.", node->GetName().c_str(),
  1035. formats::JoinToString(gear_info).c_str());
  1036. }
  1037. }
  1038. }
  1039. }
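// Note: ATTR_ALL_GEARS_INFO is expected to be a ';'-separated list of ','-separated dims,
// e.g. (illustrative value) "1,3,224,224;8,3,224,224", which the loop above turns into
// all_gears_info_ = {{1,3,224,224}, {8,3,224,224}}. A minimal standalone sketch of the same
// parsing, under that assumption:
//
//   std::vector<std::vector<int64_t>> ParseGears(const std::string &shapes) {
//     std::vector<std::vector<int64_t>> gears;
//     for (const auto &shape_str : ge::StringUtils::Split(shapes, ';')) {
//       if (shape_str.empty()) continue;
//       std::vector<int64_t> gear;
//       for (const auto &dim : ge::StringUtils::Split(shape_str, ',')) {
//         if (!dim.empty()) gear.emplace_back(std::strtol(dim.c_str(), nullptr, 10));  // kDecimal == 10
//       }
//       if (!gear.empty()) gears.emplace_back(gear);
//     }
//     return gears;
//   }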
  1040. Status DavinciModel::GetGetDynamicDimsNodeInfo(const NodePtr &node) {
  1041. GE_CHECK_NOTNULL(node->GetOpDesc());
  1042. size_t input_count = node->GetAllInDataAnchors().size();
  1043. GELOGI("input_anchor count of %s is %zu.", node->GetName().c_str(), input_count);
  1044. size_t get_dynamic_dims_index = input_count - kGetDynamicDimsCount;
  1045. auto in_anchor = node->GetAllInDataAnchors().at(get_dynamic_dims_index);
  1046. auto peer_out_anchor = in_anchor->GetPeerOutAnchor();
  1047. if (peer_out_anchor == nullptr) {
  1048. GELOGE(PARAM_INVALID, "Out anchor of getdynmaicdims node should not be nullptr.");
  1049. return PARAM_INVALID;
  1050. }
  1051. auto peer_node = peer_out_anchor->GetOwnerNode();
  1052. auto op_desc = peer_node->GetOpDesc();
  1053. GE_CHECK_NOTNULL(op_desc);
  1054. if (op_desc->GetName() == kGetDynamicDimsName && op_desc->GetType() == GETDYNAMICDIMS) {
  1055. GELOGD("Start get info of %s.", op_desc->GetName().c_str());
  1056. auto input_addr = ModelUtils::GetInputDataAddrs(runtime_param_, node->GetOpDesc());
  1057. auto input_size = ModelUtils::GetInputSize(node->GetOpDesc());
  1058. if (input_addr.empty() || input_size.empty()) {
  1059. GELOGE(PARAM_INVALID, "Not set output of %s", op_desc->GetName().c_str());
  1060. return PARAM_INVALID;
  1061. }
  1062. auto input_desc = node->GetOpDesc()->GetInputDescPtr(get_dynamic_dims_index);
  1063. GE_CHECK_NOTNULL(input_desc);
  1064. if (input_desc->GetShape().GetDims().empty()) {
  1065. GELOGE(PARAM_INVALID, "Not set output desc shape of %s.", op_desc->GetName().c_str());
  1066. return PARAM_INVALID;
  1067. }
  1068. netoutput_last_input_addr_ = input_addr[get_dynamic_dims_index];
  1069. netoutput_last_input_size_ = input_size[get_dynamic_dims_index];
  1070. shape_of_cur_dynamic_dims_ = input_desc->GetShape().GetDims().at(0);
  1071. GELOGD("Shape of cur dynamic dims is %zu, size is %ld, addr is %p.", shape_of_cur_dynamic_dims_,
  1072. netoutput_last_input_size_, netoutput_last_input_addr_);
  1073. }
  1074. return SUCCESS;
  1075. }
  1076. Status DavinciModel::GetGearAndRealOutSizeInfo(size_t input_count, const NodePtr &node) {
  1077. GELOGD("Start get gear and real output size info of %s, input count is %zu.", node->GetName().c_str(), input_count);
  1078. merge_nodes_gear_and_real_out_size_info_.clear();
  1079. for (size_t idx = 0; idx < input_count; ++idx) {
  1080. auto in_anchor = node->GetAllInDataAnchors().at(idx);
  1081. auto peer_out_anchor = in_anchor->GetPeerOutAnchor();
  1082. if (peer_out_anchor == nullptr) {
  1083. continue;
  1084. }
  1085. auto peer_node = peer_out_anchor->GetOwnerNode();
  1086. auto op_desc = peer_node->GetOpDesc();
  1087. GE_CHECK_NOTNULL(op_desc);
  1088. if ((peer_node->GetType() == MERGE) && (op_desc->HasAttr(ATTR_INSERT_BY_MBATCH))) {
  1089. if (GetRealOutputSizeOfMerge(idx, peer_node) != SUCCESS) {
  1090. GELOGE(PARAM_INVALID, "Get real output size of %s failed.", peer_node->GetName().c_str());
  1091. return PARAM_INVALID;
  1092. }
  1093. }
  1094. }
  1095. return SUCCESS;
  1096. }
  1097. Status DavinciModel::GetRealOutputSizeOfMerge(size_t input_index, const NodePtr &merge_node) {
  1098. GELOGD("Start get output size of %s, which is %zu input to netoutput.", merge_node->GetName().c_str(), input_index);
  1099. std::map<vector<int64_t>, int64_t> gear_and_real_out_size_info;
  1100. for (auto &in_anchor : merge_node->GetAllInDataAnchors()) {
  1101. auto peer_out_anchor = in_anchor->GetPeerOutAnchor();
  1102. if (peer_out_anchor == nullptr) {
  1103. continue;
  1104. }
  1105. auto in_node = peer_out_anchor->GetOwnerNode();
  1106. GELOGD("Input node of merge is %s.", in_node->GetName().c_str());
  1107. auto op_desc = in_node->GetOpDesc();
  1108. GE_CHECK_NOTNULL(op_desc);
  1109. string batch_label;
  1110. if (AttrUtils::GetStr(op_desc, ATTR_NAME_BATCH_LABEL, batch_label)) {
  1111. size_t batch_index = static_cast<size_t>(stoi(batch_label.substr(batch_label.rfind('_') + 1)));
  1112. GELOGD("Batch index of %s is %zu.", op_desc->GetName().c_str(), batch_index);
1113. if (batch_index >= all_gears_info_.size()) {
  1114. GELOGE(PARAM_INVALID, "The value of ATTR_NAME_BATCH_LABEL is invalid.");
  1115. return PARAM_INVALID;
  1116. }
  1117. const vector<int64_t> output_size_list = ModelUtils::GetOutputSize(op_desc);
  1118. int output_index = ge::AnchorUtils::GetIdx(peer_out_anchor);
  1119. auto tensor_desc = op_desc->GetOutputDescPtr(output_index);
  1120. GE_CHECK_NOTNULL(tensor_desc);
  1121. int64_t data_size = 0;
  1122. if (TensorUtils::GetTensorSizeInBytes(*tensor_desc, data_size) != GRAPH_SUCCESS) {
  1123. GELOGE(FAILED, "Get tensor size in bytes failed.");
  1124. return FAILED;
  1125. }
  1126. gear_and_real_out_size_info[all_gears_info_[batch_index]] = data_size;
  1127. GELOGD("Get real gear index is: %zu, gear info is %s, size is %ld, tensor size is %ld",
  1128. batch_index, formats::JoinToString(all_gears_info_[batch_index]).c_str(),
  1129. output_size_list[output_index], data_size);
  1130. }
  1131. }
  1132. merge_nodes_gear_and_real_out_size_info_[input_index] = gear_and_real_out_size_info;
  1133. return SUCCESS;
  1134. }
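// Note: ATTR_NAME_BATCH_LABEL is assumed here to end with "_<gear index>", so the code
// above recovers the gear index from the suffix. Illustrative example (hypothetical label):
//
//   std::string batch_label = "Batch_2";
//   size_t batch_index = static_cast<size_t>(std::stoi(batch_label.substr(batch_label.rfind('_') + 1)));
//   // batch_index == 2, used to select all_gears_info_[2]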
  1135. Status DavinciModel::GetGearAndRealOutShapeInfo(size_t input_count, const OpDescPtr &op_desc) {
  1136. GELOGD("Start to get dynamic output dims of %s.", op_desc->GetName().c_str());
  1137. merge_nodes_gear_and_real_out_shape_info_.clear();
  1138. std::vector<std::string> dynamic_output_shape_info;
  1139. if (!AttrUtils::GetListStr(op_desc, ATTR_NAME_DYNAMIC_OUTPUT_DIMS, dynamic_output_shape_info)) {
  1140. GELOGD("Can not get dynamic output dims attr");
  1141. return SUCCESS;
  1142. }
  1143. GELOGI("Dynamic output shape info is %s", formats::JoinToString(dynamic_output_shape_info).c_str());
  1144. std::vector<vector<int64_t>> dynamic_output_shape;
  1145. ParseDynamicOutShape(dynamic_output_shape_info, dynamic_output_shape);
  1146. // idx: input_index to netoutput
  1147. for (size_t idx = 0; idx < input_count; ++idx) {
  1148. std::map<vector<int64_t>, vector<int64_t>> gear_and_real_out_shape_info;
  1149. for (auto &it : dynamic_output_shape) {
  1150. auto gear_index = static_cast<size_t>(it[0]);
1151. if (gear_index >= all_gears_info_.size()) {
  1152. GELOGE(PARAM_INVALID, "The value of cur index: %zu is invalid.", static_cast<size_t>(it[0]));
  1153. return PARAM_INVALID;
  1154. }
  1155. if (static_cast<size_t>(it[1]) == idx) {
  1156. vector<int64_t> output_shape;
  1157. for (size_t i = 2; i < it.size(); ++i) {
  1158. output_shape.emplace_back(it[i]);
  1159. }
  1160. gear_and_real_out_shape_info[all_gears_info_[gear_index]] = output_shape;
  1161. GELOGD("Get real gear index is: %zu, gear info is %s, output shape is %s.",
  1162. gear_index, formats::JoinToString(all_gears_info_[gear_index]).c_str(),
  1163. formats::JoinToString(output_shape).c_str());
  1164. }
  1165. }
  1166. merge_nodes_gear_and_real_out_shape_info_[idx] = gear_and_real_out_shape_info;
  1167. }
  1168. return SUCCESS;
  1169. }
  1170. void DavinciModel::ParseDynamicOutShape(const std::vector<std::string> &str_info,
  1171. std::vector<vector<int64_t>> &vec_info) {
  1172. for (size_t i = 0; i < str_info.size(); ++i) {
  1173. std::vector<int64_t> shape;
  1174. std::vector<std::string> dims = ge::StringUtils::Split(str_info[i], ',');
  1175. for (const auto &dim : dims) {
  1176. if (dim.empty()) {
  1177. continue;
  1178. }
  1179. shape.emplace_back(std::strtol(dim.c_str(), nullptr, kDecimal));
  1180. }
  1181. GELOGI("Shape from attr is %s.", formats::JoinToString(shape).c_str());
  1182. vec_info.emplace_back(shape);
  1183. }
  1184. }
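// Note: each string in ATTR_NAME_DYNAMIC_OUTPUT_DIMS is parsed into a flat int64 list whose
// layout, as consumed by GetGearAndRealOutShapeInfo above, appears to be
// {gear_index, netoutput_input_index, dim0, dim1, ...}. Illustrative value:
//
//   // "0,1,8,3,224,224"  ->  gear 0, netoutput input 1, real output shape {8, 3, 224, 224}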
  1185. /// @ingroup ge
  1186. /// @brief LabelSet Op Initialize.
  1187. /// @param [in] op_desc: LabelSet Op descriptor.
  1188. /// @return Status
  1189. Status DavinciModel::InitLabelSet(const OpDescPtr &op_desc) {
  1190. uint32_t label_index = 0;
  1191. if (!AttrUtils::GetInt(op_desc, ATTR_NAME_LABEL_SWITCH_INDEX, label_index)) {
  1192. GELOGE(INTERNAL_ERROR, "InitLabelSet: %s attr [%s] not exist.", op_desc->GetName().c_str(),
  1193. ATTR_NAME_LABEL_SWITCH_INDEX.c_str());
  1194. return INTERNAL_ERROR;
  1195. }
  1196. if (label_index >= LabelNum()) {
  1197. GELOGE(INTERNAL_ERROR, "InitLabelSet: label index: %u >= label size: %u.", label_index, LabelNum());
  1198. return INTERNAL_ERROR;
  1199. }
  1200. if (label_id_indication_.count(label_index) > 0) {
  1201. GELOGE(INTERNAL_ERROR, "InitLabelSet: %s label index: %u already used.", op_desc->GetName().c_str(), label_index);
  1202. return INTERNAL_ERROR;
  1203. }
  1204. rtStream_t stream = nullptr;
  1205. uint32_t stream_id = static_cast<uint32_t>(op_desc->GetStreamId());
  1206. if (stream_list_.size() == 1) {
  1207. stream = stream_list_[0];
  1208. } else if (stream_list_.size() > stream_id) {
  1209. stream = stream_list_[stream_id];
  1210. } else {
  1211. GELOGE(INTERNAL_ERROR, "InitLabelSet: stream index: %u >= stream size: %zu.", stream_id, stream_list_.size());
  1212. return INTERNAL_ERROR;
  1213. }
  1214. rtLabel_t rt_label = nullptr;
  1215. rtError_t rt_error = rtLabelCreateEx(&rt_label, stream);
  1216. if (rt_error != RT_ERROR_NONE || rt_label == nullptr) {
  1217. GELOGE(INTERNAL_ERROR, "InitLabelSet: %s create label failed, error=0x%x.", op_desc->GetName().c_str(), rt_error);
  1218. return INTERNAL_ERROR;
  1219. }
  1220. GELOGI("InitLabelSet: label[%u]=%p stream[%u]=%p.", label_index, rt_label, stream_id, stream);
  1221. label_id_indication_.insert(label_index);
  1222. label_list_[label_index] = rt_label;
  1223. return SUCCESS;
  1224. }
  1225. Status DavinciModel::InitVariable(const OpDescPtr &op_desc) {
  1226. variable_op_list_.push_back(op_desc);
  1227. return SUCCESS;
  1228. }
  1229. /// @ingroup ge
  1230. /// @brief ACL case, Load task list with queue.
  1231. /// @param [in] input_queue_ids: input queue ids from user, nums equal Data Op.
1232. /// @param [in] output_queue_ids: output queue ids from user, nums equal NetOutput Op.
  1233. /// @return: 0 for success / others for failed
  1234. Status DavinciModel::SetQueIds(const std::vector<uint32_t> &input_queue_ids,
  1235. const std::vector<uint32_t> &output_queue_ids) {
  1236. if (input_queue_ids.empty() && output_queue_ids.empty()) {
  1237. GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Param is empty");
  1238. return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID;
  1239. }
  1240. input_queue_ids_ = input_queue_ids;
  1241. output_queue_ids_ = output_queue_ids;
  1242. return SUCCESS;
  1243. }
  1244. ///
  1245. /// @ingroup ge
  1246. /// @brief ACL case, Load task list with queue.
  1247. /// @param [in] input_que_ids: input queue ids from user, nums equal Data Op.
1248. /// @param [in] output_que_ids: output queue ids from user, nums equal NetOutput Op.
  1249. /// @return: 0 for success / others for failed
  1250. ///
  1251. Status DavinciModel::LoadWithQueue() {
  1252. if (input_queue_ids_.empty() && output_queue_ids_.empty()) {
  1253. return SUCCESS;
  1254. }
  1255. if (input_queue_ids_.size() != new_input_data_info_.size()) {
  1256. GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID, "Input queue ids not match model: input_queue=%zu input_data=%zu",
  1257. input_queue_ids_.size(), new_input_data_info_.size());
  1258. return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID;
  1259. }
  1260. if (output_queue_ids_.size() != new_output_data_info_.size()) {
  1261. GELOGE(ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID,
  1262. "Output queue ids not match model: output_queue=%zu output_data=%zu",
  1263. output_queue_ids_.size(), new_output_data_info_.size());
  1264. return ACL_ERROR_GE_EXEC_MODEL_QUEUE_ID_INVALID;
  1265. }
  1266. GE_CHK_STATUS_RET(AddHeadStream(), "Add head stream failed.");
  1267. // Binding input_queue and Data Op.
  1268. GE_CHK_STATUS_RET(BindInputQueue(), "Launch bind input queue failed.");
  1269. GE_CHK_STATUS_RET(CpuTaskModelZeroCopy(input_mbuf_list_, new_input_outside_addrs_), "Launch zero copy failed.");
  1270. // Binding output_queue and NetOutput Op.
  1271. GE_CHK_STATUS_RET(BindOutputQueue(), "Launch bind output queue failed.");
  1272. GE_CHK_STATUS_RET(CpuTaskModelZeroCopy(output_mbuf_list_, new_output_outside_addrs_), "Launch zero copy failed.");
  1273. GE_CHK_STATUS_RET(CpuActiveStream(), "Launch active entry stream failed.");
  1274. GE_CHK_STATUS_RET(CpuWaitEndGraph(), "Launch wait end graph failed.");
  1275. GE_CHK_STATUS_RET(BindEnqueue(), "Launch enqueue failed.");
  1276. GE_CHK_STATUS_RET(CpuModelRepeat(), "Launch model repeat failed.");
  1277. return SUCCESS;
  1278. }
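// Note: for the queue-scheduled (ACL) path, the calls above populate cpu_task_list_ on the
// entry stream in a fixed order; per execution round this is roughly:
//   1. CpuTaskModelDequeue   -- pop an input mbuf from each input queue
//   2. CpuTaskZeroCopy       -- patch Data/NetOutput device addresses to the mbuf data
//   3. CpuTaskPrepareOutput  -- build the output mbuf header from the last input mbuf
//   4. CpuTaskActiveEntry    -- activate the original model stream
//   5. CpuTaskWaitEndGraph   -- wait for the model execution to finish
//   6. CpuTaskModelEnqueue   -- push each output mbuf to its output queue
//   7. CpuTaskModelRepeat    -- re-arm the whole sequence for the next round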
  1279. /// @ingroup ge
  1280. /// @brief queue schedule, Bind input queue to Data output address.
  1281. /// @return: 0 for success / others for failed
  1282. Status DavinciModel::BindInputQueue() {
1283. // Caller checked: input_queue_ids_.size() == input_size_list_.size() == input_addr_list_.size()
  1284. for (size_t i = 0; i < input_queue_ids_.size(); ++i) {
  1285. auto it = new_input_data_info_.find(i);
  1286. if (it == new_input_data_info_.end()) {
  1287. GELOGE(FAILED, "Input not match: tensor num=%zu, Queue id index=%zu", new_input_data_info_.size(), i);
  1288. return FAILED;
  1289. }
  1290. uint32_t queue_id = input_queue_ids_[i];
  1291. if (it->second.GetDataInfo().empty()) {
  1292. GELOGE(INTERNAL_ERROR, "the %zu input_queue not set data_info.", i);
  1293. return INTERNAL_ERROR;
  1294. }
  1295. uint32_t data_size = static_cast<uint32_t>(it->second.GetDataInfo().at(0).first);
  1296. uintptr_t data_addr = reinterpret_cast<uintptr_t>(it->second.GetDataInfo().at(0).second);
  1297. GELOGI("BindInputToQueue: graph_%u index[%zu] queue id[%u] output addr[0x%lx] output size[%u]",
  1298. runtime_param_.graph_id, i, queue_id, data_addr, data_size);
  1299. rtError_t rt_ret = rtModelBindQueue(rt_model_handle_, queue_id, RT_MODEL_INPUT_QUEUE);
  1300. if (rt_ret != RT_ERROR_NONE) {
  1301. GELOGE(RT_FAILED, "Call rtModelBindQueue failed, ret: 0x%X", rt_ret);
  1302. return RT_ERROR_TO_GE_STATUS(rt_ret);
  1303. }
  1304. if (CpuModelDequeue(queue_id) != SUCCESS) {
  1305. return INTERNAL_ERROR;
  1306. }
  1307. }
  1308. return SUCCESS;
  1309. }
  1310. /// @ingroup ge
  1311. /// @brief definiteness queue schedule, bind input queue to task.
  1312. /// @param [in] queue_id: input queue id from user.
  1313. /// @return: 0 for success / others for failed
  1314. Status DavinciModel::CpuModelDequeue(uint32_t queue_id) {
  1315. GELOGI("Set CpuKernel model dequeue task enter.");
  1316. std::shared_ptr<CpuTaskModelDequeue> dequeue_task = MakeShared<CpuTaskModelDequeue>(rt_entry_stream_);
  1317. if (dequeue_task == nullptr) {
  1318. GELOGE(MEMALLOC_FAILED, "Make CpuTaskModelDequeue task failed.");
  1319. return MEMALLOC_FAILED;
  1320. }
  1321. // Get DataOp Output address and bind to queue.
  1322. uintptr_t in_mbuf = 0;
  1323. Status status = dequeue_task->Init(queue_id, in_mbuf);
  1324. if (status != SUCCESS) {
  1325. return status;
  1326. }
  1327. cpu_task_list_.push_back(dequeue_task);
  1328. input_mbuf_list_.push_back(in_mbuf);
  1329. GELOGI("Set CpuKernel model dequeue task success.");
  1330. return SUCCESS;
  1331. }
  1332. Status DavinciModel::CpuTaskModelZeroCopy(std::vector<uintptr_t> &mbuf_list,
  1333. std::map<const void *, ZeroCopyOffset> &outside_addrs) {
  1334. GELOGI("Set CpuKernel model zero_copy task enter.");
  1335. std::shared_ptr<CpuTaskZeroCopy> zero_copy = MakeShared<CpuTaskZeroCopy>(rt_entry_stream_);
  1336. if (zero_copy == nullptr) {
  1337. GELOGE(MEMALLOC_FAILED, "Make CpuTaskZeroCopy task failed.");
  1338. return MEMALLOC_FAILED;
  1339. }
1340. // MDC zero_copy does not support L2 fusion
  1341. Status status = zero_copy->Init(mbuf_list, outside_addrs);
  1342. if (status != SUCCESS) {
  1343. return status;
  1344. }
  1345. cpu_task_list_.push_back(zero_copy);
  1346. GELOGI("Set CpuKernel model zero_copy task success.");
  1347. return SUCCESS;
  1348. }
  1349. /// @ingroup ge
  1350. /// @brief queue schedule, bind output queue to NetOutput input address.
  1351. /// @return: 0 for success / others for failed
  1352. Status DavinciModel::BindOutputQueue() {
1353. // Caller checked: output_queue_ids_.size() == output_size_list_.size() == output_addr_list_.size()
  1354. for (size_t i = 0; i < output_queue_ids_.size(); ++i) {
  1355. auto it = new_output_data_info_.find(i);
  1356. if (it == new_output_data_info_.end()) {
  1357. GELOGE(FAILED, "Output not match: tensor num=%zu, Queue id index=%zu", new_output_data_info_.size(), i);
  1358. return FAILED;
  1359. }
  1360. uint32_t queue_id = output_queue_ids_[i];
  1361. if (it->second.GetDataInfo().empty()) {
  1362. GELOGE(INTERNAL_ERROR, "the %zu output_queue not set data_info.", i);
  1363. return INTERNAL_ERROR;
  1364. }
  1365. uint32_t data_size = static_cast<uint32_t>(it->second.GetDataInfo().at(0).first);
  1366. uintptr_t data_addr = reinterpret_cast<uintptr_t>(it->second.GetDataInfo().at(0).second);
  1367. GELOGI("BindOutputToQueue: graph_%u index[%zu] queue id[%u] input addr[0x%lx] input size[%u]",
  1368. runtime_param_.graph_id, i, queue_id, data_addr, data_size);
  1369. rtError_t rt_ret = rtModelBindQueue(rt_model_handle_, queue_id, RT_MODEL_OUTPUT_QUEUE);
  1370. if (rt_ret != RT_ERROR_NONE) {
  1371. GELOGE(RT_FAILED, "Call rtModelBindQueue failed, ret: 0x%X", rt_ret);
  1372. return RT_ERROR_TO_GE_STATUS(rt_ret);
  1373. }
  1374. Status status = CpuModelPrepareOutput(data_addr, data_size);
  1375. if (status != SUCCESS) {
  1376. return status;
  1377. }
  1378. }
  1379. return SUCCESS;
  1380. }
  1381. /// @ingroup ge
  1382. /// @brief definiteness queue schedule, bind output queue to task.
  1383. /// @param [in] addr: NetOutput Op input tensor address.
  1384. /// @param [in] size: NetOutput Op input tensor size.
  1385. /// @return: 0 for success / others for failed
  1386. Status DavinciModel::CpuModelPrepareOutput(uintptr_t addr, uint32_t size) {
  1387. GELOGI("Set CpuKernel model enqueue task enter.");
  1388. if (input_mbuf_list_.empty()) {
  1389. GELOGE(FAILED, "Need input mbuf for fill output mbuf head info.");
  1390. return FAILED;
  1391. }
  1392. std::shared_ptr<CpuTaskPrepareOutput> prepare_output = MakeShared<CpuTaskPrepareOutput>(rt_entry_stream_);
  1393. if (prepare_output == nullptr) {
  1394. GELOGE(MEMALLOC_FAILED, "Make CpuTaskPrepareOutput task failed.");
  1395. return MEMALLOC_FAILED;
  1396. }
  1397. uintptr_t out_mbuf = 0;
  1398. if (prepare_output->Init(addr, size, input_mbuf_list_.back(), out_mbuf) != SUCCESS) {
  1399. return FAILED;
  1400. }
  1401. cpu_task_list_.push_back(prepare_output);
  1402. output_mbuf_list_.push_back(out_mbuf);
  1403. GELOGI("Set CpuKernel model enqueue task success.");
  1404. return SUCCESS;
  1405. }
  1406. ///
  1407. /// @ingroup ge
  1408. /// @brief definiteness queue schedule, active original model stream.
  1409. /// @return: 0 for success / others for failed
  1410. ///
  1411. Status DavinciModel::CpuActiveStream() {
  1412. GELOGI("Set CpuKernel active stream task enter.");
  1413. std::shared_ptr<CpuTaskActiveEntry> active_entry = MakeShared<CpuTaskActiveEntry>(rt_entry_stream_);
  1414. if (active_entry == nullptr) {
  1415. GELOGE(MEMALLOC_FAILED, "Make CpuTaskActiveEntry task failed.");
  1416. return MEMALLOC_FAILED;
  1417. }
  1418. Status status = active_entry->Init(rt_head_stream_);
  1419. if (status != SUCCESS) {
  1420. return status;
  1421. }
  1422. cpu_task_list_.push_back(active_entry);
  1423. GELOGI("Set CpuKernel active stream task success.");
  1424. return SUCCESS;
  1425. }
  1426. /// @ingroup ge
  1427. /// @brief definiteness queue schedule, wait for end graph.
  1428. /// @return: 0 for success / others for failed
  1429. Status DavinciModel::CpuWaitEndGraph() {
  1430. GELOGI("Set CpuKernel wait end graph task enter.");
  1431. std::shared_ptr<CpuTaskWaitEndGraph> wait_endgraph = MakeShared<CpuTaskWaitEndGraph>(rt_entry_stream_);
  1432. if (wait_endgraph == nullptr) {
  1433. GELOGE(MEMALLOC_FAILED, "Make CpuTaskWaitEndGraph task failed.");
  1434. return MEMALLOC_FAILED;
  1435. }
  1436. Status status = wait_endgraph->Init(runtime_model_id_);
  1437. if (status != SUCCESS) {
  1438. return status;
  1439. }
  1440. cpu_task_list_.push_back(wait_endgraph);
  1441. GELOGI("Set CpuKernel wait end graph task success.");
  1442. return SUCCESS;
  1443. }
  1444. Status DavinciModel::BindEnqueue() {
  1445. for (size_t i = 0; i < output_queue_ids_.size(); ++i) {
  1446. auto it = new_output_data_info_.find(i);
  1447. if (it == new_output_data_info_.end()) {
  1448. GELOGE(FAILED, "Output not match: tensor num=%zu, Queue id index=%zu", new_output_data_info_.size(), i);
  1449. return FAILED;
  1450. }
  1451. uint32_t queue_id = output_queue_ids_[i];
  1452. if (CpuModelEnqueue(queue_id, output_mbuf_list_[i]) != SUCCESS) {
  1453. return INTERNAL_ERROR;
  1454. }
  1455. }
  1456. return SUCCESS;
  1457. }
  1458. Status DavinciModel::CpuModelEnqueue(uint32_t queue_id, uintptr_t out_mbuf) {
  1459. GELOGI("Set CpuKernel model enqueue task enter.");
  1460. std::shared_ptr<CpuTaskModelEnqueue> model_enqueue = MakeShared<CpuTaskModelEnqueue>(rt_entry_stream_);
  1461. if (model_enqueue == nullptr) {
  1462. GELOGE(MEMALLOC_FAILED, "Make CpuTaskModelEnqueue task failed.");
  1463. return MEMALLOC_FAILED;
  1464. }
  1465. Status status = model_enqueue->Init(queue_id, out_mbuf);
  1466. if (status != SUCCESS) {
  1467. return status;
  1468. }
  1469. cpu_task_list_.push_back(model_enqueue);
  1470. GELOGI("Set CpuKernel model enqueue task enter.");
  1471. return SUCCESS;
  1472. }
  1473. /// @ingroup ge
  1474. /// @brief definiteness queue schedule, repeat run model.
  1475. /// @return: 0 for success / others for failed
  1476. Status DavinciModel::CpuModelRepeat() {
  1477. GELOGI("Set CpuKernel repeat task enter.");
  1478. std::shared_ptr<CpuTaskModelRepeat> model_repeat = MakeShared<CpuTaskModelRepeat>(rt_entry_stream_);
  1479. if (model_repeat == nullptr) {
  1480. GELOGE(MEMALLOC_FAILED, "Make CpuTaskModelRepeat task failed.");
  1481. return MEMALLOC_FAILED;
  1482. }
  1483. Status status = model_repeat->Init(runtime_model_id_);
  1484. if (status != SUCCESS) {
  1485. return status;
  1486. }
  1487. cpu_task_list_.push_back(model_repeat);
  1488. GELOGI("Set CpuKernel repeat task success.");
  1489. return SUCCESS;
  1490. }
  1491. Status DavinciModel::GetInputOutputDescInfo(vector<InputOutputDescInfo> &input_desc,
  1492. vector<InputOutputDescInfo> &output_desc) {
  1493. if (input_addrs_list_.empty() || input_addrs_list_[0].size() != 1) {
  1494. GELOGI("data_op_list_ is empty or input_desc size is not 1.");
  1495. } else {
  1496. vector<uint32_t> input_formats;
  1497. GE_CHK_STATUS_RET(GetInputDescInfo(input_desc, input_formats), "get input desc info failed.");
  1498. }
  1499. vector<uint32_t> output_formats;
  1500. GE_CHK_STATUS_RET(GetOutputDescInfo(output_desc, output_formats), "get output desc info failed");
  1501. return SUCCESS;
  1502. }
  1503. Status DavinciModel::GetInputOutputDescInfo(vector<InputOutputDescInfo> &input_desc,
  1504. vector<InputOutputDescInfo> &output_desc,
  1505. vector<uint32_t> &input_formats,
  1506. vector<uint32_t> &output_formats) {
  1507. if (input_addrs_list_.empty() || input_addrs_list_[0].size() != 1) {
  1508. GELOGE(FAILED, "OP List Pointer is null or input_desc size is not 1!");
  1509. return FAILED;
  1510. }
  1511. GE_CHK_STATUS_RET(GetInputDescInfo(input_desc, input_formats), "get input desc info failed");
  1512. GE_CHK_STATUS_RET(GetOutputDescInfo(output_desc, output_formats), "get output desc info failed");
  1513. return SUCCESS;
  1514. }
  1515. ///
  1516. /// @ingroup ge
  1517. /// @brief Get dynamic batch_info
  1518. /// @param [out] batch_info
  1519. /// @param [out] dynamic_type
  1520. /// @return execute result
  1521. ///
  1522. Status DavinciModel::GetDynamicBatchInfo(std::vector<std::vector<int64_t>> &batch_info, int32_t &dynamic_type) const {
  1523. dynamic_type = dynamic_type_;
  1524. batch_info = batch_info_;
  1525. return SUCCESS;
  1526. }
  1527. ///
  1528. /// @ingroup ge
  1529. /// @brief Get combined dynamic dims info
  1530. /// @param [out] batch_info
  1531. /// @return None
  1532. ///
  1533. void DavinciModel::GetCombinedDynamicDims(std::vector<std::vector<int64_t>> &batch_info) const {
  1534. batch_info.clear();
  1535. batch_info = combined_batch_info_;
  1536. }
  1537. ///
  1538. /// @ingroup ge
  1539. /// @brief Get user designate shape order
  1540. /// @param [out] user_input_shape_order
  1541. /// @return None
  1542. ///
  1543. void DavinciModel::GetUserDesignateShapeOrder(std::vector<std::string> &user_input_shape_order) const {
  1544. user_input_shape_order.clear();
  1545. user_input_shape_order = user_designate_shape_order_;
  1546. }
  1547. ///
  1548. /// @ingroup ge
  1549. /// @brief Get AIPP input info
  1550. /// @param [in] index
  1551. /// @param [out] aipp_info
  1552. /// @return execute result
  1553. ///
  1554. Status DavinciModel::GetAIPPInfo(uint32_t index, AippConfigInfo &aipp_info) {
  1555. GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
  1556. OpDescPtr data_op = data_op_list_[index];
  1557. if (!data_op->HasAttr(ATTR_NAME_AIPP)) {
  1558. GELOGW("GetAIPPInfo: there is not AIPP related with index %u.", index);
  1559. return ACL_ERROR_GE_AIPP_NOT_EXIST;
  1560. }
  1561. std::unique_ptr<domi::AippOpParams> aipp_params(new (std::nothrow) domi::AippOpParams());
  1562. GE_CHECK_NOTNULL(aipp_params);
  1563. ge::GeAttrValue::NAMED_ATTRS aipp_attr;
  1564. GE_CHK_BOOL_RET_STATUS(AttrUtils::GetNamedAttrs(data_op, ATTR_NAME_AIPP, aipp_attr), GE_AIPP_NOT_EXIST,
  1565. "Data node do not contain param aipp!");
  1566. GE_CHK_STATUS_RET(OpUtils::ConvertAippParams(aipp_attr, aipp_params.get()), "get aipp params failed");
  1567. GELOGI("GetAIPPInfo: node data: %s, type: %s, current index: %u, current node related input rank: %u",
  1568. data_op->GetName().c_str(), data_op->GetType().c_str(), index, aipp_params->related_input_rank());
  1569. GE_CHK_STATUS_RET(AippUtils::ConvertAippParams2AippInfo(aipp_params.get(), aipp_info),
  1570. "convert aipp params to aipp config info failed");
  1571. return SUCCESS;
  1572. }
  1573. Status DavinciModel::GetAippType(uint32_t index, InputAippType &type, size_t &aipp_index) {
  1574. GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
  1575. // Set default value
  1576. type = DATA_WITHOUT_AIPP;
  1577. aipp_index = 0xFFFFFFFF; // default invalid value
  1578. OpDescPtr data_op = data_op_list_[index];
  1579. GE_CHECK_NOTNULL(data_op);
  1580. if (!data_op->HasAttr(ATTR_DATA_RELATED_AIPP_MODE)) {
  1581. GELOGW("There is no aipp releated info with index %u.", index);
  1582. return SUCCESS;
  1583. }
  1584. std::string data_mode;
  1585. (void)AttrUtils::GetStr(data_op, ATTR_DATA_RELATED_AIPP_MODE, data_mode);
  1586. if (data_mode == "static_aipp") {
  1587. type = DATA_WITH_STATIC_AIPP;
  1588. } else if (data_mode == "dynamic_aipp") {
  1589. type = DATA_WITH_DYNAMIC_AIPP;
  1590. } else if (data_mode == "dynamic_aipp_conf") {
  1591. type = DYNAMIC_AIPP_NODE;
  1592. } else {
  1593. GELOGE(ACL_ERROR_GE_AIPP_MODE_INVALID,
  1594. "The info of aipp releated info %s is invalid with index %u.", data_mode.c_str(), index);
  1595. return ACL_ERROR_GE_AIPP_MODE_INVALID;
  1596. }
  1597. if (type == DATA_WITH_DYNAMIC_AIPP) {
  1598. string releated_name;
  1599. (void)AttrUtils::GetStr(data_op, ATTR_DATA_AIPP_DATA_NAME_MAP, releated_name);
  1600. for (size_t i = 0; i < data_op_list_.size(); ++i) {
  1601. GE_CHECK_NOTNULL(data_op_list_[i]);
  1602. if (data_op_list_[i]->GetName() == releated_name) {
  1603. GELOGI("Find aipp_data [%s] index %zu from index %u", releated_name.c_str(), i, index);
  1604. aipp_index = i;
  1605. }
  1606. }
  1607. if (aipp_index == 0xFFFFFFFF) {
  1608. GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "Can not find aipp data node from index %u", index);
  1609. return ACL_ERROR_GE_AIPP_NOT_EXIST;
  1610. }
  1611. }
  1612. return SUCCESS;
  1613. }
  1614. void DavinciModel::SetDynamicSize(const std::vector<uint64_t> &batch_num, int32_t dynamic_type) {
  1615. batch_size_.clear();
  1616. if (batch_num.empty()) {
  1617. GELOGD("User has not set dynammic data");
  1618. }
  1619. for (size_t i = 0; i < batch_num.size(); i++) {
  1620. batch_size_.emplace_back(batch_num[i]);
  1621. }
  1622. dynamic_type_ = dynamic_type;
  1623. }
  1624. void DavinciModel::GetCurShape(std::vector<int64_t> &batch_info, int32_t &dynamic_type) {
  1625. if (batch_size_.empty()) {
  1626. GELOGD("User does not set dynamic size");
  1627. }
  1628. for (size_t i = 0; i < batch_size_.size(); i++) {
  1629. GELOGI("Start to get current shape");
  1630. batch_info.emplace_back(batch_size_[i]);
  1631. }
  1632. dynamic_type = dynamic_type_;
  1633. }
  1634. void DavinciModel::GetModelAttr(vector<string> &out_shape_info) {
  1635. out_shape_info.insert(out_shape_info.end(), dynamic_output_shape_info_.begin(), dynamic_output_shape_info_.end());
  1636. }
  1637. Status DavinciModel::GetInputOutputDescInfoForZeroCopy(vector<InputOutputDescInfo> &input_desc,
  1638. vector<InputOutputDescInfo> &output_desc,
  1639. std::vector<uint32_t> &input_formats,
  1640. std::vector<uint32_t> &output_formats) {
  1641. if (input_addrs_list_.empty() || input_addrs_list_[0].size() != kOutputNum) {
  1642. GELOGE(FAILED, "OP List Pointer is null or input_desc size is not 1!");
  1643. return FAILED;
  1644. }
  1645. GE_CHK_STATUS_RET(GetInputDescInfo(input_desc, input_formats), "get input desc info failed");
1646. GE_CHK_STATUS_RET(GetOutputDescInfo(output_desc, output_formats), "get output desc info failed");
  1647. GE_CHK_BOOL_RET_STATUS(output_desc.size() == output_memory_size_list_.size(), INTERNAL_ERROR,
  1648. "output_desc size[%zu] not equal output_size_list_[%zu] size!", output_desc.size(),
  1649. output_memory_size_list_.size());
1650. /// For function zero copy, the memory should be aligned to 512 bytes.
1651. /// And, because of the cce op limit, size should be larger than the real shape size. The memory should be padded by 32
  1652. /// bytes.
  1653. /// *size equals to ((tensorDesc->dataSize + 2 * 32 - 1) / 32) * 32;
  1654. for (size_t i = 0; i < output_memory_size_list_.size(); i++) {
  1655. output_desc[i].size = output_memory_size_list_[i];
  1656. }
  1657. return SUCCESS;
  1658. }
  1659. void DavinciModel::SetInputDimsInfo(const vector<int64_t> &model_input_dims, Format &format,
  1660. InputOutputDescInfo &input) {
  1661. uint32_t n, c, h, w;
  1662. n = format == FORMAT_NHWC ? NHWC_DIM_N : NCHW_DIM_N;
  1663. c = format == FORMAT_NHWC ? NHWC_DIM_C : NCHW_DIM_C;
  1664. h = format == FORMAT_NHWC ? NHWC_DIM_H : NCHW_DIM_H;
  1665. w = format == FORMAT_NHWC ? NHWC_DIM_W : NCHW_DIM_W;
  1666. if (model_input_dims.size() == static_cast<size_t>(NORMAL_TENSOR_SIZE)) {
  1667. input.shape_info.num = model_input_dims[n];
  1668. input.shape_info.height = model_input_dims[h];
  1669. input.shape_info.width = model_input_dims[w];
  1670. input.shape_info.channel = model_input_dims[c];
  1671. }
  1672. for (size_t k = 0; k < model_input_dims.size(); ++k) {
  1673. input.shape_info.dims.push_back(model_input_dims[k]);
  1674. }
  1675. return;
  1676. }
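// Note: SetInputDimsInfo only fills num/channel/height/width when the dim count equals
// NORMAL_TENSOR_SIZE (assumed to be 4); the N/C/H/W positions are chosen by format.
// Illustrative example with a hypothetical NHWC input {8, 224, 224, 3}:
//
//   // num = 8, height = 224, width = 224, channel = 3; dims = {8, 224, 224, 3}
//   // the same logical tensor in NCHW would arrive as {8, 3, 224, 224}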
  1677. void DavinciModel::CreateInputDimsInfo(const OpDescPtr &op_desc, Format format, InputOutputDescInfo &input) {
  1678. if (is_new_model_desc_ && op_desc->HasAttr(ATTR_NAME_INPUT_DIMS)) {
  1679. // When static aipp is set, need to get the model input dims which processed by aipp
  1680. vector<int64_t> model_input_dims;
  1681. (void)AttrUtils::GetListInt(op_desc, ATTR_NAME_INPUT_DIMS, model_input_dims);
  1682. SetInputDimsInfo(model_input_dims, format, input);
  1683. return;
  1684. }
  1685. // judge if this data is linked dynamic aipp first, multiply batch has been considered
  1686. if (op_desc->HasAttr(ATTR_DYNAMIC_AIPP_INPUT_DIMS)) {
  1687. vector<int64_t> dynamic_aipp_input_dims;
  1688. (void)AttrUtils::GetListInt(op_desc, ATTR_DYNAMIC_AIPP_INPUT_DIMS, dynamic_aipp_input_dims);
  1689. SetInputDimsInfo(dynamic_aipp_input_dims, format, input);
  1690. return;
  1691. } else {
  1692. // judge if this data is multiply batch
  1693. if (!op_desc->HasAttr(ATTR_MBATCH_ORIGIN_INPUT_DIMS)) {
  1694. vector<int64_t> input_dims = op_desc->GetInputDescPtr(0)->GetShape().GetDims();
  1695. SetInputDimsInfo(input_dims, format, input);
  1696. return;
  1697. } else {
  1698. vector<int64_t> origin_input_dims;
  1699. (void)AttrUtils::GetListInt(op_desc, ATTR_MBATCH_ORIGIN_INPUT_DIMS, origin_input_dims);
  1700. SetInputDimsInfo(origin_input_dims, format, input);
  1701. return;
  1702. }
  1703. }
  1704. }
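// Note: input dims are resolved with the following precedence (from the branches above):
//   1. ATTR_NAME_INPUT_DIMS           -- dims after static AIPP, only when is_new_model_desc_
//   2. ATTR_DYNAMIC_AIPP_INPUT_DIMS   -- Data linked to a dynamic AIPP node
//   3. ATTR_MBATCH_ORIGIN_INPUT_DIMS  -- multi-batch case, original user dims
//   4. the first input tensor's shape -- plain Data op otherwise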
  1705. Status DavinciModel::GetInputDescInfo(vector<InputOutputDescInfo> &input_desc, std::vector<uint32_t> &formats) {
  1706. for (size_t index = 0; index < data_op_list_.size(); ++index) {
  1707. InputOutputDescInfo input;
  1708. GE_CHECK_NOTNULL(data_op_list_[index]);
  1709. GE_CHECK_NOTNULL(data_op_list_[index]->GetInputDescPtr(0));
  1710. Format format = data_op_list_[index]->GetInputDescPtr(0)->GetFormat();
  1711. CreateInputDimsInfo(data_op_list_[index], format, input);
  1712. input.data_type = data_op_list_[index]->GetInputDescPtr(0)->GetDataType();
  1713. input.name = data_op_list_[index]->GetName();
  1714. int64_t input_size = 0;
  1715. GE_CHK_STATUS_RET(TensorUtils::GetSize(*data_op_list_[index]->GetInputDescPtr(0), input_size),
  1716. "get input size failed.");
  1717. input.size = input_size;
  1718. formats.push_back(format);
  1719. input_desc.push_back(input);
  1720. }
1721. // Because GetInputDescInfo may be called more than once, set is_new_model_desc_ to false after calculating the model input dims.
  1722. is_new_model_desc_ = false;
  1723. return SUCCESS;
  1724. }
  1725. void DavinciModel::CreateOutput(uint32_t index, const OpDescPtr &op_desc, InputOutputDescInfo &output,
  1726. uint32_t &format_result) {
  1727. /// netoutput input tensor desc
  1728. GE_IF_BOOL_EXEC(op_desc->GetInputDescPtr(index) == nullptr, GELOGE(FAILED, "OpDesc GetInputDescPtr is nullptr");
  1729. return );
  1730. Format format = op_desc->GetInputDescPtr(index)->GetFormat();
  1731. GeShape shape = op_desc->GetInputDescPtr(index)->GetShape();
  1732. DataType data_type = op_desc->GetInputDescPtr(index)->GetDataType();
  1733. int64_t dims[] = {1, 1, 1, 1};
  1734. format_result = format;
  1735. if (format == FORMAT_ND) { // for ND tensor
  1736. for (size_t i = 0; i < shape.GetDimNum() && i < (sizeof(dims) / sizeof(dims[0])); i++) {
  1737. dims[i] = shape.GetDim(i);
  1738. }
  1739. } else { // FOR FORMAT_NHWC or FORMAT_NCHW
  1740. dims[0] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_N : NCHW_DIM_N); // 0: first dim
  1741. dims[1] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_C : NCHW_DIM_C); // 1: second dim
  1742. dims[2] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_H : NCHW_DIM_H); // 2: third dim
  1743. dims[3] = shape.GetDim(format == FORMAT_NHWC ? NHWC_DIM_W : NCHW_DIM_W); // 3: forth dim
  1744. }
  1745. output.shape_info.num = dims[0]; // 0: first dim
  1746. output.shape_info.channel = dims[1]; // 1: second dim
  1747. output.shape_info.height = dims[2]; // 2: third dim
  1748. output.shape_info.width = dims[3]; // 3: forth dim
  1749. if (op_desc->GetInputDescPtr(index)->GetFormat() == FORMAT_FRACTAL_Z) { // FraczToHWCK
  1750. int64_t k = shape.GetDim(0); // 0: first dim
  1751. int64_t c = shape.GetDim(1); // 1: second dim
  1752. int64_t h = shape.GetDim(2); // 2: third dim
  1753. int64_t w = shape.GetDim(3); // 3: forth dim
  1754. output.shape_info.dims.push_back(h);
  1755. output.shape_info.dims.push_back(w);
  1756. output.shape_info.dims.push_back(c);
  1757. output.shape_info.dims.push_back(k);
  1758. format_result = FORMAT_HWCN;
  1759. } else {
  1760. for (size_t j = 0; j < shape.GetDimNum(); j++) {
  1761. output.shape_info.dims.push_back(shape.GetDim(j));
  1762. }
  1763. }
  1764. int64_t tensor_size = 0;
  1765. if (AttrUtils::GetInt(op_desc->GetInputDescPtr(index), ATTR_NAME_SPECIAL_OUTPUT_SIZE, tensor_size)
  1766. && (tensor_size > 0)) {
  1767. GELOGI("netoutput[%s] [%d]th input has special size [%ld]", op_desc->GetName().c_str(), index, tensor_size);
  1768. } else {
  1769. (void)TensorUtils::CalcTensorMemSize(shape, format, data_type, tensor_size); // no need to check value
  1770. }
  1771. output.size = static_cast<uint64_t>(tensor_size);
  1772. output.data_type = op_desc->GetInputDescPtr(index)->GetDataType();
  1773. }
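// Note: for FORMAT_FRACTAL_Z inputs, CreateOutput reorders the stored K,C,H,W dims to
// H,W,C,K and reports the format as FORMAT_HWCN. Illustrative example (hypothetical shape):
//
//   // stored shape {K=64, C=3, H=7, W=7}  ->  output.shape_info.dims = {7, 7, 3, 64}, FORMAT_HWCN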
  1774. Status DavinciModel::InitOutputDescInfo(const vector<OpDescPtr> &output_op_list,
  1775. vector<InputOutputDescInfo> &output_descs, vector<uint32_t> &output_formats) {
  1776. GELOGD("Output node size: %zu", output_op_list.size());
  1777. for (const auto &op_desc : output_op_list) {
  1778. uint32_t out_size = static_cast<uint32_t>(op_desc->GetInputsSize());
  1779. for (uint32_t index = 0; index < out_size; index++) {
  1780. string output_name;
  1781. InputOutputDescInfo output;
  1782. uint32_t format_result;
  1783. CreateOutput(index, op_desc, output, format_result);
  1784. std::vector<std::string> src_name = op_desc->GetSrcName();
  1785. std::vector<int64_t> src_index = op_desc->GetSrcIndex();
  1786. GE_CHK_BOOL_RET_STATUS(src_name.size() > index && src_index.size() > index, INTERNAL_ERROR,
  1787. "construct output_name failed.");
1788. // forward compatibility: if the old om has no out_node_name, return outputs in the original way
  1789. if (out_size == out_node_name_.size()) {
1790. // newest plan: the index is appended to the name during model generation.
  1791. bool contains_colon = out_node_name_[index].find(":") != std::string::npos;
  1792. output_name =
  1793. contains_colon ? out_node_name_[index] : out_node_name_[index] + ":" + std::to_string(src_index[index]);
  1794. } else {
  1795. output_name = std::string("output_") + std::to_string(index) + "_" + src_name[index] + "_" +
  1796. std::to_string(src_index[index]);
  1797. }
  1798. output.name = output_name;
  1799. output_descs.push_back(output);
  1800. output_formats.push_back(format_result);
  1801. }
  1802. }
  1803. return SUCCESS;
  1804. }
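// Note: output tensor names follow two schemes in InitOutputDescInfo above. When the om
// carries out_node_name_, the name is "<node>:<src_index>" (':' is appended only if not
// already present); otherwise the legacy form "output_<index>_<src_name>_<src_index>" is
// used. Illustrative examples with hypothetical node names:
//
//   // out_node_name_ present: "conv_out", src_index 0            -> "conv_out:0"
//   // legacy fallback:        index 1, src "relu_5", src_index 0 -> "output_1_relu_5_0"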
  1805. Status DavinciModel::GetOutputDescInfo(vector<InputOutputDescInfo> &output_descs, vector<uint32_t> &output_formats) {
  1806. output_descs.insert(output_descs.end(), output_descs_.begin(), output_descs_.end());
  1807. output_formats.insert(output_formats.end(), output_formats_.begin(), output_formats_.end());
  1808. return SUCCESS;
  1809. }
  1810. ge::Format DavinciModel::GetFormat() {
  1811. if ((data_op_list_.empty()) || data_op_list_[0] == nullptr || data_op_list_[0]->GetInputDescPtr(0) == nullptr) {
  1812. GELOGW("OP List Pointer is null or input_desc size is not 1!");
  1813. return FORMAT_NCHW;
  1814. }
  1815. return data_op_list_[0]->GetInputDescPtr(0)->GetFormat();
  1816. }
  1817. Status DavinciModel::CopyInputData(const InputData &input_data, bool device_data) {
  1818. rtMemcpyKind_t kind = device_data ? RT_MEMCPY_DEVICE_TO_DEVICE : RT_MEMCPY_HOST_TO_DEVICE;
  1819. const std::vector<DataBuffer> &blobs = input_data.blobs;
  1820. for (const auto &data : new_input_data_info_) {
  1821. if (data.first >= blobs.size()) {
  1822. GELOGE(FAILED, "Blobs not match: blobs=%zu, tensor=%zu, index=%u, size=%ld, op_name(%s)", blobs.size(),
  1823. new_input_data_info_.size(), data.first, data.second.GetDataInfo().at(0).first,
  1824. data.second.GetOpName().c_str());
  1825. return FAILED;
  1826. }
  1827. const DataBuffer &data_buf = blobs[data.first];
  1828. if (data_buf.length == 0) {
  1829. GELOGW("No data need to memcpy!");
  1830. return SUCCESS;
  1831. }
  1832. uint64_t data_size = data.second.GetDataSize();
  1833. GE_CHK_BOOL_RET_STATUS(data_size >= data_buf.length, PARAM_INVALID,
  1834. "input data size(%lu) does not match model required size(%lu), op_name(%s) ret failed.",
  1835. data_buf.length, data_size, data.second.GetOpName().c_str());
  1836. void *mem_addr = data.second.GetBasicAddr();
  1837. void *data_buf_addr = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(data_buf.data));
  1838. uint64_t data_buf_length = data_buf.length;
  1839. GELOGI("CopyPlainData memcpy graph_%u type[F] input[%s] rank[%u] dst[%p] src[%p] mem_size[%lu] datasize[%lu]",
  1840. runtime_param_.graph_id, data.second.GetOpName().c_str(), data.first, mem_addr, data_buf_addr, data_size,
  1841. data_buf_length);
  1842. GE_CHK_RT_RET(rtMemcpy(mem_addr, data_size, data_buf_addr, data_buf_length, kind));
  1843. }
  1844. return SUCCESS;
  1845. }
  1846. Status DavinciModel::SyncVarData() {
  1847. GELOGI("Sync var data, model id:%u", model_id_);
  1848. Status ret = SUCCESS;
  1849. OpDescPtr global_step = GetVariableOp(NODE_NAME_GLOBAL_STEP);
  1850. if (global_step != nullptr) {
  1851. auto v_output_size = ModelUtils::GetOutputSize(global_step);
  1852. auto v_output_addr = ModelUtils::GetOutputDataAddrs(runtime_param_, global_step);
  1853. if (v_output_size.empty() || v_output_addr.empty()) {
  1854. GELOGE(PARAM_INVALID, "global step op:%s not set output", global_step->GetName().c_str());
  1855. return PARAM_INVALID;
  1856. }
  1857. std::vector<uint64_t> v_step;
  1858. v_step.push_back(iterator_count_);
  1859. GE_CHK_RT_RET(rtMemcpy(v_output_addr[0], v_output_size[0], v_step.data(), v_step.size() * sizeof(uint64_t),
  1860. RT_MEMCPY_HOST_TO_DEVICE));
  1861. }
  1862. for (auto op_desc : variable_op_list_) {
  1863. ret =
  1864. VarManager::Instance(session_id_)->SyncVarData(runtime_param_.graph_id, op_desc->GetName(), op_desc, mem_base_);
  1865. GE_CHK_BOOL_EXEC(ret == SUCCESS, break, "sync var data ret failed, model id:%u, op name:%s.", model_id_,
  1866. op_desc->GetName().c_str());
  1867. }
  1868. return ret;
  1869. }
  1870. Status DavinciModel::InitModelProfile() {
  1871. for (const auto &task : task_list_) {
  1872. GE_CHECK_NOTNULL(task);
  1873. const FusionOpInfo *fusion_op_info = task->GetFusionOpInfo();
  1874. // when type is RT_MODEL_TASK_KERNEL, ctx is not null
  1875. if ((fusion_op_info == nullptr) || fusion_op_info->original_op_names.empty()) {
  1876. continue;
  1877. }
  1878. GELOGI("task.id = %u, opNum = %zu", task->GetTaskID(), fusion_op_info->original_op_names.size());
  1879. op_id_map_.insert(std::make_pair(fusion_op_info->op_index, task->GetTaskID()));
  1880. }
  1881. std::set<uint32_t> task_id_set;
  1882. using CIT = std::multimap<uint32_t, uint32_t>::const_iterator;
  1883. using Range = std::pair<CIT, CIT>;
  1884. for (const auto &task : task_list_) {
  1885. GE_CHECK_NOTNULL(task);
  1886. const FusionOpInfo *fusion_op_info = task->GetFusionOpInfo();
  1887. if ((fusion_op_info == nullptr) || fusion_op_info->original_op_names.empty()) {
  1888. continue;
  1889. }
  1890. if (task_id_set.count(task->GetTaskID()) > 0) {
  1891. continue;
  1892. }
  1893. const auto &op_desc = GetOpByIndex(fusion_op_info->op_index);
  1894. GE_CHK_BOOL_EXEC(op_desc != nullptr, return FAILED, "index: %u out of range", fusion_op_info->op_index);
  1895. ProfileInfo profile;
  1896. profile.fusion_info = *fusion_op_info;
  1897. Range range = op_id_map_.equal_range(fusion_op_info->op_index);
  1898. for (CIT range_idx = range.first; range_idx != range.second; ++range_idx) {
  1899. profile.task_count++;
  1900. task_id_set.insert(range_idx->second);
  1901. }
  1902. // memory info
  1903. TaskMemInfo &mem_info = profile.memory_info;
  1904. const auto input_size = ModelUtils::GetInputSize(op_desc);
  1905. const auto output_size = ModelUtils::GetOutputSize(op_desc);
  1906. const auto workspace_size = ModelUtils::GetWorkspaceSize(op_desc);
  1907. const auto weight_size = ModelUtils::GetWeightSize(op_desc);
  1908. mem_info.input_size = std::accumulate(input_size.begin(), input_size.end(), 0);
  1909. mem_info.output_size = std::accumulate(output_size.begin(), output_size.end(), 0);
  1910. mem_info.workspace_size = std::accumulate(workspace_size.begin(), workspace_size.end(), 0);
  1911. mem_info.weight_size = std::accumulate(weight_size.begin(), weight_size.end(), 0);
  1912. mem_info.total_size = mem_info.weight_size + mem_info.input_size + mem_info.output_size + mem_info.workspace_size;
  1913. profile_list_.emplace_back(profile);
  1914. }
  1915. GELOGI("fusion task size: %zu, profile info size: %zu", op_id_map_.size(), profile_list_.size());
  1916. return SUCCESS;
  1917. }
Status DavinciModel::SinkModelProfile() {
  // profiling plugin must be registered
  auto &prof_mgr = ProfilingManager::Instance();
  ReporterData reporter_data{};
  // report model data tag name
  std::string tag_name("model_load_info_" + std::to_string(this->Id()));
  GE_CHK_BOOL_EXEC(memcpy_s(reporter_data.tag, MSPROF_ENGINE_MAX_TAG_LEN, tag_name.c_str(), tag_name.size()) == EOK,
                   return FAILED, "Sink model tag memcpy error.");

  // Model Header
  std::string name = om_name_.empty() ? name_ : om_name_;
  int32_t name_len = static_cast<int32_t>(name.size());  // length is reported as int32_t
  reporter_data.deviceId = device_id_;
  reporter_data.data = (unsigned char *)&name_len;
  reporter_data.dataLen = sizeof(int32_t);
  GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                   "Reporter data fail, model id:%u.", this->Id());

  reporter_data.data = (unsigned char *)name.c_str();
  reporter_data.dataLen = name.size();
  GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                   "Reporter data fail, model id:%u.", this->Id());

  uint32_t model_id = this->Id();
  reporter_data.data = (unsigned char *)&model_id;
  reporter_data.dataLen = sizeof(uint32_t);
  GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                   "Reporter data fail, model id:%u.", this->Id());

  // Load Start/End Time
  int64_t start_time = this->GetLoadBeginTime();
  reporter_data.data = (unsigned char *)&start_time;
  reporter_data.dataLen = sizeof(int64_t);
  GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                   "Reporter data fail, model id:%u.", this->Id());

  int64_t end_time = this->GetLoadEndTime();
  reporter_data.data = (unsigned char *)&end_time;
  reporter_data.dataLen = sizeof(int64_t);
  GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                   "Reporter data fail, model id:%u.", this->Id());

  using CIT = std::multimap<uint32_t, uint32_t>::const_iterator;
  using Range = std::pair<CIT, CIT>;
  for (const ProfileInfo &profile : profile_list_) {
    // op name after fusion
    string fusion_op_name = profile.fusion_info.op_name;
    int32_t fusion_op_name_len = fusion_op_name.size() == 0 ? 1 : fusion_op_name.size();
    reporter_data.data = (unsigned char *)&fusion_op_name_len;
    reporter_data.dataLen = sizeof(int32_t);
    GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                     "Reporter data fail, model id:%u.", this->Id());

    reporter_data.data = (unsigned char *)fusion_op_name.c_str();
    reporter_data.dataLen = fusion_op_name_len;
    GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                     "Reporter data fail, model id:%u.", this->Id());

    // original op names before fusion
    uint32_t op_num = profile.fusion_info.original_op_names.size();
    reporter_data.data = (unsigned char *)&op_num;
    reporter_data.dataLen = sizeof(int32_t);
    GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                     "Reporter data fail, model id:%u.", this->Id());

    for (uint32_t k = 0; k < op_num; k++) {
      std::string op_name = profile.fusion_info.original_op_names[k];
      int32_t op_name_len = op_name.size() == 0 ? 1 : op_name.size();
      reporter_data.data = (unsigned char *)&op_name_len;
      reporter_data.dataLen = sizeof(int32_t);
      GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                       "Reporter data fail, model id:%u.", this->Id());
      reporter_data.data = (unsigned char *)op_name.c_str();
      reporter_data.dataLen = op_name_len;
      GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                       "Reporter data fail, model id:%u.", this->Id());
    }

    // stream id info
    uint32_t streamId = profile.fusion_info.stream_id;
    reporter_data.data = (unsigned char *)&streamId;
    reporter_data.dataLen = sizeof(int32_t);
    GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                     "Reporter data fail, model id:%u.", this->Id());

    // memory info
    reporter_data.data = (unsigned char *)&profile.memory_info;
    reporter_data.dataLen = sizeof(profile.memory_info);
    GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                     "Reporter data fail, model id:%u.", this->Id());

    // task info
    reporter_data.data = (unsigned char *)&profile.task_count;
    reporter_data.dataLen = sizeof(uint32_t);
    GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                     "Reporter data fail, model id:%u.", this->Id());

    Range task_range = op_id_map_.equal_range(profile.fusion_info.op_index);
    for (CIT idx = task_range.first; idx != task_range.second; ++idx) {
      uint32_t task_id = idx->second;
      reporter_data.data = (unsigned char *)&task_id;
      reporter_data.dataLen = sizeof(uint32_t);
      GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                       "Reporter data fail, model id:%u.", this->Id());
    }
  }
  return SUCCESS;
}
Status DavinciModel::SinkTimeProfile(const InputData &current_data) {
  // profiling plugin must be registered
  auto &prof_mgr = ProfilingManager::Instance();
  ReporterData reporter_data{};
  // report model data tag name
  std::string tag_name;
  tag_name.append("model_time_info_")
    .append(std::to_string(this->Id()))
    .append("_")
    .append(std::to_string(current_data.index));
  GE_CHK_BOOL_EXEC(memcpy_s(reporter_data.tag, MSPROF_ENGINE_MAX_TAG_LEN, tag_name.c_str(), tag_name.size()) == EOK,
                   return FAILED, "Sink model tag memcpy error.");
  // device id
  reporter_data.deviceId = device_id_;

  // Model Header
  string name;
  if (!om_name_.empty()) {
    name = om_name_;
  } else {
    name = name_;
  }
  int32_t name_len = static_cast<int32_t>(name.size());  // length is reported as int32_t
  reporter_data.data = (unsigned char *)&name_len;
  reporter_data.dataLen = sizeof(int32_t);
  GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                   "Reporter data fail, model id:%u.", this->Id());

  reporter_data.data = (unsigned char *)name.c_str();
  reporter_data.dataLen = name.size();
  GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                   "Reporter data fail, model id:%u.", this->Id());

  // request id
  uint64_t request_id = current_data.request_id;
  reporter_data.data = (unsigned char *)&request_id;
  reporter_data.dataLen = sizeof(request_id);  // dataLen matches the 64-bit request id
  GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                   "Reporter data fail, model id:%u, data index:%u.", this->Id(), current_data.index);

  // thread id
  int32_t thread_id = GetDataInputTid();
  reporter_data.data = (unsigned char *)&thread_id;
  reporter_data.dataLen = sizeof(int32_t);
  GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                   "Reporter data fail, model id:%u, data index:%u.", this->Id(), current_data.index);

  // time info
  time_info_.modelId = this->Id();
  reporter_data.data = (unsigned char *)&time_info_;
  reporter_data.dataLen = sizeof(struct timeInfo);
  GE_CHK_BOOL_EXEC(prof_mgr.CallMsprofReport(reporter_data) == 0, return FAILED,
                   "Reporter data fail, model id:%u, data index:%u.", this->Id(), current_data.index);

  return SUCCESS;
}
void DavinciModel::SetProfileTime(ModelProcStage stage, int64_t endTime) {
  int64_t time = endTime;
  if (time == 0) {
    mmTimespec timespec = mmGetTickCount();
    time = timespec.tv_sec * 1000 * 1000 * 1000 + timespec.tv_nsec;  // 1000^3 converts seconds to nanoseconds
  }

  switch (stage) {
    case MODEL_LOAD_START:
      load_begin_time_ = time;
      break;
    case MODEL_LOAD_END:
      load_end_time_ = time;
      break;
    case MODEL_PRE_PROC_START:
      time_info_.processBeginTime = time;
      break;
    case MODEL_PRE_PROC_END:
      time_info_.processEndTime = time;
      break;
    case MODEL_INFER_START:
      time_info_.inferenceBeginTime = time;
      break;
    case MODEL_INFER_END:
      time_info_.inferenceEndTime = time;
      break;
    case MODEL_AFTER_PROC_START:
      time_info_.dumpBeginTime = time;
      break;
    case MODEL_AFTER_PROC_END:
      time_info_.dumpEndTime = time;
      break;
    default:
      break;
  }
  return;
}
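
// Illustrative arithmetic only (values invented): with tv_sec = 2 and
// tv_nsec = 500, the timestamp computed above is
//   2 * 1000 * 1000 * 1000 + 500 = 2000000500 ns,
// i.e. whole seconds are scaled to nanoseconds before the sub-second
// remainder is added.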
///
/// @ingroup ge
/// @brief send Output Op result to upper layer
/// @note output memory was already allocated in ModelLoad, no need to allocate again
/// @param [in] data_id: the index of output_data
/// @param [in/out] output_data: real user output_data
/// @param [in] kind: the kind of rtMemcpy
/// @return Status result
/// @author
///
Status DavinciModel::CopyOutputData(uint32_t data_id, OutputData &output_data, rtMemcpyKind_t kind) {
  if (output_addrs_list_.empty()) {
    Status ret = SyncVarData();
    return ret;
  }

  output_data.index = data_id;
  output_data.model_id = model_id_;
  if (output_data.blobs.size() != new_output_data_info_.size()) {
    GELOGE(FAILED, "Output data buffer num=%zu not equal model data num=%zu", output_data.blobs.size(),
           new_output_data_info_.size());
    return FAILED;
  }

  std::vector<DataBuffer> &blobs = output_data.blobs;
  size_t idx = 0;
  for (const auto &output : new_output_data_info_) {
    if (output.first >= blobs.size()) {
      GELOGE(FAILED, "Blobs not match: blobs=%zu, tensor=%zu, index=%u, size=%ld", blobs.size(),
             new_output_data_info_.size(), output.first, output.second.GetDataInfo().at(0).first);
      return FAILED;
    }

    if ((kind == RT_MEMCPY_DEVICE_TO_DEVICE) && (copy_only_addrs_.count(output.second.GetBasicAddr()) == 0)) {
      continue;  // Skip: Feed by zero copy.
    }

    DataBuffer &buffer = blobs[output.first];
    uint64_t mem_size = static_cast<uint64_t>(output.second.GetDataSize());
    if ((buffer.length == 0) || (mem_size == 0)) {
      GELOGI("Length of data is zero, no need to copy. output tensor index=%u", output.first);
      continue;
    }
    if (is_dynamic_) {
      GELOGI("No need to check output data size.");
    } else if (buffer.length < mem_size) {
      GELOGE(FAILED, "Tensor data size=%lu, buffer size=%lu", mem_size, buffer.length);
      return FAILED;
    } else if (buffer.length > mem_size) {
      GELOGW("Tensor data size=%lu, buffer size=%lu", mem_size, buffer.length);
    }

    int64_t data_size = output.second.GetDataSize();
    if (is_online_infer_dynamic_) {
      if (merge_nodes_gear_and_real_out_size_info_.find(idx) != merge_nodes_gear_and_real_out_size_info_.end()) {
        auto gear_and_real_out_size_info = merge_nodes_gear_and_real_out_size_info_[idx];
        data_size = gear_and_real_out_size_info[cur_dynamic_dims_];
      }
    }
    uint64_t buffer_length = buffer.length;
    void *buffer_addr = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer.data));

    GELOGI("[IMAS]CopyPlainData memcpy graph_%u type[F] output[%u] memaddr[%p] mem_size[%lu] datasize[%lu]",
           runtime_param_.graph_id, output.first, output.second.GetBasicAddr(), data_size, buffer_length);
    GE_CHK_RT_RET(rtMemcpy(buffer_addr, buffer_length, output.second.GetBasicAddr(), data_size, kind));
    idx++;
  }
  return SUCCESS;
}
Status DavinciModel::InitOutputTensorInfo(const OpDescPtr &op_desc) {
  size_t input_num = op_desc->GetInputsSize();
  if (is_getnext_sink_dynamic_) {
    input_num = input_num - kGetDynamicDimsCount;
  }

  for (size_t i = 0; i < input_num; ++i) {
    int64_t size = 0;
    auto input_desc = op_desc->GetInputDescPtr(i);
    GE_CHECK_NOTNULL(input_desc);
    auto ret = TensorUtils::GetTensorSizeInBytes(*input_desc, size);
    GE_IF_BOOL_EXEC(ret != GRAPH_SUCCESS,
                    GELOGE(ret, "Get size from TensorDesc failed, op:%s, input id:%zu", op_desc->GetName().c_str(), i);
                    return ret);
    const GeShape &shape = input_desc->GetShape();
    GELOGI("Output size is %ld, output shape is %s.", size, formats::JoinToString(shape.GetDims()).c_str());
    output_buffer_size_.emplace_back(size);
    output_shape_info_.emplace_back(shape);
  }
  return SUCCESS;
}
Status DavinciModel::GenOutputTensorInfo(OutputData *output_data, vector<OutputTensorInfo> &outputs) {
  GE_CHECK_NOTNULL(output_data);
  if (!output_data->blobs.empty()) {
    GELOGI("No need to generate output tensor info, model id:%u", model_id_);
    return SUCCESS;
  }

  vector<int64_t> output_buffer_size;
  vector<vector<int64_t>> output_shape_info;
  size_t output_num = output_buffer_size_.size();
  for (size_t i = 0; i < output_num; ++i) {
    int64_t output_size = output_buffer_size_[i];
    vector<int64_t> output_shape = output_shape_info_[i].GetDims();
    if (is_online_infer_dynamic_) {
      if (merge_nodes_gear_and_real_out_size_info_.find(i) != merge_nodes_gear_and_real_out_size_info_.end()) {
        auto gear_and_real_out_size_info = merge_nodes_gear_and_real_out_size_info_[i];
        output_size = gear_and_real_out_size_info[cur_dynamic_dims_];
        auto gear_and_real_out_shape_info = merge_nodes_gear_and_real_out_shape_info_[i];
        output_shape = gear_and_real_out_shape_info[cur_dynamic_dims_];
        is_dynamic_ = true;
      }
    }
    GELOGI("Output size is %ld, output shape is %s.", output_size, formats::JoinToString(output_shape).c_str());
    output_buffer_size.push_back(output_size);
    output_shape_info.push_back(output_shape);
  }

  GELOGI("Output blobs size:%zu, model id:%u", output_buffer_size_.size(), model_id_);
  for (size_t i = 0; i < output_buffer_size.size(); ++i) {
    std::unique_ptr<uint8_t[]> data_buf(new (std::nothrow) uint8_t[output_buffer_size[i]]);
    if (data_buf == nullptr) {
      GELOGE(GE_GRAPH_MALLOC_FAILED, "Malloc buffer failed.");
      return GE_GRAPH_MALLOC_FAILED;
    }
    output_data->blobs.push_back({data_buf.get(), static_cast<uint64_t>(output_buffer_size[i]), false});
    OutputTensorInfo output;
    output.dims = output_shape_info[i];
    output.data = std::move(data_buf);
    output.length = output_buffer_size[i];
    // log before moving `output`, otherwise its dims would already be emptied
    GELOGD("Output index:%zu, output dims is %s, data length:%lu.", i,
           formats::JoinToString(output.dims).c_str(), output.length);
    outputs.emplace_back(std::move(output));
  }
  return SUCCESS;
}
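
// Editorial note on ownership in GenOutputTensorInfo: output_data->blobs keeps
// only the raw pointer (data_buf.get()), while the unique_ptr itself is moved
// into the matching OutputTensorInfo, so each blob stays valid exactly as long
// as the corresponding entry in `outputs` is alive.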
///
/// @ingroup ge
/// @brief send Output Op result to upper layer
/// @note output memory was already allocated in ModelLoad, no need to allocate again
/// @param [in] data_id: the index of output_data
/// @param [in] rslt_flg: result flag
/// @param [in] seq_end_flag: sequence end flag
/// @param [out] output_data: real user output_data
/// @return Status result
/// @author
///
Status DavinciModel::ReturnResult(uint32_t data_id, const bool rslt_flg, const bool seq_end_flag,
                                  OutputData *output_data) {
  GE_CHK_BOOL_EXEC(listener_ != nullptr, return PARAM_INVALID, "listener_ is null.");
  std::vector<ge::OutputTensorInfo> outputs;

  // return result is not required
  if (!rslt_flg && !seq_end_flag) {
    GELOGW("Compute failed, model id: %u", model_id_);
    auto model_manager = ModelManager::GetInstance();
    GE_CHECK_NOTNULL(model_manager);
    auto exception_infos = model_manager->GetExceptionInfos();
    if (!exception_infos.empty()) {
      GE_CHK_STATUS_RET(data_dumper_.DumpExceptionInfo(exception_infos), "Dump exception info failed");
    } else {
      GELOGI("Exception info is null");
    }
    GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, INTERNAL_ERROR, outputs), "OnComputeDone failed.");
    return INTERNAL_ERROR;
  }

  if (output_addrs_list_.empty()) {
    GELOGW("Output tensor list is empty, model id: %u", model_id_);
    GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, INTERNAL_ERROR, outputs), "OnComputeDone failed.");
    return INTERNAL_ERROR;
  }

  GE_CHECK_NOTNULL(output_data);
  output_data->index = data_id;
  output_data->model_id = model_id_;

  if (is_getnext_sink_dynamic_) {
    GELOGD("Reinit cur dynamic dims when getnext sink dynamic.");
    cur_dynamic_dims_.clear();
    cur_dynamic_dims_.resize(shape_of_cur_dynamic_dims_);
    auto ret = rtMemcpy(cur_dynamic_dims_.data(), shape_of_cur_dynamic_dims_ * sizeof(int64_t),
                        netoutput_last_input_addr_, netoutput_last_input_size_, RT_MEMCPY_DEVICE_TO_HOST);
    GE_CHK_RT_RET(ret);
  }

  GELOGD("Cur dynamic dims is %s.", formats::JoinToString(cur_dynamic_dims_).c_str());
  if (GenOutputTensorInfo(output_data, outputs) != SUCCESS) {
    return INTERNAL_ERROR;
  }

  if (CopyOutputData(data_id, *output_data, RT_MEMCPY_DEVICE_TO_HOST) != SUCCESS) {
    GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, INTERNAL_ERROR, outputs), "OnComputeDone failed");
    return INTERNAL_ERROR;
  }

  if (seq_end_flag) {
    GELOGW("End of sequence, model id: %u", model_id_);
    GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, END_OF_SEQUENCE, outputs), "OnComputeDone failed.");
    return END_OF_SEQUENCE;
  }

  GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, SUCCESS, outputs), "OnComputeDone failed");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief return no output to upper layer (cloud case)
/// @param [in] data_id
/// @return Status result
///
Status DavinciModel::ReturnNoOutput(uint32_t data_id) {
  GELOGI("ReturnNoOutput model id:%u", model_id_);
  for (auto op_desc : variable_op_list_) {
    Status ret = VarManager::Instance(session_id_)
                   ->SyncBroadCastData2Var(runtime_param_.graph_id, op_desc->GetName(), op_desc, mem_base_);
    GE_CHK_BOOL_EXEC(ret == SUCCESS, break, "sync var data failed, model id:%u, op name:%s.", model_id_,
                     op_desc->GetName().c_str());
  }

  GE_CHK_BOOL_EXEC(listener_ != nullptr, return PARAM_INVALID, "listener_ is null!");
  std::vector<ge::OutputTensorInfo> outputs;
  GE_CHK_STATUS(listener_->OnComputeDone(model_id_, data_id, SUCCESS, outputs), "OnComputeDone failed.");
  return SUCCESS;
}
void *DavinciModel::Run(DavinciModel *model) {
  GE_CHK_BOOL_EXEC(model != nullptr,
                   CsaInteract::GetInstance().WriteErrorCode(FAILED, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
                   return nullptr, "model_pointer is null!")
  bool seq_end_flag = false;
  uint32_t model_id = model->Id();
  uint32_t device_id = model->GetDeviceId();

  GELOGI("Model Run thread start, model_id:%u.", model_id);
  rtError_t rt_ret = rtSetDevice(static_cast<int32_t>(device_id));
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(FAILED, "Model run rtSetDevice failed.");
    return nullptr;
  }
  // DeviceReset before thread run finished!
  GE_MAKE_GUARD(not_used_var, [&] { GE_CHK_RT(rtDeviceReset(device_id)); });

  while (model->RunFlag()) {
    bool rslt_flg = true;
    if (model->GetDataInputer() == nullptr) {
      GELOGW("Data inputer is nullptr.");
      CsaInteract::GetInstance().StoreInternalErrorCode(FAILED, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
      break;
    }
    std::shared_ptr<InputDataWrapper> data_wrapper;
    Status ret = model->GetDataInputer()->Pop(data_wrapper);
    if (data_wrapper == nullptr || ret != SUCCESS) {
      GELOGI("data_wrapper is null!");
      continue;
    }
    GELOGI("Getting the input data, model_id:%u", model_id);
    GE_IF_BOOL_EXEC(!model->RunFlag(), break);

    InputData current_data = data_wrapper->GetInput();
    GELOGI("Model thread Run begin, model id:%u, data index:%u.", model_id, current_data.index);

    GE_TIMESTAMP_START(Model_SyncVarData);
    ret = model->SyncVarData();
    GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
      ret != SUCCESS, (void)model->ReturnResult(current_data.index, false, false, data_wrapper->GetOutput());
      CsaInteract::GetInstance().StoreInternalErrorCode(ret, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
      continue, "Copy input data to model failed.");  // [No need to check value]
    GE_IF_BOOL_EXEC(model->is_first_execute_, GE_TIMESTAMP_EVENT_END(Model_SyncVarData, "Model Run SyncVarData"));

    GELOGI("Copy input data, model id:%u", model_id);
    GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(),
                    model->SetProfileTime(MODEL_PRE_PROC_START));
    ret = model->CopyInputData(current_data, false);
    GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
      ret != SUCCESS, (void)model->ReturnResult(current_data.index, false, false, data_wrapper->GetOutput());
      CsaInteract::GetInstance().StoreInternalErrorCode(ret, ERROR_MODULE_FMK, JOBSUBSTATE_GRAPH_EXEC);
      continue, "Copy input data to model failed.");  // [No need to check value]
    if (model->is_online_infer_dynamic_ && !model->is_getnext_sink_dynamic_) {
      model->cur_dynamic_dims_.clear();
      GE_IF_BOOL_EXEC(current_data.blobs.empty(), break);
      auto shape_data_buffer_data = current_data.blobs.back().data;
      auto shape_data_buffer_length = current_data.blobs.back().length;
      model->cur_dynamic_dims_.assign(reinterpret_cast<int64_t *>(shape_data_buffer_data),
                                      reinterpret_cast<int64_t *>(shape_data_buffer_data) +
                                        shape_data_buffer_length / sizeof(int64_t));
      GELOGD("Data: cur dynamic dims is %s", formats::JoinToString(model->cur_dynamic_dims_).c_str());
      delete[] reinterpret_cast<int64_t *>(current_data.blobs.back().data);
      current_data.blobs.pop_back();
    }
    GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), model->SetProfileTime(MODEL_PRE_PROC_END));
    GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), model->SetProfileTime(MODEL_INFER_START));

    GE_TIMESTAMP_START(rtModelExecute);
    GELOGI("rtModelExecute start.");
    rt_ret = rtModelExecute(model->rt_model_handle_, model->rt_model_stream_, 0);
    GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, rslt_flg = false;
                    (void)model->ReturnResult(current_data.index, false, false, data_wrapper->GetOutput());
                    CsaInteract::GetInstance().WriteErrorCode(rt_ret, ERROR_MODULE_RUNTIME, JOBSUBSTATE_GRAPH_EXEC);
                    continue);
    GELOGI("rtModelExecute end");
    GE_IF_BOOL_EXEC(model->is_first_execute_, GE_TIMESTAMP_EVENT_END(rtModelExecute, "GraphExecute::rtModelExecute"));

    GE_TIMESTAMP_START(rtStreamSynchronize);
    GELOGI("rtStreamSynchronize start.");
    rt_ret = rtStreamSynchronize(model->rt_model_stream_);
    if (rt_ret == kEndOfSequence || rt_ret == kEndOfSequenceNew) {
      seq_end_flag = true;
    }
    if (rt_ret == kModelAbortNormal || rt_ret == kModelAbortNormalNew) {
      GELOGI("The model with multiple datasets aborts normally.");
    } else {
      GE_IF_BOOL_EXEC(
        rt_ret != RT_ERROR_NONE, rslt_flg = false; GELOGI("seq_end_flag: %d", seq_end_flag);
        (void)model->ReturnResult(current_data.index, false, seq_end_flag,
                                  data_wrapper->GetOutput());  // [No need to check value]
        CsaInteract::GetInstance().StoreInternalErrorCode(rt_ret, ERROR_MODULE_RUNTIME, JOBSUBSTATE_GRAPH_EXEC);
        continue);
    }
    GELOGI("rtStreamSynchronize end.");
    GE_IF_BOOL_EXEC(model->is_first_execute_,
                    GE_TIMESTAMP_EVENT_END(rtStreamSynchronize, "GraphExecute::Wait for rtStreamSynchronize"));
    GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), model->SetProfileTime(MODEL_INFER_END));
    GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(),
                    model->SetProfileTime(MODEL_AFTER_PROC_START));

    GE_TIMESTAMP_START(ReturnResult3);
    // copy output data from device to host
    GE_IF_BOOL_EXEC(!model->output_addrs_list_.empty(),
                    (void)model->ReturnResult(current_data.index, rslt_flg, false, data_wrapper->GetOutput()))
    // copy output data from device to host for variable graph
    GE_IF_BOOL_EXEC(model->output_addrs_list_.empty(), (void)model->ReturnNoOutput(current_data.index));
    GE_IF_BOOL_EXEC(model->is_first_execute_,
                    GE_TIMESTAMP_EVENT_END(ReturnResult3, "GraphExecute::CopyDataFromDeviceToHost"));
    GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(),
                    model->SetProfileTime(MODEL_AFTER_PROC_END));
    GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), (void)model->SinkTimeProfile(current_data));

    model->iterator_count_++;
    model->is_first_execute_ = false;
    GELOGI("run iterator count is %lu", model->iterator_count_);
  }

  CsaInteract::GetInstance().WriteInternalErrorCode();
  GELOGI("Model run end, model id:%u", model->model_id_);
  return nullptr;
}
///
/// @ingroup ge
/// @brief call API provided by data inputer to destroy thread
/// @param [in] no
/// @return Status Destroy result
/// @author
///
Status DavinciModel::DestroyThread() {
  GE_CHK_BOOL_RET_STATUS(data_inputer_ != nullptr, INTERNAL_ERROR, "data_inputer_ is nullptr.");
  run_flg_ = false;
  data_inputer_->Stop();
  if (thread_id_.joinable()) {
    thread_id_.join();
  }
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief create model std::thread,
/// @brief start to execute Model
/// @param [in] no
/// @return Status create model thread and execute result
/// @author
///
Status DavinciModel::ModelRunStart() {
  GE_CHK_BOOL_RET_STATUS(data_inputer_ != nullptr, INTERNAL_ERROR, "data_inputer_ is nullptr.");
  LockRunFlg();
  GE_MAKE_GUARD(tmp_lock, [&] { UnlockRunFlg(); });
  GE_CHK_BOOL_RET_STATUS(!run_flg_, INTERNAL_ERROR, "Model already started.");

  run_flg_ = true;

  // create stream instance which rt_model_handle is running on
  GE_CHK_RT_RET(rtStreamCreate(&rt_model_stream_, priority_));
  is_inner_model_stream_ = true;

  string opt = "0";
  (void)ge::GetContext().GetOption(OPTION_GE_MAX_DUMP_OP_NUM, opt);  // option may not be set up, no need to check value
  int64_t maxDumpOpNum = std::strtol(opt.c_str(), nullptr, kDecimal);
  maxDumpOpNum_ = maxDumpOpNum;

  CREATE_STD_THREAD(thread_id_, DavinciModel::Run, this);
  GELOGI("model thread create success, model id:%u.", model_id_);
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief call API provided by data inputer and destroy model Thread
/// @param [in] no
/// @return Status Destroy result
/// @author
///
Status DavinciModel::ModelRunStop() {
  LockRunFlg();
  GE_MAKE_GUARD(tmp_lock, [&] { UnlockRunFlg(); });
  GE_IF_BOOL_EXEC(!run_flg_, return SUCCESS);

  GE_CHK_STATUS_RET(DestroyThread(), "DestroyThread failed.");
  return SUCCESS;
}
void DavinciModel::UnbindTaskSinkStream() {
  // unbind hcom stream
  UnbindHcomStream();
  if (is_stream_list_bind_) {
    for (size_t i = 0; i < stream_list_.size(); i++) {
      // unbind rt_model_handle and streams
      GE_LOGW_IF(rtModelUnbindStream(rt_model_handle_, stream_list_[i]) != RT_ERROR_NONE,
                 "Unbind stream from model failed! Index: %zu", i);
    }
  }

  if (is_inner_model_stream_) {
    if (!input_queue_ids_.empty() || !output_queue_ids_.empty()) {
      GE_LOGW_IF(rtModelUnbindStream(rt_model_handle_, rt_model_stream_) != RT_ERROR_NONE, "Unbind stream failed!");
    }
    // destroy stream that is bound with rt_model
    GE_LOGW_IF(rtStreamDestroy(rt_model_stream_) != RT_ERROR_NONE, "Destroy stream for rt_model failed.")
  }

  if (is_pure_head_stream_ && rt_head_stream_ != nullptr) {
    GE_LOGW_IF(rtModelUnbindStream(rt_model_handle_, rt_head_stream_) != RT_ERROR_NONE, "Unbind stream failed!");
    GE_LOGW_IF(rtStreamDestroy(rt_head_stream_) != RT_ERROR_NONE, "Destroy stream for rt_model failed.");
    rt_head_stream_ = nullptr;
  }

  if (rt_entry_stream_ != nullptr) {
    GE_LOGW_IF(rtModelUnbindStream(rt_model_handle_, rt_entry_stream_) != RT_ERROR_NONE, "Unbind stream failed!");
    GE_LOGW_IF(rtStreamDestroy(rt_entry_stream_) != RT_ERROR_NONE, "Destroy stream for rt_model failed.");
    rt_entry_stream_ = nullptr;
  }
}
void *DavinciModel::GetRunAddress(void *addr) const {
  if (fixed_mem_base_ == reinterpret_cast<uintptr_t>(mem_base_)) {
    return addr;
  }

  uintptr_t ptr = reinterpret_cast<uintptr_t>(addr);
  if ((fixed_mem_base_ <= ptr) && (ptr < fixed_mem_base_ + runtime_param_.mem_size)) {
    return mem_base_ + (ptr - fixed_mem_base_);
  } else {
    return addr;
  }
}
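
// Editorial sketch of the rebasing above, with invented numbers: if the model
// was generated against fixed_mem_base_ = 0x1000 but actually loaded at
// mem_base_ = 0x5000 (and mem_size covers the window), an address 0x1040 falls
// inside [0x1000, 0x1000 + mem_size) and is remapped to 0x5000 + 0x40 = 0x5040;
// anything outside that window (weights, user buffers) is returned unchanged.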
Status DavinciModel::CreateKnownZeroCopyMap(const vector<void *> &inputs, const vector<void *> &outputs) {
  GELOGI("in, inputs size: %zu, input addr size: %zu, outputs size: %zu, output addr size: %zu",
         inputs.size(), input_addrs_list_.size(), outputs.size(), output_addrs_list_.size());
  if (inputs.size() > input_addrs_list_.size()) {
    GELOGE(FAILED, "input data addr num %zu should not exceed input op num %zu.", inputs.size(),
           input_addrs_list_.size());
    return FAILED;
  }
  // remove zero copy addr in last iteration
  known_input_data_info_.clear();
  known_output_data_info_.clear();
  for (size_t i = 0; i < inputs.size(); ++i) {
    const vector<void *> &addr_list = input_addrs_list_[i];
    void *addr = GetRunAddress(addr_list[kDataIndex]);
    known_input_data_info_[addr] = inputs[i];
    GELOGI("input %zu, v addr %p, r addr %p, p addr %p", i, addr_list[kDataIndex], addr, inputs[i]);
  }

  if (output_addrs_list_.empty()) {
    GELOGW("output op num in graph is %zu", output_addrs_list_.size());
    return SUCCESS;
  }
  const vector<void *> &addr_list = output_addrs_list_.front();
  for (size_t i = 0; i < addr_list.size() && i < outputs.size(); ++i) {
    void *addr = GetRunAddress(addr_list[i]);
    known_output_data_info_[addr] = outputs[i];
    GELOGI("output %zu, v addr %p, r addr %p, p addr %p", i, addr_list[i], addr, outputs[i]);
  }

  GELOGI("success, known input data info size: %zu, known output data info size: %zu",
         known_input_data_info_.size(), known_output_data_info_.size());
  return SUCCESS;
}
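
// Hypothetical usage sketch (dev_in / dev_out are invented caller-owned device
// pointers; this call pattern is an assumption, not taken from this file): the
// caller passes the real device buffers, and the map then translates the
// virtual addresses recorded in the tasks to those buffers.
//
//   std::vector<void *> inputs{dev_in};
//   std::vector<void *> outputs{dev_out};
//   GE_CHK_STATUS_RET(model.CreateKnownZeroCopyMap(inputs, outputs));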
void DavinciModel::SetTotalIOAddrs(const vector<void *> &io_addrs) {
  if (fixed_mem_base_ == reinterpret_cast<uintptr_t>(mem_base_)) {
    total_io_addrs_.insert(total_io_addrs_.end(), io_addrs.begin(), io_addrs.end());
    return;
  }

  for (size_t i = 0; i < io_addrs.size(); ++i) {
    total_io_addrs_.emplace_back(GetRunAddress(io_addrs[i]));
  }
}
Status DavinciModel::UpdateKnownZeroCopyAddr(vector<void *> &total_io_addrs, bool update_args) {
  if (fixed_mem_base_ != reinterpret_cast<uintptr_t>(mem_base_) && update_args) {
    for (size_t i = 0; i < total_io_addrs.size(); ++i) {
      total_io_addrs[i] = GetRunAddress(total_io_addrs[i]);
    }
  }

  for (size_t i = 0; i < total_io_addrs.size(); ++i) {
    auto it_in = known_input_data_info_.find(total_io_addrs[i]);
    if (it_in != known_input_data_info_.end()) {
      GELOGI("input %zu, v addr %p, p addr %p", i, total_io_addrs[i], known_input_data_info_.at(total_io_addrs[i]));
      total_io_addrs[i] = known_input_data_info_.at(total_io_addrs[i]);
    }
    auto it_out = known_output_data_info_.find(total_io_addrs[i]);
    if (it_out != known_output_data_info_.end()) {
      GELOGI("output %zu, v addr %p, p addr %p", i, total_io_addrs[i], known_output_data_info_.at(total_io_addrs[i]));
      total_io_addrs[i] = known_output_data_info_.at(total_io_addrs[i]);
    }
  }
  GELOGI("success, total io addrs size: %zu", total_io_addrs.size());
  return SUCCESS;
}
Status DavinciModel::UpdateKnownNodeArgs(const vector<void *> &inputs, const vector<void *> &outputs) {
  GELOGI("DavinciModel::UpdateKnownNodeArgs in");
  GE_CHK_STATUS_RET(CreateKnownZeroCopyMap(inputs, outputs),
                    "DavinciModel::UpdateKnownNodeArgs create map for input/output zero copy.");
  if (!base_addr_not_changed_) {
    total_io_addrs_.clear();
    orig_total_io_addrs_.clear();
    for (size_t task_index = 0; task_index < task_list_.size(); ++task_index) {
      auto &task = task_list_[task_index];
      if (task != nullptr) {
        Status ret = task->UpdateArgs();
        if (ret != SUCCESS) {
          GELOGE(FAILED, "task %zu created by davinci model update args failed.", task_index);
          return FAILED;
        }
      }
    }
    // cache latest iteration io addr
    orig_total_io_addrs_ = total_io_addrs_;
  } else {
    total_io_addrs_ = orig_total_io_addrs_;
  }
  GE_CHK_STATUS_RET(UpdateKnownZeroCopyAddr(total_io_addrs_, false), "DavinciModel::UpdateKnownZeroCopyAddr failed.");

  if (total_args_size_ == 0) {
    GELOGW("DavinciModel::UpdateKnownNodeArgs device args %p, dst size %u, pass rtMemcpy.", args_, total_args_size_);
  } else {
    uint32_t total_addr_size = total_io_addrs_.size() * sizeof(uint64_t);
    GELOGI("DavinciModel::UpdateKnownNodeArgs device args %p, dst size %u, src size %u", args_, total_args_size_,
           total_addr_size);
    rtError_t rt_ret =
      rtMemcpy(args_, total_args_size_, total_io_addrs_.data(), total_addr_size, RT_MEMCPY_HOST_TO_DEVICE);
    GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(rt_ret, "rtMemcpy error, ret: 0x%X", rt_ret); return FAILED;)
  }

  GELOGI("DavinciModel::UpdateKnownNodeArgs success");
  return SUCCESS;
}
Status DavinciModel::InitTaskInfo(domi::ModelTaskDef &model_task_def) {
  GELOGI("InitTaskInfo in, task size %d", model_task_def.task().size());
  task_list_.resize(model_task_def.task_size());
  for (int i = 0; i < model_task_def.task_size(); ++i) {
    // dynamic shape will have created task_list_ before
    const domi::TaskDef &task = model_task_def.task(i);
    if (this->task_list_[i] == nullptr) {
      task_list_[i] = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(task.type()));
    }
    GE_CHECK_NOTNULL(task_list_[i]);
    Status ret = task_list_[i]->Init(task, this);
    if (ret != SUCCESS) {
      GELOGE(ret, "Task index %d init failed.", i);
      return ret;
    }
  }
  GELOGI("InitTaskInfo out");
  return SUCCESS;
}
Status DavinciModel::MallocKnownArgs() {
  GELOGI("DavinciModel::MallocKnownArgs in");
  const auto &model_task_def = ge_model_->GetModelTaskDefPtr();
  if (model_task_def->task_size() == 0) {
    GELOGW("DavinciModel::MallocKnownArgs davincimodel has no task info.");
    return SUCCESS;
  }
  task_list_.resize(model_task_def->task_size());
  for (int32_t i = 0; i < model_task_def->task_size(); ++i) {
    const domi::TaskDef &taskdef = model_task_def->task(i);
    task_list_[i] = TaskInfoFactory::Instance().Create(static_cast<rtModelTaskType_t>(taskdef.type()));
    GE_CHECK_NOTNULL(task_list_[i]);
    Status ret = task_list_[i]->CalculateArgs(taskdef, this);
    if (ret != SUCCESS) {
      GELOGE(ret, "TaskInfo CalculateArgs failed.");
      return ret;
    }
  }

  // malloc args memory
  if (total_args_size_ == 0) {
    GELOGW("DavinciModel::MallocKnownArgs total_args_size_ equals to zero.");
    return SUCCESS;
  }

  rtError_t rt_ret = rtMalloc(&args_, total_args_size_, RT_MEMORY_HBM);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret);
    return RT_ERROR_TO_GE_STATUS(rt_ret);
  }
  // malloc dynamic and static hybrid memory
  if (total_hybrid_args_size_ != 0) {
    rt_ret = rtMalloc(&hybrid_addrs_, total_hybrid_args_size_, RT_MEMORY_HBM);
    if (rt_ret != RT_ERROR_NONE) {
      GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret);
      return RT_ERROR_TO_GE_STATUS(rt_ret);
    }
  }
  // malloc fixed addr memory, eg: rts op
  if (total_fixed_addr_size_ != 0) {
    GELOGI("Begin to allocate fixed addr.");
    rt_ret = rtMalloc(&fixed_addrs_, total_fixed_addr_size_, RT_MEMORY_HBM);
    if (rt_ret != RT_ERROR_NONE) {
      GELOGE(RT_FAILED, "Call rtMalloc failed, ret: 0x%X", rt_ret);
      return RT_ERROR_TO_GE_STATUS(rt_ret);
    }
  }

  GELOGI("DavinciModel::MallocKnownArgs success, total args size %u. total fixed addr size %ld", total_args_size_,
         total_fixed_addr_size_);
  return SUCCESS;
}
Status DavinciModel::DistributeTask() {
  GELOGI("do Distribute.");
  for (auto &task : cpu_task_list_) {
    if (task == nullptr) {
      GELOGW("task is null");
      continue;
    }
    GE_CHK_STATUS_RET(task->Distribute());
  }

  task_desc_info_.clear();
  bool flag = GetL1FusionEnableOption();
  char skt_enable_env[MMPA_MAX_PATH] = { 0x00 };
  INT32 res = mmGetEnv("SKT_ENABLE", skt_enable_env, MMPA_MAX_PATH);
  int64_t env_flag = (res == EN_OK) ? std::strtol(skt_enable_env, nullptr, kDecimal) : 0;
  if (env_flag != 0) {
    flag = true;
  }

  const auto &model_task_def = ge_model_->GetModelTaskDefPtr();
  for (size_t task_index = 0; task_index < task_list_.size(); ++task_index) {
    auto &task_def = model_task_def->task(task_index);
    auto &task = task_list_.at(task_index);
    GE_CHK_STATUS_RET(task->Distribute(), "Task[%zu] distribute fail", task_index);
    // for data dump
    auto op_index = std::max(task_def.kernel().context().op_index(),
                             task_def.kernel_ex().op_index());
    OpDescPtr op = GetOpByIndex(op_index);
    GE_CHECK_NOTNULL(op);

    if (reinterpret_cast<void *>(task->GetDumpArgs()) != nullptr) {
      bool call_dump = GetDumpProperties().IsLayerNeedDump(name_, om_name_, op->GetName()) && task->CallSaveDumpInfo();
      if (call_dump || is_op_debug_reg_) {
        SaveDumpTask(task->GetTaskID(), task->GetStreamId(), op, task->GetDumpArgs());
      }
    }

    auto task_type = static_cast<rtModelTaskType_t>(task_def.type());
    bool no_need_profiling = (task_type != RT_MODEL_TASK_KERNEL) && (task_type != RT_MODEL_TASK_KERNEL_EX);
    GE_IF_BOOL_EXEC(no_need_profiling, continue);

    SaveDumpOpInfo(runtime_param_, op, task->GetTaskID(), task->GetStreamId());
    // Load task info for profiling
    TaskDescInfo task_desc_info;
    if (!om_name_.empty()) {
      task_desc_info.model_name = om_name_;
    } else {
      task_desc_info.model_name = name_;
    }
    task_desc_info.op_name = op->GetName();
    task_desc_info.block_dim = task_def.kernel().block_dim();
    task_desc_info.task_id = task->GetTaskID();
    task_desc_info.stream_id = task->GetStreamId();
    task_desc_info.shape_type = "static";
    task_desc_info.cur_iter_num = 0;
    task_desc_info_.emplace_back(task_desc_info);
    if (flag) {
      if (task->GetSktTaskID() != 0xFFFFFFFF) {
        TaskDescInfo task_desc_info;
        string op_name = "super_kernel_" + to_string(task_index);
        task_desc_info.op_name = op_name;
        task_desc_info.task_id = task->GetSktTaskID();
        task_desc_info_.emplace_back(task_desc_info);
      }
    }
  }
  // launch dump kernel to aicpu
  GE_CHK_STATUS_RET(data_dumper_.LoadDumpInfo(), "Load dump info failed.");
  return SUCCESS;
}
void DavinciModel::SetEndGraphId(uint32_t task_id, uint32_t stream_id) {
  auto all_dump_model = GetDumpProperties().GetAllDumpModel();
  bool findByOmName = all_dump_model.find(om_name_) != all_dump_model.end();
  bool findByModelName = all_dump_model.find(name_) != all_dump_model.end();
  if (all_dump_model.find(ge::DUMP_ALL_MODEL) != all_dump_model.end() || findByOmName || findByModelName) {
    GELOGI("start save end_graph_info to dumper, task_id is %u, stream_id is %u", task_id, stream_id);
    data_dumper_.SaveEndGraphId(task_id, stream_id);
  }
}
///
/// @ingroup ge
/// @brief Set copy only for No task feed NetOutput address.
/// @return None.
///
void DavinciModel::SetCopyOnlyOutput() {
  for (const auto &output_outside_addrs : new_output_outside_addrs_) {
    ZeroCopyOffset output_outside = output_outside_addrs.second;
    for (uint32_t out_count = 0; out_count < output_outside.GetAddrCount(); ++out_count) {
      auto &addrs_mapping_list = output_outside.GetOutsideAddrs();
      std::map<const void *, std::vector<void *>> virtual_args_addrs = addrs_mapping_list[out_count];
      for (const auto &virtual_args_addr : virtual_args_addrs) {
        const auto &args_addrs = virtual_args_addr.second;
        if (args_addrs.empty()) {  // No task feeds this output addr, need to copy directly.
          GELOGI("[ZCPY] just copy %p to netoutput.", virtual_args_addr.first);
          copy_only_addrs_.insert(virtual_args_addr.first);
        }
      }
    }
  }
}
///
/// @ingroup ge
/// @brief Set disabled input zero copy addr.
/// @param [in] const void *addr: address of task
/// @return None.
///
void DavinciModel::DisableZeroCopy(const void *addr) {
  if (real_virtual_addrs_.find(addr) == real_virtual_addrs_.end()) {
    return;
  }

  // Data link to RTS Op directly.
  std::lock_guard<std::mutex> lock(outside_addrs_mutex_);
  GELOGI("[ZCPY] disable zero copy of %p.", addr);
  copy_only_addrs_.insert(addr);
}
///
/// @ingroup ge
/// @brief Save outside address used info for ZeroCopy.
/// @param [in] const OpDescPtr &op_desc: current op desc
/// @param [in] const std::vector<void *> &outside_addrs: addresses referenced by the task
/// @param [in] const void *info: host copy of the original task args
/// @param [in] void *args: device address of the task args
/// @param [in] size_t size: size of task args
/// @param [in] size_t offset: offset of task args
/// @return None.
///
void DavinciModel::SetZeroCopyAddr(const OpDescPtr &op_desc, const std::vector<void *> &outside_addrs, const void *info,
                                   void *args, size_t size, size_t offset) {
  // Internal call has ensured that op_desc is not nullptr
  GELOGD("[ZCPY] SetZeroCopyAddr for %s.", op_desc->GetName().c_str());
  size_t nums = outside_addrs.size();
  ZeroCopyTask zero_copy_task(op_desc->GetName(), static_cast<uint8_t *>(args), size);
  for (size_t i = 0; i < nums; ++i) {
    std::lock_guard<std::mutex> lock(outside_addrs_mutex_);
    for (auto &input_outside_addrs : new_input_outside_addrs_) {
      ZeroCopyOffset &input_outside = input_outside_addrs.second;
      input_outside.SetOutsideAddrsValue(zero_copy_task, outside_addrs[i], args, offset + i * kAddrLen);
    }
    for (auto &output_outside_addrs : new_output_outside_addrs_) {
      ZeroCopyOffset &output_outside = output_outside_addrs.second;
      output_outside.SetOutsideAddrsValue(zero_copy_task, outside_addrs[i], args, offset + i * kAddrLen);
    }
  }

  string batch_label;
  if (!AttrUtils::GetStr(op_desc, ATTR_NAME_BATCH_LABEL, batch_label) || batch_label.empty()) {
    zero_copy_task.SetBatchLabel(kDefaultBatchLable);
  } else {
    zero_copy_task.SetBatchLabel(batch_label);
  }

  std::lock_guard<std::mutex> lock(outside_addrs_mutex_);
  if (zero_copy_task.IsTaskArgsSet()) {
    zero_copy_task.SetOriginalArgs(info, offset + nums * kAddrLen);
    zero_copy_tasks_.emplace_back(zero_copy_task);
  }
}
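
// Editorial note on the args layout assumed above: the device args buffer holds
// one pointer slot per outside address, so entry i lives at byte offset
// offset + i * kAddrLen, and the original host copy saved via SetOriginalArgs
// spans offset + nums * kAddrLen bytes in total.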
///
/// @ingroup ge
/// @brief Check input size against model op size.
/// @param [in] const int64_t &input_size: input size.
/// @param [in] const int64_t &op_size: model op size.
/// @param [in] is_dynamic: dynamic batch input flag.
/// @return true if success
///
bool DavinciModel::CheckInputAndModelSize(const int64_t &input_size, const int64_t &op_size, bool is_dynamic) {
  if (is_dynamic) {  // dynamic is max size.
    GELOGI("No need to check input and model size.");
    return true;
  }

  if (input_size > op_size) {
    GELOGW(
      "Input size [%ld] is bigger than om size need [%ld], "
      "MAY cause inference result ERROR, please check model input",
      input_size, op_size);
  }

  if (is_dynamic_aipp_) {
    GELOGI("This is dynamic aipp model, no need to judge smaller input size");
    return true;
  }
  // Judge overflow first
  if (input_size > (INT64_MAX - kDataMemAlignSizeCompare)) {
    GELOGI("Input size [%ld] is near INT64_MAX, skip the alignment check to avoid overflow, op size [%ld]", input_size,
           op_size);
    return true;
  }
  // The input and model input size can not be exactly equal because user input is not definite.
  if ((input_size + kDataMemAlignSizeCompare) < op_size) {
    GELOGE(FAILED, "Input size [%ld] can not be smaller than op size [%ld] after 64-byte alignment", input_size,
           op_size);
    return false;
  }
  return true;
}
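
// Worked example (editorial; assumes kDataMemAlignSizeCompare == 64, the
// 64-byte alignment slack mentioned in the error log above): with op_size = 1024,
//   input_size = 1000 -> 1000 + 64 = 1064 >= 1024, accepted;
//   input_size =  900 ->  900 + 64 =  964 <  1024, rejected.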
///
/// @ingroup ge
/// @brief Copy Inputs and Outputs addr to model for direct use.
/// @param [in] const InputData &input_data: model input data.
/// @param [in] OutputData &output_data: model output data.
/// @param [in] bool is_dynamic: whether the input is dynamic
/// @return SUCCESS handle successfully / PARAM_INVALID for failed
///
Status DavinciModel::CopyModelData(const InputData &input_data, OutputData &output_data, bool is_dynamic) {
  if (UpdateIoTaskArgs(new_input_data_info_, true, input_data.blobs, is_dynamic, input_data.batch_label) != SUCCESS) {
    GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[ZCPY] Update input data to model failed.");
    return ACL_ERROR_GE_PARAM_INVALID;
  }

  if (UpdateIoTaskArgs(new_output_data_info_, false, output_data.blobs, is_dynamic, input_data.batch_label) !=
      SUCCESS) {
    GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[ZCPY] Update output data to model failed.");
    return ACL_ERROR_GE_PARAM_INVALID;
  }

  for (ZeroCopyTask &task : zero_copy_tasks_) {
    GE_CHK_STATUS_RET(task.DistributeParam(is_async_mode_, rt_model_stream_), "[ZCPY] Update args failed.");
  }

  output_data.index = input_data.index;
  output_data.model_id = model_id_;
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief Copy Data addr to model for direct use.
/// @param [in] data_info: model memory addr/size map { data_index, { tensor_size, tensor_addr } }.
/// @param [in] is_input: input data or output data
/// @param [in] blobs: user input/output data list.
/// @param [in] is_dynamic: whether the input is dynamic
/// @param [in] batch_label: batch label for multi-batch scenes
/// @return SUCCESS handle successfully / others handle failed
///
Status DavinciModel::UpdateIoTaskArgs(const std::map<uint32_t, ZeroCopyOffset> &data_info, bool is_input,
                                      const vector<DataBuffer> &blobs, bool is_dynamic, const string &batch_label) {
  const string input_or_output = is_input ? "input" : "output";
  if (blobs.size() != data_info.size()) {
    GELOGE(FAILED, "Verify %s data num failed: model requires %zu, but user actually feeds %zu",
           input_or_output.c_str(), data_info.size(), blobs.size());
    return FAILED;
  }

  for (const auto &data : data_info) {
    if (data.first >= blobs.size()) {  // check data index.
      GELOGE(FAILED, "Verify %s data num failed: can not find No.%u data, because user only feeds %zu",
             input_or_output.c_str(), data.first, blobs.size());
      return FAILED;
    }

    const DataBuffer &buffer = blobs[data.first];  // index of data.
    if (buffer.data == nullptr) {
      GELOGE(FAILED, "data_buf.data is nullptr, index=%u", data.first);
      return FAILED;
    }

    if (!CheckInputAndModelSize(buffer.length, data.second.GetDataSize(), is_dynamic)) {
      GELOGE(FAILED, "Check input size and model size failed, op[%s]", data.second.GetOpName().c_str());
      return FAILED;
    }

    void *basic_addr = data.second.GetBasicAddr();
    uint64_t data_size = data.second.GetDataSize();
    if (copy_only_addrs_.count(basic_addr) > 0) {
      if (is_input) {
        GELOGI("[IMAS] Find addr %p need direct copy from user malloc input %p", basic_addr, buffer.data);
        if (rtMemcpy(basic_addr, data_size, buffer.data, buffer.length, RT_MEMCPY_DEVICE_TO_DEVICE) != RT_ERROR_NONE) {
          GELOGE(FAILED, "Non-zero copy data node copy failed");
          return FAILED;
        }
      }
      GELOGI("No need to execute zero copy task because this addr %p needs direct copy.", basic_addr);
      continue;
    }

    for (size_t count = 0; count < data.second.GetDataCount(); ++count) {
      int64_t size = data.second.GetDataInfo().at(count).first;
      void *addr = data.second.GetDataInfo().at(count).second;
      void *buffer_addr = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer.data) +
                                                   data.second.GetRelativeOffset().at(count));
      GELOGI("[ZCPY] Copy %s blobs_index %u, virtual_addr: %p, size: %ld, user_data_addr: %p, batch_label: %s",
             input_or_output.c_str(), data.first, addr, size, buffer_addr, batch_label.c_str());
      // For input data, just copy for rts task.
      for (ZeroCopyTask &task : zero_copy_tasks_) {
        if (task.GetBatchLabel() != kDefaultBatchLable && task.GetBatchLabel() != batch_label) {
          continue;
        }
        uintptr_t addr_val = reinterpret_cast<uintptr_t>(addr);
        if (task.UpdateTaskParam(addr_val, buffer_addr) != SUCCESS) {
          return FAILED;
        }
      }
    }
  }
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief get unique identification for op when load two or more models
/// @param [in] const OpDescPtr: current op.
/// @param [out] string unique_identification: unique identification for current op.
/// @return None.
///
void DavinciModel::GetUniqueId(const OpDescPtr &op_desc, std::string &unique_identification) {
  std::string session_graph_id;
  GE_IF_BOOL_EXEC(AttrUtils::GetStr(*op_desc, ATTR_NAME_SESSION_GRAPH_ID, session_graph_id),
                  GELOGD("Get original type of session_graph_id."));
  if (session_graph_id.empty()) {
    return;
  } else if (session_graph_id.find("-1") != string::npos) {
    unique_identification = session_graph_id + "_" + to_string(model_id_);
  } else {
    unique_identification = session_graph_id;
  }
}
///
/// @ingroup ge
/// @brief For TVM Op, avoid Addr Reuse.
/// @return const char *: stable key for the registered bin file
///
const char *DavinciModel::GetRegisterStub(const string &binfile, const string &session_graph_id) {
  string binfile_key;
  if (session_graph_id.empty()) {
    binfile_key = binfile;
  } else {
    binfile_key = session_graph_id + "_" + binfile;
  }
  auto it = tvm_bin_kernel_.find(binfile_key);
  if (it != tvm_bin_kernel_.end()) {
    return it->c_str();
  } else {
    it = tvm_bin_kernel_.insert(tvm_bin_kernel_.end(), binfile_key);
    return it->c_str();
  }
}
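
// Editorial note: returning it->c_str() relies on std::set<std::string> never
// invalidating element storage on later inserts, so the returned stub name
// stays valid for the model's lifetime. A minimal sketch of the same pattern
// (names invented):
//
//   std::set<std::string> pool;
//   const char *stub = pool.insert("graph1_kernel.o").first->c_str();
//   pool.insert("another.o");  // does not invalidate `stub`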
///
/// @ingroup ge
/// @brief Constant Op Init.
/// @return Status
///
Status DavinciModel::InitConstant(const OpDescPtr &op_desc) {
  auto v_weights = ModelUtils::GetWeights(op_desc);
  auto v_output_size = ModelUtils::GetOutputSize(op_desc);
  auto v_output_addr = ModelUtils::GetOutputDataAddrs(runtime_param_, op_desc);
  GE_IF_BOOL_EXEC(v_weights.empty() || v_output_size.empty() || v_output_addr.empty(),
                  GELOGE(PARAM_INVALID, "const op:%s not set output", op_desc->GetName().c_str());
                  return PARAM_INVALID;);

  GeTensor *tensor = const_cast<GeTensor *>(v_weights[0].get());
  GE_IF_BOOL_EXEC(static_cast<size_t>(v_output_size[0]) < tensor->GetData().size(),
                  GELOGE(PARAM_INVALID, "output size:%ld less than weight data size:%zu", v_output_size[0],
                         tensor->GetData().size());
                  return PARAM_INVALID;);
  GE_IF_BOOL_EXEC(tensor->GetData().size() == 0, GELOGW("const op:%s has no weight data.", op_desc->GetName().c_str());
                  return SUCCESS;);

  auto desc = tensor->GetTensorDesc();
  if (desc.GetDataType() == DT_STRING) {
    GeShape tensor_shape = desc.GetShape();
    /// if tensor is a scalar, its shape size is zero, according to ge_tensor.cc.
    /// the logic of GetShapeSize is wrong: a scalar tensor's GetShapeSize is zero,
    /// and that of an unknown shape is zero too.
    /// unknown shapes will not appear here, so zero can be used to judge whether a tensor is a scalar.
    int64_t elem_num = tensor_shape.GetShapeSize();
    if (elem_num == 0 && tensor_shape.GetDims().size() == 0) {
      elem_num = 1;
    }
    uint64_t *buff = reinterpret_cast<uint64_t *>(tensor->MutableData().data());
    GE_CHK_BOOL_RET_STATUS(ge::CheckInt64Uint32MulOverflow(elem_num, kBytes) == SUCCESS, FAILED,
                           "Shape size is invalid");
    uint64_t offset = static_cast<uint64_t>(elem_num * kBytes);
    uint64_t hbm_raw_data_base_addr =
      static_cast<uint64_t>(reinterpret_cast<uintptr_t>(v_output_addr[0])) + offset;
    for (int64_t i = elem_num - 1; i >= 0; --i) {
      buff[i] = hbm_raw_data_base_addr + (buff[i] - buff[0]);
    }
  }

  GELOGI("[IMAS]InitConstant memcpy graph_%u type[V] name[%s] output[%d] memaddr[%p] mem_size[%lu] datasize[%zu]",
         runtime_param_.graph_id, op_desc->GetName().c_str(), 0, v_output_addr[0], v_output_size[0],
         tensor->GetData().size());
  GE_CHK_RT_RET(rtMemcpy(v_output_addr[0], v_output_size[0], tensor->GetData().data(), tensor->GetData().size(),
                         RT_MEMCPY_HOST_TO_DEVICE));
  return SUCCESS;
}
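
// Editorial sketch of the DT_STRING rebasing above, with invented numbers: a
// string tensor stores one uint64_t offset per element followed by the raw
// bytes. If v_output_addr[0] = 0x9000 and elem_num = 2, the raw data starts at
// 0x9000 + 2 * kBytes, so each host offset buff[i] is rewritten to
// hbm_raw_data_base_addr + (buff[i] - buff[0]) before the single rtMemcpy
// ships the whole blob to the device.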
  3040. ///
  3041. /// @ingroup ge
  3042. /// @brief TVM Op Init.
  3043. /// @return Status
  3044. ///
  3045. Status DavinciModel::InitTbeHandle(const OpDescPtr &op_desc) {
  3046. auto kernel = ge_model_->GetTBEKernelStore().FindKernel(op_desc->GetName());
  3047. auto tbe_kernel = (kernel != nullptr) ? kernel : op_desc->TryGetExtAttr(OP_EXTATTR_NAME_TBE_KERNEL, TBEKernelPtr());
  3048. if (tbe_kernel == nullptr) {
  3049. GELOGE(INTERNAL_ERROR, "TBE: %s can't find tvm bin file!", op_desc->GetName().c_str());
  3050. return INTERNAL_ERROR;
  3051. }
  3052. std::string session_graph_model_id;
  3053. GetUniqueId(op_desc, session_graph_model_id);
  3054. const char *bin_file_key = GetRegisterStub(op_desc->GetName(), session_graph_model_id); // from set, always valid.
  3055. TBEHandleStore &kernel_store = TBEHandleStore::GetInstance();
  3056. std::lock_guard<std::mutex> lock(tvm_bin_mutex_);
  3057. if (rtQueryFunctionRegistered(bin_file_key) != RT_ERROR_NONE) {
  3058. void *bin_handle = nullptr;
  3059. if (!kernel_store.FindTBEHandle(bin_file_key, bin_handle)) {
  3060. GELOGD("TBE: can't find the kernel_name[%s] in HandleMap", bin_file_key);
  3061. rtDevBinary_t binary;
  3062. std::string json_string;
  3063. GE_IF_BOOL_EXEC(AttrUtils::GetStr(op_desc, TVM_ATTR_NAME_MAGIC, json_string),
  3064. GELOGD("Get original type of session_graph_id."));
  3065. if (json_string == "RT_DEV_BINARY_MAGIC_ELF_AICPU") {
  3066. binary.magic = RT_DEV_BINARY_MAGIC_ELF_AICPU;
  3067. } else if (json_string == "RT_DEV_BINARY_MAGIC_ELF") {
  3068. binary.magic = RT_DEV_BINARY_MAGIC_ELF;
  3069. } else if (json_string == "RT_DEV_BINARY_MAGIC_ELF_AIVEC") {
  3070. binary.magic = RT_DEV_BINARY_MAGIC_ELF_AIVEC;
  3071. } else {
  3072. GELOGE(PARAM_INVALID, "TBE: Invalid parameter magic number! json: %s", json_string.c_str());
  3073. return PARAM_INVALID;
  3074. }
  3075. binary.version = 0;
  3076. binary.data = tbe_kernel->GetBinData();
  3077. binary.length = tbe_kernel->GetBinDataSize();
  3078. GELOGD("TBE: binary.length: %lu", binary.length);
  3079. GE_CHK_RT_RET(rtDevBinaryRegister(&binary, &bin_handle));
  3080. std::string meta_data;
  3081. GE_IF_BOOL_EXEC(AttrUtils::GetStr(op_desc, TVM_ATTR_NAME_METADATA, meta_data),
  3082. GELOGI("Get original type of json_string"));
  3083. GELOGD("TBE: meta data: %s", meta_data.empty() ? "null" : meta_data.c_str());
  3084. GE_IF_BOOL_EXEC(!meta_data.empty(), GE_CHK_RT_RET(rtMetadataRegister(bin_handle, meta_data.c_str())));
  3085. kernel_store.StoreTBEHandle(bin_file_key, bin_handle, tbe_kernel);
  3086. } else {
  3087. GELOGI("TBE: find the kernel_name[%s] in HandleMap", bin_file_key);
  3088. kernel_store.ReferTBEHandle(bin_file_key);
  3089. }
  3090. std::string kernel_name;
  3091. GE_IF_BOOL_EXEC(AttrUtils::GetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name),
  3092. GELOGD("Get original type of kernel_name"));
  3093. GE_CHK_RT_RET(rtFunctionRegister(bin_handle, bin_file_key, bin_file_key, kernel_name.c_str(), 0));
  3094. used_tbe_handle_map_[bin_file_key] = 1; // Init used num to 1.
  3095. return SUCCESS;
  3096. }
3097. // Kernel registered, increase used num in store.
  3098. StoreTbeHandle(bin_file_key);
  3099. return SUCCESS;
  3100. }
  3101. void DavinciModel::StoreTbeHandle(const std::string &handle_key) {
  3102. // Online mode FE may call rtFunctionRegister.
  3103. TBEHandleStore &kernel_store = TBEHandleStore::GetInstance();
  3104. auto it = used_tbe_handle_map_.find(handle_key);
  3105. if (it != used_tbe_handle_map_.end()) {
  3106. // GE registered, increase reference.
  3107. kernel_store.ReferTBEHandle(handle_key);
  3108. it->second++;
  3109. return;
  3110. }
  3111. void *bin_handle = nullptr;
  3112. if (kernel_store.FindTBEHandle(handle_key, bin_handle)) {
3113. // Registered in store (e.g. by FE online) but not yet tracked by GE: start tracking with one reference.
  3114. used_tbe_handle_map_[handle_key] = 1; // Init used num to 1.
  3115. kernel_store.ReferTBEHandle(handle_key);
  3116. }
  3117. }
  3118. void DavinciModel::CleanTbeHandle() {
  3119. TBEHandleStore &kernel_store = TBEHandleStore::GetInstance();
  3120. kernel_store.EraseTBEHandle(used_tbe_handle_map_);
  3121. used_tbe_handle_map_.clear();
  3122. tvm_bin_kernel_.clear();
  3123. }
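InitTbeHandle, StoreTbeHandle and CleanTbeHandle keep the per-model usage map used_tbe_handle_map_ in step with the process-wide TBEHandleStore: every use takes one reference in the store, and CleanTbeHandle hands the whole usage map back through EraseTBEHandle. A rough sketch of such a reference-counted store under that assumption; it is not the real TBEHandleStore API:

#include <cstdint>
#include <map>
#include <mutex>
#include <string>

// Hypothetical reference-counted handle store illustrating the bookkeeping pattern.
class RefCountedHandleStore {
 public:
  void Refer(const std::string &key) {
    std::lock_guard<std::mutex> lock(mutex_);
    ++ref_count_[key];
  }

  // Release the references a single model accumulated; drop the entry when the
  // count reaches zero (unregistering the device binary is elided in this sketch).
  void Erase(const std::map<std::string, uint32_t> &used_per_model) {
    std::lock_guard<std::mutex> lock(mutex_);
    for (const auto &item : used_per_model) {
      auto it = ref_count_.find(item.first);
      if (it == ref_count_.end()) {
        continue;
      }
      it->second = (it->second > item.second) ? (it->second - item.second) : 0;
      if (it->second == 0) {
        ref_count_.erase(it);
      }
    }
  }

 private:
  std::mutex mutex_;
  std::map<std::string, uint32_t> ref_count_;
};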
  3124. ///
  3125. /// @ingroup ge
  3126. /// @brief insert active_stream_indication_
  3127. /// @return Status
  3128. ///
  3129. Status DavinciModel::InitStreamActive(const OpDescPtr &op_desc) {
  3130. if (op_desc->HasAttr(ATTR_NAME_SWITCH_BRANCH_NODE_LABEL)) {
  3131. std::vector<uint32_t> active_stream_list;
  3132. GE_CHK_BOOL_EXEC(AttrUtils::GetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, active_stream_list),
  3133. return INTERNAL_ERROR, "StreamActiveOp get attr ACTIVE_STREAM failed.");
  3134. for (size_t j = 0; j < active_stream_list.size(); ++j) {
  3135. active_stream_indication_.insert(active_stream_list[j]);
  3136. GELOGI("flowctrl_op_index_map node:%s, active_stream_id=%u.", op_desc->GetName().c_str(), active_stream_list[j]);
  3137. }
  3138. }
  3139. return SUCCESS;
  3140. }
  3141. Status DavinciModel::InitStreamSwitch(const OpDescPtr &op_desc) {
  3142. std::vector<uint32_t> active_stream_list;
  3143. GE_LOGI_IF(!ge::AttrUtils::GetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, active_stream_list),
  3144. "GetInt ACTIVE_STREAM_LIST failed.");
  3145. if (active_stream_list.size() != kTrueBranchStreamNum) {
  3146. GELOGE(INTERNAL_ERROR, "Stream num of switch true branch must be %u.", kTrueBranchStreamNum);
  3147. return INTERNAL_ERROR;
  3148. }
  3149. uint32_t true_stream_id = active_stream_list.front();
  3150. active_stream_indication_.insert(true_stream_id);
  3151. GELOGI("flowctrl_op_index_map node:%s, true_stream_id=%u.", op_desc->GetName().c_str(), true_stream_id);
  3152. return SUCCESS;
  3153. }
  3154. Status DavinciModel::InitStreamSwitchN(const OpDescPtr &op_desc) {
  3155. std::vector<uint32_t> active_stream_list;
  3156. if (!AttrUtils::GetListInt(op_desc, ATTR_NAME_ACTIVE_STREAM_LIST, active_stream_list)) {
  3157. GELOGE(INTERNAL_ERROR, "StreamSwitchNOp get attr ACTIVE_STREAM failed.");
  3158. return INTERNAL_ERROR;
  3159. }
  3160. for (size_t j = 0; j < active_stream_list.size(); ++j) {
  3161. active_stream_indication_.insert(active_stream_list[j]);
  3162. GELOGI("StreamSwitchNOp node:%s, active_stream_id=%u.", op_desc->GetName().c_str(), active_stream_list[j]);
  3163. }
  3164. uint32_t batch_num = 0;
  3165. if (!AttrUtils::GetInt(op_desc, ATTR_NAME_BATCH_NUM, batch_num)) {
  3166. GELOGE(FAILED, "Failed to get attr ATTR_NAME_BATCH_NUM, StreamSwitchN: %s.", op_desc->GetName().c_str());
  3167. return FAILED;
  3168. }
  3169. return SetDynamicBatchInfo(op_desc, batch_num);
  3170. }
  3171. Status DavinciModel::SetDynamicBatchInfo(const OpDescPtr &op_desc, uint32_t batch_num) {
  3172. batch_info_.clear();
  3173. combined_batch_info_.clear();
  3174. (void)AttrUtils::GetInt(op_desc, ATTR_DYNAMIC_TYPE, dynamic_type_);
  3175. (void)AttrUtils::GetListStr(op_desc, ATTR_USER_DESIGNEATE_SHAPE_ORDER, user_designate_shape_order_);
  3176. for (uint32_t i = 0; i < batch_num; ++i) {
  3177. std::vector<int64_t> batch_shape;
  3178. const std::string attr_name = ATTR_NAME_PRED_VALUE + "_" + std::to_string(i);
  3179. if (!AttrUtils::GetListInt(op_desc, attr_name, batch_shape)) {
  3180. GELOGE(FAILED, "Get attr ATTR_NAME_PRED_VALUE failed, Node: %s", op_desc->GetName().c_str());
  3181. batch_info_.clear();
  3182. return FAILED;
  3183. }
  3184. batch_info_.emplace_back(batch_shape);
  3185. batch_shape.clear();
  3186. const string attr_combined_batch = ATTR_NAME_COMBINED_BATCH + "_" + std::to_string(i);
  3187. if (AttrUtils::GetListInt(op_desc, attr_combined_batch, batch_shape)) {
  3188. combined_batch_info_.emplace_back(batch_shape);
  3189. }
  3190. }
  3191. return SUCCESS;
  3192. }
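SetDynamicBatchInfo reads one shape list per batch profile from attributes named by appending "_<i>" to ATTR_NAME_PRED_VALUE (and, when present, ATTR_NAME_COMBINED_BATCH), filling batch_info_ with one shape vector per profile. A small hypothetical illustration of the naming convention and the resulting layout; the attribute name and shapes below are made up, and the attribute lookup is stubbed out:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

int main() {
  const std::string kPredValueAttr = "_pred_value";  // stand-in for ATTR_NAME_PRED_VALUE
  const uint32_t batch_num = 2;

  std::vector<std::vector<int64_t>> batch_info;
  for (uint32_t i = 0; i < batch_num; ++i) {
    const std::string attr_name = kPredValueAttr + "_" + std::to_string(i);
    // In DavinciModel this shape comes from AttrUtils::GetListInt(op_desc, attr_name, ...).
    std::vector<int64_t> batch_shape = (i == 0) ? std::vector<int64_t>{1, 224, 224, 3}
                                                : std::vector<int64_t>{8, 224, 224, 3};
    std::cout << "reading " << attr_name << std::endl;
    batch_info.emplace_back(batch_shape);
  }
  // batch_info now holds one shape per dynamic-batch profile.
  return 0;
}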
  3193. Status DavinciModel::InitCase(const OpDescPtr &op_desc) {
  3194. uint32_t batch_num = 0;
  3195. if (!AttrUtils::GetInt(op_desc, ATTR_NAME_BATCH_NUM, batch_num)) {
  3196. GELOGI("Not multi-batch Node: %s", op_desc->GetName().c_str());
  3197. return SUCCESS;
  3198. }
  3199. return SetDynamicBatchInfo(op_desc, batch_num);
  3200. }
  3201. bool DavinciModel::IsBroadCastOpData(const ge::NodePtr &var_node) {
  3202. for (auto out_anchor : var_node->GetAllOutDataAnchors()) {
  3203. GE_RT_FALSE_CHECK_NOTNULL(out_anchor);
  3204. for (auto in_anchor : out_anchor->GetPeerInDataAnchors()) {
  3205. GE_RT_FALSE_CHECK_NOTNULL(in_anchor);
  3206. ge::NodePtr dst_node = in_anchor->GetOwnerNode();
  3207. GE_RT_FALSE_CHECK_NOTNULL(dst_node);
  3208. if (dst_node->GetType() == HCOMBROADCAST || dst_node->GetType() == HVDCALLBACKBROADCAST) {
  3209. return true;
  3210. }
  3211. }
  3212. }
  3213. return false;
  3214. }
  3215. ///
  3216. /// @ingroup ge
  3217. /// @brief Init model stream for NN model.
  3218. /// @param [in] stream user input model stream.
  3219. /// @return Status
  3220. ///
  3221. Status DavinciModel::InitModelStream(rtStream_t stream) {
  3222. ExecuteMode curr_mode = is_async_mode_ ? ASYNCHRONIZATION : SYNCHRONIZATION;
  3223. GE_CHK_BOOL_RET_STATUS((curr_mode == last_execute_mode_) || (last_execute_mode_ == INITIALIZATION), INTERNAL_ERROR,
  3224. "NnExecute not support mix execute.");
  3225. last_execute_mode_ = curr_mode;
3226. // asynchronous mode, use user input stream.
  3227. if (is_async_mode_) {
  3228. rt_model_stream_ = stream;
  3229. is_inner_model_stream_ = false;
  3230. return SUCCESS;
  3231. }
3232. // synchronous mode, use forbidden stream.
  3233. if (stream != nullptr) {
  3234. if ((rt_model_stream_ != nullptr) && is_inner_model_stream_) {
  3235. GE_LOGW_IF(rtStreamDestroy(rt_model_stream_) != RT_ERROR_NONE, "Destroy rt_stream failed!");
  3236. }
  3237. rt_model_stream_ = stream;
  3238. is_inner_model_stream_ = false;
  3239. return SUCCESS;
  3240. }
  3241. if (rt_model_stream_ == nullptr) {
  3242. GE_CHK_RT_RET(rtStreamCreateWithFlags(&rt_model_stream_, priority_, RT_STREAM_FORBIDDEN_DEFAULT));
  3243. is_inner_model_stream_ = true;
  3244. }
  3245. return SUCCESS;
  3246. }
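InitModelStream latches the execution mode chosen on the first call and rejects later calls that switch between synchronous and asynchronous execution; only when no user stream is supplied in synchronous mode does the model create and own a forbidden-default stream. A condensed, hypothetical restatement of the mode-latching guard (it throws instead of returning INTERNAL_ERROR):

#include <stdexcept>

enum class ExecMode { kInit, kSync, kAsync };

// Hypothetical stand-alone version of the "no mixed execution" check.
class ModeLatch {
 public:
  void Check(bool async_mode) {
    const ExecMode curr = async_mode ? ExecMode::kAsync : ExecMode::kSync;
    if (last_ != ExecMode::kInit && last_ != curr) {
      throw std::runtime_error("mixed sync/async execution is not supported");
    }
    last_ = curr;  // first call fixes the mode
  }

 private:
  ExecMode last_ = ExecMode::kInit;
};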
  3247. ///
  3248. /// @ingroup ge
  3249. /// @brief ACL case, do not start new thread, return execute result.
  3250. /// @param [in] stream execute model stream.
3251. /// @param [in] async_mode is asynchronous mode.
  3252. /// @param [in] input_data model input data.
  3253. /// @param [out] output_data model output data.
  3254. ///
  3255. Status DavinciModel::NnExecute(rtStream_t stream, bool async_mode, const InputData &input_data,
  3256. OutputData &output_data) {
  3257. is_async_mode_ = async_mode;
  3258. GELOGD("Model Run begin, model id:%u, data index:%u, flag:%d.", model_id_, input_data.index, is_async_mode_);
  3259. GE_CHK_STATUS_RET(InitModelStream(stream), "Init model stream failed.");
  3260. is_dynamic_ = input_data.is_dynamic_batch;
  3261. GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_PRE_PROC_START));
  3262. Status ret = CopyModelData(input_data, output_data, is_dynamic_);
  3263. GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ret, "Copy input data to model failed. model id: %u",
  3264. model_id_);
  3265. GELOGD("current_data.index=%u", input_data.index);
  3266. GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_PRE_PROC_END));
  3267. if (!task_list_.empty()) {
  3268. GELOGD("rtModelExecute do");
  3269. GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_INFER_START));
  3270. rtError_t rt_ret = rtModelExecute(rt_model_handle_, rt_model_stream_, 0);
  3271. GE_CHK_RT_EXEC(rt_ret, return RT_ERROR_TO_GE_STATUS(rt_ret));
  3272. GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_INFER_END));
  3273. GELOGD("rtModelExecute end");
  3274. }
  3275. if (!is_async_mode_) {
  3276. GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_AFTER_PROC_START));
  3277. ret = CopyOutputData(input_data.index, output_data, RT_MEMCPY_DEVICE_TO_DEVICE);
  3278. GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, return ACL_ERROR_GE_INTERNAL_ERROR,
  3279. "Copy Output data to user failed.");
  3280. GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), SetProfileTime(MODEL_AFTER_PROC_END));
  3281. }
  3282. // report model time data
  3283. GE_IF_BOOL_EXEC(ProfilingManager::Instance().ProfilingModelExecuteOn(), (void)SinkTimeProfile(input_data));
  3284. GELOGD("Model run end, model id:%u", model_id_);
  3285. return SUCCESS;
  3286. }
  3287. // Add active entry stream for special env.
  3288. Status DavinciModel::AddHeadStream() {
  3289. if (active_stream_list_.empty()) {
  3290. GELOGE(INTERNAL_ERROR, "Active stream is empty, stream list size: %zu, stream indication size: %zu.",
  3291. stream_list_.size(), active_stream_indication_.size());
  3292. return INTERNAL_ERROR;
  3293. }
  3294. if (active_stream_list_.size() == 1) {
  3295. GELOGI("Just one active stream, take as head stream.");
  3296. rt_head_stream_ = active_stream_list_[0];
  3297. is_pure_head_stream_ = false;
  3298. } else {
3299. // Create the stream which rt_model_handle runs on; this is S0, the TS stream.
  3300. GELOGI("Multiple active stream: %zu, create head stream.", active_stream_list_.size());
  3301. GE_CHK_RT_RET(rtStreamCreateWithFlags(&rt_head_stream_, priority_, RT_STREAM_PERSISTENT));
  3302. GE_CHK_RT_RET(rtModelBindStream(rt_model_handle_, rt_head_stream_, RT_INVALID_FLAG)); // Not active.
  3303. is_pure_head_stream_ = true;
  3304. for (auto s : active_stream_list_) {
  3305. std::shared_ptr<CpuTaskActiveEntry> active_entry = MakeShared<CpuTaskActiveEntry>(rt_head_stream_);
  3306. if (active_entry == nullptr) {
  3307. GELOGE(MEMALLOC_FAILED, "Make CpuTaskActiveEntry task failed.");
  3308. return MEMALLOC_FAILED;
  3309. }
  3310. Status status = active_entry->Init(s);
  3311. if (status != SUCCESS) {
  3312. return status;
  3313. }
  3314. cpu_task_list_.emplace_back(active_entry);
  3315. }
  3316. }
3317. // Create an entry stream to activate the head stream (AICPU stream).
  3318. GE_CHK_RT_RET(rtStreamCreateWithFlags(&rt_entry_stream_, priority_, RT_STREAM_AICPU));
  3319. GE_CHK_RT_RET(rtModelBindStream(rt_model_handle_, rt_entry_stream_, RT_HEAD_STREAM));
  3320. return SUCCESS;
  3321. }
  3322. Status DavinciModel::InitEntryTask() {
  3323. if (deploy_type_ == AICPU_DEPLOY_CROSS_THREAD) {
  3324. GE_CHK_STATUS_RET(AddHeadStream(), "Add head stream failed.");
  3325. return CpuActiveStream();
  3326. } else {
  3327. return LoadWithQueue();
  3328. }
  3329. }
  3330. uint8_t *DavinciModel::MallocFeatureMapMem(size_t data_size) {
  3331. uint8_t *mem_base = nullptr;
  3332. const string purpose("feature map,used for op input and output.");
  3333. char ge_static_mem_env[MMPA_MAX_PATH] = { 0x00 };
  3334. INT32 res = mmGetEnv(kEnvGeuseStaticMemory, ge_static_mem_env, MMPA_MAX_PATH);
  3335. if (res == EN_OK) {
  3336. data_size = static_cast<size_t>(VarManager::Instance(session_id_)->GetGraphMemoryMaxSize());
  3337. string memory_key = std::to_string(0) + "_f";
  3338. mem_base = MemManager::Instance(RT_MEMORY_HBM)->MallocMemory(purpose, memory_key, data_size, GetDeviceId());
  3339. } else {
  3340. mem_base = MemManager::Instance(RT_MEMORY_HBM)->MallocMemory(purpose, data_size, GetDeviceId());
  3341. }
  3342. if (mem_base != nullptr) {
  3343. GE_CHK_RT(rtMemset(mem_base, data_size, 0U, data_size));
  3344. }
  3345. return mem_base;
  3346. }
  3347. uint8_t *DavinciModel::MallocP2PMem(size_t p2p_data_size) {
  3348. uint8_t *p2p_mem_base = nullptr;
  3349. const string purpose("p2p memory, used for some op related to hcom");
  3350. if (std::getenv(kEnvGeuseStaticMemory) != nullptr) {
  3351. string p2p_memory_key = std::to_string(0) + "_p";
  3352. p2p_mem_base =
  3353. MemManager::Instance(RT_MEMORY_P2P_DDR)->MallocMemory(purpose, p2p_memory_key, p2p_data_size, GetDeviceId());
  3354. } else {
  3355. p2p_mem_base = MemManager::Instance(RT_MEMORY_P2P_DDR)->MallocMemory(purpose, p2p_data_size, GetDeviceId());
  3356. }
  3357. return p2p_mem_base;
  3358. }
  3359. uint8_t *DavinciModel::MallocWeightsMem(size_t weights_size) {
  3360. uint8_t *weights_mem_base = nullptr;
  3361. const string purpose("weights memory in inference network.");
  3362. char ge_static_mem_env[MMPA_MAX_PATH] = { 0x00 };
  3363. INT32 res = mmGetEnv(kEnvGeuseStaticMemory, ge_static_mem_env, MMPA_MAX_PATH);
  3364. if (res == EN_OK) {
  3365. string weight_memory_key = std::to_string(0) + "_w";
  3366. weights_mem_base =
  3367. MemManager::Instance(RT_MEMORY_HBM)->MallocMemory(purpose, weight_memory_key, weights_size, GetDeviceId());
  3368. } else {
  3369. weights_mem_base = MemManager::Instance(RT_MEMORY_HBM)->MallocMemory(purpose, weights_size, GetDeviceId());
  3370. }
  3371. return weights_mem_base;
  3372. }
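MallocFeatureMapMem, MallocP2PMem and MallocWeightsMem (and the matching Free* functions below) follow one convention: when the static-memory environment variable kEnvGeuseStaticMemory is set, memory is allocated under a fixed key ("0_f", "0_p", "0_w") so it can be looked up and shared, otherwise an anonymous per-model block is used. A tiny sketch of that key selection, using std::getenv where most of the real code uses mmGetEnv; the helper name is hypothetical:

#include <cstdlib>
#include <string>

// Hypothetical helper: returns the static-memory key for a given suffix
// ("_f", "_p" or "_w") when the environment variable is set, otherwise an
// empty string meaning "use an anonymous per-model allocation".
std::string MemoryKeyOrEmpty(const char *env_name, const char *suffix) {
  const char *env = std::getenv(env_name);
  return (env != nullptr) ? std::string("0") + suffix : std::string();
}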
  3373. void DavinciModel::FreeFeatureMapMem() {
  3374. char ge_static_mem_env[MMPA_MAX_PATH] = { 0x00 };
  3375. INT32 res = mmGetEnv(kEnvGeuseStaticMemory, ge_static_mem_env, MMPA_MAX_PATH);
  3376. if (res == EN_OK && is_inner_mem_base_) {
3377. string memory_key = std::to_string(0) + "_f";
3378. if (MemManager::Instance(RT_MEMORY_HBM)->GetMemoryAddr(memory_key) != nullptr) {
3379. GE_CHK_STATUS(MemManager::Instance(RT_MEMORY_HBM)->FreeMemory(memory_key, GetDeviceId()),
3380. "failed to free feature_map memory");
  3381. }
  3382. mem_base_ = nullptr;
  3383. } else {
  3384. GE_IF_BOOL_EXEC(mem_base_ != nullptr && is_inner_mem_base_,
  3385. GE_CHK_STATUS(MemManager::Instance(RT_MEMORY_HBM)->FreeMemory(mem_base_, GetDeviceId()),
  3386. "failed to free feature_map memory");
  3387. mem_base_ = nullptr);
  3388. }
  3389. }
  3390. void DavinciModel::FreeP2PMem() {
  3391. if (std::getenv(kEnvGeuseStaticMemory) != nullptr) {
  3392. std::string p2p_memory_key = std::to_string(0) + "_p";
  3393. if (MemManager::Instance(RT_MEMORY_P2P_DDR)->GetMemoryAddr(p2p_memory_key) != nullptr) {
  3394. GE_CHK_STATUS(MemManager::Instance(RT_MEMORY_P2P_DDR)->FreeMemory(p2p_memory_key, GetDeviceId()),
  3395. "failed to free p2p memory");
  3396. }
  3397. p2p_mem_base_ = nullptr;
  3398. } else {
  3399. GE_IF_BOOL_EXEC(p2p_mem_base_ != nullptr && is_inner_mem_base_,
  3400. GE_CHK_STATUS(MemManager::Instance(RT_MEMORY_P2P_DDR)->FreeMemory(p2p_mem_base_, GetDeviceId()),
  3401. "failed to free p2p memory");
  3402. p2p_mem_base_ = nullptr);
  3403. }
  3404. }
  3405. void DavinciModel::FreeWeightsMem() {
  3406. char ge_static_mem_env[MMPA_MAX_PATH] = { 0x00 };
  3407. INT32 res = mmGetEnv(kEnvGeuseStaticMemory, ge_static_mem_env, MMPA_MAX_PATH);
  3408. if (res == EN_OK) {
  3409. string memory_key = std::to_string(0) + "_w";
  3410. if (MemManager::Instance(RT_MEMORY_HBM)->GetMemoryAddr(memory_key) != nullptr) {
  3411. GE_CHK_STATUS(MemManager::Instance(RT_MEMORY_HBM)->FreeMemory(memory_key, GetDeviceId()),
  3412. "failed to free feature_map memory");
  3413. }
  3414. weights_mem_base_ = nullptr;
  3415. } else {
  3416. GE_IF_BOOL_EXEC(weights_mem_base_ != nullptr && weights_mem_base_ != mem_base_ && is_inner_weight_base_,
  3417. GE_CHK_STATUS(MemManager::Instance(RT_MEMORY_HBM)->FreeMemory(weights_mem_base_, GetDeviceId()),
  3418. "failed to free weight memory");
  3419. weights_mem_base_ = nullptr);
  3420. }
  3421. }
  3422. Status DavinciModel::TransAllVarData(ComputeGraphPtr &graph, uint32_t graph_id) {
  3423. rtContext_t ctx = nullptr;
  3424. rtError_t rt_ret = rtCtxGetCurrent(&ctx);
  3425. if (rt_ret != RT_ERROR_NONE) {
  3426. GELOGE(RT_FAILED, "Failed to get current context, error_code is: 0x%X.", rt_ret);
  3427. return RT_ERROR_TO_GE_STATUS(rt_ret);
  3428. }
  3429. std::vector<NodePtr> variable_node_list;
  3430. for (ge::NodePtr &node : graph->GetDirectNode()) {
  3431. if (node == nullptr) {
  3432. continue;
  3433. }
  3434. if (node->GetType() != VARIABLE) {
  3435. continue;
  3436. }
  3437. variable_node_list.emplace_back(node);
  3438. }
  3439. GE_CHK_STATUS_RET_NOLOG(
  3440. TransVarDataUtils::TransAllVarData(variable_node_list, session_id_, ctx, graph_id, kThreadNum));
  3441. return SUCCESS;
  3442. }
  3443. void DavinciModel::SetDataDumperArgs(const ComputeGraphPtr &compute_graph) {
  3444. data_dumper_.SetModelName(name_);
  3445. data_dumper_.SetModelId(model_id_);
  3446. data_dumper_.SetOmName(om_name_);
  3447. data_dumper_.SetComputeGraph(compute_graph);
  3448. data_dumper_.SetRefInfo(saved_task_addrs_);
  3449. data_dumper_.SetL1FusionAddr(l1_fusion_addr_);
  3450. int32_t device_id = 0;
  3451. rtError_t rt_ret = rtGetDevice(&device_id);
  3452. if (rt_ret != RT_ERROR_NONE || device_id < 0) {
  3453. GELOGE(RT_FAILED, "Call rtGetDevice failed, ret = 0x%X, device_id = %d.", rt_ret, device_id);
  3454. return;
  3455. }
  3456. data_dumper_.SetDeviceId(device_id);
  3457. // set loop count addr
  3458. auto get_var_addr = [](const OpDescPtr &op, const RuntimeParam &runtime_param) -> void *{
  3459. if (op != nullptr) {
  3460. auto v_output_size = ModelUtils::GetOutputSize(op);
  3461. auto v_output_addr = ModelUtils::GetOutputDataAddrs(runtime_param, op);
  3462. if (v_output_size.empty() || v_output_addr.empty()) {
  3463. return nullptr;
  3464. }
  3465. return v_output_addr[0];
  3466. }
  3467. GELOGD("op is null.");
  3468. return nullptr;
  3469. };
  3470. data_dumper_.SetLoopAddr(get_var_addr(GetVariableOp(NODE_NAME_GLOBAL_STEP), runtime_param_),
  3471. get_var_addr(GetVariableOp(NODE_NAME_FLOWCTRL_LOOP_PER_ITER), runtime_param_),
  3472. get_var_addr(GetVariableOp(NODE_NAME_FLOWCTRL_LOOP_COND), runtime_param_));
  3473. }
  3474. uint32_t DavinciModel::GetFlowctrlIndex(uint32_t op_index) {
  3475. std::lock_guard<std::mutex> lock(flowctrl_op_index_internal_map_mutex_);
  3476. return (++flowctrl_op_index_internal_map_[op_index]) - 1;
  3477. }
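GetFlowctrlIndex hands out a zero-based, per-op-index occurrence counter under a lock: pre-incrementing the map entry and subtracting one yields 0, 1, 2, ... for repeated calls with the same op index. A minimal hypothetical equivalent:

#include <cstdint>
#include <map>
#include <mutex>

// Hypothetical stand-alone counter: each call for the same op index
// returns 0, 1, 2, ... in order.
class FlowctrlIndexCounter {
 public:
  uint32_t Next(uint32_t op_index) {
    std::lock_guard<std::mutex> lock(mutex_);
    return counters_[op_index]++;  // value-initialized to 0 on first access
  }

 private:
  std::mutex mutex_;
  std::map<uint32_t, uint32_t> counters_;
};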
  3478. void DavinciModel::PushHcclStream(rtStream_t value) {
  3479. std::lock_guard<std::mutex> lock(all_hccl_stream_list_mutex_);
  3480. all_hccl_stream_list_.push_back(value);
  3481. }
  3482. void DavinciModel::SaveHcclFollowStream(int64_t main_stream_id, rtStream_t stream) {
  3483. std::lock_guard<std::mutex> lock(capacity_of_stream_mutex_);
  3484. main_follow_stream_mapping_[main_stream_id].emplace_back(stream);
  3485. }
  3486. Status DavinciModel::GetComputeGraphInfo(vector<ComputeGraphDescInfo> &graph_desc_info) {
  3487. auto &all_op_desc = data_dumper_.GetAllOpDescInfo();
  3488. for (auto &op_desc : all_op_desc) {
  3489. ComputeGraphDescInfo compute_graph_info;
  3490. if (!om_name_.empty()) {
  3491. compute_graph_info.model_name = om_name_;
  3492. } else {
  3493. compute_graph_info.model_name = name_;
  3494. }
  3495. compute_graph_info.op_name = op_desc.op_name;
  3496. compute_graph_info.op_type = op_desc.op_type;
  3497. compute_graph_info.input_format = op_desc.input_format;
  3498. compute_graph_info.input_shape = op_desc.input_shape;
  3499. compute_graph_info.input_data_type = op_desc.input_data_type;
  3500. compute_graph_info.output_format = op_desc.output_format;
  3501. compute_graph_info.output_shape = op_desc.output_shape;
  3502. compute_graph_info.output_data_type = op_desc.output_data_type;
  3503. graph_desc_info.emplace_back(compute_graph_info);
  3504. }
  3505. return SUCCESS;
  3506. }
  3507. void DavinciModel::SetTotalFixedAddrsSize(string tensor_name, int64_t fix_addr_size) {
  3508. if (tensor_name_to_fixed_addr_size_.find(tensor_name) == tensor_name_to_fixed_addr_size_.end()) {
  3509. tensor_name_to_fixed_addr_size_[tensor_name] = total_fixed_addr_size_;
  3510. total_fixed_addr_size_ += fix_addr_size;
  3511. }
  3512. }
  3513. Status DavinciModel::GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info) {
  3514. GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
  3515. OpDescPtr data_op = data_op_list_[index];
  3516. if (!data_op->HasAttr(ATTR_NAME_AIPP_INPUTS) || !data_op->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
  3517. GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "GetOrigInputInfo: there is not AIPP related with index %u.", index);
  3518. return ACL_ERROR_GE_AIPP_NOT_EXIST;
  3519. }
  3520. vector<std::string> inputs;
  3521. if (AttrUtils::GetListStr(data_op, ATTR_NAME_AIPP_INPUTS, inputs) && !inputs.empty()) {
  3522. std::string input = inputs[kAippOriginInputIndex];
  3523. GELOGI("GetOrigInputInfo: origin input str: %s", input.c_str());
  3524. std::vector<std::string> infos = ge::StringUtils::Split(input, ':');
  3525. if (infos.size() != kAippInfoNum) {
  3526. GELOGW("origin input str is invalid.");
  3527. }
  3528. orig_input_info.format = TypeUtils::SerialStringToFormat(infos[kAippInfoFormat]);
  3529. orig_input_info.data_type = TypeUtils::SerialStringToDataType(infos[kAippInfoDataType]);
  3530. orig_input_info.dim_num = std::strtol(infos[kAippInfoDimNum].c_str(), nullptr, kDecimal);
  3531. }
  3532. return SUCCESS;
  3533. }
  3534. void DavinciModel::ParseAIPPInfo(std::string in_out_info, InputOutputDims &dims_info) {
  3535. GELOGI("ParseAIPPInfo: origin str: %s", in_out_info.c_str());
  3536. std::vector<std::string> infos = ge::StringUtils::Split(in_out_info, ':');
  3537. if (infos.size() != kAippInfoNum) {
  3538. GELOGW("origin input str is invalid.");
  3539. }
  3540. dims_info.name = infos[kAippInfoTensorName];
  3541. dims_info.size = std::strtol(infos[kAippInfoTensorSize].c_str(), nullptr, kDecimal);
  3542. dims_info.dim_num = std::strtol(infos[kAippInfoDimNum].c_str(), nullptr, kDecimal);
  3543. std::vector<std::string> dims = ge::StringUtils::Split(infos[kAippInfoShape], ',');
  3544. for (const auto &dim : dims) {
  3545. if (dim.empty()) {
  3546. continue;
  3547. }
  3548. dims_info.dims.emplace_back(std::strtol(dim.c_str(), nullptr, kDecimal));
  3549. }
  3550. }
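ParseAIPPInfo and GetOrigInputInfo treat each AIPP info string as ':'-separated fields (kAippInfoNum of them), with the shape field further ','-separated; individual fields are then read at fixed indices such as kAippInfoTensorName. A hedged sketch of the same splitting without the real ge::StringUtils::Split or the fixed index constants; the field order mentioned in the comment is illustrative only:

#include <cstdlib>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical splitter mirroring the ':' / ',' parsing used by ParseAIPPInfo.
static std::vector<std::string> Split(const std::string &str, char delim) {
  std::vector<std::string> parts;
  std::stringstream ss(str);
  std::string item;
  while (std::getline(ss, item, delim)) {
    parts.push_back(item);
  }
  return parts;
}

// The shape field (e.g. "1,224,224,3" in a made-up info string) is split on ','
// and each piece converted with strtol, as in ParseAIPPInfo above.
static std::vector<long> ParseDims(const std::string &shape_field) {
  std::vector<long> dims;
  for (const auto &dim : Split(shape_field, ',')) {
    if (!dim.empty()) {
      dims.push_back(std::strtol(dim.c_str(), nullptr, 10));
    }
  }
  return dims;
}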
  3551. Status DavinciModel::GetAllAippInputOutputDims(uint32_t index, std::vector<InputOutputDims> &input_dims,
  3552. std::vector<InputOutputDims> &output_dims) {
  3553. GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
  3554. OpDescPtr data_op = data_op_list_[index];
  3555. if (!data_op->HasAttr(ATTR_NAME_AIPP_INPUTS) || !data_op->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
  3556. GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "GetAllAippInputOutputDims: there is not AIPP related with index %u.", index);
  3557. return ACL_ERROR_GE_AIPP_NOT_EXIST;
  3558. }
  3559. vector<std::string> inputs;
  3560. if (AttrUtils::GetListStr(data_op, ATTR_NAME_AIPP_INPUTS, inputs) && !inputs.empty()) {
  3561. GELOGI("GetAllAippInputOutputDims: Data: %s has %zu related aippInfo.", data_op->GetName().c_str(), inputs.size());
  3562. for (auto it : inputs) {
  3563. InputOutputDims input_info;
  3564. ParseAIPPInfo(it, input_info);
  3565. input_dims.emplace_back(input_info);
  3566. GELOGD("GetAllAippInputOutputDims Aipp origin input dims info: %s", it.c_str());
  3567. ConstGeTensorDescPtr data_input_desc = data_op->GetInputDescPtr(kDataIndex);
  3568. int64_t data_input_size;
  3569. (void)TensorUtils::GetSize(*(data_op->GetInputDescPtr(kDataIndex)), data_input_size);
  3570. GELOGD(
  3571. "GetAllAippInputOutputDims related Data[%d]: tensor_name is %s, dim_num is %zu, tensor_size: %zu, format: "
  3572. "%s, data_type: %s, shape: %s .",
  3573. index, data_op->GetName().c_str(), data_input_desc->GetShape().GetDimNum(), data_input_size,
  3574. TypeUtils::FormatToSerialString(data_input_desc->GetFormat()).c_str(),
  3575. TypeUtils::DataTypeToSerialString(data_input_desc->GetDataType()).c_str(),
  3576. formats::JoinToString(data_input_desc->GetShape().GetDims()).c_str());
  3577. }
  3578. }
  3579. vector<std::string> outputs;
  3580. if (AttrUtils::GetListStr(data_op, ATTR_NAME_AIPP_OUTPUTS, outputs) && !outputs.empty()) {
  3581. for (auto it : outputs) {
  3582. InputOutputDims output_info;
  3583. ParseAIPPInfo(it, output_info);
  3584. output_dims.emplace_back(output_info);
  3585. GELOGD("GetAllAippInputOutputDims Aipp output dims info: %s", it.c_str());
  3586. }
  3587. }
  3588. return SUCCESS;
  3589. }
  3590. int64_t DavinciModel::GetFixedAddrsSize(string tensor_name) {
  3591. if (tensor_name_to_fixed_addr_size_.find(tensor_name) != tensor_name_to_fixed_addr_size_.end()) {
  3592. return tensor_name_to_fixed_addr_size_[tensor_name];
  3593. } else {
  3594. return total_fixed_addr_size_;
  3595. }
  3596. }
  3597. } // namespace ge
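SetTotalFixedAddrsSize and GetFixedAddrsSize implement a simple running-offset table: the first time a tensor name is seen it is assigned the current total, which then grows by that tensor's fixed-address size, and lookups for unknown names fall back to the current total. A minimal hypothetical version of that bookkeeping:

#include <cstdint>
#include <map>
#include <string>

// Hypothetical fixed-address offset table matching the behaviour of
// SetTotalFixedAddrsSize / GetFixedAddrsSize above.
class FixedAddrBook {
 public:
  void Add(const std::string &tensor_name, int64_t size) {
    if (offsets_.find(tensor_name) == offsets_.end()) {
      offsets_[tensor_name] = total_size_;  // assign current running offset
      total_size_ += size;
    }
  }

  int64_t OffsetOf(const std::string &tensor_name) const {
    auto it = offsets_.find(tensor_name);
    return (it != offsets_.end()) ? it->second : total_size_;
  }

 private:
  std::map<std::string, int64_t> offsets_;
  int64_t total_size_ = 0;
};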

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them: GE takes the graph delivered by ME as input, performs a series of deep graph-optimization passes, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.