
check_package_redundancy.go 34 kB

package event

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/samber/lo"
	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
	"gitlink.org.cn/cloudream/common/pkgs/logger"
	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
	"gitlink.org.cn/cloudream/common/utils/sort2"
	stgglb "gitlink.org.cn/cloudream/storage/common/globals"
	stgmod "gitlink.org.cn/cloudream/storage/common/models"
	"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
	"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
	"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
	"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser"
	"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc"
	lrcparser "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc/parser"
	coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
	scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
	"gitlink.org.cn/cloudream/storage/scanner/internal/config"
)

type CheckPackageRedundancy struct {
	*scevt.CheckPackageRedundancy
}

func NewCheckPackageRedundancy(evt *scevt.CheckPackageRedundancy) *CheckPackageRedundancy {
	return &CheckPackageRedundancy{
		CheckPackageRedundancy: evt,
	}
}

type StorageLoadInfo struct {
	Storage      stgmod.StorageDetail
	AccessAmount float64
}

func (t *CheckPackageRedundancy) TryMerge(other Event) bool {
	event, ok := other.(*CheckPackageRedundancy)
	if !ok {
		return false
	}
	return event.PackageID == t.PackageID
}
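
// Execute inspects every object in the package, converts it to a more suitable
// redundancy scheme (none/rep/ec/lrc) where necessary, and reports the changed
// objects to the coordinator.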
func (t *CheckPackageRedundancy) Execute(execCtx ExecuteContext) {
	log := logger.WithType[CheckPackageRedundancy]("Event")
	startTime := time.Now()
	log.Debugf("begin with %v", logger.FormatStruct(t.CheckPackageRedundancy))
	defer func() {
		log.Debugf("end, time: %v", time.Since(startTime))
	}()

	// TODO Should read directly from the database, like the other events do
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		log.Warnf("new coordinator client: %s", err.Error())
		return
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	getObjs, err := coorCli.GetPackageObjectDetails(coormq.ReqGetPackageObjectDetails(t.PackageID))
	if err != nil {
		log.Warnf("getting package objects: %s", err.Error())
		return
	}

	stats, err := execCtx.Args.DB.PackageAccessStat().GetByPackageID(execCtx.Args.DB.DefCtx(), t.PackageID)
	if err != nil {
		log.Warnf("getting package access stats: %s", err.Error())
		return
	}

	// TODO UserID
	getStgs, err := coorCli.GetUserStorageDetails(coormq.ReqGetUserStorageDetails(1))
	if err != nil {
		log.Warnf("getting all nodes: %s", err.Error())
		return
	}
	if len(getStgs.Storages) == 0 {
		log.Warnf("no available nodes")
		return
	}

	userAllStorages := make(map[cdssdk.StorageID]*StorageLoadInfo)
	for _, stg := range getStgs.Storages {
		userAllStorages[stg.Storage.StorageID] = &StorageLoadInfo{
			Storage: stg,
		}
	}

	for _, stat := range stats {
		info, ok := userAllStorages[stat.StorageID]
		if !ok {
			continue
		}
		info.AccessAmount = stat.Amount
	}

	var changedObjects []coormq.UpdatingObjectRedundancy

	defRep := cdssdk.DefaultRepRedundancy
	defEC := cdssdk.DefaultECRedundancy

	// TODO The rep replica count is currently fixed at 2, so two nodes are picked directly here
	// TODO Move this into the chooseRedundancy function
	mostBlockStgIDs := t.summaryRepObjectBlockNodes(getObjs.Objects, 2)
	newRepStgs := t.chooseNewNodesForRep(&defRep, userAllStorages)
	rechoosedRepStgs := t.rechooseNodesForRep(mostBlockStgIDs, &defRep, userAllStorages)
	newECStgs := t.chooseNewNodesForEC(&defEC, userAllStorages)

	// Acquire locks
	builder := reqbuilder.NewBuilder()
	for _, node := range newRepStgs {
		builder.Shard().Buzy(node.Storage.Storage.StorageID)
	}
	for _, node := range newECStgs {
		builder.Shard().Buzy(node.Storage.Storage.StorageID)
	}

	mutex, err := builder.MutexLock(execCtx.Args.DistLock)
	if err != nil {
		log.Warnf("acquiring dist lock: %s", err.Error())
		return
	}
	defer mutex.Unlock()

	for _, obj := range getObjs.Objects {
		var updating *coormq.UpdatingObjectRedundancy
		var err error

		newRed, selectedNodes := t.chooseRedundancy(obj, userAllStorages)

		switch srcRed := obj.Object.Redundancy.(type) {
		case *cdssdk.NoneRedundancy:
			switch newRed := newRed.(type) {
			case *cdssdk.RepRedundancy:
				log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> rep")
				updating, err = t.noneToRep(execCtx, obj, newRed, newRepStgs)

			case *cdssdk.ECRedundancy:
				log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> ec")
				updating, err = t.noneToEC(execCtx, obj, newRed, newECStgs)

			case *cdssdk.LRCRedundancy:
				log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> lrc")
				updating, err = t.noneToLRC(execCtx, obj, newRed, selectedNodes)
			}

		case *cdssdk.RepRedundancy:
			switch newRed := newRed.(type) {
			case *cdssdk.RepRedundancy:
				updating, err = t.repToRep(execCtx, obj, srcRed, rechoosedRepStgs)

			case *cdssdk.ECRedundancy:
				log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: rep -> ec")
				updating, err = t.repToEC(execCtx, obj, newRed, newECStgs)
			}

		case *cdssdk.ECRedundancy:
			switch newRed := newRed.(type) {
			case *cdssdk.RepRedundancy:
				log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: ec -> rep")
				updating, err = t.ecToRep(execCtx, obj, srcRed, newRed, newRepStgs)

			case *cdssdk.ECRedundancy:
				uploadNodes := t.rechooseNodesForEC(obj, srcRed, userAllStorages)
				updating, err = t.ecToEC(execCtx, obj, srcRed, newRed, uploadNodes)
			}

		case *cdssdk.LRCRedundancy:
			switch newRed := newRed.(type) {
			case *cdssdk.LRCRedundancy:
				uploadNodes := t.rechooseNodesForLRC(obj, srcRed, userAllStorages)
				updating, err = t.lrcToLRC(execCtx, obj, srcRed, newRed, uploadNodes)
			}
		}

		if updating != nil {
			changedObjects = append(changedObjects, *updating)
		}
		if err != nil {
			log.WithField("ObjectID", obj.Object.ObjectID).Warnf("%s, its redundancy won't be changed", err.Error())
		}
	}

	if len(changedObjects) == 0 {
		return
	}

	_, err = coorCli.UpdateObjectRedundancy(coormq.ReqUpdateObjectRedundancy(changedObjects))
	if err != nil {
		log.Warnf("requesting to change object redundancy: %s", err.Error())
		return
	}
}
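
// chooseRedundancy picks the target redundancy scheme and candidate storages for
// an object based on its current redundancy; it currently only handles
// NoneRedundancy (to default EC) and LRCRedundancy (re-chosen LRC storages).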
func (t *CheckPackageRedundancy) chooseRedundancy(obj stgmod.ObjectDetail, userAllStgs map[cdssdk.StorageID]*StorageLoadInfo) (cdssdk.Redundancy, []*StorageLoadInfo) {
	switch obj.Object.Redundancy.(type) {
	case *cdssdk.NoneRedundancy:
		newStgs := t.chooseNewNodesForEC(&cdssdk.DefaultECRedundancy, userAllStgs)
		return &cdssdk.DefaultECRedundancy, newStgs

		// newLRCNodes := t.chooseNewNodesForLRC(&cdssdk.DefaultLRCRedundancy, userAllNodes)
		// return &cdssdk.DefaultLRCRedundancy, newLRCNodes

	case *cdssdk.LRCRedundancy:
		newLRCStgs := t.rechooseNodesForLRC(obj, &cdssdk.DefaultLRCRedundancy, userAllStgs)
		return &cdssdk.DefaultLRCRedundancy, newLRCStgs
	}

	return nil, nil
}

// Count the nodes holding each object's blocks and pick at most nodeCnt nodes with the most blocks.
func (t *CheckPackageRedundancy) summaryRepObjectBlockNodes(objs []stgmod.ObjectDetail, nodeCnt int) []cdssdk.StorageID {
	type stgBlocks struct {
		StorageID cdssdk.StorageID
		Count     int
	}

	stgBlocksMap := make(map[cdssdk.StorageID]*stgBlocks)
	for _, obj := range objs {
		shouldUseEC := obj.Object.Size > config.Cfg().ECFileSizeThreshold
		if _, ok := obj.Object.Redundancy.(*cdssdk.RepRedundancy); ok && !shouldUseEC {
			for _, block := range obj.Blocks {
				if _, ok := stgBlocksMap[block.StorageID]; !ok {
					stgBlocksMap[block.StorageID] = &stgBlocks{
						StorageID: block.StorageID,
						Count:     0,
					}
				}
				stgBlocksMap[block.StorageID].Count++
			}
		}
	}

	nodes := lo.Values(stgBlocksMap)
	sort2.Sort(nodes, func(left *stgBlocks, right *stgBlocks) int {
		return right.Count - left.Count
	})

	ids := lo.Map(nodes, func(item *stgBlocks, idx int) cdssdk.StorageID { return item.StorageID })
	if len(ids) > nodeCnt {
		ids = ids[:nodeCnt]
	}
	return ids
}

func (t *CheckPackageRedundancy) chooseNewNodesForRep(red *cdssdk.RepRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo {
	sortedNodes := sort2.Sort(lo.Values(allStgs), func(left *StorageLoadInfo, right *StorageLoadInfo) int {
		return sort2.Cmp(right.AccessAmount, left.AccessAmount)
	})
	return t.chooseSoManyNodes(red.RepCount, sortedNodes)
}

func (t *CheckPackageRedundancy) chooseNewNodesForEC(red *cdssdk.ECRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo {
	sortedNodes := sort2.Sort(lo.Values(allStgs), func(left *StorageLoadInfo, right *StorageLoadInfo) int {
		return sort2.Cmp(right.AccessAmount, left.AccessAmount)
	})
	return t.chooseSoManyNodes(red.N, sortedNodes)
}

func (t *CheckPackageRedundancy) chooseNewNodesForLRC(red *cdssdk.LRCRedundancy, allNodes map[cdssdk.NodeID]*StorageLoadInfo) []*StorageLoadInfo {
	sortedNodes := sort2.Sort(lo.Values(allNodes), func(left *StorageLoadInfo, right *StorageLoadInfo) int {
		return sort2.Cmp(right.AccessAmount, left.AccessAmount)
	})
	return t.chooseSoManyNodes(red.N, sortedNodes)
}

func (t *CheckPackageRedundancy) rechooseNodesForRep(mostBlockStgIDs []cdssdk.StorageID, red *cdssdk.RepRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo {
	type rechooseNode struct {
		*StorageLoadInfo
		HasBlock bool
	}

	var rechooseStgs []*rechooseNode
	for _, stg := range allStgs {
		hasBlock := false
		for _, id := range mostBlockStgIDs {
			if id == stg.Storage.Storage.StorageID {
				hasBlock = true
				break
			}
		}

		rechooseStgs = append(rechooseStgs, &rechooseNode{
			StorageLoadInfo: stg,
			HasBlock:        hasBlock,
		})
	}

	sortedStgs := sort2.Sort(rechooseStgs, func(left *rechooseNode, right *rechooseNode) int {
		// Prefer nodes that already cache a block of the file
		v := sort2.CmpBool(right.HasBlock, left.HasBlock)
		if v != 0 {
			return v
		}
		return sort2.Cmp(right.AccessAmount, left.AccessAmount)
	})

	return t.chooseSoManyNodes(red.RepCount, lo.Map(sortedStgs, func(node *rechooseNode, idx int) *StorageLoadInfo { return node.StorageLoadInfo }))
}

func (t *CheckPackageRedundancy) rechooseNodesForEC(obj stgmod.ObjectDetail, red *cdssdk.ECRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo {
	type rechooseStg struct {
		*StorageLoadInfo
		CachedBlockIndex int
	}

	var rechooseStgs []*rechooseStg
	for _, stg := range allStgs {
		cachedBlockIndex := -1
		for _, block := range obj.Blocks {
			if block.StorageID == stg.Storage.Storage.StorageID {
				cachedBlockIndex = block.Index
				break
			}
		}

		rechooseStgs = append(rechooseStgs, &rechooseStg{
			StorageLoadInfo:  stg,
			CachedBlockIndex: cachedBlockIndex,
		})
	}

	sortedStgs := sort2.Sort(rechooseStgs, func(left *rechooseStg, right *rechooseStg) int {
		// Prefer nodes that already cache a block of the file
		v := sort2.CmpBool(right.CachedBlockIndex > -1, left.CachedBlockIndex > -1)
		if v != 0 {
			return v
		}
		return sort2.Cmp(right.AccessAmount, left.AccessAmount)
	})

	// TODO When picking nodes that already hold blocks, consider keeping the index order
	return t.chooseSoManyNodes(red.N, lo.Map(sortedStgs, func(node *rechooseStg, idx int) *StorageLoadInfo { return node.StorageLoadInfo }))
}

func (t *CheckPackageRedundancy) rechooseNodesForLRC(obj stgmod.ObjectDetail, red *cdssdk.LRCRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo {
	type rechooseStg struct {
		*StorageLoadInfo
		CachedBlockIndex int
	}

	var rechooseStgs []*rechooseStg
	for _, stg := range allStgs {
		cachedBlockIndex := -1
		for _, block := range obj.Blocks {
			if block.StorageID == stg.Storage.Storage.StorageID {
				cachedBlockIndex = block.Index
				break
			}
		}

		rechooseStgs = append(rechooseStgs, &rechooseStg{
			StorageLoadInfo:  stg,
			CachedBlockIndex: cachedBlockIndex,
		})
	}

	sortedStgs := sort2.Sort(rechooseStgs, func(left *rechooseStg, right *rechooseStg) int {
		// Prefer nodes that already cache a block of the file
		v := sort2.CmpBool(right.CachedBlockIndex > -1, left.CachedBlockIndex > -1)
		if v != 0 {
			return v
		}
		return sort2.Cmp(right.AccessAmount, left.AccessAmount)
	})

	// TODO When picking nodes that already hold blocks, consider keeping the index order
	return t.chooseSoManyNodes(red.N, lo.Map(sortedStgs, func(node *rechooseStg, idx int) *StorageLoadInfo { return node.StorageLoadInfo }))
}
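
// chooseSoManyNodes picks count storages from stgs. The list is duplicated so a
// storage can be chosen more than once when there are not enough distinct
// storages, and within each selection round storages in different locations are
// preferred.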
func (t *CheckPackageRedundancy) chooseSoManyNodes(count int, stgs []*StorageLoadInfo) []*StorageLoadInfo {
	repeatCount := (count + len(stgs) - 1) / len(stgs)
	extendStgs := make([]*StorageLoadInfo, repeatCount*len(stgs))

	// Duplicate the storage list until it reaches the required count.
	// Layout after duplication: ABCD -> AAABBBCCCDDD
	for p := 0; p < repeatCount; p++ {
		for i, node := range stgs {
			putIdx := i*repeatCount + p
			extendStgs[putIdx] = node
		}
	}
	extendStgs = extendStgs[:count]

	var chosen []*StorageLoadInfo
	for len(chosen) < count {
		// Within each round, pick storages in different locations; if there are not enough, start another round
		chosenLocations := make(map[cdssdk.LocationID]bool)
		for i, stg := range extendStgs {
			if stg == nil {
				continue
			}
			if chosenLocations[stg.Storage.MasterHub.LocationID] {
				continue
			}

			chosen = append(chosen, stg)
			chosenLocations[stg.Storage.MasterHub.LocationID] = true
			extendStgs[i] = nil
		}
	}

	return chosen
}
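
// noneToRep replicates an object that currently has no redundancy: it reads the
// cached copy from the storage holding its first block and uploads a full
// replica to each chosen storage.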
func (t *CheckPackageRedundancy) noneToRep(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.RepRedundancy, uploadStgs []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
	if len(obj.Blocks) == 0 {
		return nil, fmt.Errorf("object is not cached on any nodes, cannot change its redundancy to rep")
	}

	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{obj.Blocks[0].StorageID}))
	if err != nil {
		return nil, fmt.Errorf("requesting to get storages: %w", err)
	}
	if getStgs.Storages[0] == nil {
		return nil, fmt.Errorf("storage %v not found", obj.Blocks[0].StorageID)
	}
	if getStgs.Storages[0].MasterHub == nil {
		return nil, fmt.Errorf("storage %v has no master hub", obj.Blocks[0].StorageID)
	}

	// If all chosen replica storages are the same one, only a single upload is needed
	uploadStgs = lo.UniqBy(uploadStgs, func(item *StorageLoadInfo) cdssdk.StorageID { return item.Storage.Storage.StorageID })

	ft := ioswitch2.NewFromTo()
	ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *getStgs.Storages[0].MasterHub, getStgs.Storages[0].Storage, -1))
	for i, stg := range uploadStgs {
		ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage.Storage, -1, fmt.Sprintf("%d", i)))
	}

	plans := exec.NewPlanBuilder()
	parser := parser.NewParser(cdssdk.DefaultECRedundancy)
	err = parser.Parse(ft, plans)
	if err != nil {
		return nil, fmt.Errorf("parsing plan: %w", err)
	}

	// TODO Add dependencies
	execCtx := exec.NewExecContext()
	exec.SetValueByType(execCtx, ctx.Args.StgMgr)
	ret, err := plans.Execute(execCtx).Wait(context.Background())
	if err != nil {
		return nil, fmt.Errorf("executing io plan: %w", err)
	}

	var blocks []stgmod.ObjectBlock
	for i, stg := range uploadStgs {
		blocks = append(blocks, stgmod.ObjectBlock{
			ObjectID:  obj.Object.ObjectID,
			Index:     0,
			StorageID: stg.Storage.Storage.StorageID,
			FileHash:  ret[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
		})
	}

	return &coormq.UpdatingObjectRedundancy{
		ObjectID:   obj.Object.ObjectID,
		Redundancy: red,
		Blocks:     blocks,
	}, nil
}
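
// noneToEC erasure-codes an object that currently has no redundancy and uploads
// one coded block to each of the first red.N chosen storages.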
func (t *CheckPackageRedundancy) noneToEC(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.ECRedundancy, uploadStgs []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	if len(obj.Blocks) == 0 {
		return nil, fmt.Errorf("object is not cached on any nodes, cannot change its redundancy to ec")
	}

	getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{obj.Blocks[0].StorageID}))
	if err != nil {
		return nil, fmt.Errorf("requesting to get storages: %w", err)
	}
	if getStgs.Storages[0] == nil {
		return nil, fmt.Errorf("storage %v not found", obj.Blocks[0].StorageID)
	}
	if getStgs.Storages[0].MasterHub == nil {
		return nil, fmt.Errorf("storage %v has no master hub", obj.Blocks[0].StorageID)
	}

	ft := ioswitch2.NewFromTo()
	ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *getStgs.Storages[0].MasterHub, getStgs.Storages[0].Storage, -1))
	for i := 0; i < red.N; i++ {
		ft.AddTo(ioswitch2.NewToShardStore(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage.Storage, i, fmt.Sprintf("%d", i)))
	}

	parser := parser.NewParser(*red)
	plans := exec.NewPlanBuilder()
	err = parser.Parse(ft, plans)
	if err != nil {
		return nil, fmt.Errorf("parsing plan: %w", err)
	}

	execCtx := exec.NewExecContext()
	exec.SetValueByType(execCtx, ctx.Args.StgMgr)
	ioRet, err := plans.Execute(execCtx).Wait(context.Background())
	if err != nil {
		return nil, fmt.Errorf("executing io plan: %w", err)
	}

	var blocks []stgmod.ObjectBlock
	for i := 0; i < red.N; i++ {
		blocks = append(blocks, stgmod.ObjectBlock{
			ObjectID:  obj.Object.ObjectID,
			Index:     i,
			StorageID: uploadStgs[i].Storage.Storage.StorageID,
			FileHash:  ioRet[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
		})
	}

	return &coormq.UpdatingObjectRedundancy{
		ObjectID:   obj.Object.ObjectID,
		Redundancy: red,
		Blocks:     blocks,
	}, nil
}
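
// noneToLRC encodes an object that currently has no redundancy with LRC and
// uploads one coded block to each of the first red.N chosen storages.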
func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.LRCRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	if len(obj.Blocks) == 0 {
		return nil, fmt.Errorf("object is not cached on any nodes, cannot change its redundancy to lrc")
	}

	getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{obj.Blocks[0].StorageID}))
	if err != nil {
		return nil, fmt.Errorf("requesting to get storages: %w", err)
	}
	if getStgs.Storages[0] == nil {
		return nil, fmt.Errorf("storage %v not found", obj.Blocks[0].StorageID)
	}
	if getStgs.Storages[0].MasterHub == nil {
		return nil, fmt.Errorf("storage %v has no master hub", obj.Blocks[0].StorageID)
	}

	var toes []ioswitchlrc.To
	for i := 0; i < red.N; i++ {
		toes = append(toes, ioswitchlrc.NewToStorage(*uploadNodes[i].Storage.MasterHub, uploadNodes[i].Storage.Storage, i, fmt.Sprintf("%d", i)))
	}

	plans := exec.NewPlanBuilder()
	err = lrcparser.Encode(ioswitchlrc.NewFromNode(obj.Object.FileHash, *getStgs.Storages[0].MasterHub, getStgs.Storages[0].Storage, -1), toes, plans)
	if err != nil {
		return nil, fmt.Errorf("parsing plan: %w", err)
	}

	execCtx := exec.NewExecContext()
	exec.SetValueByType(execCtx, ctx.Args.StgMgr)
	ioRet, err := plans.Execute(execCtx).Wait(context.Background())
	if err != nil {
		return nil, fmt.Errorf("executing io plan: %w", err)
	}

	var blocks []stgmod.ObjectBlock
	for i := 0; i < red.N; i++ {
		blocks = append(blocks, stgmod.ObjectBlock{
			ObjectID:  obj.Object.ObjectID,
			Index:     i,
			StorageID: uploadNodes[i].Storage.Storage.StorageID,
			FileHash:  ioRet[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
		})
	}

	return &coormq.UpdatingObjectRedundancy{
		ObjectID:   obj.Object.ObjectID,
		Redundancy: red,
		Blocks:     blocks,
	}, nil
}

func (t *CheckPackageRedundancy) repToRep(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.RepRedundancy, uploadStgs []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
	if len(obj.Blocks) == 0 {
		return nil, fmt.Errorf("object is not cached on any nodes, cannot change its redundancy to rep")
	}

	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{obj.Blocks[0].StorageID}))
	if err != nil {
		return nil, fmt.Errorf("requesting to get storages: %w", err)
	}
	if getStgs.Storages[0] == nil {
		return nil, fmt.Errorf("storage %v not found", obj.Blocks[0].StorageID)
	}
	if getStgs.Storages[0].MasterHub == nil {
		return nil, fmt.Errorf("storage %v has no master hub", obj.Blocks[0].StorageID)
	}

	// If all chosen replica storages are the same one, only a single upload is needed
	uploadStgs = lo.UniqBy(uploadStgs, func(item *StorageLoadInfo) cdssdk.StorageID { return item.Storage.Storage.StorageID })

	ft := ioswitch2.NewFromTo()
	ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *getStgs.Storages[0].MasterHub, getStgs.Storages[0].Storage, -1))
	for i, stg := range uploadStgs {
		ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage.Storage, -1, fmt.Sprintf("%d", i)))
	}

	plans := exec.NewPlanBuilder()
	parser := parser.NewParser(cdssdk.DefaultECRedundancy)
	err = parser.Parse(ft, plans)
	if err != nil {
		return nil, fmt.Errorf("parsing plan: %w", err)
	}

	// TODO Add dependencies
	execCtx := exec.NewExecContext()
	exec.SetValueByType(execCtx, ctx.Args.StgMgr)
	ret, err := plans.Execute(execCtx).Wait(context.Background())
	if err != nil {
		return nil, fmt.Errorf("executing io plan: %w", err)
	}

	var blocks []stgmod.ObjectBlock
	for i, stg := range uploadStgs {
		blocks = append(blocks, stgmod.ObjectBlock{
			ObjectID:  obj.Object.ObjectID,
			Index:     0,
			StorageID: stg.Storage.Storage.StorageID,
			FileHash:  ret[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
		})
	}

	return &coormq.UpdatingObjectRedundancy{
		ObjectID:   obj.Object.ObjectID,
		Redundancy: red,
		Blocks:     blocks,
	}, nil
}

func (t *CheckPackageRedundancy) repToEC(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.ECRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
	return t.noneToEC(ctx, obj, red, uploadNodes)
}
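
// ecToRep reconstructs the original file from srcRed.K available erasure-coded
// blocks and stores a full replica on each chosen storage.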
func (t *CheckPackageRedundancy) ecToRep(ctx ExecuteContext, obj stgmod.ObjectDetail, srcRed *cdssdk.ECRedundancy, tarRed *cdssdk.RepRedundancy, uploadStgs []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	var chosenBlocks []stgmod.GrouppedObjectBlock
	var chosenBlockIndexes []int
	for _, block := range obj.GroupBlocks() {
		if len(block.StorageIDs) > 0 {
			chosenBlocks = append(chosenBlocks, block)
			chosenBlockIndexes = append(chosenBlockIndexes, block.Index)
		}

		if len(chosenBlocks) == srcRed.K {
			break
		}
	}
	if len(chosenBlocks) < srcRed.K {
		return nil, fmt.Errorf("not enough blocks to reconstruct the original file data")
	}

	// If all chosen replica storages are the same one, only a single upload is needed
	uploadStgs = lo.UniqBy(uploadStgs, func(item *StorageLoadInfo) cdssdk.StorageID { return item.Storage.Storage.StorageID })

	// Each chosen storage reconstructs the original data locally
	parser := parser.NewParser(*srcRed)
	planBlder := exec.NewPlanBuilder()
	for i := range uploadStgs {
		ft := ioswitch2.NewFromTo()
		for _, block := range chosenBlocks {
			ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage.Storage, block.Index))
		}

		length := obj.Object.Size
		ft.AddTo(ioswitch2.NewToShardStoreWithRange(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage.Storage, -1, fmt.Sprintf("%d", i), exec.Range{
			Offset: 0,
			Length: &length,
		}))

		err := parser.Parse(ft, planBlder)
		if err != nil {
			return nil, fmt.Errorf("parsing plan: %w", err)
		}
	}

	// TODO Add dependencies
	execCtx := exec.NewExecContext()
	exec.SetValueByType(execCtx, ctx.Args.StgMgr)
	ioRet, err := planBlder.Execute(execCtx).Wait(context.Background())
	if err != nil {
		return nil, fmt.Errorf("executing io plan: %w", err)
	}

	var blocks []stgmod.ObjectBlock
	for i := range uploadStgs {
		blocks = append(blocks, stgmod.ObjectBlock{
			ObjectID:  obj.Object.ObjectID,
			Index:     0,
			StorageID: uploadStgs[i].Storage.Storage.StorageID,
			FileHash:  ioRet[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
		})
	}

	return &coormq.UpdatingObjectRedundancy{
		ObjectID:   obj.Object.ObjectID,
		Redundancy: tarRed,
		Blocks:     blocks,
	}, nil
}
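
// ecToEC re-places an object's erasure-coded blocks onto the newly chosen
// storages, rebuilding only the blocks that are not already recorded on their
// target storage.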
func (t *CheckPackageRedundancy) ecToEC(ctx ExecuteContext, obj stgmod.ObjectDetail, srcRed *cdssdk.ECRedundancy, tarRed *cdssdk.ECRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	grpBlocks := obj.GroupBlocks()

	var chosenBlocks []stgmod.GrouppedObjectBlock
	for _, block := range grpBlocks {
		if len(block.StorageIDs) > 0 {
			chosenBlocks = append(chosenBlocks, block)
		}

		if len(chosenBlocks) == srcRed.K {
			break
		}
	}
	if len(chosenBlocks) < srcRed.K {
		return nil, fmt.Errorf("not enough blocks to reconstruct the original file data")
	}

	// The EC parameters are currently all the same, so there is no need to rebuild the
	// full data and re-split it; the blocks needed by the target storages can be built directly
	parser := parser.NewParser(*srcRed)
	planBlder := exec.NewPlanBuilder()

	var newBlocks []stgmod.ObjectBlock
	shouldUpdateBlocks := false
	for i, stg := range uploadNodes {
		newBlock := stgmod.ObjectBlock{
			ObjectID:  obj.Object.ObjectID,
			Index:     i,
			StorageID: stg.Storage.Storage.StorageID,
		}

		grp, ok := lo.Find(grpBlocks, func(grp stgmod.GrouppedObjectBlock) bool { return grp.Index == i })

		// If the newly chosen storage is already recorded in the Block table, no change is needed
		if ok && lo.Contains(grp.StorageIDs, stg.Storage.Storage.StorageID) {
			newBlock.FileHash = grp.FileHash
			newBlocks = append(newBlocks, newBlock)
			continue
		}

		shouldUpdateBlocks = true

		// Otherwise reconstruct the block this storage needs
		ft := ioswitch2.NewFromTo()
		for _, block := range chosenBlocks {
			stg := stg.Storage
			ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *stg.MasterHub, stg.Storage, block.Index))
		}

		// Output only the block this storage needs to keep
		ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage.Storage, i, fmt.Sprintf("%d", i)))

		err := parser.Parse(ft, planBlder)
		if err != nil {
			return nil, fmt.Errorf("parsing plan: %w", err)
		}

		newBlocks = append(newBlocks, newBlock)
	}

	// If there are no plans at all, Wait returns success immediately
	execCtx := exec.NewExecContext()
	exec.SetValueByType(execCtx, ctx.Args.StgMgr)
	ret, err := planBlder.Execute(execCtx).Wait(context.Background())
	if err != nil {
		return nil, fmt.Errorf("executing io plan: %w", err)
	}

	if !shouldUpdateBlocks {
		return nil, nil
	}

	for k, v := range ret {
		idx, err := strconv.ParseInt(k, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("parsing result key %s as index: %w", k, err)
		}

		newBlocks[idx].FileHash = v.(*ops2.FileHashValue).Hash
	}

	return &coormq.UpdatingObjectRedundancy{
		ObjectID:   obj.Object.ObjectID,
		Redundancy: tarRed,
		Blocks:     newBlocks,
	}, nil
}
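
// lrcToLRC re-places an object's LRC blocks. It detects whether the missing
// blocks could be repaired within their local groups, but currently always
// falls back to a full reconstruction via reconstructLRC.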
func (t *CheckPackageRedundancy) lrcToLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, srcRed *cdssdk.LRCRedundancy, tarRed *cdssdk.LRCRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	blocksGrpByIndex := obj.GroupBlocks()

	var lostBlocks []int
	var lostBlockGrps []int
	canGroupReconstruct := true

	allBlockFlags := make([]bool, srcRed.N)
	for _, block := range blocksGrpByIndex {
		allBlockFlags[block.Index] = true
	}

	for i, ok := range allBlockFlags {
		grpID := srcRed.FindGroup(i)
		if !ok {
			if grpID == -1 {
				canGroupReconstruct = false
				break
			}

			if len(lostBlocks) > 0 && lostBlockGrps[len(lostBlockGrps)-1] == grpID {
				canGroupReconstruct = false
				break
			}

			lostBlocks = append(lostBlocks, i)
			lostBlockGrps = append(lostBlockGrps, grpID)
		}
	}

	if canGroupReconstruct {
		// return t.groupReconstructLRC(obj, lostBlocks, lostBlockGrps, blocksGrpByIndex, srcRed, uploadNodes)
	}

	return t.reconstructLRC(ctx, obj, blocksGrpByIndex, srcRed, uploadNodes)
}

/*
TODO2 Fix this block of code
func (t *CheckPackageRedundancy) groupReconstructLRC(obj stgmod.ObjectDetail, lostBlocks []int, lostBlockGrps []int, grpedBlocks []stgmod.GrouppedObjectBlock, red *cdssdk.LRCRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
	grped := make(map[int]stgmod.GrouppedObjectBlock)
	for _, b := range grpedBlocks {
		grped[b.Index] = b
	}

	plans := exec.NewPlanBuilder()
	for i := 0; i < len(lostBlocks); i++ {
		var froms []ioswitchlrc.From
		grpEles := red.GetGroupElements(lostBlockGrps[i])
		for _, ele := range grpEles {
			if ele == lostBlocks[i] {
				continue
			}
			froms = append(froms, ioswitchlrc.NewFromNode(grped[ele].FileHash, nil, ele))
		}

		err := lrcparser.ReconstructGroup(froms, []ioswitchlrc.To{
			ioswitchlrc.NewToNode(uploadNodes[i].Storage, lostBlocks[i], fmt.Sprintf("%d", lostBlocks[i])),
		}, plans)
		if err != nil {
			return nil, fmt.Errorf("parsing plan: %w", err)
		}
	}

	fmt.Printf("plans: %v\n", plans)

	// If there are no plans at all, Wait returns success immediately
	// TODO Add dependencies
	ret, err := plans.Execute(exec.NewExecContext()).Wait(context.TODO())
	if err != nil {
		return nil, fmt.Errorf("executing io plan: %w", err)
	}

	var newBlocks []stgmod.ObjectBlock
	for _, i := range lostBlocks {
		newBlocks = append(newBlocks, stgmod.ObjectBlock{
			ObjectID:  obj.Object.ObjectID,
			Index:     i,
			StorageID: uploadNodes[i].Storage.Storage.StorageID,
			FileHash:  ret[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
		})
	}
	for _, b := range grpedBlocks {
		for _, nodeID := range b.StorageIDs {
			newBlocks = append(newBlocks, stgmod.ObjectBlock{
				ObjectID:  obj.Object.ObjectID,
				Index:     b.Index,
				StorageID: nodeID,
				FileHash:  b.FileHash,
			})
		}
	}

	return &coormq.UpdatingObjectRedundancy{
		ObjectID:   obj.Object.ObjectID,
		Redundancy: red,
		Blocks:     newBlocks,
	}, nil
}
*/
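
// reconstructLRC rebuilds the LRC blocks that are missing from the newly chosen
// storages using red.K available blocks, leaving blocks that are already
// correctly placed unchanged.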
func (t *CheckPackageRedundancy) reconstructLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, grpBlocks []stgmod.GrouppedObjectBlock, red *cdssdk.LRCRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
	var chosenBlocks []stgmod.GrouppedObjectBlock
	for _, block := range grpBlocks {
		if len(block.StorageIDs) > 0 && block.Index < red.M() {
			chosenBlocks = append(chosenBlocks, block)
		}

		if len(chosenBlocks) == red.K {
			break
		}
	}
	if len(chosenBlocks) < red.K {
		return nil, fmt.Errorf("not enough blocks to reconstruct the original file data")
	}

	// The LRC parameters are currently all the same, so there is no need to rebuild the
	// full data and re-split it; the blocks needed by the target storages can be built directly
	planBlder := exec.NewPlanBuilder()

	var froms []ioswitchlrc.From
	var toes []ioswitchlrc.To
	var newBlocks []stgmod.ObjectBlock
	shouldUpdateBlocks := false
	for i, node := range uploadNodes {
		newBlock := stgmod.ObjectBlock{
			ObjectID:  obj.Object.ObjectID,
			Index:     i,
			StorageID: node.Storage.Storage.StorageID,
		}

		grp, ok := lo.Find(grpBlocks, func(grp stgmod.GrouppedObjectBlock) bool { return grp.Index == i })

		// If the newly chosen storage is already recorded in the Block table, no change is needed
		if ok && lo.Contains(grp.StorageIDs, node.Storage.Storage.StorageID) {
			newBlock.FileHash = grp.FileHash
			newBlocks = append(newBlocks, newBlock)
			continue
		}

		shouldUpdateBlocks = true

		// Otherwise reconstruct the block this storage needs
		for _, block := range chosenBlocks {
			fmt.Printf("b: %v\n", block.Index)
			stg := node.Storage
			froms = append(froms, ioswitchlrc.NewFromNode(block.FileHash, *stg.MasterHub, stg.Storage, block.Index))
		}

		// Output only the block this storage needs to keep
		toes = append(toes, ioswitchlrc.NewToStorage(*node.Storage.MasterHub, node.Storage.Storage, i, fmt.Sprintf("%d", i)))

		newBlocks = append(newBlocks, newBlock)
	}

	err := lrcparser.ReconstructAny(froms, toes, planBlder)
	if err != nil {
		return nil, fmt.Errorf("parsing plan: %w", err)
	}

	fmt.Printf("plans: %v\n", planBlder)

	// If there are no plans at all, Wait returns success immediately
	execCtx := exec.NewExecContext()
	exec.SetValueByType(execCtx, ctx.Args.StgMgr)
	ret, err := planBlder.Execute(execCtx).Wait(context.Background())
	if err != nil {
		return nil, fmt.Errorf("executing io plan: %w", err)
	}

	if !shouldUpdateBlocks {
		return nil, nil
	}

	for k, v := range ret {
		idx, err := strconv.ParseInt(k, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("parsing result key %s as index: %w", k, err)
		}

		newBlocks[idx].FileHash = v.(*ops2.FileHashValue).Hash
	}

	return &coormq.UpdatingObjectRedundancy{
		ObjectID:   obj.Object.ObjectID,
		Redundancy: red,
		Blocks:     newBlocks,
	}, nil
}

// func (t *CheckPackageRedundancy) pinObject(nodeID cdssdk.NodeID, fileHash string) error {
// 	agtCli, err := stgglb.AgentMQPool.Acquire(nodeID)
// 	if err != nil {
// 		return fmt.Errorf("new agent client: %w", err)
// 	}
// 	defer stgglb.AgentMQPool.Release(agtCli)

// 	_, err = agtCli.PinObject(agtmq.ReqPinObject([]string{fileHash}, false))
// 	if err != nil {
// 		return fmt.Errorf("start pinning object: %w", err)
// 	}

// 	return nil
// }

func init() {
	RegisterMessageConvertor(NewCheckPackageRedundancy)
}

This project aims to turn inter-cloud storage into public infrastructure, so that individuals and enterprises can use efficient inter-cloud storage services with a low barrier to entry (just install the out-of-the-box inter-cloud storage client; there is no need to worry about deploying any other components), while also letting users flexibly and conveniently customize the functional details of the inter-cloud storage service.