
clean_pinned.go 22 kB

package event

import (
	"fmt"
	"math"
	"math/rand"
	"strconv"

	"github.com/samber/lo"
	"gitlink.org.cn/cloudream/common/pkgs/logger"
	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
	mylo "gitlink.org.cn/cloudream/common/utils/lo"
	mymath "gitlink.org.cn/cloudream/common/utils/math"
	myref "gitlink.org.cn/cloudream/common/utils/reflect"
	mysort "gitlink.org.cn/cloudream/common/utils/sort"
	stgglb "gitlink.org.cn/cloudream/storage/common/globals"
	stgmod "gitlink.org.cn/cloudream/storage/common/models"
	"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
	"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
	"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/plans"
	coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
	scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)
type CleanPinned struct {
	*scevt.CleanPinned
}

func NewCleanPinned(evt *scevt.CleanPinned) *CleanPinned {
	return &CleanPinned{
		CleanPinned: evt,
	}
}

func (t *CleanPinned) TryMerge(other Event) bool {
	event, ok := other.(*CleanPinned)
	if !ok {
		return false
	}
	return t.PackageID == event.PackageID
}

func (t *CleanPinned) Execute(execCtx ExecuteContext) {
	log := logger.WithType[CleanPinned]("Event")
	log.Debugf("begin with %v", logger.FormatStruct(t.CleanPinned))
	defer log.Debugf("end")

	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		log.Warnf("new coordinator client: %s", err.Error())
		return
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	getObjs, err := coorCli.GetPackageObjectDetails(coormq.NewGetPackageObjectDetails(t.PackageID))
	if err != nil {
		log.Warnf("getting package objects: %s", err.Error())
		return
	}

	getLoadLog, err := coorCli.GetPackageLoadLogDetails(coormq.ReqGetPackageLoadLogDetails(t.PackageID))
	if err != nil {
		log.Warnf("getting package load log details: %s", err.Error())
		return
	}
	readerNodeIDs := lo.Map(getLoadLog.Logs, func(item coormq.PackageLoadLogDetail, idx int) cdssdk.NodeID { return item.Storage.NodeID })

	var changeRedEntries []coormq.ChangeObjectRedundancyEntry
	for _, obj := range getObjs.Objects {
		entry, err := t.doOne(execCtx, readerNodeIDs, coorCli, obj)
		if err != nil {
			log.WithField("PackageID", obj).Warn(err.Error())
			continue
		}
		if entry != nil {
			changeRedEntries = append(changeRedEntries, *entry)
		}
	}

	if len(changeRedEntries) > 0 {
		_, err = coorCli.ChangeObjectRedundancy(coormq.ReqChangeObjectRedundancy(changeRedEntries))
		if err != nil {
			log.Warnf("changing object redundancy: %s", err.Error())
			return
		}
	}
}

type doingContext struct {
	execCtx             ExecuteContext
	readerNodeIDs       []cdssdk.NodeID // nodes that may access this object in the near future
	nodesSortedByReader map[cdssdk.NodeID][]nodeDist // for each possible reader node, the data-holding nodes sorted by distance to it
	nodeInfos           map[cdssdk.NodeID]*model.Node
	blockList           []objectBlock // sorted list of block placements
	nodeBlockBitmaps    map[cdssdk.NodeID]*bitmap // bitmap describing which blocks each node holds
	allBlockTypeCount   int // total number of blocks the object is split into
	minBlockTypeCount   int // minimum number of blocks needed to recover the complete object
	nodeCombTree        combinatorialTree // node combination tree, used to speed up the disaster-tolerance calculation
	maxScore            float64 // highest score seen during the search
	maxScoreRmBlocks    []bool // removal plan corresponding to the highest score
	rmBlocks            []bool // current removal plan
	inversedIndex       int // index of the flag that was flipped to derive the current plan from the previous one
	lastScore           float64 // score of the previous plan
}

type objectBlock struct {
	Index     int
	NodeID    cdssdk.NodeID
	HasEntity bool   // the node holds the actual data of this block
	HasShadow bool   // if the node holds the complete file data, it is treated as holding every block; such blocks are called shadow blocks
	FileHash  string // only set when the node holds the actual data of this block
}

type nodeDist struct {
	NodeID   cdssdk.NodeID
	Distance float64
}

type combinatorialTree struct {
	nodes               []combinatorialTreeNode
	blocksMaps          map[int]bitmap
	nodeIDToLocalNodeID map[cdssdk.NodeID]int
	localNodeIDToNodeID []cdssdk.NodeID
}

const (
	iterActionNone  = 0
	iterActionSkip  = 1
	iterActionBreak = 2
)

func newCombinatorialTree(nodeBlocksMaps map[cdssdk.NodeID]*bitmap) combinatorialTree {
	tree := combinatorialTree{
		blocksMaps:          make(map[int]bitmap),
		nodeIDToLocalNodeID: make(map[cdssdk.NodeID]int),
	}

	tree.nodes = make([]combinatorialTreeNode, (1 << len(nodeBlocksMaps)))
	for id, mp := range nodeBlocksMaps {
		tree.nodeIDToLocalNodeID[id] = len(tree.localNodeIDToNodeID)
		tree.blocksMaps[len(tree.localNodeIDToNodeID)] = *mp
		tree.localNodeIDToNodeID = append(tree.localNodeIDToNodeID, id)
	}

	tree.nodes[0].localNodeID = -1
	index := 1
	tree.initNode(0, &tree.nodes[0], &index)

	return tree
}

func (t *combinatorialTree) initNode(minAvaiLocalNodeID int, parent *combinatorialTreeNode, index *int) {
	for i := minAvaiLocalNodeID; i < len(t.nodeIDToLocalNodeID); i++ {
		curIndex := *index
		*index++

		bitMp := t.blocksMaps[i]
		bitMp.Or(&parent.blocksBitmap)

		t.nodes[curIndex] = combinatorialTreeNode{
			localNodeID:  i,
			parent:       parent,
			blocksBitmap: bitMp,
		}
		t.initNode(i+1, &t.nodes[curIndex], index)
	}
}
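
// For illustration: with three local nodes (0, 1, 2) the tree built above contains
// 1<<3 = 8 nodes. Laid out in depth-first order, each index corresponds to the
// following combination of nodes:
//
//	0:{}  1:{0}  2:{0,1}  3:{0,1,2}  4:{0,2}  5:{1}  6:{1,2}  7:{2}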

// GetDepth returns the depth (layer) of the tree node at the given index.
func (t *combinatorialTree) GetDepth(index int) int {
	depth := 0

	// Repeatedly work out which subtree the index falls into. From left to right the subtrees
	// shrink (e.g. 8, 4, 2 nodes), which gives the range of index values covered by each subtree.
	subTreeCount := 1 << len(t.nodeIDToLocalNodeID)
	for index > 0 {
		if index < subTreeCount {
			// Once a subtree is located, increase the depth by 1 and descend into it, then repeat
			// the same procedure. Entering a subtree subtracts 1 from the index to skip the
			// subtree's root node.
			index--
			depth++
		} else {
			// If the index is outside this subtree, subtract the subtree's node count, so each
			// step applies the same logic to a smaller tree.
			index -= subTreeCount
		}
		subTreeCount >>= 1
	}

	return depth
}
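
// Using the three-node layout above: GetDepth(3) == 3 (node {0,1,2}),
// GetDepth(4) == 2 (node {0,2}) and GetDepth(7) == 1 (node {2}).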

// UpdateBitmap updates the block-distribution bitmap of one computing-center node and then
// refreshes every descendant of the tree nodes that correspond to it. If a tree node already
// holds K blocks after the update, its children are not updated any further.
func (t *combinatorialTree) UpdateBitmap(nodeID cdssdk.NodeID, mp bitmap, k int) {
	t.blocksMaps[t.nodeIDToLocalNodeID[nodeID]] = mp
	// Two ways of moving between tree nodes are used here:
	//  1. vertical moves (depth increases): from a node to its leftmost child; each step adds 1 to the index.
	//  2. horizontal moves: from a node to its right sibling; each step adds 8, 4, 2, ... to the index,
	//     depending on the current depth.
	// LocalNodeID starts at 0, so adding 1 to it gives the number of steps. Splitting those steps
	// between the two kinds of moves in every possible combination, with the constraint that the
	// first move is a vertical one (at least one vertical move), visits exactly the tree nodes
	// that represent this computing-center node.
	steps := t.nodeIDToLocalNodeID[nodeID] + 1
	for d := 1; d <= steps; d++ {
		t.iterCombBits(len(t.nodeIDToLocalNodeID)-1, steps-d, 0, func(i int) {
			index := d + i
			node := &t.nodes[index]

			newMp := t.blocksMaps[node.localNodeID]
			newMp.Or(&node.parent.blocksBitmap)
			node.blocksBitmap = newMp
			if newMp.Weight() >= k {
				return
			}

			t.iterChildren(index, func(index, parentIndex, depth int) int {
				curNode := &t.nodes[index]
				parentNode := t.nodes[parentIndex]

				newMp := t.blocksMaps[curNode.localNodeID]
				newMp.Or(&parentNode.blocksBitmap)
				curNode.blocksBitmap = newMp
				if newMp.Weight() >= k {
					return iterActionSkip
				}

				return iterActionNone
			})
		})
	}
}

// FindKBlocksMaxDepth walks the tree and returns the maximum depth of tree nodes that hold
// at least K blocks.
func (t *combinatorialTree) FindKBlocksMaxDepth(k int) int {
	maxDepth := -1
	t.iterChildren(0, func(index, parentIndex, depth int) int {
		if t.nodes[index].blocksBitmap.Weight() >= k {
			if maxDepth < depth {
				maxDepth = depth
			}
			return iterActionSkip
		}
		// If a leaf is reached without collecting K blocks, assume at least one more node is
		// needed, i.e. depth + 1. Because the traversal is depth-first, every combination of this
		// leaf plus one additional node has already been searched by the time the leaf is visited,
		// so depth + 1 is a valid result for this branch. Even if adding any single node is still
		// not enough to reach K blocks, the same reasoning extends recursively to adding two or
		// more nodes.
		if t.nodes[index].localNodeID == len(t.nodeIDToLocalNodeID)-1 {
			if maxDepth < depth+1 {
				maxDepth = depth + 1
			}
		}

		return iterActionNone
	})

	if maxDepth == -1 || maxDepth > len(t.nodeIDToLocalNodeID) {
		return len(t.nodeIDToLocalNodeID)
	}

	return maxDepth
}

func (t *combinatorialTree) iterCombBits(width int, count int, offset int, callback func(int)) {
	if count == 0 {
		callback(offset)
		return
	}

	for b := width; b >= count; b-- {
		t.iterCombBits(b-1, count-1, offset+(1<<b), callback)
	}
}
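
// The helper above enumerates index offsets for the horizontal moves described in UpdateBitmap:
// each offset is the sum of count distinct powers of two chosen from 2^1 through 2^width.
// For example, iterCombBits(2, 1, 0, cb) invokes cb(4) and then cb(2), and
// iterCombBits(2, 2, 0, cb) invokes cb(6).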

func (t *combinatorialTree) iterChildren(index int, do func(index int, parentIndex int, depth int) int) {
	curNode := &t.nodes[index]
	childIndex := index + 1
	curDepth := t.GetDepth(index)

	childCounts := len(t.nodeIDToLocalNodeID) - 1 - curNode.localNodeID
	if childCounts == 0 {
		return
	}

	childTreeNodeCnt := 1 << (childCounts - 1)
	for c := 0; c < childCounts; c++ {
		act := t.itering(childIndex, index, curDepth+1, do)
		if act == iterActionBreak {
			return
		}

		childIndex += childTreeNodeCnt
		childTreeNodeCnt >>= 1
	}
}

func (t *combinatorialTree) itering(index int, parentIndex int, depth int, do func(index int, parentIndex int, depth int) int) int {
	act := do(index, parentIndex, depth)
	if act == iterActionBreak {
		return act
	}
	if act == iterActionSkip {
		return iterActionNone
	}

	curNode := &t.nodes[index]
	childIndex := index + 1

	childCounts := len(t.nodeIDToLocalNodeID) - 1 - curNode.localNodeID
	if childCounts == 0 {
		return iterActionNone
	}

	childTreeNodeCnt := 1 << (childCounts - 1)
	for c := 0; c < childCounts; c++ {
		act = t.itering(childIndex, index, depth+1, do)
		if act == iterActionBreak {
			return act
		}

		childIndex += childTreeNodeCnt
		childTreeNodeCnt >>= 1
	}

	return iterActionNone
}

type combinatorialTreeNode struct {
	localNodeID  int
	parent       *combinatorialTreeNode
	blocksBitmap bitmap // union of the blocks held once this center is selected, i.e. by all centers on the path from the root
}

type bitmap uint64

func (b *bitmap) Set(index int, val bool) {
	if val {
		*b |= 1 << index
	} else {
		*b &= ^(1 << index)
	}
}

func (b *bitmap) Or(other *bitmap) {
	*b |= *other
}

func (b *bitmap) Weight() int {
	v := *b
	cnt := 0
	for v > 0 {
		cnt++
		v &= (v - 1) // clear the lowest set bit
	}
	return cnt
}
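
// Example of the bitmap type above: for a 6-block object where a node holds blocks 0, 2 and 4,
// the bitmap value is 0b010101 and Weight() returns 3:
//
//	var mp bitmap
//	mp.Set(0, true)
//	mp.Set(2, true)
//	mp.Set(4, true)
//	_ = mp.Weight() // 3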

func (t *CleanPinned) doOne(execCtx ExecuteContext, readerNodeIDs []cdssdk.NodeID, coorCli *coormq.Client, obj stgmod.ObjectDetail) (*coormq.ChangeObjectRedundancyEntry, error) {
	if len(obj.PinnedAt) == 0 && len(obj.Blocks) == 0 {
		return nil, nil
	}

	ctx := doingContext{
		execCtx:             execCtx,
		readerNodeIDs:       readerNodeIDs,
		nodesSortedByReader: make(map[cdssdk.NodeID][]nodeDist),
		nodeInfos:           make(map[cdssdk.NodeID]*model.Node),
		nodeBlockBitmaps:    make(map[cdssdk.NodeID]*bitmap),
	}

	err := t.getNodeInfos(&ctx, coorCli, obj)
	if err != nil {
		return nil, err
	}

	err = t.makeBlockList(&ctx, obj)
	if err != nil {
		return nil, err
	}
	if ctx.blockList == nil {
		return nil, nil
	}

	t.makeNodeBlockBitmap(&ctx)
	t.sortNodeByReaderDistance(&ctx)

	ctx.rmBlocks = make([]bool, len(ctx.blockList))
	ctx.inversedIndex = -1
	ctx.nodeCombTree = newCombinatorialTree(ctx.nodeBlockBitmaps)

	ctx.lastScore = t.calcScore(&ctx)
	ctx.maxScore = ctx.lastScore
	ctx.maxScoreRmBlocks = mylo.ArrayClone(ctx.rmBlocks)

	// temperature of the simulated annealing algorithm
	curTemp := ctx.lastScore
	// stopping temperature
	finalTemp := curTemp * 0.2
	// cooling rate
	coolingRate := 0.95
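
	// With these parameters the loop below runs while curTemp > 0.2 * initial score,
	// i.e. for about ln(0.2)/ln(0.95) ≈ 32 iterations whenever the initial score is positive.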
	for curTemp > finalTemp {
		ctx.inversedIndex = rand.Intn(len(ctx.rmBlocks))
		block := ctx.blockList[ctx.inversedIndex]
		ctx.rmBlocks[ctx.inversedIndex] = !ctx.rmBlocks[ctx.inversedIndex]
		ctx.nodeBlockBitmaps[block.NodeID].Set(block.Index, !ctx.rmBlocks[ctx.inversedIndex])
		ctx.nodeCombTree.UpdateBitmap(block.NodeID, *ctx.nodeBlockBitmaps[block.NodeID], ctx.minBlockTypeCount)

		curScore := t.calcScore(&ctx)

		dScore := curScore - ctx.lastScore
		// If the new plan scores lower than the old one and the probabilistic acceptance
		// check does not force us to keep it, revert the change.
		if curScore == 0 || (dScore < 0 && !t.alwaysAccept(curTemp, dScore, coolingRate)) {
			ctx.rmBlocks[ctx.inversedIndex] = !ctx.rmBlocks[ctx.inversedIndex]
			ctx.nodeBlockBitmaps[block.NodeID].Set(block.Index, !ctx.rmBlocks[ctx.inversedIndex])
			ctx.nodeCombTree.UpdateBitmap(block.NodeID, *ctx.nodeBlockBitmaps[block.NodeID], ctx.minBlockTypeCount)
			fmt.Printf("\n")
		} else {
			fmt.Printf(" accept!\n")
			ctx.lastScore = curScore
			if ctx.maxScore < curScore {
				ctx.maxScore = ctx.lastScore
				ctx.maxScoreRmBlocks = mylo.ArrayClone(ctx.rmBlocks)
			}
		}

		curTemp *= coolingRate
	}

	return t.applySolution(ctx, obj)
}

func (t *CleanPinned) getNodeInfos(ctx *doingContext, coorCli *coormq.Client, obj stgmod.ObjectDetail) error {
	var nodeIDs []cdssdk.NodeID
	for _, b := range obj.Blocks {
		nodeIDs = append(nodeIDs, b.NodeID)
	}
	nodeIDs = append(nodeIDs, obj.PinnedAt...)
	nodeIDs = append(nodeIDs, ctx.readerNodeIDs...)

	getNode, err := coorCli.GetNodes(coormq.NewGetNodes(lo.Uniq(nodeIDs)))
	if err != nil {
		return fmt.Errorf("requesting to coordinator: %w", err)
	}
	for _, n := range getNode.Nodes {
		n := n // copy before taking the address, so each map entry points to its own value
		ctx.nodeInfos[n.NodeID] = &n
	}

	return nil
}

func (t *CleanPinned) makeBlockList(ctx *doingContext, obj stgmod.ObjectDetail) error {
	blockCnt := 1
	minBlockCnt := 1
	switch red := obj.Object.Redundancy.(type) {
	case *cdssdk.NoneRedundancy:
		return nil
	case *cdssdk.RepRedundancy:
		blockCnt = 1
		minBlockCnt = 1
	case *cdssdk.ECRedundancy:
		blockCnt = red.N
		minBlockCnt = red.K
	default:
		return fmt.Errorf("unknown redundancy type: %v", myref.TypeOfValue(obj.Object.Redundancy))
	}

	blocksMap := make(map[cdssdk.NodeID][]objectBlock)

	// generate all shadow blocks first
	for _, pinned := range obj.PinnedAt {
		blocks := make([]objectBlock, 0, blockCnt)
		for i := 0; i < blockCnt; i++ {
			blocks = append(blocks, objectBlock{
				Index:     i,
				NodeID:    pinned,
				HasShadow: true,
			})
		}
		blocksMap[pinned] = blocks
	}

	// then fill in the actual blocks
	for _, b := range obj.Blocks {
		blocks := blocksMap[b.NodeID]
		has := false
		for i := range blocks {
			if blocks[i].Index == b.Index {
				blocks[i].HasEntity = true
				blocks[i].FileHash = b.FileHash
				has = true
				break
			}
		}
		if has {
			continue
		}

		blocks = append(blocks, objectBlock{
			Index:     b.Index,
			NodeID:    b.NodeID,
			HasEntity: true,
			FileHash:  b.FileHash,
		})
		blocksMap[b.NodeID] = blocks
	}

	var sortedBlocks []objectBlock
	for _, bs := range blocksMap {
		sortedBlocks = append(sortedBlocks, bs...)
	}
	sortedBlocks = mysort.Sort(sortedBlocks, func(left objectBlock, right objectBlock) int {
		d := left.NodeID - right.NodeID
		if d != 0 {
			return int(d)
		}
		return left.Index - right.Index
	})

	ctx.allBlockTypeCount = blockCnt
	ctx.minBlockTypeCount = minBlockCnt
	ctx.blockList = sortedBlocks
	return nil
}

func (t *CleanPinned) makeNodeBlockBitmap(ctx *doingContext) {
	for _, b := range ctx.blockList {
		mp, ok := ctx.nodeBlockBitmaps[b.NodeID]
		if !ok {
			nb := bitmap(0)
			mp = &nb
			ctx.nodeBlockBitmaps[b.NodeID] = mp
		}
		mp.Set(b.Index, true)
	}
}

func (t *CleanPinned) sortNodeByReaderDistance(ctx *doingContext) {
	for _, r := range ctx.readerNodeIDs {
		var nodeDists []nodeDist

		for n := range ctx.nodeBlockBitmaps {
			if r == n {
				// same node: distance counted as 0.1
				nodeDists = append(nodeDists, nodeDist{
					NodeID:   n,
					Distance: 0.1,
				})
			} else if ctx.nodeInfos[r].LocationID == ctx.nodeInfos[n].LocationID {
				// same region: distance counted as 1
				nodeDists = append(nodeDists, nodeDist{
					NodeID:   n,
					Distance: 1,
				})
			} else {
				// different regions: distance counted as 5
				nodeDists = append(nodeDists, nodeDist{
					NodeID:   n,
					Distance: 5,
				})
			}
		}

		ctx.nodesSortedByReader[r] = mysort.Sort(nodeDists, func(left, right nodeDist) int { return mysort.Cmp(left.Distance, right.Distance) })
	}
}
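
// The overall score computed below combines three metrics: the disaster tolerance dt is mapped
// to a factor dtSc (0 if dt < 1, 1.0 if 1 <= dt < 2, 1.5 if dt >= 2), and the score is
// dtSc / (spaceCost * accessCost). The score is 0 when the disaster tolerance or the access
// cost is 0, and it grows as the space and access costs shrink.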

func (t *CleanPinned) calcScore(ctx *doingContext) float64 {
	dt := t.calcDisasterTolerance(ctx)
	ac := t.calcMinAccessCost(ctx)
	sc := t.calcSpaceCost(ctx)

	dtSc := 1.0
	if dt < 1 {
		dtSc = 0
	} else if dt >= 2 {
		dtSc = 1.5
	}

	newSc := 0.0
	if dt == 0 || ac == 0 {
		newSc = 0
	} else {
		newSc = dtSc / (sc * ac)
	}

	fmt.Printf("solu: %v, cur: %v, dt: %v, ac: %v, sc: %v ", ctx.rmBlocks, newSc, dt, ac, sc)

	return newSc
}

// calcDisasterTolerance calculates the disaster tolerance of the current plan.
func (t *CleanPinned) calcDisasterTolerance(ctx *doingContext) float64 {
	if ctx.inversedIndex != -1 {
		node := ctx.blockList[ctx.inversedIndex]
		ctx.nodeCombTree.UpdateBitmap(node.NodeID, *ctx.nodeBlockBitmaps[node.NodeID], ctx.minBlockTypeCount)
	}
	return float64(len(ctx.nodeBlockBitmaps) - ctx.nodeCombTree.FindKBlocksMaxDepth(ctx.minBlockTypeCount))
}

// calcMinAccessCost calculates the minimum cost of accessing the object's data.
func (t *CleanPinned) calcMinAccessCost(ctx *doingContext) float64 {
	cost := math.MaxFloat64
	for _, reader := range ctx.readerNodeIDs {
		tarNodes := ctx.nodesSortedByReader[reader]
		gotBlocks := bitmap(0)
		thisCost := 0.0

		for _, tar := range tarNodes {
			tarNodeMp := ctx.nodeBlockBitmaps[tar.NodeID]

			// only the blocks that are still missing need to be fetched from the target node
			curWeight := gotBlocks.Weight()
			// the if below breaks out of the loop once k blocks have been collected,
			// so OR-ing in extra blocks does no harm
			gotBlocks.Or(tarNodeMp)
			willGetBlocks := mymath.Min(gotBlocks.Weight()-curWeight, ctx.minBlockTypeCount-curWeight)
			thisCost += float64(willGetBlocks) * float64(tar.Distance)

			if gotBlocks.Weight() >= ctx.minBlockTypeCount {
				break
			}
		}

		if gotBlocks.Weight() >= ctx.minBlockTypeCount {
			cost = math.Min(cost, thisCost)
		}
	}

	return cost
}
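
// Note: if no reader above can gather at least minBlockTypeCount blocks from the kept nodes,
// cost stays at math.MaxFloat64, which drives the overall score toward zero.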

// calcSpaceCost calculates the redundancy (space cost) of the current plan.
func (t *CleanPinned) calcSpaceCost(ctx *doingContext) float64 {
	blockCount := 0
	for i, b := range ctx.blockList {
		if ctx.rmBlocks[i] {
			continue
		}

		if b.HasEntity {
			blockCount++
		}
		if b.HasShadow {
			blockCount++
		}
	}
	// total number of blocks kept across all computing centers, divided by the minimum
	// number of blocks needed to reconstruct the object
	return float64(blockCount) / float64(ctx.minBlockTypeCount)
}

// alwaysAccept: even if the new plan scores lower than the old one, accept it with a certain probability.
func (t *CleanPinned) alwaysAccept(curTemp float64, dScore float64, coolingRate float64) bool {
	v := math.Exp(dScore / curTemp / coolingRate)
	fmt.Printf(" -- chance: %v, temp: %v", v, curTemp)
	return v > rand.Float64()
}
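
// The acceptance probability used above is exp(dScore / (curTemp * coolingRate)); dScore is
// negative when this is called, so worse plans become less and less likely to be accepted as
// the temperature drops.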

func (t *CleanPinned) applySolution(ctx doingContext, obj stgmod.ObjectDetail) (*coormq.ChangeObjectRedundancyEntry, error) {
	entry := coormq.ChangeObjectRedundancyEntry{
		ObjectID:   obj.Object.ObjectID,
		Redundancy: obj.Object.Redundancy,
	}

	fmt.Printf("final solu: %v, score: %v\n", ctx.maxScoreRmBlocks, ctx.maxScore)

	reconstruct := make(map[cdssdk.NodeID]*[]int)
	for i, f := range ctx.maxScoreRmBlocks {
		block := ctx.blockList[i]
		if !f {
			entry.Blocks = append(entry.Blocks, stgmod.ObjectBlock{
				ObjectID: obj.Object.ObjectID,
				Index:    block.Index,
				NodeID:   block.NodeID,
				FileHash: block.FileHash,
			})

			// if this block is a shadow block, it has to be reconstructed from the complete object
			if !block.HasEntity {
				re, ok := reconstruct[block.NodeID]
				if !ok {
					re = &[]int{}
					reconstruct[block.NodeID] = re
				}

				*re = append(*re, block.Index)
			}
		}
	}

	bld := reqbuilder.NewBuilder()
	for id := range reconstruct {
		bld.IPFS().Buzy(id)
	}

	mutex, err := bld.MutexLock(ctx.execCtx.Args.DistLock)
	if err != nil {
		return nil, fmt.Errorf("acquiring distlock: %w", err)
	}
	defer mutex.Unlock()

	if ecRed, ok := obj.Object.Redundancy.(*cdssdk.ECRedundancy); ok {
		for id, idxs := range reconstruct {
			bld := plans.NewPlanBuilder()

			agt := bld.AtAgent(*ctx.nodeInfos[id])
			strs := agt.IPFSRead(obj.Object.FileHash).ChunkedSplit(ecRed.ChunkSize, ecRed.K, true)
			ss := agt.ECReconstructAny(*ecRed, lo.Range(ecRed.K), *idxs, strs.Streams...)
			for i, s := range ss.Streams {
				s.IPFSWrite(fmt.Sprintf("%d", (*idxs)[i]))
			}

			plan, err := bld.Build()
			if err != nil {
				return nil, fmt.Errorf("building io switch plan: %w", err)
			}

			exec, err := plans.Execute(*plan)
			if err != nil {
				return nil, fmt.Errorf("executing io switch plan: %w", err)
			}

			ret, err := exec.Wait()
			if err != nil {
				return nil, fmt.Errorf("executing io switch plan: %w", err)
			}

			for k, v := range ret.ResultValues {
				idx, err := strconv.ParseInt(k, 10, 32)
				if err != nil {
					return nil, fmt.Errorf("parsing plan result: %w", err)
				}

				for i := range entry.Blocks {
					if entry.Blocks[i].NodeID == id && entry.Blocks[i].Index == int(idx) {
						entry.Blocks[i].FileHash = v.(string)
					}
				}
			}
		}
	} else if _, ok := obj.Object.Redundancy.(*cdssdk.RepRedundancy); ok {
		// rep mode does not split the object into blocks, so every block's FileHash is
		// the FileHash of the complete file
		for i := range entry.Blocks {
			entry.Blocks[i].FileHash = obj.Object.FileHash
		}
	}

	return &entry, nil
}

func init() {
	RegisterMessageConvertor(NewCleanPinned)
}

This project aims to turn inter-cloud storage into public infrastructure, so that individuals and enterprises can use efficient inter-cloud storage services with a low barrier to entry (just install the out-of-the-box inter-cloud storage client, with no need to worry about deploying any other components), while still allowing users to flexibly and conveniently customize the details of the inter-cloud storage functionality.