
redundancy_shrink.go 31 kB

package ticktock

import (
	"context"
	"fmt"
	"math"
	"math/rand"
	"sync"

	"github.com/samber/lo"
	"gitlink.org.cn/cloudream/common/pkgs/bitmap"
	"gitlink.org.cn/cloudream/common/pkgs/logger"
	"gitlink.org.cn/cloudream/common/utils/lo2"
	"gitlink.org.cn/cloudream/common/utils/math2"
	"gitlink.org.cn/cloudream/common/utils/sort2"
	"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
	clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
	"gitlink.org.cn/cloudream/jcs-pub/common/consts"
	"gitlink.org.cn/cloudream/jcs-pub/common/models/datamap"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder"
)

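// doRedundancyShrink shrinks the redundancy of the objects in a package: it uses simulated
// annealing to decide which replicas/blocks are worth keeping, builds and executes the IO
// plans that create any copies still missing, and returns the resulting metadata updates
// together with the system events describing the block changes.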
func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext, pkg clitypes.PackageDetail, objs []clitypes.ObjectDetail, reen *publock.Reentrant) ([]db.UpdatingObjectRedundancy, []datamap.SysEventBody, error) {
	log := logger.WithType[ChangeRedundancy]("TickTock")
	var readerStgIDs []clitypes.UserSpaceID
	for _, space := range execCtx.allUserSpaces {
		// TODO: consider making this threshold configurable
		if space.AccessAmount >= float64(pkg.ObjectCount/2) {
			readerStgIDs = append(readerStgIDs, space.UserSpace.UserSpace.UserSpaceID)
		}
	}
	// Only EC and replica objects are processed
	var ecObjects []clitypes.ObjectDetail
	var repObjects []clitypes.ObjectDetail
	for _, obj := range objs {
		if _, ok := obj.Object.Redundancy.(*clitypes.ECRedundancy); ok {
			ecObjects = append(ecObjects, obj)
		} else if _, ok := obj.Object.Redundancy.(*clitypes.RepRedundancy); ok {
			repObjects = append(repObjects, obj)
		}
	}
	planBld := exec.NewPlanBuilder()
	planningStgIDs := make(map[clitypes.UserSpaceID]bool)
	var sysEvents []datamap.SysEventBody
	// For replica objects, find the two nodes that hold the most object blocks overall, let them
	// represent the distribution of all replica object blocks, and run the annealing algorithm on that.
	var repObjectsUpdating []db.UpdatingObjectRedundancy
	repMostHubIDs := t.summaryRepObjectBlockNodes(repObjects)
	solu := t.startAnnealing(execCtx, readerStgIDs, annealingObject{
		totalBlockCount: 1,
		minBlockCnt:     1,
		pinnedAt:        repMostHubIDs,
		blocks:          nil,
	})
	for _, obj := range repObjects {
		repObjectsUpdating = append(repObjectsUpdating, t.makePlansForRepObject(execCtx, solu, obj, planBld, planningStgIDs))
		sysEvents = append(sysEvents, t.generateSysEventForRepObject(solu, obj)...)
	}
	// For EC objects, run the annealing algorithm separately for each object
	var ecObjectsUpdating []db.UpdatingObjectRedundancy
	for _, obj := range ecObjects {
		ecRed := obj.Object.Redundancy.(*clitypes.ECRedundancy)
		solu := t.startAnnealing(execCtx, readerStgIDs, annealingObject{
			totalBlockCount: ecRed.N,
			minBlockCnt:     ecRed.K,
			pinnedAt:        obj.PinnedAt,
			blocks:          obj.Blocks,
		})
		ecObjectsUpdating = append(ecObjectsUpdating, t.makePlansForECObject(execCtx, solu, obj, planBld, planningStgIDs))
		sysEvents = append(sysEvents, t.generateSysEventForECObject(solu, obj)...)
	}
	ioSwRets, err := t.executePlans(execCtx, planBld, planningStgIDs, reen)
	if err != nil {
		log.Warn(err.Error())
		return nil, nil, fmt.Errorf("execute plans: %w", err)
	}
	// Fill in the metadata-update commands according to the result of executing the plans
	for i := range ecObjectsUpdating {
		t.populateECObjectEntry(&ecObjectsUpdating[i], ecObjects[i], ioSwRets)
	}
	return append(repObjectsUpdating, ecObjectsUpdating...), sysEvents, nil
}

func (t *ChangeRedundancy) summaryRepObjectBlockNodes(objs []clitypes.ObjectDetail) []clitypes.UserSpaceID {
	type stgBlocks struct {
		UserSpaceID clitypes.UserSpaceID
		Count       int
	}
	stgBlocksMap := make(map[clitypes.UserSpaceID]*stgBlocks)
	for _, obj := range objs {
		cacheBlockStgs := make(map[clitypes.UserSpaceID]bool)
		for _, block := range obj.Blocks {
			if _, ok := stgBlocksMap[block.UserSpaceID]; !ok {
				stgBlocksMap[block.UserSpaceID] = &stgBlocks{
					UserSpaceID: block.UserSpaceID,
					Count:       0,
				}
			}
			stgBlocksMap[block.UserSpaceID].Count++
			cacheBlockStgs[block.UserSpaceID] = true
		}
		for _, hubID := range obj.PinnedAt {
			if cacheBlockStgs[hubID] {
				continue
			}
			if _, ok := stgBlocksMap[hubID]; !ok {
				stgBlocksMap[hubID] = &stgBlocks{
					UserSpaceID: hubID,
					Count:       0,
				}
			}
			stgBlocksMap[hubID].Count++
		}
	}
	stgs := lo.Values(stgBlocksMap)
	sort2.Sort(stgs, func(left *stgBlocks, right *stgBlocks) int {
		return right.Count - left.Count
	})
	// Select only the nodes whose block count exceeds half of the objects, but always keep at least two nodes
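	// For example: with 6 replica objects and sorted per-node counts [6, 5, 2, 1], len(objs)/2 = 3,
	// so the cut happens at index 2 and only the first two nodes are kept; with counts [6, 5, 4, 1],
	// three nodes would survive the cut.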
	for i := 2; i < len(stgs); i++ {
		if stgs[i].Count < len(objs)/2 {
			stgs = stgs[:i]
			break
		}
	}
	return lo.Map(stgs, func(item *stgBlocks, idx int) clitypes.UserSpaceID { return item.UserSpaceID })
}

type annealingState struct {
	ctx                   *changeRedundancyContext
	readerStgIDs          []clitypes.UserSpaceID                    // nodes that are likely to access this object in the near future
	stgsSortedByReader    map[clitypes.UserSpaceID][]stgDist        // for each potential reader node, the data-holding nodes sorted by distance
	object                annealingObject                           // the object being annealed
	blockList             []objectBlock                             // block distribution after sorting
	stgBlockBitmaps       map[clitypes.UserSpaceID]*bitmap.Bitmap64 // bitmap of the blocks each node holds
	stgCombTree           combinatorialTree                         // combination tree of nodes, used to speed up computing the disaster tolerance
	maxScore              float64                                   // maximum score seen during the search
	maxScoreRmBlocks      []bool                                    // removal plan corresponding to the maximum score
	rmBlocks              []bool                                    // current removal plan
	inversedIndex         int                                       // which flag was flipped to derive the current plan from the previous one
	lastDisasterTolerance float64                                   // disaster tolerance of the previous plan
	lastSpaceCost         float64                                   // space cost (redundancy) of the previous plan
	lastMinAccessCost     float64                                   // minimum access cost of the previous plan
	lastScore             float64                                   // score of the previous plan
}

type objectBlock struct {
	Index       int
	UserSpaceID clitypes.UserSpaceID
	HasEntity   bool              // the node holds the actual file data of this block
	HasShadow   bool              // if the node holds the complete file data, it is treated as holding every block; such blocks are called shadow blocks
	FileHash    clitypes.FileHash // only set when the node holds the actual file data of the block
	Size        int64             // block size
}

type stgDist struct {
	UserSpaceID clitypes.UserSpaceID
	Distance    float64
}

type combinatorialTree struct {
	nodes             []combinatorialTreeNode
	blocksMaps        map[int]bitmap.Bitmap64
	stgIDToLocalStgID map[clitypes.UserSpaceID]int
	localStgIDToStgID []clitypes.UserSpaceID
}

type annealingObject struct {
	totalBlockCount int
	minBlockCnt     int
	pinnedAt        []clitypes.UserSpaceID
	blocks          []clitypes.ObjectBlock
}

const (
	iterActionNone  = 0
	iterActionSkip  = 1
	iterActionBreak = 2
)

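// newCombinatorialTree builds a tree that enumerates every combination of the given storages:
// with n storages the tree has 2^n nodes, and each node's bitmap is the union of the block
// bitmaps of the storages on the path from the root to that node. It is used to speed up the
// disaster-tolerance computation.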
func newCombinatorialTree(stgBlocksMaps map[clitypes.UserSpaceID]*bitmap.Bitmap64) combinatorialTree {
	tree := combinatorialTree{
		blocksMaps:        make(map[int]bitmap.Bitmap64),
		stgIDToLocalStgID: make(map[clitypes.UserSpaceID]int),
	}
	tree.nodes = make([]combinatorialTreeNode, (1 << len(stgBlocksMaps)))
	for id, mp := range stgBlocksMaps {
		tree.stgIDToLocalStgID[id] = len(tree.localStgIDToStgID)
		tree.blocksMaps[len(tree.localStgIDToStgID)] = *mp
		tree.localStgIDToStgID = append(tree.localStgIDToStgID, id)
	}
	tree.nodes[0].localHubID = -1
	index := 1
	tree.initNode(0, &tree.nodes[0], &index)
	return tree
}

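// initNode recursively fills in the descendants of parent, assigning consecutive indexes in
// DFS pre-order, so each node's children only use local IDs larger than its own.
// For example, with 3 storages the 8 nodes are laid out as:
//
//	index:    0   1    2      3        4      5    6      7
//	combo:    {}  {0}  {0,1}  {0,1,2}  {0,2}  {1}  {1,2}  {2}
//	depth:    0   1    2      3        2      1    2      1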
func (t *combinatorialTree) initNode(minAvaiLocalHubID int, parent *combinatorialTreeNode, index *int) {
	for i := minAvaiLocalHubID; i < len(t.stgIDToLocalStgID); i++ {
		curIndex := *index
		*index++
		bitMp := t.blocksMaps[i]
		bitMp.Or(&parent.blocksBitmap)
		t.nodes[curIndex] = combinatorialTreeNode{
			localHubID:   i,
			parent:       parent,
			blocksBitmap: bitMp,
		}
		t.initNode(i+1, &t.nodes[curIndex], index)
	}
}

// GetDepth returns the depth of the node at the given index.
func (t *combinatorialTree) GetDepth(index int) int {
	depth := 0
	// Repeatedly determine which subtree the node falls in. From left to right the subtree sizes
	// follow the pattern 8, 4, 2, ..., which gives the index range of each subtree.
	subTreeCount := 1 << len(t.stgIDToLocalStgID)
	for index > 0 {
		if index < subTreeCount {
			// Once a subtree is located, increase the depth by 1 and descend into it, repeating the same method.
			// After entering a subtree, subtract 1 from the index to skip the subtree's root node.
			index--
			depth++
		} else {
			// If the index is not within this subtree, subtract the subtree's node count from it,
			// so every round can apply the same logic to a smaller tree.
			index -= subTreeCount
		}
		subTreeCount >>= 1
	}
	return depth
}

// UpdateBitmap updates the block-distribution bitmap of one computing-center node, and also
// updates all descendants of the corresponding tree nodes.
// If a node already holds K blocks by the time it is updated, its children are not updated further.
func (t *combinatorialTree) UpdateBitmap(stgID clitypes.UserSpaceID, mp bitmap.Bitmap64, k int) {
	t.blocksMaps[t.stgIDToLocalStgID[stgID]] = mp
	// First define two ways of moving between tree nodes:
	//  1. Vertical move (depth increases): move from a node to its leftmost child. Each step adds 1 to the index.
	//  2. Horizontal move: move from a node to its right sibling. Each step adds 8, 4, 2, ... to the index, depending on the depth.
	// The local ID starts at 0; adding 1 to it gives the number of moving steps.
	// Split the steps into parts, distribute them over the two kinds of moves in every possible combination,
	// and make sure the first move is at least one vertical move; every node reached this way corresponds
	// to the same computing-center node.
	steps := t.stgIDToLocalStgID[stgID] + 1
	for d := 1; d <= steps; d++ {
		t.iterCombBits(len(t.stgIDToLocalStgID)-1, steps-d, 0, func(i int) {
			index := d + i
			node := &t.nodes[index]
			newMp := t.blocksMaps[node.localHubID]
			newMp.Or(&node.parent.blocksBitmap)
			node.blocksBitmap = newMp
			if newMp.Weight() >= k {
				return
			}
			t.iterChildren(index, func(index, parentIndex, depth int) int {
				curNode := &t.nodes[index]
				parentNode := t.nodes[parentIndex]
				newMp := t.blocksMaps[curNode.localHubID]
				newMp.Or(&parentNode.blocksBitmap)
				curNode.blocksBitmap = newMp
				if newMp.Weight() >= k {
					return iterActionSkip
				}
				return iterActionNone
			})
		})
	}
}

// FindKBlocksMaxDepth traverses the tree and finds the maximum depth of a tree node that
// holds at least K blocks.
func (t *combinatorialTree) FindKBlocksMaxDepth(k int) int {
	maxDepth := -1
	t.iterChildren(0, func(index, parentIndex, depth int) int {
		if t.nodes[index].blocksBitmap.Weight() >= k {
			if maxDepth < depth {
				maxDepth = depth
			}
			return iterActionSkip
		}
		// If a leaf is reached without collecting K blocks, assume that at least one more node is needed, i.e. depth+1.
		// Because the traversal is depth-first, by the time this leaf is visited every combination of this leaf plus
		// one more node has already been searched, so using the current leaf depth+1 as the result of this branch is
		// enough; even if adding any single node would still not reach K blocks, the same reasoning applies recursively
		// to adding two or more nodes to the current leaf.
		if t.nodes[index].localHubID == len(t.stgIDToLocalStgID)-1 {
			if maxDepth < depth+1 {
				maxDepth = depth + 1
			}
		}
		return iterActionNone
	})
	if maxDepth == -1 || maxDepth > len(t.stgIDToLocalStgID) {
		return len(t.stgIDToLocalStgID)
	}
	return maxDepth
}

func (t *combinatorialTree) iterCombBits(width int, count int, offset int, callback func(int)) {
	if count == 0 {
		callback(offset)
		return
	}
	for b := width; b >= count; b-- {
		t.iterCombBits(b-1, count-1, offset+(1<<b), callback)
	}
}

func (t *combinatorialTree) iterChildren(index int, do func(index int, parentIndex int, depth int) int) {
	curNode := &t.nodes[index]
	childIndex := index + 1
	curDepth := t.GetDepth(index)
	childCounts := len(t.stgIDToLocalStgID) - 1 - curNode.localHubID
	if childCounts == 0 {
		return
	}
	childTreeNodeCnt := 1 << (childCounts - 1)
	for c := 0; c < childCounts; c++ {
		act := t.itering(childIndex, index, curDepth+1, do)
		if act == iterActionBreak {
			return
		}
		childIndex += childTreeNodeCnt
		childTreeNodeCnt >>= 1
	}
}

func (t *combinatorialTree) itering(index int, parentIndex int, depth int, do func(index int, parentIndex int, depth int) int) int {
	act := do(index, parentIndex, depth)
	if act == iterActionBreak {
		return act
	}
	if act == iterActionSkip {
		return iterActionNone
	}
	curNode := &t.nodes[index]
	childIndex := index + 1
	childCounts := len(t.stgIDToLocalStgID) - 1 - curNode.localHubID
	if childCounts == 0 {
		return iterActionNone
	}
	childTreeNodeCnt := 1 << (childCounts - 1)
	for c := 0; c < childCounts; c++ {
		act = t.itering(childIndex, index, depth+1, do)
		if act == iterActionBreak {
			return act
		}
		childIndex += childTreeNodeCnt
		childTreeNodeCnt >>= 1
	}
	return iterActionNone
}

type combinatorialTreeNode struct {
	localHubID   int
	parent       *combinatorialTreeNode
	blocksBitmap bitmap.Bitmap64 // the distinct blocks held in total by all chosen centers once this center is chosen
}

type annealingSolution struct {
	blockList         []objectBlock // block distribution across all nodes
	rmBlocks          []bool        // which blocks to remove
	disasterTolerance float64       // disaster tolerance of this plan
	spaceCost         float64       // space cost (redundancy) of this plan
	minAccessCost     float64       // minimum access cost of this plan
}

func (t *ChangeRedundancy) startAnnealing(ctx *changeRedundancyContext, readerStgIDs []clitypes.UserSpaceID, object annealingObject) annealingSolution {
	state := &annealingState{
		ctx:                ctx,
		readerStgIDs:       readerStgIDs,
		stgsSortedByReader: make(map[clitypes.UserSpaceID][]stgDist),
		object:             object,
		stgBlockBitmaps:    make(map[clitypes.UserSpaceID]*bitmap.Bitmap64),
	}
	t.initBlockList(state)
	if state.blockList == nil {
		return annealingSolution{}
	}
	t.initNodeBlockBitmap(state)
	t.sortNodeByReaderDistance(state)
	state.rmBlocks = make([]bool, len(state.blockList))
	state.inversedIndex = -1
	state.stgCombTree = newCombinatorialTree(state.stgBlockBitmaps)
	state.lastScore = t.calcScore(state)
	state.maxScore = state.lastScore
	state.maxScoreRmBlocks = lo2.ArrayClone(state.rmBlocks)
	// temperature of the simulated annealing algorithm
	curTemp := state.lastScore
	// final temperature
	finalTemp := curTemp * 0.2
	// cooling rate
	coolingRate := 0.95
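	// With the initial temperature equal to the starting score, finalTemp = 0.2*curTemp and
	// coolingRate = 0.95, the loop below runs for about ln(0.2)/ln(0.95) ≈ 32 iterations
	// (and not at all when the starting score is 0).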
	for curTemp > finalTemp {
		state.inversedIndex = rand.Intn(len(state.rmBlocks))
		block := state.blockList[state.inversedIndex]
		state.rmBlocks[state.inversedIndex] = !state.rmBlocks[state.inversedIndex]
		state.stgBlockBitmaps[block.UserSpaceID].Set(block.Index, !state.rmBlocks[state.inversedIndex])
		state.stgCombTree.UpdateBitmap(block.UserSpaceID, *state.stgBlockBitmaps[block.UserSpaceID], state.object.minBlockCnt)
		curScore := t.calcScore(state)
		dScore := curScore - state.lastScore
		// If the new plan scores lower than the old one and we are not forced to accept it, revert the change
		if curScore == 0 || (dScore < 0 && !t.alwaysAccept(curTemp, dScore, coolingRate)) {
			state.rmBlocks[state.inversedIndex] = !state.rmBlocks[state.inversedIndex]
			state.stgBlockBitmaps[block.UserSpaceID].Set(block.Index, !state.rmBlocks[state.inversedIndex])
			state.stgCombTree.UpdateBitmap(block.UserSpaceID, *state.stgBlockBitmaps[block.UserSpaceID], state.object.minBlockCnt)
			// fmt.Printf("\n")
		} else {
			// fmt.Printf(" accept!\n")
			state.lastScore = curScore
			if state.maxScore < curScore {
				state.maxScore = state.lastScore
				state.maxScoreRmBlocks = lo2.ArrayClone(state.rmBlocks)
			}
		}
		curTemp *= coolingRate
	}
	// fmt.Printf("final: %v\n", state.maxScoreRmBlocks)
	return annealingSolution{
		blockList:         state.blockList,
		rmBlocks:          state.maxScoreRmBlocks,
		disasterTolerance: state.lastDisasterTolerance,
		spaceCost:         state.lastSpaceCost,
		minAccessCost:     state.lastMinAccessCost,
	}
}

func (t *ChangeRedundancy) initBlockList(ctx *annealingState) {
	blocksMap := make(map[clitypes.UserSpaceID][]objectBlock)
	// First generate all shadow blocks
	for _, pinned := range ctx.object.pinnedAt {
		blocks := make([]objectBlock, 0, ctx.object.totalBlockCount)
		for i := 0; i < ctx.object.totalBlockCount; i++ {
			blocks = append(blocks, objectBlock{
				Index:       i,
				UserSpaceID: pinned,
				HasShadow:   true,
			})
		}
		blocksMap[pinned] = blocks
	}
	// Then fill in the actual blocks
	for _, b := range ctx.object.blocks {
		blocks := blocksMap[b.UserSpaceID]
		has := false
		for i := range blocks {
			if blocks[i].Index == b.Index {
				blocks[i].HasEntity = true
				blocks[i].FileHash = b.FileHash
				has = true
				break
			}
		}
		if has {
			continue
		}
		blocks = append(blocks, objectBlock{
			Index:       b.Index,
			UserSpaceID: b.UserSpaceID,
			HasEntity:   true,
			FileHash:    b.FileHash,
			Size:        b.Size,
		})
		blocksMap[b.UserSpaceID] = blocks
	}
	var sortedBlocks []objectBlock
	for _, bs := range blocksMap {
		sortedBlocks = append(sortedBlocks, bs...)
	}
	sortedBlocks = sort2.Sort(sortedBlocks, func(left objectBlock, right objectBlock) int {
		d := left.UserSpaceID - right.UserSpaceID
		if d != 0 {
			return int(d)
		}
		return left.Index - right.Index
	})
	ctx.blockList = sortedBlocks
}

func (t *ChangeRedundancy) initNodeBlockBitmap(state *annealingState) {
	for _, b := range state.blockList {
		mp, ok := state.stgBlockBitmaps[b.UserSpaceID]
		if !ok {
			nb := bitmap.Bitmap64(0)
			mp = &nb
			state.stgBlockBitmaps[b.UserSpaceID] = mp
		}
		mp.Set(b.Index, true)
	}
}

func (t *ChangeRedundancy) sortNodeByReaderDistance(state *annealingState) {
	for _, r := range state.readerStgIDs {
		var nodeDists []stgDist
		for n := range state.stgBlockBitmaps {
			if r == n {
				// distance is treated as 0.1 for the same node
				nodeDists = append(nodeDists, stgDist{
					UserSpaceID: n,
					Distance:    consts.StorageDistanceSameStorage,
				})
			} else if state.ctx.allUserSpaces[r].UserSpace.UserSpace.Storage.GetLocation() == state.ctx.allUserSpaces[n].UserSpace.UserSpace.Storage.GetLocation() {
				// distance is treated as 1 for the same location
				nodeDists = append(nodeDists, stgDist{
					UserSpaceID: n,
					Distance:    consts.StorageDistanceSameLocation,
				})
			} else {
				// distance is treated as 5 for different locations
				nodeDists = append(nodeDists, stgDist{
					UserSpaceID: n,
					Distance:    consts.StorageDistanceOther,
				})
			}
		}
		state.stgsSortedByReader[r] = sort2.Sort(nodeDists, func(left, right stgDist) int { return sort2.Cmp(left.Distance, right.Distance) })
	}
}

func (t *ChangeRedundancy) calcScore(state *annealingState) float64 {
	dt := t.calcDisasterTolerance(state)
	ac := t.calcMinAccessCost(state)
	sc := t.calcSpaceCost(state)
	state.lastDisasterTolerance = dt
	state.lastMinAccessCost = ac
	state.lastSpaceCost = sc
	dtSc := 1.0
	if dt < 1 {
		dtSc = 0
	} else if dt >= 2 {
		dtSc = 1.5
	}
	newSc := 0.0
	if dt == 0 || ac == 0 {
		newSc = 0
	} else {
		newSc = dtSc / (sc * ac)
	}
	// fmt.Printf("solu: %v, cur: %v, dt: %v, ac: %v, sc: %v \n", state.rmBlocks, newSc, dt, ac, sc)
	return newSc
}

// calcDisasterTolerance computes the disaster tolerance of the current plan.
func (t *ChangeRedundancy) calcDisasterTolerance(state *annealingState) float64 {
	if state.inversedIndex != -1 {
		node := state.blockList[state.inversedIndex]
		state.stgCombTree.UpdateBitmap(node.UserSpaceID, *state.stgBlockBitmaps[node.UserSpaceID], state.object.minBlockCnt)
	}
	return float64(len(state.stgBlockBitmaps) - state.stgCombTree.FindKBlocksMaxDepth(state.object.minBlockCnt))
}

// calcMinAccessCost computes the minimum cost of reading the data.
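// For example, with the distance values used above (0.1 for the same space, 1 for the same location):
// if a reader needs k = 3 blocks, its own space holds two of them and a space in the same location
// holds the third, the cost for that reader is 2*0.1 + 1*1 = 1.2; the cheapest reader that can
// gather at least k blocks determines the returned value.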
func (t *ChangeRedundancy) calcMinAccessCost(state *annealingState) float64 {
	cost := math.MaxFloat64
	for _, reader := range state.readerStgIDs {
		tarNodes := state.stgsSortedByReader[reader]
		gotBlocks := bitmap.Bitmap64(0)
		thisCost := 0.0
		for _, tar := range tarNodes {
			tarNodeMp := state.stgBlockBitmaps[tar.UserSpaceID]
			// Only the missing blocks need to be fetched from the target node
			curWeight := gotBlocks.Weight()
			// The if below breaks out of the loop once k blocks are obtained, so OR-ing in extra blocks does no harm
			gotBlocks.Or(tarNodeMp)
			// But when counting the read cost, do not over-count: at most the cost of reading k blocks is charged
			willGetBlocks := math2.Min(gotBlocks.Weight()-curWeight, state.object.minBlockCnt-curWeight)
			thisCost += float64(willGetBlocks) * float64(tar.Distance)
			if gotBlocks.Weight() >= state.object.minBlockCnt {
				break
			}
		}
		if gotBlocks.Weight() >= state.object.minBlockCnt {
			cost = math.Min(cost, thisCost)
		}
	}
	return cost
}

// calcSpaceCost computes the space cost (redundancy) of the current plan.
func (t *ChangeRedundancy) calcSpaceCost(ctx *annealingState) float64 {
	blockCount := 0
	for i, b := range ctx.blockList {
		if ctx.rmBlocks[i] {
			continue
		}
		if b.HasEntity {
			blockCount++
		}
		if b.HasShadow {
			blockCount++
		}
	}
	// total number of blocks held across all computing centers / the number of blocks the object is split into
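	// e.g. an EC(k=2) object that keeps 3 entity blocks has a space cost of 3/2 = 1.5,
	// i.e. roughly 1.5 times the size of the original object is kept in storage.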
	return float64(blockCount) / float64(ctx.object.minBlockCnt)
}

// alwaysAccept decides whether to accept a new plan even though it scores lower than the old one,
// with a certain probability.
func (t *ChangeRedundancy) alwaysAccept(curTemp float64, dScore float64, coolingRate float64) bool {
	v := math.Exp(dScore / curTemp / coolingRate)
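	// Since dScore < 0 whenever this is called, v = exp(dScore/(curTemp*coolingRate)) lies in (0, 1)
	// and shrinks as the temperature drops, so worse plans are accepted less and less often as the
	// search cools down (a Metropolis-style acceptance rule).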
	// fmt.Printf(" -- chance: %v, temp: %v", v, curTemp)
	return v > rand.Float64()
}

func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, solu annealingSolution, obj clitypes.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[clitypes.UserSpaceID]bool) db.UpdatingObjectRedundancy {
	entry := db.UpdatingObjectRedundancy{
		ObjectID:   obj.Object.ObjectID,
		FileHash:   obj.Object.FileHash,
		Size:       obj.Object.Size,
		Redundancy: obj.Object.Redundancy,
	}
	ft := ioswitch2.NewFromTo()
	fromStg := ctx.allUserSpaces[obj.Blocks[0].UserSpaceID].UserSpace
	ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *fromStg, ioswitch2.RawStream()))
	for i, f := range solu.rmBlocks {
		hasCache := lo.ContainsBy(obj.Blocks, func(b clitypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) ||
			lo.ContainsBy(obj.PinnedAt, func(n clitypes.UserSpaceID) bool { return n == solu.blockList[i].UserSpaceID })
		willRm := f
		if !willRm {
			// If a node that should keep a replica after annealing does not have one, create a replica on that node
			if !hasCache {
				toStg := ctx.allUserSpaces[solu.blockList[i].UserSpaceID].UserSpace
				ft.AddTo(ioswitch2.NewToShardStore(*toStg, ioswitch2.RawStream(), fmt.Sprintf("%d.0", obj.Object.ObjectID)))
				planningHubIDs[solu.blockList[i].UserSpaceID] = true
			}
			entry.Blocks = append(entry.Blocks, clitypes.ObjectBlock{
				ObjectID:    obj.Object.ObjectID,
				Index:       solu.blockList[i].Index,
				UserSpaceID: solu.blockList[i].UserSpaceID,
				FileHash:    obj.Object.FileHash,
				Size:        solu.blockList[i].Size,
			})
		}
	}
	err := parser.Parse(ft, planBld)
	if err != nil {
		// TODO: error handling
	}
	return entry
}

func (t *ChangeRedundancy) generateSysEventForRepObject(solu annealingSolution, obj clitypes.ObjectDetail) []datamap.SysEventBody {
	var blockChgs []datamap.BlockChange
	for i, f := range solu.rmBlocks {
		hasCache := lo.ContainsBy(obj.Blocks, func(b clitypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) ||
			lo.ContainsBy(obj.PinnedAt, func(n clitypes.UserSpaceID) bool { return n == solu.blockList[i].UserSpaceID })
		willRm := f
		if !willRm {
			// If a node that should keep a replica after annealing does not have one, create a replica on that node
			if !hasCache {
				blockChgs = append(blockChgs, &datamap.BlockChangeClone{
					BlockType:         datamap.BlockTypeRaw,
					SourceUserSpaceID: obj.Blocks[0].UserSpaceID,
					TargetUserSpaceID: solu.blockList[i].UserSpaceID,
				})
			}
		} else {
			blockChgs = append(blockChgs, &datamap.BlockChangeDeleted{
				Index:       0,
				UserSpaceID: solu.blockList[i].UserSpaceID,
			})
		}
	}
	transEvt := &datamap.BodyBlockTransfer{
		ObjectID:     obj.Object.ObjectID,
		PackageID:    obj.Object.PackageID,
		BlockChanges: blockChgs,
	}
	var blockDist []datamap.BlockDistributionObjectInfo
	for i, f := range solu.rmBlocks {
		if !f {
			blockDist = append(blockDist, datamap.BlockDistributionObjectInfo{
				BlockType:   datamap.BlockTypeRaw,
				Index:       0,
				UserSpaceID: solu.blockList[i].UserSpaceID,
			})
		}
	}
	distEvt := &datamap.BodyBlockDistribution{
		ObjectID:          obj.Object.ObjectID,
		PackageID:         obj.Object.PackageID,
		Path:              obj.Object.Path,
		Size:              obj.Object.Size,
		FileHash:          obj.Object.FileHash,
		FaultTolerance:    solu.disasterTolerance,
		Redundancy:        solu.spaceCost,
		AvgAccessCost:     0, // TODO: compute the average access cost from day-to-day access statistics
		BlockDistribution: blockDist,
		// TODO: the transfer amount is hard to compute
	}
	return []datamap.SysEventBody{transEvt, distEvt}
}

func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, solu annealingSolution, obj clitypes.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[clitypes.UserSpaceID]bool) db.UpdatingObjectRedundancy {
	entry := db.UpdatingObjectRedundancy{
		ObjectID:   obj.Object.ObjectID,
		FileHash:   obj.Object.FileHash,
		Size:       obj.Object.Size,
		Redundancy: obj.Object.Redundancy,
	}
	reconstruct := make(map[clitypes.UserSpaceID]*[]int)
	for i, f := range solu.rmBlocks {
		block := solu.blockList[i]
		if !f {
			entry.Blocks = append(entry.Blocks, clitypes.ObjectBlock{
				ObjectID:    obj.Object.ObjectID,
				Index:       block.Index,
				UserSpaceID: block.UserSpaceID,
				FileHash:    block.FileHash,
				Size:        block.Size,
			})
			// If this block is a shadow block, it must be reconstructed from the complete object
			if !block.HasEntity {
				re, ok := reconstruct[block.UserSpaceID]
				if !ok {
					re = &[]int{}
					reconstruct[block.UserSpaceID] = re
				}
				*re = append(*re, block.Index)
			}
		}
	}
	ecRed := obj.Object.Redundancy.(*clitypes.ECRedundancy)
	for id, idxs := range reconstruct {
		// Generate the execution plan for each node one by one; if they were all generated in a single plan,
		// there would be no guarantee that the blocks on each node are built from that node's own replica
		ft := ioswitch2.NewFromTo()
		ft.ECParam = ecRed
		ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *ctx.allUserSpaces[id].UserSpace, ioswitch2.RawStream()))
		for _, i := range *idxs {
			ft.AddTo(ioswitch2.NewToShardStore(*ctx.allUserSpaces[id].UserSpace, ioswitch2.ECStream(i), fmt.Sprintf("%d.%d", obj.Object.ObjectID, i)))
		}
		err := parser.Parse(ft, planBld)
		if err != nil {
			// TODO: error handling
			continue
		}
		planningHubIDs[id] = true
	}
	return entry
}

func (t *ChangeRedundancy) generateSysEventForECObject(solu annealingSolution, obj clitypes.ObjectDetail) []datamap.SysEventBody {
	var blockChgs []datamap.BlockChange
	reconstruct := make(map[clitypes.UserSpaceID]*[]int)
	for i, f := range solu.rmBlocks {
		block := solu.blockList[i]
		if !f {
			// If this block is a shadow block, it must be reconstructed from the complete object
			if !block.HasEntity {
				re, ok := reconstruct[block.UserSpaceID]
				if !ok {
					re = &[]int{}
					reconstruct[block.UserSpaceID] = re
				}
				*re = append(*re, block.Index)
			}
		} else {
			blockChgs = append(blockChgs, &datamap.BlockChangeDeleted{
				Index:       block.Index,
				UserSpaceID: block.UserSpaceID,
			})
		}
	}
	// Since every block that needs reconstruction is built from a replica in the same center,
	// a BlockChangeEnDecode is generated for each center
	for id, idxs := range reconstruct {
		var tarBlocks []datamap.Block
		for _, idx := range *idxs {
			tarBlocks = append(tarBlocks, datamap.Block{
				BlockType:   datamap.BlockTypeEC,
				Index:       idx,
				UserSpaceID: id,
			})
		}
		blockChgs = append(blockChgs, &datamap.BlockChangeEnDecode{
			SourceBlocks: []datamap.Block{{
				BlockType:   datamap.BlockTypeRaw,
				Index:       0,
				UserSpaceID: id, // the original object of the shadow block is on the same node
			}},
			TargetBlocks: tarBlocks,
			// transfer amount is 0
		})
	}
	transEvt := &datamap.BodyBlockTransfer{
		ObjectID:     obj.Object.ObjectID,
		PackageID:    obj.Object.PackageID,
		BlockChanges: blockChgs,
	}
	var blockDist []datamap.BlockDistributionObjectInfo
	for i, f := range solu.rmBlocks {
		if !f {
			blockDist = append(blockDist, datamap.BlockDistributionObjectInfo{
				BlockType:   datamap.BlockTypeEC,
				Index:       solu.blockList[i].Index,
				UserSpaceID: solu.blockList[i].UserSpaceID,
			})
		}
	}
	distEvt := &datamap.BodyBlockDistribution{
		ObjectID:          obj.Object.ObjectID,
		PackageID:         obj.Object.PackageID,
		Path:              obj.Object.Path,
		Size:              obj.Object.Size,
		FileHash:          obj.Object.FileHash,
		FaultTolerance:    solu.disasterTolerance,
		Redundancy:        solu.spaceCost,
		AvgAccessCost:     0, // TODO: compute the average access cost from day-to-day access statistics
		BlockDistribution: blockDist,
		// TODO: the transfer amount is hard to compute
	}
	return []datamap.SysEventBody{transEvt, distEvt}
}

func (t *ChangeRedundancy) executePlans(ctx *changeRedundancyContext, planBld *exec.PlanBuilder, planningSpaceIDs map[clitypes.UserSpaceID]bool, reen *publock.Reentrant) (exec.PlanResult, error) {
	reqBlder := reqbuilder.NewBuilder()
	for id := range planningSpaceIDs {
		reqBlder.UserSpace().Buzy(id)
	}
	err := reen.Lock(reqBlder.Build())
	if err != nil {
		return exec.PlanResult{}, fmt.Errorf("locking shard resources: %w", err)
	}
	wg := sync.WaitGroup{}
	// Execute the IO plan
	var ioSwRets exec.PlanResult
	var ioSwErr error
	wg.Add(1)
	go func() {
		defer wg.Done()
		execCtx := exec.NewExecContext()
		exec.SetValueByType(execCtx, ctx.ticktock.stgPool)
		ret, err := planBld.Execute(execCtx).Wait(context.TODO())
		if err != nil {
			ioSwErr = fmt.Errorf("executing io switch plan: %w", err)
			return
		}
		ioSwRets = ret
	}()
	wg.Wait()
	if ioSwErr != nil {
		return exec.PlanResult{}, ioSwErr
	}
	return ioSwRets, nil
}

func (t *ChangeRedundancy) populateECObjectEntry(entry *db.UpdatingObjectRedundancy, obj clitypes.ObjectDetail, ioRets exec.PlanResult) {
	for i := range entry.Blocks {
		if entry.Blocks[i].FileHash != "" {
			continue
		}
		key := fmt.Sprintf("%d.%d", obj.Object.ObjectID, entry.Blocks[i].Index)
		// the key should always exist
		r := ioRets.Get(key).(*ops2.FileInfoValue)
		entry.Blocks[i].FileHash = r.Hash
		entry.Blocks[i].Size = r.Size
	}
}

This project aims to turn inter-cloud storage into a public infrastructure, so that individuals and enterprises can use efficient inter-cloud storage services with a low barrier to entry (just install the out-of-the-box inter-cloud storage client, with no need to worry about deploying other components), while also letting users customize the details of the inter-cloud storage features flexibly and conveniently.