You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

check_rep_count.go 7.3 kB

2 years ago
2 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215
  1. package event
  2. import (
  3. "fmt"
  4. "math"
  5. "github.com/samber/lo"
  6. "gitlink.org.cn/cloudream/common/pkgs/logger"
  7. mymath "gitlink.org.cn/cloudream/common/utils/math"
  8. mysort "gitlink.org.cn/cloudream/common/utils/sort"
  9. "gitlink.org.cn/cloudream/storage/common/consts"
  10. "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
  11. "gitlink.org.cn/cloudream/storage/scanner/internal/config"
  12. "gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
  13. scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
  14. )
// CheckRepCount is a scanner event that checks whether the replica count of
// each listed file hash matches its configured expectation. It embeds the MQ
// event payload (carrying FileHashes) from the scanner event package.
type CheckRepCount struct {
	*scevt.CheckRepCount
}
  18. func NewCheckRepCount(fileHashes []string) *CheckRepCount {
  19. return &CheckRepCount{
  20. CheckRepCount: scevt.NewCheckRepCount(fileHashes),
  21. }
  22. }
  23. func (t *CheckRepCount) TryMerge(other Event) bool {
  24. event, ok := other.(*CheckRepCount)
  25. if !ok {
  26. return false
  27. }
  28. t.FileHashes = lo.Union(t.FileHashes, event.FileHashes)
  29. return true
  30. }
// Execute acquires the metadata locks required for replica inspection, checks
// the replica count of every file hash carried by this event, and posts an
// AgentCheckCache event for each node whose cache records were modified.
func (t *CheckRepCount) Execute(execCtx ExecuteContext) {
	log := logger.WithType[CheckRepCount]("Event")
	log.Debugf("begin with %v", logger.FormatStruct(t))
	defer log.Debugf("end")

	mutex, err := reqbuilder.NewBuilder().
		Metadata().
		// reads the configured replica count of a FileHash
		ObjectRep().ReadAny().
		// reads whether a FileHash is referenced by any block
		ObjectBlock().ReadAny().
		// lists all available nodes
		Node().ReadAny().
		// adds or modifies Cache records associated with a FileHash
		Cache().WriteAny().
		MutexLock(execCtx.Args.DistLock)
	if err != nil {
		log.Warnf("acquire locks failed, err: %s", err.Error())
		return
	}
	defer mutex.Unlock()

	// nodeID -> file hashes whose cache records changed on that node
	updatedNodeAndHashes := make(map[int64][]string)
	for _, fileHash := range t.FileHashes {
		updatedNodeIDs, err := t.checkOneRepCount(fileHash, execCtx)
		if err != nil {
			// best-effort: log and continue with the remaining hashes
			log.WithField("FileHash", fileHash).Warnf("check file rep count failed, err: %s", err.Error())
			continue
		}
		for _, id := range updatedNodeIDs {
			hashes := updatedNodeAndHashes[id]
			updatedNodeAndHashes[id] = append(hashes, fileHash)
		}
	}

	for nodeID, hashes := range updatedNodeAndHashes {
		// the new task inherits this task's execution options
		// (an urgent task therefore stays urgent)
		execCtx.Executor.Post(NewAgentCheckCache(nodeID, hashes), execCtx.Option)
	}
}
  68. func (t *CheckRepCount) checkOneRepCount(fileHash string, execCtx ExecuteContext) ([]int64, error) {
  69. log := logger.WithType[CheckRepCount]("Event")
  70. sqlCtx := execCtx.Args.DB.SQLCtx()
  71. var updatedNodeIDs []int64
  72. // 计算所需的最少备份数:
  73. // 1. ObjectRep中期望备份数的最大值
  74. // 2. 如果ObjectBlock存在对此文件的引用,则至少为1
  75. repMaxCnt, err := execCtx.Args.DB.ObjectRep().GetFileMaxRepCount(sqlCtx, fileHash)
  76. if err != nil {
  77. return nil, fmt.Errorf("get file max rep count failed, err: %w", err)
  78. }
  79. blkCnt, err := execCtx.Args.DB.ObjectBlock().CountBlockWithHash(sqlCtx, fileHash)
  80. if err != nil {
  81. return nil, fmt.Errorf("count block with hash failed, err: %w", err)
  82. }
  83. needRepCount := mymath.Max(repMaxCnt, mymath.Min(1, blkCnt))
  84. repNodes, err := execCtx.Args.DB.Cache().GetCachingFileNodes(sqlCtx, fileHash)
  85. if err != nil {
  86. return nil, fmt.Errorf("get caching file nodes failed, err: %w", err)
  87. }
  88. allNodes, err := execCtx.Args.DB.Node().GetAllNodes(sqlCtx)
  89. if err != nil {
  90. return nil, fmt.Errorf("get all nodes failed, err: %w", err)
  91. }
  92. var normalNodes, unavaiNodes []model.Node
  93. for _, node := range repNodes {
  94. if node.State == consts.NodeStateNormal {
  95. normalNodes = append(normalNodes, node)
  96. } else if node.State == consts.NodeStateUnavailable {
  97. unavaiNodes = append(unavaiNodes, node)
  98. }
  99. }
  100. // 如果Available的备份数超过期望备份数,则让一些节点退出
  101. if len(normalNodes) > needRepCount {
  102. delNodes := chooseDeleteAvaiRepNodes(allNodes, normalNodes, len(normalNodes)-needRepCount)
  103. for _, node := range delNodes {
  104. err := execCtx.Args.DB.Cache().SetTemp(sqlCtx, fileHash, node.NodeID)
  105. if err != nil {
  106. return nil, fmt.Errorf("change cache state failed, err: %w", err)
  107. }
  108. updatedNodeIDs = append(updatedNodeIDs, node.NodeID)
  109. }
  110. return updatedNodeIDs, nil
  111. }
  112. // 因为总备份数不够,而需要增加的备份数
  113. add1 := mymath.Max(0, needRepCount-len(repNodes))
  114. // 因为Available的备份数占比过少,而需要增加的备份数
  115. minAvaiNodeCnt := int(math.Ceil(float64(config.Cfg().MinAvailableRepProportion) * float64(needRepCount)))
  116. add2 := mymath.Max(0, minAvaiNodeCnt-len(normalNodes))
  117. // 最终需要增加的备份数,是以上两种情况的最大值
  118. finalAddCount := mymath.Max(add1, add2)
  119. if finalAddCount > 0 {
  120. newNodes := chooseNewRepNodes(allNodes, repNodes, finalAddCount)
  121. if len(newNodes) < finalAddCount {
  122. log.WithField("FileHash", fileHash).Warnf("need %d more rep nodes, but get only %d nodes", finalAddCount, len(newNodes))
  123. // TODO 节点数不够,进行一个告警
  124. }
  125. for _, node := range newNodes {
  126. err := execCtx.Args.DB.Cache().CreatePinned(sqlCtx, fileHash, node.NodeID, 0)
  127. if err != nil {
  128. return nil, fmt.Errorf("create cache failed, err: %w", err)
  129. }
  130. updatedNodeIDs = append(updatedNodeIDs, node.NodeID)
  131. }
  132. }
  133. return updatedNodeIDs, err
  134. }
  135. func chooseNewRepNodes(allNodes []model.Node, curRepNodes []model.Node, newCount int) []model.Node {
  136. noRepNodes := lo.Reject(allNodes, func(node model.Node, index int) bool {
  137. return lo.ContainsBy(curRepNodes, func(n model.Node) bool { return node.NodeID == n.NodeID }) ||
  138. node.State != consts.NodeStateNormal
  139. })
  140. repNodeLocationIDs := make(map[int64]bool)
  141. for _, node := range curRepNodes {
  142. repNodeLocationIDs[node.LocationID] = true
  143. }
  144. mysort.Sort(noRepNodes, func(l, r model.Node) int {
  145. // LocationID不存在时为false,false - true < 0,所以LocationID不存在的会排在前面
  146. return mysort.CmpBool(repNodeLocationIDs[l.LocationID], repNodeLocationIDs[r.LocationID])
  147. })
  148. return noRepNodes[:mymath.Min(newCount, len(noRepNodes))]
  149. }
// chooseDeleteAvaiRepNodes selects up to delCount nodes from curAvaiRepNodes
// whose replicas should be retired, preferring nodes in the locations that
// hold the most replicas, so the surviving replicas stay spread across
// locations. allNodes is currently unused by this implementation.
//
// NOTE(review): the inner range iterates a map, so which node is picked
// within each round is nondeterministic across runs — presumably acceptable
// here since any node from an over-represented location will do; confirm.
func chooseDeleteAvaiRepNodes(allNodes []model.Node, curAvaiRepNodes []model.Node, delCount int) []model.Node {
	// group the nodes by location ID
	locationGroupedNodes := make(map[int64][]model.Node)
	for _, node := range curAvaiRepNodes {
		nodes := locationGroupedNodes[node.LocationID]
		nodes = append(nodes, node)
		locationGroupedNodes[node.LocationID] = nodes
	}

	// Each pass takes one element from every remaining group and removes it
	// from that group, so the result interleaves locations cyclically, e.g.
	// ABCABCBCC — with the property that elements in the later cycles come
	// from the larger groups. Reversing the result (implemented here by
	// filling the array back-to-front) moves the larger groups to the front,
	// so the nodes to delete can simply be taken from the head.
	alternatedNodes := make([]model.Node, len(curAvaiRepNodes))
	for i := len(curAvaiRepNodes) - 1; i >= 0; {
		for id, nodes := range locationGroupedNodes {
			alternatedNodes[i] = nodes[0]
			if len(nodes) == 1 {
				delete(locationGroupedNodes, id)
			} else {
				locationGroupedNodes[id] = nodes[1:]
			}
			// advance the (backwards-moving) write position per placement
			i--
		}
	}
	return alternatedNodes[:mymath.Min(delCount, len(alternatedNodes))]
}
  176. func init() {
  177. RegisterMessageConvertor(func(msg *scevt.CheckRepCount) Event { return NewCheckRepCount(msg.FileHashes) })
  178. }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。