
Refactor the transaction mechanism

gitlink / Sydonian · 1 year ago
parent commit adec4652f7
60 changed files with 741 additions and 2244 deletions
  1. +20 -81 agent/internal/services/mq/cache.go
  2. +81 -43 agent/internal/services/mq/storage.go
  3. +7 -11 agent/internal/task/cache_move_package.go
  4. +0 -4 client/internal/cmdline/scanner.go
  5. +0 -29 client/internal/services/bucket.go
  6. +0 -44 client/internal/services/package.go
  7. +4 -18 client/internal/task/storage_load_package.go
  8. +7 -2 common/assets/scripts/create_database.sql
  9. +0 -5 common/consts/consts.go
  10. +3 -22 common/pkgs/cmd/create_package.go
  11. +3 -20 common/pkgs/cmd/update_package.go
  12. +10 -53 common/pkgs/db/cache.go
  13. +0 -2 common/pkgs/db/model/model.go
  14. +3 -3 common/pkgs/db/object.go
  15. +11 -0 common/pkgs/db/object_block.go
  16. +53 -0 common/pkgs/db/pinned_object.go
  17. +22 -99 common/pkgs/distlock/lockprovider/ipfs_lock.go
  18. +15 -25 common/pkgs/distlock/lockprovider/ipfs_lock_test.go
  19. +10 -72 common/pkgs/distlock/lockprovider/metadata_lock.go
  20. +28 -114 common/pkgs/distlock/lockprovider/storage_lock.go
  21. +4 -31 common/pkgs/distlock/reqbuilder/ipfs.go
  22. +0 -64 common/pkgs/distlock/reqbuilder/metadata_bucket.go
  23. +0 -64 common/pkgs/distlock/reqbuilder/metadata_cache.go
  24. +0 -64 common/pkgs/distlock/reqbuilder/metadata_node.go
  25. +4 -45 common/pkgs/distlock/reqbuilder/metadata_object.go
  26. +0 -63 common/pkgs/distlock/reqbuilder/metadata_object_block.go
  27. +0 -63 common/pkgs/distlock/reqbuilder/metadata_object_rep.go
  28. +0 -64 common/pkgs/distlock/reqbuilder/metadata_package.go
  29. +3 -43 common/pkgs/distlock/reqbuilder/metadata_storage_package.go
  30. +0 -64 common/pkgs/distlock/reqbuilder/metadata_user_bucket.go
  31. +0 -64 common/pkgs/distlock/reqbuilder/metadata_user_storage.go
  32. +4 -40 common/pkgs/distlock/reqbuilder/storage.go
  33. +2 -28 common/pkgs/iterator/download_object_iterator.go
  34. +30 -26 common/pkgs/mq/agent/cache.go
  35. +38 -30 common/pkgs/mq/agent/storage.go
  36. +4 -4 common/pkgs/mq/scanner/event/agent_cache_gc.go
  37. +3 -5 common/pkgs/mq/scanner/event/agent_check_cache.go
  38. +3 -5 common/pkgs/mq/scanner/event/agent_check_storage.go
  39. +18 -0 common/pkgs/mq/scanner/event/agent_storage_gc.go
  40. +0 -16 common/pkgs/mq/scanner/event/check_rep_count.go
  41. +4 -0 common/utils/utils.go
  42. +3 -3 coordinator/internal/services/cache.go
  43. +13 -29 coordinator/internal/services/package.go
  44. +99 -0 scanner/internal/event/agent_cache_gc.go
  45. +85 -90 scanner/internal/event/agent_check_cache.go
  46. +3 -28 scanner/internal/event/agent_check_state.go
  47. +46 -110 scanner/internal/event/agent_check_storage.go
  48. +83 -0 scanner/internal/event/agent_storage_gc.go
  49. +0 -83 scanner/internal/event/check_cache.go
  50. +3 -17 scanner/internal/event/check_package.go
  51. +0 -218 scanner/internal/event/check_rep_count.go
  52. +0 -158 scanner/internal/event/check_rep_count_test.go
  53. +2 -1 scanner/internal/tickevent/batch_all_agent_check_cache.go
  54. +2 -3 scanner/internal/tickevent/batch_check_all_package.go
  55. +0 -41 scanner/internal/tickevent/batch_check_all_rep_count.go
  56. +2 -1 scanner/internal/tickevent/batch_check_all_storage.go
  57. +4 -0 scanner/internal/tickevent/batch_check_package_redudancy.go
  58. +2 -1 scanner/internal/tickevent/check_agent_state.go
  59. +0 -29 scanner/internal/tickevent/check_cache.go
  60. +0 -2 scanner/main.go

+20 -81 agent/internal/services/mq/cache.go

@@ -3,15 +3,11 @@ package mq
import (
"time"

shell "github.com/ipfs/go-ipfs-api"
"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/ipfs"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage/agent/internal/config"
"gitlink.org.cn/cloudream/storage/agent/internal/task"
mytask "gitlink.org.cn/cloudream/storage/agent/internal/task"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
)
@@ -24,96 +20,39 @@ func (svc *Service) CheckCache(msg *agtmq.CheckCache) (*agtmq.CheckCacheResp, *m
}
defer ipfsCli.Close()

filesMap, err := ipfsCli.GetPinnedFiles()
files, err := ipfsCli.GetPinnedFiles()
if err != nil {
logger.Warnf("get pinned files from ipfs failed, err: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "get pinned files from ipfs failed")
}

// TODO: filter out the records of locked files according to the lock list
if msg.IsComplete {
return svc.checkComplete(msg, filesMap, ipfsCli)
} else {
return svc.checkIncrement(msg, filesMap, ipfsCli)
}
return mq.ReplyOK(agtmq.NewCheckCacheResp(lo.Keys(files)))
}

func (svc *Service) checkIncrement(msg *agtmq.CheckCache, filesMap map[string]shell.PinInfo, ipfsCli *ipfs.PoolClient) (*agtmq.CheckCacheResp, *mq.CodeMessage) {
var entries []agtmq.CheckIPFSRespEntry
for _, cache := range msg.Caches {
_, ok := filesMap[cache.FileHash]
if ok {
if cache.State == consts.CacheStatePinned {
// nothing to do
} else if cache.State == consts.CacheStateTemp {
logger.WithField("FileHash", cache.FileHash).Debugf("unpin for cache entry state is temp")
err := ipfsCli.Unpin(cache.FileHash)
if err != nil {
logger.WithField("FileHash", cache.FileHash).Warnf("unpin file failed, err: %s", err.Error())
}
}

// remove the entry from the map to mark this record as checked
delete(filesMap, cache.FileHash)

} else {
if cache.State == consts.CacheStatePinned {
svc.taskManager.StartComparable(task.NewIPFSPin(cache.FileHash))

} else if cache.State == consts.CacheStateTemp {
if time.Since(cache.CreateTime) > time.Duration(config.Cfg().TempFileLifetime)*time.Second {
entries = append(entries, agtmq.NewCheckCacheRespEntry(cache.FileHash, agtmq.CHECK_IPFS_RESP_OP_DELETE_TEMP))
}
}
}
func (svc *Service) CacheGC(msg *agtmq.CacheGC) (*agtmq.CacheGCResp, *mq.CodeMessage) {
ipfsCli, err := stgglb.IPFSPool.Acquire()
if err != nil {
logger.Warnf("new ipfs client: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "new ipfs client failed")
}
defer ipfsCli.Close()

// in incremental mode, entries left unchecked in filesMap need no further handling

return mq.ReplyOK(agtmq.NewCheckCacheResp(entries))
}

func (svc *Service) checkComplete(msg *agtmq.CheckCache, filesMap map[string]shell.PinInfo, ipfsCli *ipfs.PoolClient) (*agtmq.CheckCacheResp, *mq.CodeMessage) {
var entries []agtmq.CheckIPFSRespEntry
for _, cache := range msg.Caches {
_, ok := filesMap[cache.FileHash]
if ok {
if cache.State == consts.CacheStatePinned {
// nothing to do
} else if cache.State == consts.CacheStateTemp {
logger.WithField("FileHash", cache.FileHash).Debugf("unpin for cache entry state is temp")
err := ipfsCli.Unpin(cache.FileHash)
if err != nil {
logger.WithField("FileHash", cache.FileHash).Warnf("unpin file failed, err: %s", err.Error())
}
}

// remove the entry from the map to mark this record as checked
delete(filesMap, cache.FileHash)

} else {
if cache.State == consts.CacheStatePinned {
svc.taskManager.StartComparable(task.NewIPFSPin(cache.FileHash))

} else if cache.State == consts.CacheStateTemp {
if time.Since(cache.CreateTime) > time.Duration(config.Cfg().TempFileLifetime)*time.Second {
entries = append(entries, agtmq.NewCheckCacheRespEntry(cache.FileHash, agtmq.CHECK_IPFS_RESP_OP_DELETE_TEMP))
}
}
}
files, err := ipfsCli.GetPinnedFiles()
if err != nil {
logger.Warnf("get pinned files from ipfs failed, err: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "get pinned files from ipfs failed")
}

// entries left in the map were never visited, i.e. Cache has no record of them; unpin the file and create a Temp record
for hash := range filesMap {
logger.WithField("FileHash", hash).Debugf("unpin for no cache entry")
err := ipfsCli.Unpin(hash)
if err != nil {
logger.WithField("FileHash", hash).Warnf("unpin file failed, err: %s", err.Error())
// unpin every file that is not recorded in the metadata
shouldPinnedFiles := lo.SliceToMap(msg.PinnedFileHashes, func(hash string) (string, bool) { return hash, true })
for hash := range files {
if !shouldPinnedFiles[hash] {
ipfsCli.Unpin(hash)
logger.WithField("FileHash", hash).Debugf("unpinned by gc")
}
entries = append(entries, agtmq.NewCheckCacheRespEntry(hash, agtmq.CHECK_IPFS_RESP_OP_CREATE_TEMP))
}

return mq.ReplyOK(agtmq.NewCheckCacheResp(entries))
return mq.ReplyOK(agtmq.RespCacheGC())
}

func (svc *Service) StartCacheMovePackage(msg *agtmq.StartCacheMovePackage) (*agtmq.StartCacheMovePackageResp, *mq.CodeMessage) {
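The rewritten CheckCache/CacheGC pair replaces the old incremental and complete checks with a simpler contract: the scanner sends the authoritative list of hashes that should remain pinned, and the agent unpins everything else. A minimal self-contained sketch of that set difference (gcPinned is a hypothetical helper, not code from this commit):

package main

import "fmt"

// gcPinned returns the hashes to unpin: everything currently pinned on the
// node that is absent from the authoritative shouldPin list.
func gcPinned(pinned, shouldPin []string) []string {
	keep := make(map[string]bool, len(shouldPin))
	for _, h := range shouldPin {
		keep[h] = true
	}
	var unpin []string
	for _, h := range pinned {
		if !keep[h] {
			unpin = append(unpin, h)
		}
	}
	return unpin
}

func main() {
	fmt.Println(gcPinned([]string{"Qm1", "Qm2", "Qm3"}, []string{"Qm1", "Qm3"})) // [Qm2]
}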


+81 -43 agent/internal/services/mq/storage.go

@@ -1,18 +1,22 @@
package mq

import (
"fmt"
"io/fs"
"os"
"path/filepath"
"strconv"
"time"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
mytask "gitlink.org.cn/cloudream/storage/agent/internal/task"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
@@ -95,66 +99,100 @@ func (svc *Service) StorageCheck(msg *agtmq.StorageCheck) (*agtmq.StorageCheckRe
))
}

dirInfos := lo.Filter(infos, func(info fs.DirEntry, index int) bool { return info.IsDir() })
var stgPkgs []model.StoragePackage

if msg.IsComplete {
return svc.checkStorageComplete(msg, dirInfos)
} else {
return svc.checkStorageIncrement(msg, dirInfos)
}
}

func (svc *Service) checkStorageIncrement(msg *agtmq.StorageCheck, dirInfos []fs.DirEntry) (*agtmq.StorageCheckResp, *mq.CodeMessage) {
infosMap := make(map[string]fs.DirEntry)
for _, info := range dirInfos {
infosMap[info.Name()] = info
}
userDirs := lo.Filter(infos, func(info fs.DirEntry, index int) bool { return info.IsDir() })
for _, dir := range userDirs {
userIDInt, err := strconv.ParseInt(dir.Name(), 10, 64)
if err != nil {
logger.Warnf("parsing user id %s: %s", dir.Name(), err.Error())
continue
}

var entries []agtmq.StorageCheckRespEntry
for _, obj := range msg.Packages {
dirName := utils.MakeStorageLoadPackagePath(msg.Directory, obj.UserID, obj.PackageID)
_, ok := infosMap[dirName]
pkgDir := utils.MakeStorageLoadDirectory(msg.Directory, dir.Name())
pkgDirs, err := os.ReadDir(pkgDir)
if err != nil {
logger.Warnf("reading package dir %s: %s", pkgDir, err.Error())
continue
}

if ok {
// nothing to do
// remove the entry from the map to mark this record as checked
delete(infosMap, dirName)
for _, pkg := range pkgDirs {
pkgIDInt, err := strconv.ParseInt(pkg.Name(), 10, 64)
if err != nil {
logger.Warnf("parsing package dir %s: %s", pkg.Name(), err.Error())
continue
}

} else {
// whenever the files are absent, delete the record from the StoragePackage table
entries = append(entries, agtmq.NewStorageCheckRespEntry(obj.PackageID, obj.UserID, agtmq.CHECK_STORAGE_RESP_OP_DELETE))
stgPkgs = append(stgPkgs, model.StoragePackage{
StorageID: msg.StorageID,
PackageID: cdssdk.PackageID(pkgIDInt),
UserID: cdssdk.UserID(userIDInt),
})
}
}

// in incremental mode, entries left unchecked in infosMap need no further handling

return mq.ReplyOK(agtmq.NewStorageCheckResp(consts.StorageDirectoryStateOK, entries))
return mq.ReplyOK(agtmq.NewStorageCheckResp(consts.StorageDirectoryStateOK, stgPkgs))
}

func (svc *Service) checkStorageComplete(msg *agtmq.StorageCheck, dirInfos []fs.DirEntry) (*agtmq.StorageCheckResp, *mq.CodeMessage) {
func (svc *Service) StorageGC(msg *agtmq.StorageGC) (*agtmq.StorageGCResp, *mq.CodeMessage) {
infos, err := os.ReadDir(msg.Directory)
if err != nil {
logger.Warnf("list storage directory failed, err: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "list directory files failed")
}

// userID->pkgID->pkg
userPkgs := make(map[string]map[string]bool)
for _, pkg := range msg.Packages {
userIDStr := fmt.Sprintf("%d", pkg.UserID)

pkgs, ok := userPkgs[userIDStr]
if !ok {
pkgs = make(map[string]bool)
userPkgs[userIDStr] = pkgs
}

infosMap := make(map[string]fs.DirEntry)
for _, info := range dirInfos {
infosMap[info.Name()] = info
pkgIDStr := fmt.Sprintf("%d", pkg.PackageID)
pkgs[pkgIDStr] = true
}

var entries []agtmq.StorageCheckRespEntry
for _, obj := range msg.Packages {
dirName := utils.MakeStorageLoadPackagePath(msg.Directory, obj.UserID, obj.PackageID)
_, ok := infosMap[dirName]
userDirs := lo.Filter(infos, func(info fs.DirEntry, index int) bool { return info.IsDir() })
for _, dir := range userDirs {
pkgMap, ok := userPkgs[dir.Name()]
// the first directory level is the UserID; first remove any user directory whose UserID never appears in the StoragePackage table
if !ok {
rmPath := filepath.Join(msg.Directory, dir.Name())
err := os.RemoveAll(rmPath)
if err != nil {
logger.Warnf("removing user dir %s: %s", rmPath, err.Error())
} else {
logger.Debugf("user dir %s removed by gc", rmPath)
}
continue
}

if ok {
// nothing to do
// remove the entry from the map to mark this record as checked
delete(infosMap, dirName)
pkgDir := utils.MakeStorageLoadDirectory(msg.Directory, dir.Name())
// walk the package directories under each UserID directory
pkgs, err := os.ReadDir(pkgDir)
if err != nil {
logger.Warnf("reading package dir %s: %s", pkgDir, err.Error())
continue
}

} else {
// whenever the files are absent, delete the record from the StoragePackage table
entries = append(entries, agtmq.NewStorageCheckRespEntry(obj.PackageID, obj.UserID, agtmq.CHECK_STORAGE_RESP_OP_DELETE))
for _, pkg := range pkgs {
if !pkgMap[pkg.Name()] {
rmPath := filepath.Join(pkgDir, pkg.Name())
err := os.RemoveAll(rmPath)
if err != nil {
logger.Warnf("removing package dir %s: %s", rmPath, err.Error())
} else {
logger.Debugf("package dir %s removed by gc", rmPath)
}
}
}
}

return mq.ReplyOK(agtmq.NewStorageCheckResp(consts.StorageDirectoryStateOK, entries))
return mq.ReplyOK(agtmq.RespStorageGC())
}

func (svc *Service) StartStorageCreatePackage(msg *agtmq.StartStorageCreatePackage) (*agtmq.StartStorageCreatePackageResp, *mq.CodeMessage) {
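StorageGC applies the same authoritative-list idea to the storage directory, which is laid out as <directory>/<UserID>/<PackageID>: a first-level directory whose UserID has no StoragePackage record is removed wholesale, and inside known user directories any PackageID directory missing from the table is removed. A self-contained sketch of just the selection logic (dirsToRemove is hypothetical, not code from this commit):

package main

import (
	"fmt"
	"path/filepath"
)

// dirsToRemove mirrors the two-level walk: unknown users lose their whole
// directory; known users lose only the package dirs absent from the table.
func dirsToRemove(root string, onDisk map[string][]string, inTable map[string]map[string]bool) []string {
	var rm []string
	for user, pkgs := range onDisk {
		userPkgs, ok := inTable[user]
		if !ok {
			rm = append(rm, filepath.Join(root, user)) // whole user dir is unknown
			continue
		}
		for _, pkg := range pkgs {
			if !userPkgs[pkg] {
				rm = append(rm, filepath.Join(root, user, pkg))
			}
		}
	}
	return rm
}

func main() {
	onDisk := map[string][]string{"1": {"10", "11"}, "2": {"20"}}
	inTable := map[string]map[string]bool{"1": {"10": true}}
	fmt.Println(dirsToRemove("/storage", onDisk, inTable)) // order is map-iteration dependent
}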


+7 -11 agent/internal/task/cache_move_package.go

@@ -37,18 +37,9 @@ func (t *CacheMovePackage) do(ctx TaskContext) error {
log.Debugf("begin with %v", logger.FormatStruct(t))
defer log.Debugf("end")

// TODO: locks for EC
mutex, err := reqbuilder.NewBuilder().
Metadata().
// read the Package info and the Objects it contains
Package().ReadOne(t.packageID).Object().ReadAny().
// read the Rep object configuration
ObjectRep().ReadAny().
// create Cache records
Cache().CreateAny().
IPFS().
// pin the files
CreateAnyRep(*stgglb.Local.NodeID).
// protect the decoded Object data
IPFS().Buzy(*stgglb.Local.NodeID).
MutexLock(ctx.distlock)
if err != nil {
return fmt.Errorf("acquiring distlock: %w", err)
@@ -94,5 +85,10 @@ func (t *CacheMovePackage) do(ctx TaskContext) error {
}
}

_, err = coorCli.CachePackageMoved(coormq.NewCachePackageMoved(t.packageID, *stgglb.Local.NodeID))
if err != nil {
return fmt.Errorf("request to coordinator: %w", err)
}

return nil
}

+0 -4 client/internal/cmdline/scanner.go

@@ -31,12 +31,8 @@ func init() {

parseScannerEventCmdTrie.MustAdd(scevt.NewAgentCheckStorage, myreflect.TypeNameOf[scevt.AgentCheckStorage]())

parseScannerEventCmdTrie.MustAdd(scevt.NewCheckCache, myreflect.TypeNameOf[scevt.CheckCache]())

parseScannerEventCmdTrie.MustAdd(scevt.NewCheckPackage, myreflect.TypeNameOf[scevt.CheckPackage]())

parseScannerEventCmdTrie.MustAdd(scevt.NewCheckRepCount, myreflect.TypeNameOf[scevt.CheckRepCount]())

parseScannerEventCmdTrie.MustAdd(scevt.NewCheckPackageRedundancy, myreflect.TypeNameOf[scevt.CheckPackageRedundancy]())

commands.MustAdd(ScannerPostEvent, "scanner", "event")


+0 -29 client/internal/services/bucket.go

@@ -6,7 +6,6 @@ import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

@@ -60,19 +59,6 @@ func (svc *BucketService) CreateBucket(userID cdssdk.UserID, bucketName string)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

// TODO: which locks to take can only be known by reading the source of the system operations, yet a user command may invoke more than one system operation.
// Locking therefore still has to happen in the user command, but the concrete lock set should be encapsulated together with the system operation, so it stays manageable and changes are not scattered.

mutex, err := reqbuilder.NewBuilder().
Metadata().Bucket().CreateOne(userID, bucketName).
// TODO: consider a second, more precise locking pass
UserBucket().CreateAny().
MutexLock(svc.DistLock)
if err != nil {
return 0, fmt.Errorf("acquire locks failed, err: %w", err)
}
defer mutex.Unlock()

resp, err := coorCli.CreateBucket(coormq.NewCreateBucket(userID, bucketName))
if err != nil {
return 0, fmt.Errorf("creating bucket: %w", err)
@@ -90,21 +76,6 @@ func (svc *BucketService) DeleteBucket(userID cdssdk.UserID, bucketID cdssdk.Buc

// TODO: check whether the user has permission to delete this Bucket; for the check, a Read lock on UserBucket alone would suffice

mutex, err := reqbuilder.NewBuilder().
Metadata().
UserBucket().WriteAny().
Bucket().WriteOne(bucketID).
Package().WriteAny().
Object().WriteAny().
ObjectRep().WriteAny().
ObjectBlock().WriteAny().
StoragePackage().WriteAny().
MutexLock(svc.DistLock)
if err != nil {
return fmt.Errorf("acquire locks failed, err: %w", err)
}
defer mutex.Unlock()

_, err = coorCli.DeleteBucket(coormq.NewDeleteBucket(userID, bucketID))
if err != nil {
return fmt.Errorf("request to coordinator failed, err: %w", err)


+0 -44 client/internal/services/package.go

@@ -10,7 +10,6 @@ import (
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
agtcmd "gitlink.org.cn/cloudream/storage/common/pkgs/cmd"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)
@@ -45,26 +44,6 @@ func (svc *PackageService) DownloadPackage(userID cdssdk.UserID, packageID cdssd
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

mutex, err := reqbuilder.NewBuilder().
// to check whether the user has permission on the objects
Metadata().UserBucket().ReadAny().
// to query the nodes available for download
Node().ReadAny().
// to read the package info
Package().ReadOne(packageID).
// to read the info of the files inside the package
Object().ReadAny().
// to query the Rep configuration
ObjectRep().ReadAny().
// to query the Block configuration
ObjectBlock().ReadAny().
// to query the nodes that hold replicas
Cache().ReadAny().
MutexLock(svc.DistLock)
if err != nil {
return nil, fmt.Errorf("acquire locks failed, err: %w", err)
}

getObjsResp, err := coorCli.GetPackageObjectDetails(coormq.NewGetPackageObjectDetails(packageID))
if err != nil {
return nil, fmt.Errorf("getting package object details: %w", err)
@@ -74,9 +53,6 @@ func (svc *PackageService) DownloadPackage(userID cdssdk.UserID, packageID cdssd
Distlock: svc.DistLock,
})

iter.OnClosing = func() {
mutex.Unlock()
}
return iter, nil
}

@@ -115,26 +91,6 @@ func (svc *PackageService) DeletePackage(userID cdssdk.UserID, packageID cdssdk.
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

mutex, err := reqbuilder.NewBuilder().
Metadata().
// to check whether the user has permission on the objects
UserBucket().ReadAny().
// to read and modify the package info
Package().WriteOne(packageID).
// to delete all files inside the package
Object().WriteAny().
// to delete the Rep configuration
ObjectRep().WriteAny().
// to delete the Block configuration
ObjectBlock().WriteAny().
// to update the state of the records of Objects moved to storage
StoragePackage().WriteAny().
MutexLock(svc.DistLock)
if err != nil {
return fmt.Errorf("acquire locks failed, err: %w", err)
}
defer mutex.Unlock()

_, err = coorCli.DeletePackage(coormq.NewDeletePackage(userID, packageID))
if err != nil {
return fmt.Errorf("deleting package: %w", err)


+4 -18 client/internal/task/storage_load_package.go

@@ -38,24 +38,10 @@ func (t *StorageLoadPackage) Execute(task *task.Task[TaskContext], ctx TaskConte

func (t *StorageLoadPackage) do(ctx TaskContext) error {
mutex, err := reqbuilder.NewBuilder().
Metadata().
// to check whether the user has permission on the Storage
UserStorage().ReadOne(t.userID, t.storageID).
// to check whether the user has permission on the objects
UserBucket().ReadAny().
// to read the package info
Package().ReadOne(t.packageID).
// to read the object info
Object().ReadAny().
// to query the Rep configuration
ObjectRep().ReadAny().
// to query the Block configuration
ObjectBlock().ReadAny().
// to create the Move record
StoragePackage().CreateOne(t.storageID, t.userID, t.packageID).
Storage().
// to create the object files
CreateOnePackage(t.storageID, t.userID, t.packageID).
// reserve the slot ahead of time
Metadata().StoragePackage().CreateOne(t.userID, t.storageID, t.packageID).
// protect the files downloaded into the storage directory
Storage().Buzy(t.storageID).
MutexLock(ctx.distlock)
if err != nil {
return fmt.Errorf("acquire locks failed, err: %w", err)


+7 -2 common/assets/scripts/create_database.sql

@@ -136,13 +136,18 @@ create table ObjectBlock (
create table Cache (
FileHash varchar(100) not null comment 'coded block ID',
NodeID int not null comment 'node ID',
State varchar(100) not null comment 'state',
FrozenTime timestamp comment 'time when the file was frozen',
CreateTime timestamp not null comment 'cache time',
Priority int not null comment 'coded block priority',
primary key(FileHash, NodeID)
) comment = 'cache table';

create table PinnedObject (
NodeID int not null comment 'node ID',
ObjectID int not null comment 'object ID',
CreateTime timestamp not null comment 'pin time',
primary key(NodeID, ObjectID)
) comment = 'pinned object table';

create table StoragePackage (
StorageID int not null comment 'storage service ID',
PackageID int not null comment 'package ID',


+0 -5 common/consts/consts.go

@@ -9,8 +9,3 @@ const (
NodeStateNormal = "Normal"
NodeStateUnavailable = "Unavailable"
)

const (
CacheStatePinned = "Pinned"
CacheStateTemp = "Temp"
)

+3 -22 common/pkgs/cmd/create_package.go

@@ -66,26 +66,6 @@ func (t *CreatePackage) Execute(ctx *UpdatePackageContext) (*CreatePackageResult
return nil, fmt.Errorf("new coordinator client: %w", err)
}

mutex, err := reqbuilder.NewBuilder().
Metadata().
// to check whether the user has permission on the bucket
UserBucket().ReadOne(t.userID, t.bucketID).
// to query the nodes available for upload
Node().ReadAny().
// to create the package info
Package().CreateOne(t.bucketID, t.name).
// to create the info of the files in the package
Object().CreateAny().
// to set the EC configuration
ObjectBlock().CreateAny().
// to create Cache records
Cache().CreateAny().
MutexLock(ctx.Distlock)
if err != nil {
return nil, fmt.Errorf("acquire locks failed, err: %w", err)
}
defer mutex.Unlock()

createPkgResp, err := coorCli.CreatePackage(coormq.NewCreatePackage(t.userID, t.bucketID, t.name))
if err != nil {
return nil, fmt.Errorf("creating package: %w", err)
@@ -107,15 +87,16 @@ func (t *CreatePackage) Execute(ctx *UpdatePackageContext) (*CreatePackageResult
ipfsReqBlder := reqbuilder.NewBuilder()
// if the local IPFS daemon is itself a node of the storage system, uploading from the local node requires taking the lock
if stgglb.Local.NodeID != nil {
ipfsReqBlder.IPFS().CreateAnyRep(*stgglb.Local.NodeID)
ipfsReqBlder.IPFS().Buzy(*stgglb.Local.NodeID)
}
for _, node := range userNodes {
if stgglb.Local.NodeID != nil && node.Node.NodeID == *stgglb.Local.NodeID {
continue
}

ipfsReqBlder.IPFS().CreateAnyRep(node.Node.NodeID)
ipfsReqBlder.IPFS().Buzy(node.Node.NodeID)
}
// TODO: consider taking an Object Create lock
// prevent the uploaded replicas from being garbage-collected
ipfsMutex, err := ipfsReqBlder.MutexLock(ctx.Distlock)
if err != nil {


+3 -20 common/pkgs/cmd/update_package.go

@@ -45,24 +45,6 @@ func (t *UpdatePackage) Execute(ctx *UpdatePackageContext) (*UpdatePackageResult
return nil, fmt.Errorf("new coordinator client: %w", err)
}

mutex, err := reqbuilder.NewBuilder().
Metadata().
// to query the nodes available for upload
Node().ReadAny().
// to modify the package info
Package().WriteOne(t.packageID).
// to create the info of the files in the package
Object().CreateAny().
// to set the EC configuration
ObjectBlock().CreateAny().
// to create Cache records
Cache().CreateAny().
MutexLock(ctx.Distlock)
if err != nil {
return nil, fmt.Errorf("acquire locks failed, err: %w", err)
}
defer mutex.Unlock()

getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID))
if err != nil {
return nil, fmt.Errorf("getting user nodes: %w", err)
@@ -79,15 +61,16 @@ func (t *UpdatePackage) Execute(ctx *UpdatePackageContext) (*UpdatePackageResult
ipfsReqBlder := reqbuilder.NewBuilder()
// if the local IPFS daemon is itself a node of the storage system, uploading from the local node requires taking the lock
if stgglb.Local.NodeID != nil {
ipfsReqBlder.IPFS().CreateAnyRep(*stgglb.Local.NodeID)
ipfsReqBlder.IPFS().Buzy(*stgglb.Local.NodeID)
}
for _, node := range userNodes {
if stgglb.Local.NodeID != nil && node.Node.NodeID == *stgglb.Local.NodeID {
continue
}

ipfsReqBlder.IPFS().CreateAnyRep(node.Node.NodeID)
ipfsReqBlder.IPFS().Buzy(node.Node.NodeID)
}
// TODO: take Object Create locks, ideally several in one call
// prevent the uploaded replicas from being garbage-collected
ipfsMutex, err := ipfsReqBlder.MutexLock(ctx.Distlock)
if err != nil {


+10 -53 common/pkgs/db/cache.go

@@ -5,7 +5,6 @@ import (

"github.com/jmoiron/sqlx"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/consts"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
)

@@ -29,15 +28,15 @@ func (*CacheDB) BatchGetAllFileHashes(ctx SQLContext, start int, count int) ([]s
return ret, err
}

func (*CacheDB) GetNodeCaches(ctx SQLContext, nodeID cdssdk.NodeID) ([]model.Cache, error) {
func (*CacheDB) GetByNodeID(ctx SQLContext, nodeID cdssdk.NodeID) ([]model.Cache, error) {
var ret []model.Cache
err := sqlx.Select(ctx, &ret, "select * from Cache where NodeID = ?", nodeID)
return ret, err
}

// CreateNew creates a new cache record
func (*CacheDB) CreateNew(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) error {
_, err := ctx.Exec("insert into Cache values(?,?,?,?,?,?)", fileHash, nodeID, consts.CacheStatePinned, nil, time.Now(), 0)
// Create creates a new cache record
func (*CacheDB) Create(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID, priority int) error {
_, err := ctx.Exec("insert into Cache values(?,?,?,?)", fileHash, nodeID, time.Now(), priority)
if err != nil {
return err
}
@@ -45,53 +44,28 @@ func (*CacheDB) CreateNew(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID)
return nil
}

func (*CacheDB) SetPackageObjectFrozen(ctx SQLContext, pkgID cdssdk.PackageID, nodeID cdssdk.NodeID) error {
var nowTime = time.Now()
_, err := ctx.Exec(
"insert into Cache(FileHash,NodeID,State,FrozenTime,CreateTime,Priority)"+
" select FileHash, ?, ?, ?, ?, ? from Object where PackageID = ?"+
" on duplicate key update State = ?, FrozenTime = ?",
nodeID, consts.CacheStatePinned, &nowTime, &nowTime, 0,
pkgID,
consts.CacheStatePinned, &nowTime,
)

return err
}

// CreatePinned creates a cache record; if one already exists but is not pinned, it is set to the pinned state
func (*CacheDB) CreatePinned(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID, priority int) error {
_, err := ctx.Exec("insert into Cache values(?,?,?,?,?,?) on duplicate key update State = ?, CreateTime = ?, Priority = ?",
fileHash, nodeID, consts.CacheStatePinned, nil, time.Now(), priority,
consts.CacheStatePinned, time.Now(), priority,
)
return err
}

func (*CacheDB) BatchCreatePinned(ctx SQLContext, fileHashes []string, nodeID cdssdk.NodeID, priority int) error {
func (*CacheDB) BatchCreate(ctx SQLContext, fileHashes []string, nodeID cdssdk.NodeID, priority int) error {
var caches []model.Cache
var nowTime = time.Now()
for _, hash := range fileHashes {
caches = append(caches, model.Cache{
FileHash: hash,
NodeID: nodeID,
State: consts.CacheStatePinned,
FrozenTime: nil,
CreateTime: nowTime,
Priority: priority,
})
}

_, err := sqlx.NamedExec(ctx, "insert into Cache(FileHash,NodeID,State,FrozenTime,CreateTime,Priority) values(:FileHash,:NodeID,:State,:FrozenTime,:CreateTime,:Priority)"+
" on duplicate key update State=values(State), CreateTime=values(CreateTime), Priority=values(Priority)",
_, err := sqlx.NamedExec(ctx, "insert into Cache(FileHash,NodeID,CreateTime,Priority) values(:FileHash,:NodeID,:CreateTime,:Priority)"+
" on duplicate key update CreateTime=values(CreateTime), Priority=values(Priority)",
caches,
)
return err
}

// CreateTemp creates a cache record in the Temp state; it has no effect if the record already exists
func (*CacheDB) CreateTemp(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) error {
_, err := ctx.Exec("insert ignore into Cache values(?,?,?,?,?,?)", fileHash, nodeID, consts.CacheStateTemp, nil, time.Now(), 0)
func (*CacheDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error {
// TODO: the IN clause has a length limit
_, err := ctx.Exec("delete from Cache where NodeID = ? and FileHash in (?)", nodeID, fileHashes)
return err
}

@@ -103,12 +77,6 @@ func (*CacheDB) GetCachingFileNodes(ctx SQLContext, fileHash string) ([]model.No
return x, err
}

// DeleteTemp deletes a record in the Temp state
func (*CacheDB) DeleteTemp(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) error {
_, err := ctx.Exec("delete from Cache where FileHash = ? and NodeID = ? and State = ?", fileHash, nodeID, consts.CacheStateTemp)
return err
}

// DeleteNodeAll deletes all records of a node
func (*CacheDB) DeleteNodeAll(ctx SQLContext, nodeID cdssdk.NodeID) error {
_, err := ctx.Exec("delete from Cache where NodeID = ?", nodeID)
@@ -124,14 +92,3 @@ func (*CacheDB) FindCachingFileUserNodes(ctx SQLContext, userID cdssdk.NodeID, f
" UserNode.UserID = ? and UserNode.NodeID = Node.NodeID", fileHash, userID)
return x, err
}

// SetTemp sets a record to Temp; it has no effect on Frozen records
func (*CacheDB) SetTemp(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) error {
_, err := ctx.Exec("update Cache set State = ?, CreateTime = ? where FileHash = ? and NodeID = ? and FrozenTime is null",
consts.CacheStateTemp,
time.Now(),
fileHash,
nodeID,
)
return err
}
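The TODO above NodeBatchDelete points at a real limitation: binding a []string to a single in (?) placeholder does not expand under plain placeholder binding, and very long IN lists can exceed server limits. A sketch of the usual sqlx pattern, assuming the SQLContext and cdssdk types behave as elsewhere in this file (sqlx.In is a real github.com/jmoiron/sqlx function); the same caveat applies to the NodeBatchDelete variants on ObjectBlock and PinnedObject:

// nodeBatchDelete is a sketch, not the committed code: sqlx.In expands the
// slice into individual placeholders, and chunking keeps each statement under
// the server's limits.
func nodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error {
	const chunkSize = 1000 // assumed safe batch size
	for i := 0; i < len(fileHashes); i += chunkSize {
		end := i + chunkSize
		if end > len(fileHashes) {
			end = len(fileHashes)
		}
		query, args, err := sqlx.In("delete from Cache where NodeID = ? and FileHash in (?)", nodeID, fileHashes[i:end])
		if err != nil {
			return err
		}
		if _, err := ctx.Exec(query, args...); err != nil {
			return err
		}
	}
	return nil
}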

+0 -2 common/pkgs/db/model/model.go

@@ -105,8 +105,6 @@ type ObjectBlock = stgmod.ObjectBlock
type Cache struct {
FileHash string `db:"FileHash" json:"fileHash"`
NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"`
State string `db:"State" json:"state"`
FrozenTime *time.Time `db:"FrozenTime" json:"frozenTime"`
CreateTime time.Time `db:"CreateTime" json:"createTime"`
Priority int `db:"Priority" json:"priority"`
}


+3 -3 common/pkgs/db/object.go

@@ -112,14 +112,14 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, objs []
}
}

// the first upload defaults to the unchunked rep mode
// the first upload defaults to the unchunked none mode
err = db.ObjectBlock().Create(ctx, objID, 0, obj.NodeID, obj.FileHash)
if err != nil {
return nil, fmt.Errorf("creating object block: %w", err)
}

// create the cache record
err = db.Cache().CreatePinned(ctx, obj.FileHash, obj.NodeID, 0)
err = db.Cache().Create(ctx, obj.FileHash, obj.NodeID, 0)
if err != nil {
return nil, fmt.Errorf("creating cache: %w", err)
}
@@ -148,7 +148,7 @@ func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, objs []coormq.ChangeOb
}

// create the cache record
err = db.Cache().CreatePinned(ctx, block.FileHash, block.NodeID, 0)
err = db.Cache().Create(ctx, block.FileHash, block.NodeID, 0)
if err != nil {
return fmt.Errorf("creating cache: %w", err)
}


+11 -0 common/pkgs/db/object_block.go

@@ -20,6 +20,12 @@ func (db *DB) ObjectBlock() *ObjectBlockDB {
return &ObjectBlockDB{DB: db}
}

func (db *ObjectBlockDB) GetByNodeID(ctx SQLContext, nodeID cdssdk.NodeID) ([]stgmod.ObjectBlock, error) {
var rets []stgmod.ObjectBlock
err := sqlx.Select(ctx, &rets, "select * from ObjectBlock where NodeID = ?", nodeID)
return rets, err
}

func (db *ObjectBlockDB) Create(ctx SQLContext, objectID cdssdk.ObjectID, index int, nodeID cdssdk.NodeID, fileHash string) error {
_, err := ctx.Exec("insert into ObjectBlock values(?,?,?,?)", objectID, index, nodeID, fileHash)
return err
@@ -35,6 +41,11 @@ func (db *ObjectBlockDB) DeleteInPackage(ctx SQLContext, packageID cdssdk.Packag
return err
}

func (db *ObjectBlockDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error {
_, err := ctx.Exec("delete from ObjectBlock where NodeID = ? and FileHash in (?)", nodeID, fileHashes)
return err
}

func (db *ObjectBlockDB) CountBlockWithHash(ctx SQLContext, fileHash string) (int, error) {
var cnt int
err := sqlx.Get(ctx, &cnt,


+53 -0 common/pkgs/db/pinned_object.go

@@ -0,0 +1,53 @@
package db

import (
"time"

"github.com/jmoiron/sqlx"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
)

type PinnedObjectDB struct {
*DB
}

func (db *DB) PinnedObject() *PinnedObjectDB {
return &PinnedObjectDB{DB: db}
}

func (*PinnedObjectDB) GetByNodeID(ctx SQLContext, nodeID cdssdk.NodeID) ([]cdssdk.PinnedObject, error) {
var ret []cdssdk.PinnedObject
err := sqlx.Select(ctx, &ret, "select * from PinnedObject where NodeID = ?", nodeID)
return ret, err
}

func (*PinnedObjectDB) GetObjectsByNodeID(ctx SQLContext, nodeID cdssdk.NodeID) ([]cdssdk.Object, error) {
var ret []cdssdk.Object
err := sqlx.Select(ctx, &ret, "select Object.* from PinnedObject, Object where PinnedObject.ObjectID = Object.ObjectID and NodeID = ?", nodeID)
return ret, err
}

func (*PinnedObjectDB) Create(ctx SQLContext, nodeID cdssdk.NodeID, objectID cdssdk.ObjectID, createTime time.Time) error {
_, err := ctx.Exec("insert into PinnedObject values(?,?,?)", nodeID, objectID, createTime)
return err
}

func (*PinnedObjectDB) CreateFromPackage(ctx SQLContext, packageID cdssdk.PackageID, nodeID cdssdk.NodeID) error {
_, err := ctx.Exec(
"insert ignore into PinnedObject(NodeID, ObjectID, CreateTime) select ?, ObjectID, ? from Object where PackageID = ?",
nodeID,
time.Now(),
packageID,
)
return err
}

func (*PinnedObjectDB) Delete(ctx SQLContext, nodeID cdssdk.NodeID, objectID cdssdk.ObjectID) error {
_, err := ctx.Exec("delete from PinnedObject where NodeID = ? and ObjectID = ?", nodeID, objectID)
return err
}

func (*PinnedObjectDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, objectIDs []cdssdk.ObjectID) error {
_, err := ctx.Exec("delete from PinnedObject where NodeID = ? and ObjectID in (?)", nodeID, objectIDs)
return err
}
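The PinnedObject table replaces the old Pinned/Temp states on Cache: a pin is now an explicit (NodeID, ObjectID) row, and CreateFromPackage pins a whole package in one insert ignore ... select statement. A toy model of that idempotent bulk insert:

package main

import "fmt"

type pinKey struct{ nodeID, objectID int64 }

// createFromPackage mirrors the "insert ignore ... select" semantics: every
// object of the package gets pinned on the node, existing pins are kept.
func createFromPackage(pins map[pinKey]bool, nodeID int64, pkgObjects []int64) {
	for _, objID := range pkgObjects {
		pins[pinKey{nodeID, objID}] = true // insert ignore: overwriting is harmless
	}
}

func main() {
	pins := map[pinKey]bool{{1, 10}: true}
	createFromPackage(pins, 1, []int64{10, 11, 12})
	fmt.Println(len(pins)) // 3
}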

+22 -99 common/pkgs/distlock/lockprovider/ipfs_lock.go

@@ -3,22 +3,15 @@ package lockprovider
import (
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
mylo "gitlink.org.cn/cloudream/common/utils/lo"
)

const (
IPFSLockPathPrefix = "IPFS"

IPFS_SET_READ_LOCK = "SetRead"
IPFS_SET_WRITE_LOCK = "SetWrite"
IPFS_SET_CREATE_LOCK = "SetCreate"

IPFS_ELEMENT_READ_LOCK = "ElementRead"
IPFS_ELEMENT_WRITE_LOCK = "ElementWrite"

IPFS_NODE_ID_PATH_INDEX = 1
IPFSLockPathPrefix = "IPFS"
IPFSNodeIDPathIndex = 1
IPFSBuzyLock = "Buzy"
IPFSGCLock = "GC"
)

type IPFSLock struct {
@@ -35,7 +28,7 @@ func NewIPFSLock() *IPFSLock {

// CanLock reports whether this lock can be acquired
func (l *IPFSLock) CanLock(lock distlock.Lock) error {
nodeLock, ok := l.nodeLocks[lock.Path[IPFS_NODE_ID_PATH_INDEX]]
nodeLock, ok := l.nodeLocks[lock.Path[IPFSNodeIDPathIndex]]
if !ok {
// we cannot simply return nil here: if the lock data is malformed, the lock must not be acquired either.
// an empty Provider is used to run the check.
@@ -47,7 +40,7 @@ func (l *IPFSLock) CanLock(lock distlock.Lock) error {

// Lock acquires the lock. It does not need to re-check lockability internally; the caller must call CanLock before calling this function
func (l *IPFSLock) Lock(reqID string, lock distlock.Lock) error {
nodeID := lock.Path[IPFS_NODE_ID_PATH_INDEX]
nodeID := lock.Path[IPFSNodeIDPathIndex]

nodeLock, ok := l.nodeLocks[nodeID]
if !ok {
@@ -60,7 +53,7 @@ func (l *IPFSLock) Lock(reqID string, lock distlock.Lock) error {

// Unlock releases the lock
func (l *IPFSLock) Unlock(reqID string, lock distlock.Lock) error {
nodeID := lock.Path[IPFS_NODE_ID_PATH_INDEX]
nodeID := lock.Path[IPFSNodeIDPathIndex]

nodeLock, ok := l.nodeLocks[nodeID]
if !ok {
@@ -86,18 +79,9 @@ func (l *IPFSLock) Clear() {
l.nodeLocks = make(map[string]*IPFSNodeLock)
}

type ipfsElementLock struct {
target StringLockTarget
requestIDs []string
}

type IPFSNodeLock struct {
setReadReqIDs []string
setWriteReqIDs []string
setCreateReqIDs []string

elementReadLocks []*ipfsElementLock
elementWriteLocks []*ipfsElementLock
buzyReqIDs []string
gcReqIDs []string

lockCompatibilityTable *LockCompatibilityTable
}
@@ -110,29 +94,14 @@ func NewIPFSNodeLock() *IPFSNodeLock {
}

compTable.
Column(IPFS_ELEMENT_READ_LOCK, func() bool { return len(ipfsLock.elementReadLocks) > 0 }).
Column(IPFS_ELEMENT_WRITE_LOCK, func() bool { return len(ipfsLock.elementWriteLocks) > 0 }).
Column(IPFS_SET_READ_LOCK, func() bool { return len(ipfsLock.setReadReqIDs) > 0 }).
Column(IPFS_SET_WRITE_LOCK, func() bool { return len(ipfsLock.setWriteReqIDs) > 0 }).
Column(IPFS_SET_CREATE_LOCK, func() bool { return len(ipfsLock.setCreateReqIDs) > 0 })
Column(IPFSBuzyLock, func() bool { return len(ipfsLock.buzyReqIDs) > 0 }).
Column(IPFSGCLock, func() bool { return len(ipfsLock.gcReqIDs) > 0 })

comp := LockCompatible()
uncp := LockUncompatible()
trgt := LockSpecial(func(lock distlock.Lock, testLockName string) bool {
strTar := lock.Target.(StringLockTarget)
if testLockName == IPFS_ELEMENT_READ_LOCK {
// the lock can be taken if no held lock's target conflicts with the current lock's target
return lo.NoneBy(ipfsLock.elementReadLocks, func(other *ipfsElementLock) bool { return strTar.IsConflict(&other.target) })
}

return lo.NoneBy(ipfsLock.elementWriteLocks, func(other *ipfsElementLock) bool { return strTar.IsConflict(&other.target) })
})

compTable.MustRow(comp, trgt, comp, uncp, comp)
compTable.MustRow(trgt, trgt, uncp, uncp, uncp)
compTable.MustRow(comp, uncp, comp, uncp, uncp)
compTable.MustRow(uncp, uncp, uncp, uncp, uncp)
compTable.MustRow(comp, uncp, uncp, uncp, comp)

compTable.MustRow(comp, uncp)
compTable.MustRow(uncp, comp)

return &ipfsLock
}
@@ -145,18 +114,10 @@ func (l *IPFSNodeLock) CanLock(lock distlock.Lock) error {
// Lock acquires the lock
func (l *IPFSNodeLock) Lock(reqID string, lock distlock.Lock) error {
switch lock.Name {
case IPFS_SET_READ_LOCK:
l.setReadReqIDs = append(l.setReadReqIDs, reqID)
case IPFS_SET_WRITE_LOCK:
l.setWriteReqIDs = append(l.setWriteReqIDs, reqID)
case IPFS_SET_CREATE_LOCK:
l.setCreateReqIDs = append(l.setCreateReqIDs, reqID)

case IPFS_ELEMENT_READ_LOCK:
l.elementReadLocks = l.addElementLock(lock, l.elementReadLocks, reqID)
case IPFS_ELEMENT_WRITE_LOCK:
l.elementWriteLocks = l.addElementLock(lock, l.elementWriteLocks, reqID)

case IPFSBuzyLock:
l.buzyReqIDs = append(l.buzyReqIDs, reqID)
case IPFSGCLock:
l.gcReqIDs = append(l.gcReqIDs, reqID)
default:
return fmt.Errorf("unknown lock name: %s", lock.Name)
}
@@ -164,54 +125,16 @@ func (l *IPFSNodeLock) Lock(reqID string, lock distlock.Lock) error {
return nil
}

func (l *IPFSNodeLock) addElementLock(lock distlock.Lock, locks []*ipfsElementLock, reqID string) []*ipfsElementLock {
strTarget := lock.Target.(StringLockTarget)
lck, ok := lo.Find(locks, func(l *ipfsElementLock) bool { return strTarget.IsConflict(&l.target) })
if !ok {
lck = &ipfsElementLock{
target: strTarget,
}
locks = append(locks, lck)
}

lck.requestIDs = append(lck.requestIDs, reqID)
return locks
}

// Unlock releases the lock
func (l *IPFSNodeLock) Unlock(reqID string, lock distlock.Lock) error {
switch lock.Name {
case IPFS_SET_READ_LOCK:
l.setReadReqIDs = mylo.Remove(l.setReadReqIDs, reqID)
case IPFS_SET_WRITE_LOCK:
l.setWriteReqIDs = mylo.Remove(l.setWriteReqIDs, reqID)
case IPFS_SET_CREATE_LOCK:
l.setCreateReqIDs = mylo.Remove(l.setCreateReqIDs, reqID)

case IPFS_ELEMENT_READ_LOCK:
l.elementReadLocks = l.removeElementLock(lock, l.elementReadLocks, reqID)
case IPFS_ELEMENT_WRITE_LOCK:
l.elementWriteLocks = l.removeElementLock(lock, l.elementWriteLocks, reqID)

case IPFSBuzyLock:
l.buzyReqIDs = mylo.Remove(l.buzyReqIDs, reqID)
case IPFSGCLock:
l.gcReqIDs = mylo.Remove(l.gcReqIDs, reqID)
default:
return fmt.Errorf("unknown lock name: %s", lock.Name)
}

return nil
}

func (l *IPFSNodeLock) removeElementLock(lock distlock.Lock, locks []*ipfsElementLock, reqID string) []*ipfsElementLock {
strTarget := lock.Target.(StringLockTarget)
lck, index, ok := lo.FindIndexOf(locks, func(l *ipfsElementLock) bool { return strTarget.IsConflict(&l.target) })
if !ok {
return locks
}

lck.requestIDs = mylo.Remove(lck.requestIDs, reqID)

if len(lck.requestIDs) == 0 {
locks = mylo.RemoveAt(locks, index)
}

return locks
}
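The two MustRow calls above encode a 2x2 compatibility table (row: requested lock, column: lock kind already held): Buzy is compatible with Buzy, GC with GC, and the two kinds exclude each other. A toy model of that behavior, independent of the real LockCompatibilityTable:

package main

import "fmt"

// toy stand-in for the per-node lock state: counts of held Buzy and GC locks.
type nodeLock struct{ buzy, gc int }

// canLock mirrors MustRow(comp, uncp) / MustRow(uncp, comp): a Buzy request
// is blocked only by held GC locks, and a GC request only by held Buzy locks.
func (l nodeLock) canLock(name string) bool {
	switch name {
	case "Buzy":
		return l.gc == 0
	case "GC":
		return l.buzy == 0
	}
	return false
}

func main() {
	l := nodeLock{buzy: 2}
	fmt.Println(l.canLock("Buzy")) // true: Buzy holders stack
	fmt.Println(l.canLock("GC"))   // false: GC waits for all Buzy holders
}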

+15 -25 common/pkgs/distlock/lockprovider/ipfs_lock_test.go

@@ -15,59 +15,45 @@ func Test_IPFSLock(t *testing.T) {
wantOK bool
}{
{
title: "same node, same Read lock",
title: "same node, same Buzy lock",
initLocks: []distlock.Lock{
{
Path: []string{IPFSLockPathPrefix, "node1"},
Name: IPFS_SET_READ_LOCK,
Name: IPFSBuzyLock,
},
},
doLock: distlock.Lock{
Path: []string{IPFSLockPathPrefix, "node1"},
Name: IPFS_SET_READ_LOCK,
Name: IPFSBuzyLock,
},
wantOK: true,
},
{
title: "same node, same Write lock",
title: "same node, same GC lock",
initLocks: []distlock.Lock{
{
Path: []string{IPFSLockPathPrefix, "node1"},
Name: IPFS_SET_WRITE_LOCK,
Name: IPFSGCLock,
},
},
doLock: distlock.Lock{
Path: []string{IPFSLockPathPrefix, "node1"},
Name: IPFS_SET_WRITE_LOCK,
},
wantOK: false,
},
{
title: "different nodes, same Write lock",
initLocks: []distlock.Lock{
{
Path: []string{IPFSLockPathPrefix, "node1"},
Name: IPFS_SET_WRITE_LOCK,
},
},
doLock: distlock.Lock{
Path: []string{IPFSLockPathPrefix, "node2"},
Name: IPFS_SET_WRITE_LOCK,
Name: IPFSGCLock,
},
wantOK: true,
},
{
title: "Read and Write locks on the same target",
title: "Buzy and GC at the same time",
initLocks: []distlock.Lock{
{
Path: []string{IPFSLockPathPrefix, "node1"},
Name: IPFS_ELEMENT_WRITE_LOCK,
Name: IPFSBuzyLock,
Target: *NewStringLockTarget(),
},
},
doLock: distlock.Lock{
Path: []string{IPFSLockPathPrefix, "node1"},
Name: IPFS_ELEMENT_WRITE_LOCK,
Name: IPFSGCLock,
Target: *NewStringLockTarget(),
},
wantOK: false,
@@ -96,16 +82,20 @@ func Test_IPFSLock(t *testing.T) {

lock := distlock.Lock{
Path: []string{IPFSLockPathPrefix, "node1"},
Name: IPFS_SET_WRITE_LOCK,
Name: IPFSBuzyLock,
}

ipfsLock.Lock("req1", lock)

err := ipfsLock.CanLock(lock)
So(err, ShouldNotBeNil)
So(err, ShouldBeNil)

ipfsLock.Unlock("req1", lock)

lock = distlock.Lock{
Path: []string{IPFSLockPathPrefix, "node1"},
Name: IPFSGCLock,
}
err = ipfsLock.CanLock(lock)
So(err, ShouldBeNil)
})


+10 -72 common/pkgs/distlock/lockprovider/metadata_lock.go

@@ -10,14 +10,7 @@ import (

const (
MetadataLockPathPrefix = "Metadata"

METADATA_SET_READ_LOCK = "SetRead"
METADATA_SET_WRITE_LOCK = "SetWrite"
METADATA_SET_CREATE_LOCK = "SetCreate"

METADATA_ELEMENT_READ_LOCK = "ElementRead"
METADATA_ELEMENT_WRITE_LOCK = "ElementWrite"
METADATA_ELEMENT_CREATE_LOCK = "ElementCreate"
MetadataCreateLock = "Create"
)

type metadataElementLock struct {
@@ -26,13 +19,7 @@ type metadataElementLock struct {
}

type MetadataLock struct {
setReadReqIDs []string
setWriteReqIDs []string
setCreateReqIDs []string

elementReadLocks []*metadataElementLock
elementWriteLocks []*metadataElementLock
elementCreateLocks []*metadataElementLock
createReqIDs []*metadataElementLock

lockCompatibilityTable LockCompatibilityTable
}
@@ -46,35 +33,13 @@ func NewMetadataLock() *MetadataLock {
compTable := &metadataLock.lockCompatibilityTable

compTable.
Column(METADATA_ELEMENT_READ_LOCK, func() bool { return len(metadataLock.elementReadLocks) > 0 }).
Column(METADATA_ELEMENT_WRITE_LOCK, func() bool { return len(metadataLock.elementWriteLocks) > 0 }).
Column(METADATA_ELEMENT_CREATE_LOCK, func() bool { return len(metadataLock.elementCreateLocks) > 0 }).
Column(METADATA_SET_READ_LOCK, func() bool { return len(metadataLock.setReadReqIDs) > 0 }).
Column(METADATA_SET_WRITE_LOCK, func() bool { return len(metadataLock.setWriteReqIDs) > 0 }).
Column(METADATA_SET_CREATE_LOCK, func() bool { return len(metadataLock.setCreateReqIDs) > 0 })

comp := LockCompatible()
uncp := LockUncompatible()
Column(MetadataCreateLock, func() bool { return len(metadataLock.createReqIDs) > 0 })
trgt := LockSpecial(func(lock distlock.Lock, testLockName string) bool {
strTar := lock.Target.(StringLockTarget)
if testLockName == METADATA_ELEMENT_READ_LOCK {
// the lock can be taken if no held lock's target conflicts with the current lock's target
return lo.NoneBy(metadataLock.elementReadLocks, func(other *metadataElementLock) bool { return strTar.IsConflict(&other.target) })
}

if testLockName == METADATA_ELEMENT_WRITE_LOCK {
return lo.NoneBy(metadataLock.elementWriteLocks, func(other *metadataElementLock) bool { return strTar.IsConflict(&other.target) })
}

return lo.NoneBy(metadataLock.elementCreateLocks, func(other *metadataElementLock) bool { return strTar.IsConflict(&other.target) })
return lo.NoneBy(metadataLock.createReqIDs, func(other *metadataElementLock) bool { return strTar.IsConflict(&other.target) })
})

compTable.MustRow(comp, trgt, comp, comp, uncp, comp)
compTable.MustRow(trgt, trgt, comp, uncp, uncp, comp)
compTable.MustRow(comp, comp, trgt, uncp, uncp, uncp)
compTable.MustRow(comp, uncp, uncp, comp, uncp, uncp)
compTable.MustRow(uncp, uncp, uncp, uncp, uncp, uncp)
compTable.MustRow(comp, comp, uncp, uncp, uncp, uncp)
compTable.MustRow(trgt)

return &metadataLock
}
@@ -87,19 +52,8 @@ func (l *MetadataLock) CanLock(lock distlock.Lock) error {
// Lock acquires the lock
func (l *MetadataLock) Lock(reqID string, lock distlock.Lock) error {
switch lock.Name {
case METADATA_SET_READ_LOCK:
l.setReadReqIDs = append(l.setReadReqIDs, reqID)
case METADATA_SET_WRITE_LOCK:
l.setWriteReqIDs = append(l.setWriteReqIDs, reqID)
case METADATA_SET_CREATE_LOCK:
l.setCreateReqIDs = append(l.setCreateReqIDs, reqID)

case METADATA_ELEMENT_READ_LOCK:
l.elementReadLocks = l.addElementLock(lock, l.elementReadLocks, reqID)
case METADATA_ELEMENT_WRITE_LOCK:
l.elementWriteLocks = l.addElementLock(lock, l.elementWriteLocks, reqID)
case METADATA_ELEMENT_CREATE_LOCK:
l.elementCreateLocks = l.addElementLock(lock, l.elementCreateLocks, reqID)
case MetadataCreateLock:
l.createReqIDs = l.addElementLock(lock, l.createReqIDs, reqID)

default:
return fmt.Errorf("unknown lock name: %s", lock.Name)
@@ -125,19 +79,8 @@ func (l *MetadataLock) addElementLock(lock distlock.Lock, locks []*metadataEleme
// Unlock releases the lock
func (l *MetadataLock) Unlock(reqID string, lock distlock.Lock) error {
switch lock.Name {
case METADATA_SET_READ_LOCK:
l.setReadReqIDs = mylo.Remove(l.setReadReqIDs, reqID)
case METADATA_SET_WRITE_LOCK:
l.setWriteReqIDs = mylo.Remove(l.setWriteReqIDs, reqID)
case METADATA_SET_CREATE_LOCK:
l.setCreateReqIDs = mylo.Remove(l.setCreateReqIDs, reqID)

case METADATA_ELEMENT_READ_LOCK:
l.elementReadLocks = l.removeElementLock(lock, l.elementReadLocks, reqID)
case METADATA_ELEMENT_WRITE_LOCK:
l.elementWriteLocks = l.removeElementLock(lock, l.elementWriteLocks, reqID)
case METADATA_ELEMENT_CREATE_LOCK:
l.elementCreateLocks = l.removeElementLock(lock, l.elementCreateLocks, reqID)
case MetadataCreateLock:
l.createReqIDs = l.removeElementLock(lock, l.createReqIDs, reqID)

default:
return fmt.Errorf("unknown lock name: %s", lock.Name)
@@ -175,10 +118,5 @@ func (l *MetadataLock) ParseTargetString(targetStr string) (any, error) {

// Clear resets all internal state
func (l *MetadataLock) Clear() {
l.setReadReqIDs = nil
l.setWriteReqIDs = nil
l.setCreateReqIDs = nil
l.elementReadLocks = nil
l.elementWriteLocks = nil
l.elementCreateLocks = nil
l.createReqIDs = nil
}
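After the rewrite, MetadataLock keeps a single Create lock whose conflicts are decided per target: the one-row table MustRow(trgt) means two Create locks collide only when their StringLockTargets conflict. A toy illustration, with exact target equality standing in for StringLockTarget.IsConflict (whose real semantics live in the repo):

package main

import "fmt"

// canLock: a Create request conflicts only with an already-held Create lock
// on the same target.
func canLock(held [][2]string, req [2]string) bool {
	for _, t := range held {
		if t == req {
			return false
		}
	}
	return true
}

func main() {
	held := [][2]string{{"user1", "bucketA"}}
	fmt.Println(canLock(held, [2]string{"user1", "bucketB"})) // true: different target
	fmt.Println(canLock(held, [2]string{"user1", "bucketA"})) // false: same target
}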

+28 -114 common/pkgs/distlock/lockprovider/storage_lock.go

@@ -3,23 +3,15 @@ package lockprovider
import (
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
mylo "gitlink.org.cn/cloudream/common/utils/lo"
)

const (
StorageLockPathPrefix = "Storage"

STORAGE_SET_READ_LOCK = "SetRead"
STORAGE_SET_WRITE_LOCK = "SetWrite"
STORAGE_SET_CREATE_LOCK = "SetCreate"

STORAGE_ELEMENT_READ_LOCK = "ElementRead"
STORAGE_ELEMENT_WRITE_LOCK = "ElementWrite"
STORAGE_ELEMENT_CREATE_LOCK = "ElementCreate"

STORAGE_STORAGE_ID_PATH_INDEX = 1
StorageLockPathPrefix = "Storage"
StorageNodeIDPathIndex = 1
StorageBuzyLock = "Buzy"
StorageGCLock = "GC"
)

type StorageLock struct {
@@ -36,7 +28,7 @@ func NewStorageLock() *StorageLock {

// CanLock reports whether this lock can be acquired
func (l *StorageLock) CanLock(lock distlock.Lock) error {
nodeLock, ok := l.nodeLocks[lock.Path[STORAGE_STORAGE_ID_PATH_INDEX]]
nodeLock, ok := l.nodeLocks[lock.Path[StorageNodeIDPathIndex]]
if !ok {
// we cannot simply return nil here: if the lock data is malformed, the lock must not be acquired either.
// an empty Provider is used to run the check.
@@ -48,7 +40,7 @@ func (l *StorageLock) CanLock(lock distlock.Lock) error {

// Lock acquires the lock. It does not need to re-check lockability internally; the caller must call CanLock before calling this function
func (l *StorageLock) Lock(reqID string, lock distlock.Lock) error {
nodeID := lock.Path[STORAGE_STORAGE_ID_PATH_INDEX]
nodeID := lock.Path[StorageNodeIDPathIndex]

nodeLock, ok := l.nodeLocks[nodeID]
if !ok {
@@ -61,7 +53,7 @@ func (l *StorageLock) Lock(reqID string, lock distlock.Lock) error {

// Unlock releases the lock
func (l *StorageLock) Unlock(reqID string, lock distlock.Lock) error {
nodeID := lock.Path[STORAGE_STORAGE_ID_PATH_INDEX]
nodeID := lock.Path[StorageNodeIDPathIndex]

nodeLock, ok := l.nodeLocks[nodeID]
if !ok {
@@ -87,63 +79,31 @@ func (l *StorageLock) Clear() {
l.nodeLocks = make(map[string]*StorageNodeLock)
}

type storageElementLock struct {
target StringLockTarget
requestIDs []string
}

type StorageNodeLock struct {
setReadReqIDs []string
setWriteReqIDs []string
setCreateReqIDs []string

elementReadLocks []*storageElementLock
elementWriteLocks []*storageElementLock
elementCreateLocks []*storageElementLock
buzyReqIDs []string
gcReqIDs []string

lockCompatibilityTable LockCompatibilityTable
lockCompatibilityTable *LockCompatibilityTable
}

func NewStorageNodeLock() *StorageNodeLock {
compTable := &LockCompatibilityTable{}

storageLock := StorageNodeLock{
lockCompatibilityTable: LockCompatibilityTable{},
StorageLock := StorageNodeLock{
lockCompatibilityTable: compTable,
}

compTable := &storageLock.lockCompatibilityTable

compTable.
Column(STORAGE_ELEMENT_READ_LOCK, func() bool { return len(storageLock.elementReadLocks) > 0 }).
Column(STORAGE_ELEMENT_WRITE_LOCK, func() bool { return len(storageLock.elementWriteLocks) > 0 }).
Column(STORAGE_ELEMENT_CREATE_LOCK, func() bool { return len(storageLock.elementCreateLocks) > 0 }).
Column(STORAGE_SET_READ_LOCK, func() bool { return len(storageLock.setReadReqIDs) > 0 }).
Column(STORAGE_SET_WRITE_LOCK, func() bool { return len(storageLock.setWriteReqIDs) > 0 }).
Column(STORAGE_SET_CREATE_LOCK, func() bool { return len(storageLock.setCreateReqIDs) > 0 })
Column(StorageBuzyLock, func() bool { return len(StorageLock.buzyReqIDs) > 0 }).
Column(StorageGCLock, func() bool { return len(StorageLock.gcReqIDs) > 0 })

comp := LockCompatible()
uncp := LockUncompatible()
trgt := LockSpecial(func(lock distlock.Lock, testLockName string) bool {
strTar := lock.Target.(StringLockTarget)
if testLockName == STORAGE_ELEMENT_READ_LOCK {
// 如果没有任何锁的锁对象与当前的锁对象冲突,那么这个锁可以加
return lo.NoneBy(storageLock.elementReadLocks, func(other *storageElementLock) bool { return strTar.IsConflict(&other.target) })
}

if testLockName == STORAGE_ELEMENT_WRITE_LOCK {
return lo.NoneBy(storageLock.elementWriteLocks, func(other *storageElementLock) bool { return strTar.IsConflict(&other.target) })
}

return lo.NoneBy(storageLock.elementCreateLocks, func(other *storageElementLock) bool { return strTar.IsConflict(&other.target) })
})

compTable.MustRow(comp, trgt, comp, comp, uncp, comp)
compTable.MustRow(trgt, trgt, comp, uncp, uncp, comp)
compTable.MustRow(comp, comp, trgt, uncp, uncp, uncp)
compTable.MustRow(comp, uncp, uncp, comp, uncp, uncp)
compTable.MustRow(uncp, uncp, uncp, uncp, uncp, uncp)
compTable.MustRow(comp, comp, uncp, uncp, uncp, uncp)

return &storageLock

compTable.MustRow(comp, uncp)
compTable.MustRow(uncp, comp)

return &StorageLock
}

// CanLock reports whether this lock can be acquired
@@ -154,18 +114,10 @@ func (l *StorageNodeLock) CanLock(lock distlock.Lock) error {
// Lock acquires the lock
func (l *StorageNodeLock) Lock(reqID string, lock distlock.Lock) error {
switch lock.Name {
case STORAGE_SET_READ_LOCK:
l.setReadReqIDs = append(l.setReadReqIDs, reqID)
case STORAGE_SET_WRITE_LOCK:
l.setWriteReqIDs = append(l.setWriteReqIDs, reqID)
case STORAGE_SET_CREATE_LOCK:
l.setCreateReqIDs = append(l.setCreateReqIDs, reqID)

case STORAGE_ELEMENT_READ_LOCK:
l.elementReadLocks = l.addElementLock(lock, l.elementReadLocks, reqID)
case STORAGE_ELEMENT_WRITE_LOCK:
l.elementWriteLocks = l.addElementLock(lock, l.elementWriteLocks, reqID)

case StorageBuzyLock:
l.buzyReqIDs = append(l.buzyReqIDs, reqID)
case StorageGCLock:
l.gcReqIDs = append(l.gcReqIDs, reqID)
default:
return fmt.Errorf("unknown lock name: %s", lock.Name)
}
@@ -173,54 +125,16 @@ func (l *StorageNodeLock) Lock(reqID string, lock distlock.Lock) error {
return nil
}

func (l *StorageNodeLock) addElementLock(lock distlock.Lock, locks []*storageElementLock, reqID string) []*storageElementLock {
strTarget := lock.Target.(StringLockTarget)
lck, ok := lo.Find(locks, func(l *storageElementLock) bool { return strTarget.IsConflict(&l.target) })
if !ok {
lck = &storageElementLock{
target: strTarget,
}
locks = append(locks, lck)
}

lck.requestIDs = append(lck.requestIDs, reqID)
return locks
}

// Unlock releases the lock
func (l *StorageNodeLock) Unlock(reqID string, lock distlock.Lock) error {
switch lock.Name {
case STORAGE_SET_READ_LOCK:
l.setReadReqIDs = mylo.Remove(l.setReadReqIDs, reqID)
case STORAGE_SET_WRITE_LOCK:
l.setWriteReqIDs = mylo.Remove(l.setWriteReqIDs, reqID)
case STORAGE_SET_CREATE_LOCK:
l.setCreateReqIDs = mylo.Remove(l.setCreateReqIDs, reqID)

case STORAGE_ELEMENT_READ_LOCK:
l.elementReadLocks = l.removeElementLock(lock, l.elementReadLocks, reqID)
case STORAGE_ELEMENT_WRITE_LOCK:
l.elementWriteLocks = l.removeElementLock(lock, l.elementWriteLocks, reqID)

case StorageBuzyLock:
l.buzyReqIDs = mylo.Remove(l.buzyReqIDs, reqID)
case StorageGCLock:
l.gcReqIDs = mylo.Remove(l.gcReqIDs, reqID)
default:
return fmt.Errorf("unknown lock name: %s", lock.Name)
}

return nil
}

func (l *StorageNodeLock) removeElementLock(lock distlock.Lock, locks []*storageElementLock, reqID string) []*storageElementLock {
strTarget := lock.Target.(StringLockTarget)
lck, index, ok := lo.FindIndexOf(locks, func(l *storageElementLock) bool { return strTarget.IsConflict(&l.target) })
if !ok {
return locks
}

lck.requestIDs = mylo.Remove(lck.requestIDs, reqID)

if len(lck.requestIDs) == 0 {
locks = mylo.RemoveAt(locks, index)
}

return locks
}

+4 -31 common/pkgs/distlock/reqbuilder/ipfs.go

@@ -15,46 +15,19 @@ type IPFSLockReqBuilder struct {
func (b *LockRequestBuilder) IPFS() *IPFSLockReqBuilder {
return &IPFSLockReqBuilder{LockRequestBuilder: b}
}
func (b *IPFSLockReqBuilder) ReadOneRep(nodeID cdssdk.NodeID, fileHash string) *IPFSLockReqBuilder {
func (b *IPFSLockReqBuilder) Buzy(nodeID cdssdk.NodeID) *IPFSLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(nodeID),
Name: lockprovider.IPFS_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(fileHash),
})
return b
}

func (b *IPFSLockReqBuilder) WriteOneRep(nodeID cdssdk.NodeID, fileHash string) *IPFSLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(nodeID),
Name: lockprovider.IPFS_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(fileHash),
})
return b
}

func (b *IPFSLockReqBuilder) ReadAnyRep(nodeID cdssdk.NodeID) *IPFSLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(nodeID),
Name: lockprovider.IPFS_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

func (b *IPFSLockReqBuilder) WriteAnyRep(nodeID cdssdk.NodeID) *IPFSLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(nodeID),
Name: lockprovider.IPFS_SET_WRITE_LOCK,
Name: lockprovider.IPFSBuzyLock,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

func (b *IPFSLockReqBuilder) CreateAnyRep(nodeID cdssdk.NodeID) *IPFSLockReqBuilder {
func (b *IPFSLockReqBuilder) GC(nodeID cdssdk.NodeID) *IPFSLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(nodeID),
Name: lockprovider.IPFS_SET_CREATE_LOCK,
Name: lockprovider.IPFSGCLock,
Target: *lockprovider.NewStringLockTarget(),
})
return b
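For reference, this is how the slimmed-down builder is used at the call sites earlier in this commit (cache_move_package.go, create_package.go): take Buzy on every node whose files are in use, so that a concurrent GC on those nodes has to wait.

// from the call sites earlier in this commit, lightly abridged:
mutex, err := reqbuilder.NewBuilder().
	IPFS().Buzy(*stgglb.Local.NodeID).
	MutexLock(ctx.distlock)
if err != nil {
	return fmt.Errorf("acquiring distlock: %w", err)
}
defer mutex.Unlock()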


+0 -64 common/pkgs/distlock/reqbuilder/metadata_bucket.go

@@ -1,64 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type MetadataBucketLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) Bucket() *MetadataBucketLockReqBuilder {
return &MetadataBucketLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataBucketLockReqBuilder) ReadOne(bucketID cdssdk.BucketID) *MetadataBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Bucket"),
Name: lockprovider.METADATA_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(bucketID),
})
return b
}
func (b *MetadataBucketLockReqBuilder) WriteOne(bucketID cdssdk.BucketID) *MetadataBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Bucket"),
Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(bucketID),
})
return b
}
func (b *MetadataBucketLockReqBuilder) CreateOne(userID cdssdk.UserID, bucketName string) *MetadataBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Bucket"),
Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, bucketName),
})
return b
}
func (b *MetadataBucketLockReqBuilder) ReadAny() *MetadataBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Bucket"),
Name: lockprovider.METADATA_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataBucketLockReqBuilder) WriteAny() *MetadataBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Bucket"),
Name: lockprovider.METADATA_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataBucketLockReqBuilder) CreateAny() *MetadataBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Bucket"),
Name: lockprovider.METADATA_SET_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

+ 0
- 64
common/pkgs/distlock/reqbuilder/metadata_cache.go

@@ -1,64 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type MetadataCacheLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) Cache() *MetadataCacheLockReqBuilder {
return &MetadataCacheLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataCacheLockReqBuilder) ReadOne(nodeID cdssdk.NodeID, fileHash string) *MetadataCacheLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Cache"),
Name: lockprovider.METADATA_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(nodeID, fileHash),
})
return b
}
func (b *MetadataCacheLockReqBuilder) WriteOne(nodeID cdssdk.NodeID, fileHash string) *MetadataCacheLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Cache"),
Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(nodeID, fileHash),
})
return b
}
func (b *MetadataCacheLockReqBuilder) CreateOne(nodeID cdssdk.NodeID, fileHash string) *MetadataCacheLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Cache"),
Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(nodeID, fileHash),
})
return b
}
func (b *MetadataCacheLockReqBuilder) ReadAny() *MetadataCacheLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Cache"),
Name: lockprovider.METADATA_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataCacheLockReqBuilder) WriteAny() *MetadataCacheLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Cache"),
Name: lockprovider.METADATA_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataCacheLockReqBuilder) CreateAny() *MetadataCacheLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Cache"),
Name: lockprovider.METADATA_SET_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

+ 0
- 64
common/pkgs/distlock/reqbuilder/metadata_node.go

@@ -1,64 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type MetadataNodeLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) Node() *MetadataNodeLockReqBuilder {
return &MetadataNodeLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataNodeLockReqBuilder) ReadOne(nodeID cdssdk.NodeID) *MetadataNodeLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Node"),
Name: lockprovider.METADATA_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(nodeID),
})
return b
}
func (b *MetadataNodeLockReqBuilder) WriteOne(nodeID cdssdk.NodeID) *MetadataNodeLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Node"),
Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(nodeID),
})
return b
}
func (b *MetadataNodeLockReqBuilder) CreateOne() *MetadataNodeLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Node"),
Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataNodeLockReqBuilder) ReadAny() *MetadataNodeLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Node"),
Name: lockprovider.METADATA_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataNodeLockReqBuilder) WriteAny() *MetadataNodeLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Node"),
Name: lockprovider.METADATA_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataNodeLockReqBuilder) CreateAny() *MetadataNodeLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Node"),
Name: lockprovider.METADATA_SET_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

+ 4
- 45
common/pkgs/distlock/reqbuilder/metadata_object.go

@@ -2,11 +2,10 @@ package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

// TODO: Consider adding a PackageID-based lock so operations on Objects in different Packages can run in parallel

type MetadataObjectLockReqBuilder struct {
*MetadataLockReqBuilder
}
@@ -15,51 +14,11 @@ func (b *MetadataLockReqBuilder) Object() *MetadataObjectLockReqBuilder {
return &MetadataObjectLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataObjectLockReqBuilder) ReadOne(objectID int64) *MetadataObjectLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Object"),
Name: lockprovider.METADATA_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(objectID),
})
return b
}
func (b *MetadataObjectLockReqBuilder) WriteOne(objectID int64) *MetadataObjectLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Object"),
Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(objectID),
})
return b
}
func (b *MetadataObjectLockReqBuilder) CreateOne(bucketID int64, objectName string) *MetadataObjectLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Object"),
Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(bucketID, objectName),
})
return b
}
func (b *MetadataObjectLockReqBuilder) ReadAny() *MetadataObjectLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Object"),
Name: lockprovider.METADATA_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataObjectLockReqBuilder) WriteAny() *MetadataObjectLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Object"),
Name: lockprovider.METADATA_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataObjectLockReqBuilder) CreateOne(packageID cdssdk.PackageID, objectPath string) *MetadataObjectLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Object"),
Name: lockprovider.MetadataCreateLock,
Target: *lockprovider.NewStringLockTarget().Add(packageID, objectPath),
})
return b
}
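Locks from different namespaces can still be queued on a single builder and acquired together, as the lock-acquisition sites elsewhere in this commit do. A hedged sketch with placeholder identifiers (packageID, objectPath, nodeID, svc are assumptions):

	mutex, err := reqbuilder.NewBuilder().
		Metadata().Object().CreateOne(packageID, objectPath).
		IPFS().Buzy(nodeID).
		MutexLock(svc)
	if err != nil {
		return fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer mutex.Unlock()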

+ 0
- 63
common/pkgs/distlock/reqbuilder/metadata_object_block.go

@@ -1,63 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type MetadataObjectBlockLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) ObjectBlock() *MetadataObjectBlockLockReqBuilder {
return &MetadataObjectBlockLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataObjectBlockLockReqBuilder) ReadOne(objectID int) *MetadataObjectBlockLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectBlock"),
Name: lockprovider.METADATA_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(objectID),
})
return b
}
func (b *MetadataObjectBlockLockReqBuilder) WriteOne(objectID int) *MetadataObjectBlockLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectBlock"),
Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(objectID),
})
return b
}
func (b *MetadataObjectBlockLockReqBuilder) CreateOne() *MetadataObjectBlockLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectBlock"),
Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataObjectBlockLockReqBuilder) ReadAny() *MetadataObjectBlockLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectBlock"),
Name: lockprovider.METADATA_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataObjectBlockLockReqBuilder) WriteAny() *MetadataObjectBlockLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectBlock"),
Name: lockprovider.METADATA_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataObjectBlockLockReqBuilder) CreateAny() *MetadataObjectBlockLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectBlock"),
Name: lockprovider.METADATA_SET_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

+ 0
- 63
common/pkgs/distlock/reqbuilder/metadata_object_rep.go

@@ -1,63 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type MetadataObjectRepLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) ObjectRep() *MetadataObjectRepLockReqBuilder {
return &MetadataObjectRepLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataObjectRepLockReqBuilder) ReadOne(objectID int64) *MetadataObjectRepLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectRep"),
Name: lockprovider.METADATA_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(objectID),
})
return b
}
func (b *MetadataObjectRepLockReqBuilder) WriteOne(objectID int64) *MetadataObjectRepLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectRep"),
Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(objectID),
})
return b
}
func (b *MetadataObjectRepLockReqBuilder) CreateOne() *MetadataObjectRepLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectRep"),
Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataObjectRepLockReqBuilder) ReadAny() *MetadataObjectRepLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectRep"),
Name: lockprovider.METADATA_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataObjectRepLockReqBuilder) WriteAny() *MetadataObjectRepLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectRep"),
Name: lockprovider.METADATA_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataObjectRepLockReqBuilder) CreateAny() *MetadataObjectRepLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("ObjectRep"),
Name: lockprovider.METADATA_SET_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

+ 0
- 64
common/pkgs/distlock/reqbuilder/metadata_package.go

@@ -1,64 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type MetadataPackageLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) Package() *MetadataPackageLockReqBuilder {
return &MetadataPackageLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataPackageLockReqBuilder) ReadOne(packageID cdssdk.PackageID) *MetadataPackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Package"),
Name: lockprovider.METADATA_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(packageID),
})
return b
}
func (b *MetadataPackageLockReqBuilder) WriteOne(packageID cdssdk.PackageID) *MetadataPackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Package"),
Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(packageID),
})
return b
}
func (b *MetadataPackageLockReqBuilder) CreateOne(bucketID cdssdk.BucketID, packageName string) *MetadataPackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Package"),
Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(bucketID, packageName),
})
return b
}
func (b *MetadataPackageLockReqBuilder) ReadAny() *MetadataPackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Package"),
Name: lockprovider.METADATA_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataPackageLockReqBuilder) WriteAny() *MetadataPackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Package"),
Name: lockprovider.METADATA_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataPackageLockReqBuilder) CreateAny() *MetadataPackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Package"),
Name: lockprovider.METADATA_SET_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

+ 3
- 43
common/pkgs/distlock/reqbuilder/metadata_storage_package.go

@@ -14,51 +14,11 @@ func (b *MetadataLockReqBuilder) StoragePackage() *MetadataStoragePackageLockReq
return &MetadataStoragePackageLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataStoragePackageLockReqBuilder) CreateOne(userID cdssdk.UserID, storageID cdssdk.StorageID, packageID cdssdk.PackageID) *MetadataStoragePackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("StoragePackage"),
Name: lockprovider.MetadataCreateLock,
Target: *lockprovider.NewStringLockTarget().Add(userID, storageID, packageID),
})
return b
}
func (b *MetadataStoragePackageLockReqBuilder) WriteOne(storageID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID) *MetadataStoragePackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("StoragePackage"),
Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(storageID, userID, packageID),
})
return b
}
func (b *MetadataStoragePackageLockReqBuilder) CreateOne(storageID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID) *MetadataStoragePackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("StoragePackage"),
Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(storageID, userID, packageID),
})
return b
}
func (b *MetadataStoragePackageLockReqBuilder) ReadAny() *MetadataStoragePackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("StoragePackage"),
Name: lockprovider.METADATA_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataStoragePackageLockReqBuilder) WriteAny() *MetadataStoragePackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("StoragePackage"),
Name: lockprovider.METADATA_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataStoragePackageLockReqBuilder) CreateAny() *MetadataStoragePackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("StoragePackage"),
Name: lockprovider.METADATA_SET_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

+ 0
- 64
common/pkgs/distlock/reqbuilder/metadata_user_bucket.go

@@ -1,64 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type MetadataUserBucketLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) UserBucket() *MetadataUserBucketLockReqBuilder {
return &MetadataUserBucketLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataUserBucketLockReqBuilder) ReadOne(userID cdssdk.UserID, bucketID cdssdk.BucketID) *MetadataUserBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserBucket"),
Name: lockprovider.METADATA_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, bucketID),
})
return b
}
func (b *MetadataUserBucketLockReqBuilder) WriteOne(userID cdssdk.UserID, bucketID cdssdk.BucketID) *MetadataUserBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserBucket"),
Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, bucketID),
})
return b
}
func (b *MetadataUserBucketLockReqBuilder) CreateOne(userID cdssdk.UserID, bucketID cdssdk.BucketID) *MetadataUserBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserBucket"),
Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, bucketID),
})
return b
}
func (b *MetadataUserBucketLockReqBuilder) ReadAny() *MetadataUserBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserBucket"),
Name: lockprovider.METADATA_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataUserBucketLockReqBuilder) WriteAny() *MetadataUserBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserBucket"),
Name: lockprovider.METADATA_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataUserBucketLockReqBuilder) CreateAny() *MetadataUserBucketLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserBucket"),
Name: lockprovider.METADATA_SET_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

+ 0
- 64
common/pkgs/distlock/reqbuilder/metadata_user_storage.go

@@ -1,64 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type MetadataUserStorageLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) UserStorage() *MetadataUserStorageLockReqBuilder {
return &MetadataUserStorageLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataUserStorageLockReqBuilder) ReadOne(userID cdssdk.UserID, storageID cdssdk.StorageID) *MetadataUserStorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserStorage"),
Name: lockprovider.METADATA_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, storageID),
})
return b
}
func (b *MetadataUserStorageLockReqBuilder) WriteOne(userID cdssdk.UserID, storageID cdssdk.StorageID) *MetadataUserStorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserStorage"),
Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, storageID),
})
return b
}
func (b *MetadataUserStorageLockReqBuilder) CreateOne(userID cdssdk.UserID, storageID cdssdk.StorageID) *MetadataUserStorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserStorage"),
Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, storageID),
})
return b
}
func (b *MetadataUserStorageLockReqBuilder) ReadAny() *MetadataUserStorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserStorage"),
Name: lockprovider.METADATA_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataUserStorageLockReqBuilder) WriteAny() *MetadataUserStorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserStorage"),
Name: lockprovider.METADATA_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}
func (b *MetadataUserStorageLockReqBuilder) CreateAny() *MetadataUserStorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("UserStorage"),
Name: lockprovider.METADATA_SET_CREATE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

+ 4
- 40
common/pkgs/distlock/reqbuilder/storage.go

@@ -16,55 +16,19 @@ func (b *LockRequestBuilder) Storage() *StorageLockReqBuilder {
return &StorageLockReqBuilder{LockRequestBuilder: b}
}

func (b *StorageLockReqBuilder) Buzy(storageID cdssdk.StorageID) *StorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(storageID),
Name: lockprovider.StorageBuzyLock,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

func (b *StorageLockReqBuilder) WriteOnePackage(storageID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID) *StorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(storageID),
Name: lockprovider.STORAGE_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, packageID),
})
return b
}

func (b *StorageLockReqBuilder) CreateOnePackage(storageID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID) *StorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(storageID),
Name: lockprovider.STORAGE_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, packageID),
})
return b
}

func (b *StorageLockReqBuilder) ReadAnyPackage(storageID cdssdk.StorageID) *StorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(storageID),
Name: lockprovider.STORAGE_SET_READ_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

func (b *StorageLockReqBuilder) WriteAnyPackage(storageID cdssdk.StorageID) *StorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(storageID),
Name: lockprovider.STORAGE_SET_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

func (b *StorageLockReqBuilder) GC(storageID cdssdk.StorageID) *StorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(storageID),
Name: lockprovider.StorageGCLock,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}


+ 2
- 28
common/pkgs/iterator/download_object_iterator.go

@@ -16,7 +16,6 @@ import (
stgmodels "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/ec"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)
@@ -265,16 +264,6 @@ func downloadFile(ctx *DownloadContext, node DownloadNodeInfo, fileHash string)
}

func downloadFromNode(ctx *DownloadContext, nodeID cdssdk.NodeID, nodeIP string, grpcPort int, fileHash string) (io.ReadCloser, error) {
// Acquire the lock a second time
mutex, err := reqbuilder.NewBuilder().
// Needed for downloading the file from IPFS
IPFS().ReadOneRep(nodeID, fileHash).
MutexLock(ctx.Distlock)
if err != nil {
return nil, fmt.Errorf("acquire locks failed, err: %w", err)
}

// Connect to the agent over gRPC
agtCli, err := stgglb.AgentRPCPool.Acquire(nodeIP, grpcPort)
if err != nil {
return nil, fmt.Errorf("new agent grpc client: %w", err)
@@ -286,27 +275,12 @@ func downloadFromNode(ctx *DownloadContext, nodeID cdssdk.NodeID, nodeIP string,
}

reader = myio.AfterReadClosed(reader, func(io.ReadCloser) {
mutex.Unlock()
agtCli.Close()
})
return reader, nil
}

func downloadFromLocalIPFS(ctx *DownloadContext, fileHash string) (io.ReadCloser, error) {
onClosed := func() {}
if stgglb.Local.NodeID != nil {
// Acquire the lock a second time
mutex, err := reqbuilder.NewBuilder().
// Needed for downloading the file from IPFS
IPFS().ReadOneRep(*stgglb.Local.NodeID, fileHash).
MutexLock(ctx.Distlock)
if err != nil {
return nil, fmt.Errorf("acquire locks failed, err: %w", err)
}
onClosed = func() {
mutex.Unlock()
}
}

ipfsCli, err := stgglb.IPFSPool.Acquire()
if err != nil {
return nil, fmt.Errorf("new ipfs client: %w", err)
@@ -318,7 +292,7 @@ func downloadFromLocalIPFS(ctx *DownloadContext, fileHash string) (io.ReadCloser
}

reader = myio.AfterReadClosed(reader, func(io.ReadCloser) {
onClosed()
ipfsCli.Close()
})
return reader, nil
}
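Both download paths rely on myio.AfterReadClosed to defer cleanup until the caller finishes reading. A reconstruction of its likely shape, inferred from the call sites above rather than taken from the library source:

	// AfterReadClosed wraps r so that f runs exactly once, right after r is
	// closed. This is what lets the gRPC/IPFS clients above outlive the
	// function that opened the stream.
	func AfterReadClosed(r io.ReadCloser, f func(io.ReadCloser)) io.ReadCloser {
		return &afterClosed{r: r, f: f}
	}

	type afterClosed struct {
		r io.ReadCloser
		f func(io.ReadCloser)
	}

	func (a *afterClosed) Read(p []byte) (int, error) { return a.r.Read(p) }

	func (a *afterClosed) Close() error {
		err := a.r.Close()
		if a.f != nil {
			a.f(a.r)
			a.f = nil // fire at most once
		}
		return err
	}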

+ 30
- 26
common/pkgs/mq/agent/cache.go

@@ -3,12 +3,13 @@ package agent
import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
)

type CacheService interface {
CheckCache(msg *CheckCache) (*CheckCacheResp, *mq.CodeMessage)

CacheGC(msg *CacheGC) (*CacheGCResp, *mq.CodeMessage)

StartCacheMovePackage(msg *StartCacheMovePackage) (*StartCacheMovePackageResp, *mq.CodeMessage)
WaitCacheMovePackage(msg *WaitCacheMovePackage) (*WaitCacheMovePackageResp, *mq.CodeMessage)
}
@@ -16,46 +17,49 @@ type CacheService interface {
// Check the IPFS daemon on a node
var _ = Register(Service.CheckCache)

const (
CHECK_IPFS_RESP_OP_DELETE_TEMP = "DeleteTemp"
CHECK_IPFS_RESP_OP_CREATE_TEMP = "CreateTemp"
)

type CheckCache struct {
mq.MessageBodyBase
}
type CheckCacheResp struct {
mq.MessageBodyBase
FileHashes []string `json:"fileHashes"`
}

func NewCheckCache() *CheckCache {
return &CheckCache{}
}
func NewCheckCacheResp(fileHashes []string) *CheckCacheResp {
return &CheckCacheResp{
FileHashes: fileHashes,
}
}
func (client *Client) CheckCache(msg *CheckCache, opts ...mq.RequestOption) (*CheckCacheResp, error) {
return mq.Request(Service.CheckCache, client.rabbitCli, msg, opts...)
}

// Clean up files in the cache that are no longer needed
var _ = Register(Service.CacheGC)

type CacheGC struct {
mq.MessageBodyBase
PinnedFileHashes []string `json:"pinnedFileHashes"`
}
type CacheGCResp struct {
mq.MessageBodyBase
}

func ReqCacheGC(pinnedFileHashes []string) *CacheGC {
return &CacheGC{
PinnedFileHashes: pinnedFileHashes,
}
}
func RespCacheGC() *CacheGCResp {
return &CacheGCResp{}
}
func (client *Client) CacheGC(msg *CacheGC, opts ...mq.RequestOption) (*CacheGCResp, error) {
return mq.Request(Service.CacheGC, client.rabbitCli, msg, opts...)
}
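On the agent side the handler only needs set arithmetic: every pinned hash not listed in PinnedFileHashes is a GC candidate. A self-contained sketch of that decision (the agent's actual pin-listing and unpin calls are not part of this diff, so they are omitted):

	// hashesToUnpin returns the pinned hashes that are absent from keep.
	func hashesToUnpin(pinned []string, keep []string) []string {
		keepSet := make(map[string]bool, len(keep))
		for _, h := range keep {
			keepSet[h] = true
		}
		var out []string
		for _, h := range pinned {
			if !keepSet[h] {
				out = append(out, h)
			}
		}
		return out
	}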

// Move a Package's cached files to this node
var _ = Register(Service.StartCacheMovePackage)



+ 38
- 30
common/pkgs/mq/agent/storage.go

@@ -14,6 +14,8 @@ type StorageService interface {

StorageCheck(msg *StorageCheck) (*StorageCheckResp, *mq.CodeMessage)

StorageGC(msg *StorageGC) (*StorageGCResp, *mq.CodeMessage)

StartStorageCreatePackage(msg *StartStorageCreatePackage) (*StartStorageCreatePackageResp, *mq.CodeMessage)

WaitStorageCreatePackage(msg *WaitStorageCreatePackage) (*WaitStorageCreatePackageResp, *mq.CodeMessage)
@@ -84,54 +86,60 @@ func (client *Client) WaitStorageLoadPackage(msg *WaitStorageLoadPackage, opts .
// Check a Storage
var _ = Register(Service.StorageCheck)

const (
CHECK_STORAGE_RESP_OP_DELETE = "Delete"
CHECK_STORAGE_RESP_OP_SET_NORMAL = "SetNormal"
)

type StorageCheck struct {
mq.MessageBodyBase
StorageID cdssdk.StorageID `json:"storageID"`
Directory string `json:"directory"`
}
type StorageCheckResp struct {
mq.MessageBodyBase
DirectoryState string `json:"directoryState"`
Packages []model.StoragePackage `json:"packages"`
}

func NewStorageCheck(storageID cdssdk.StorageID, directory string) *StorageCheck {
return &StorageCheck{
StorageID: storageID,
Directory: directory,
}
}
func NewStorageCheckResp(dirState string, packages []model.StoragePackage) *StorageCheckResp {
return &StorageCheckResp{
DirectoryState: dirState,
Packages: packages,
}
}
func (client *Client) StorageCheck(msg *StorageCheck, opts ...mq.RequestOption) (*StorageCheckResp, error) {
return mq.Request(Service.StorageCheck, client.rabbitCli, msg, opts...)
}

// Clean up files in the Storage that are no longer needed
var _ = Register(Service.StorageGC)

type StorageGC struct {
mq.MessageBodyBase
StorageID cdssdk.StorageID `json:"storageID"`
Directory string `json:"directory"`
Packages []model.StoragePackage `json:"packages"`
}
type StorageGCResp struct {
mq.MessageBodyBase
}

func ReqStorageGC(storageID cdssdk.StorageID, directory string, packages []model.StoragePackage) *StorageGC {
return &StorageGC{
StorageID: storageID,
Directory: directory,
Packages: packages,
}
}
func RespStorageGC() *StorageGCResp {
return &StorageGCResp{}
}
func (client *Client) StorageGC(msg *StorageGC, opts ...mq.RequestOption) (*StorageGCResp, error) {
return mq.Request(Service.StorageGC, client.rabbitCli, msg, opts...)
}

// Start the task of creating a Package by uploading from the Storage
var _ = Register(Service.StartStorageCreatePackage)



common/pkgs/mq/scanner/event/check_cache.go → common/pkgs/mq/scanner/event/agent_cache_gc.go

@@ -2,17 +2,17 @@ package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type AgentCacheGC struct {
EventBase
NodeID cdssdk.NodeID `json:"nodeID"`
}

func NewAgentCacheGC(nodeID cdssdk.NodeID) *AgentCacheGC {
return &AgentCacheGC{
NodeID: nodeID,
}
}

func init() {
Register[*AgentCacheGC]()
}

+ 3
- 5
common/pkgs/mq/scanner/event/agent_check_cache.go

@@ -4,14 +4,12 @@ import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type AgentCheckCache struct {
EventBase
NodeID cdssdk.NodeID `json:"nodeID"`
}

func NewAgentCheckCache(nodeID cdssdk.NodeID) *AgentCheckCache {
return &AgentCheckCache{
NodeID: nodeID,
}
}



+ 3
- 5
common/pkgs/mq/scanner/event/agent_check_storage.go

@@ -4,14 +4,12 @@ import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type AgentCheckStorage struct {
EventBase
StorageID cdssdk.StorageID `json:"storageID"`
}

func NewAgentCheckStorage(storageID cdssdk.StorageID) *AgentCheckStorage {
return &AgentCheckStorage{
StorageID: storageID,
}
}



+ 18
- 0
common/pkgs/mq/scanner/event/agent_storage_gc.go

@@ -0,0 +1,18 @@
package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type AgentStorageGC struct {
EventBase
StorageID cdssdk.StorageID `json:"storageID"`
}

func NewAgentStorageGC(storageID cdssdk.StorageID) *AgentStorageGC {
return &AgentStorageGC{
StorageID: storageID,
}
}

func init() {
Register[*AgentStorageGC]()
}
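A producer posts this event the same way the pre-existing events are posted; a sketch mirroring the scmq.NewPostEvent call shape used elsewhere in this repo (the svc.scanner client is an assumption):

	err := svc.scanner.PostEvent(scmq.NewPostEvent(scevt.NewAgentStorageGC(storageID), false, false))
	if err != nil {
		logger.Warnf("post event to scanner failed, err: %s", err.Error())
	}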

+ 0
- 16
common/pkgs/mq/scanner/event/check_rep_count.go

@@ -1,16 +0,0 @@
package event

type CheckRepCount struct {
EventBase
FileHashes []string `json:"fileHashes"`
}

func NewCheckRepCount(fileHashes []string) *CheckRepCount {
return &CheckRepCount{
FileHashes: fileHashes,
}
}

func init() {
Register[*CheckRepCount]()
}

+ 4
- 0
common/utils/utils.go

@@ -11,3 +11,7 @@ import (
func MakeStorageLoadPackagePath(stgDir string, userID cdssdk.UserID, packageID cdssdk.PackageID) string {
return filepath.Join(stgDir, strconv.FormatInt(int64(userID), 10), "packages", strconv.FormatInt(int64(packageID), 10))
}

func MakeStorageLoadDirectory(stgDir string, userIDStr string) string {
return filepath.Join(stgDir, userIDStr, "packages")
}
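A short worked example of the two helpers, to make the directory layout they agree on explicit:

	// Worked example (Unix separators; filepath.Join is platform-dependent):
	//   MakeStorageLoadPackagePath("/data/stg", 7, 42) -> "/data/stg/7/packages/42"
	//   MakeStorageLoadDirectory("/data/stg", "7")     -> "/data/stg/7/packages"
	// Every per-package load path therefore sits under the directory the
	// second helper names, presumably what the storage-side GC scans.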

+ 3
- 3
coordinator/internal/services/cache.go

@@ -8,9 +8,9 @@ import (
)

func (svc *Service) CachePackageMoved(msg *coormq.CachePackageMoved) (*coormq.CachePackageMovedResp, *mq.CodeMessage) {
if err := svc.db.PinnedObject().CreateFromPackage(svc.db.SQLCtx(), msg.PackageID, msg.NodeID); err != nil {
logger.Warnf("create package pinned objects: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "create package pinned objects failed")
}

return mq.ReplyOK(coormq.NewCachePackageMovedResp())


+ 13
- 29
coordinator/internal/services/package.go

@@ -12,8 +12,6 @@ import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
scmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)

func (svc *Service) GetPackage(msg *coormq.GetPackage) (*coormq.GetPackageResp, *mq.CodeMessage) {
@@ -96,7 +94,19 @@ func (svc *Service) DeletePackage(msg *coormq.DeletePackage) (*coormq.DeletePack
}

err = svc.db.DoTx(sql.LevelDefault, func(tx *sqlx.Tx) error {
err := svc.db.Package().SoftDelete(tx, msg.PackageID)
if err != nil {
return fmt.Errorf("soft delete package: %w", err)
}

err = svc.db.Package().DeleteUnused(tx, msg.PackageID)
if err != nil {
logger.WithField("UserID", msg.UserID).
WithField("PackageID", msg.PackageID).
Warnf("deleting unused package: %w", err.Error())
}

return nil
})
if err != nil {
logger.WithField("UserID", msg.UserID).
@@ -105,32 +115,6 @@ func (svc *Service) DeletePackage(msg *coormq.DeletePackage) (*coormq.DeletePack
return nil, mq.Failed(errorcode.OperationFailed, "set package deleted failed")
}

stgs, err := svc.db.StoragePackage().FindPackageStorages(svc.db.SQLCtx(), msg.PackageID)
if err != nil {
logger.Warnf("find package storages failed, but this will not affect the deleting, err: %s", err.Error())
return mq.ReplyOK(coormq.NewDeletePackageResp())
}

// Timeliness and precision are not required here
if len(stgs) == 0 {
// Not referenced anywhere: post a CheckPackage event directly
err := svc.scanner.PostEvent(scmq.NewPostEvent(scevt.NewCheckPackage([]cdssdk.PackageID{msg.PackageID}), false, false))
if err != nil {
logger.Warnf("post event to scanner failed, but this will not affect deleting, err: %s", err.Error())
}
logger.Debugf("post check package event")

} else {
// Still referenced: have the Agents check their StoragePackage records
for _, stg := range stgs {
err := svc.scanner.PostEvent(scmq.NewPostEvent(scevt.NewAgentCheckStorage(stg.StorageID, []cdssdk.PackageID{msg.PackageID}), false, false))
if err != nil {
logger.Warnf("post event to scanner failed, but this will not affect deleting, err: %s", err.Error())
}
}
logger.Debugf("post agent check storage event")
}

return mq.ReplyOK(coormq.NewDeletePackageResp())
}
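DeleteUnused is new in this commit but its body is not shown in this diff. A plausible shape, stated purely as an assumption: remove the Package row only when it is already soft-deleted and no StoragePackage still references it.

	// Sketch only; table/column names follow the repo's apparent conventions
	// and the 'Deleted' state value is an assumption.
	func deleteUnusedPackage(tx *sqlx.Tx, packageID cdssdk.PackageID) error {
		_, err := tx.Exec(
			`DELETE FROM Package
			 WHERE PackageID = ?
			 AND State = 'Deleted'
			 AND NOT EXISTS (SELECT 1 FROM StoragePackage WHERE PackageID = ?)`,
			packageID, packageID,
		)
		return err
	}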



+ 99
- 0
scanner/internal/event/agent_cache_gc.go

@@ -0,0 +1,99 @@
package event

import (
"database/sql"
"fmt"
"time"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"

agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)

type AgentCacheGC struct {
*scevt.AgentCacheGC
}

func NewAgentCacheGC(evt *scevt.AgentCacheGC) *AgentCacheGC {
return &AgentCacheGC{
AgentCacheGC: evt,
}
}

func (t *AgentCacheGC) TryMerge(other Event) bool {
event, ok := other.(*AgentCacheGC)
if !ok {
return false
}

if event.NodeID != t.NodeID {
return false
}

return true
}

func (t *AgentCacheGC) Execute(execCtx ExecuteContext) {
log := logger.WithType[AgentCacheGC]("Event")
log.Debugf("begin with %v", logger.FormatStruct(t.AgentCacheGC))
defer log.Debugf("end")

// TODO: Should tasks still be dispatched to unavailable nodes?

mutex, err := reqbuilder.NewBuilder().
// Lock for the GC run
IPFS().GC(t.NodeID).
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

var allFileHashes []string
err = execCtx.Args.DB.DoTx(sql.LevelLinearizable, func(tx *sqlx.Tx) error {
blocks, err := execCtx.Args.DB.ObjectBlock().GetByNodeID(tx, t.NodeID)
if err != nil {
return fmt.Errorf("getting object blocks by node id: %w", err)
}
for _, c := range blocks {
allFileHashes = append(allFileHashes, c.FileHash)
}

objs, err := execCtx.Args.DB.PinnedObject().GetObjectsByNodeID(tx, t.NodeID)
if err != nil {
return fmt.Errorf("getting pinned objects by node id: %w", err)
}
for _, o := range objs {
allFileHashes = append(allFileHashes, o.FileHash)
}

return nil
})
if err != nil {
log.WithField("NodeID", t.NodeID).Warn(err.Error())
return
}

agtCli, err := stgglb.AgentMQPool.Acquire(t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer stgglb.AgentMQPool.Release(agtCli)

_, err = agtCli.CacheGC(agtmq.ReqCacheGC(allFileHashes), mq.RequestOption{Timeout: time.Minute})
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("ipfs gc: %s", err.Error())
return
}
}

func init() {
RegisterMessageConvertor(NewAgentCacheGC)
}

+ 85
- 90
scanner/internal/event/agent_check_cache.go

@@ -4,13 +4,12 @@ import (
"database/sql"
"time"

"github.com/jmoiron/sqlx"
"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"

agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
@@ -20,9 +19,9 @@ type AgentCheckCache struct {
*scevt.AgentCheckCache
}

func NewAgentCheckCache(evt *scevt.AgentCheckCache) *AgentCheckCache {
return &AgentCheckCache{
AgentCheckCache: evt,
}
}

@@ -36,13 +35,6 @@ func (t *AgentCheckCache) TryMerge(other Event) bool {
return false
}

// A nil (not merely empty) FileHashes means a full check
if event.FileHashes == nil {
t.FileHashes = nil
} else if t.FileHashes != nil {
t.FileHashes = lo.Union(t.FileHashes, event.FileHashes)
}

return true
}

@@ -53,123 +45,126 @@ func (t *AgentCheckCache) Execute(execCtx ExecuteContext) {

// TODO: Should tasks still be dispatched to unavailable nodes?

agtCli, err := stgglb.AgentMQPool.Acquire(t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer stgglb.AgentMQPool.Release(agtCli)

checkResp, err := agtCli.CheckCache(agtmq.NewCheckCache(), mq.RequestOption{Timeout: time.Minute})
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("checking ipfs: %s", err.Error())
return
}

realFileHashes := lo.SliceToMap(checkResp.FileHashes, func(hash string) (string, bool) { return hash, true })

// Adjust the metadata to match the files actually present in IPFS. Failures during the
// adjustment are ignored, but related changes must be applied atomically.
execCtx.Args.DB.DoTx(sql.LevelLinearizable, func(tx *sqlx.Tx) error {
t.checkCache(execCtx, tx, realFileHashes)

t.checkPinnedObject(execCtx, tx, realFileHashes)

t.checkObjectBlock(execCtx, tx, realFileHashes)
return nil
})
}

// Compare against the Cache table: records missing there are added, stale ones are deleted
func (t *AgentCheckCache) checkCache(execCtx ExecuteContext, tx *sqlx.Tx, realFileHashes map[string]bool) {
log := logger.WithType[AgentCheckCache]("Event")

caches, err := execCtx.Args.DB.Cache().GetByNodeID(tx, t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("getting caches by node id: %s", err.Error())
return
}

realFileHashesCp := make(map[string]bool)
for k, v := range realFileHashes {
realFileHashesCp[k] = v
}

var rms []string
for _, c := range caches {
if realFileHashesCp[c.FileHash] {
// Cache uses (FileHash, NodeID) as its primary key, so a query by a single
// NodeID can never return two rows with the same FileHash
delete(realFileHashesCp, c.FileHash)
continue
}
rms = append(rms, c.FileHash)
}

err = execCtx.Args.DB.Cache().NodeBatchDelete(tx, t.NodeID, rms)
if err != nil {
log.Warnf("batch delete node caches: %s", err.Error())
return
}

err = execCtx.Args.DB.Cache().BatchCreate(tx, lo.Keys(realFileHashes), t.NodeID, 0)
if err != nil {
log.Warnf("batch create node caches: %s", err.Error())
return
}
}

// Compare against the PinnedObject table: extra files in IPFS are left alone, missing ones are removed from the table
func (t *AgentCheckCache) checkPinnedObject(execCtx ExecuteContext, tx *sqlx.Tx, realFileHashes map[string]bool) {
log := logger.WithType[AgentCheckCache]("Event")

objs, err := execCtx.Args.DB.PinnedObject().GetObjectsByNodeID(tx, t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("getting pinned objects by node id: %s", err.Error())
return
}

var rms []cdssdk.ObjectID
for _, c := range objs {
if realFileHashes[c.FileHash] {
continue
}
rms = append(rms, c.ObjectID)
}

err = execCtx.Args.DB.PinnedObject().NodeBatchDelete(tx, t.NodeID, rms)
if err != nil {
log.Warnf("batch delete node pinned objects: %s", err.Error())
return
}
}

// Compare against the ObjectBlock table: extra files in IPFS are left alone, missing ones are removed from the table
func (t *AgentCheckCache) checkObjectBlock(execCtx ExecuteContext, tx *sqlx.Tx, realFileHashes map[string]bool) {
log := logger.WithType[AgentCheckCache]("Event")

objs, err := execCtx.Args.DB.ObjectBlock().GetByNodeID(tx, t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("getting object blocks by node id: %s", err.Error())
return
}

realFileHashesCp := make(map[string]bool)
for k, v := range realFileHashes {
realFileHashesCp[k] = v
}

var rms []string
for _, c := range objs {
if realFileHashesCp[c.FileHash] {
continue
}
rms = append(rms, c.FileHash)
}

err = execCtx.Args.DB.ObjectBlock().NodeBatchDelete(tx, t.NodeID, rms)
if err != nil {
log.Warnf("batch delete node object blocks: %s", err.Error())
return
}
}

func init() {
RegisterMessageConvertor(NewAgentCheckCache)
}
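lo.SliceToMap (github.com/samber/lo) is what turns the agent's flat hash list into the membership set consumed by the three check helpers above; a tiny standalone example:

	hashes := []string{"QmAAA", "QmBBB"}
	isReal := lo.SliceToMap(hashes, func(h string) (string, bool) { return h, true })
	fmt.Println(isReal["QmAAA"], isReal["QmCCC"]) // true false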

+ 3
- 28
scanner/internal/event/agent_check_state.go

@@ -6,10 +6,8 @@ import (

"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
"gitlink.org.cn/cloudream/storage/scanner/internal/config"
@@ -19,9 +17,9 @@ type AgentCheckState struct {
*scevt.AgentCheckState
}

func NewAgentCheckState(evt *scevt.AgentCheckState) *AgentCheckState {
return &AgentCheckState{
AgentCheckState: evt,
}
}

@@ -39,17 +37,6 @@ func (t *AgentCheckState) Execute(execCtx ExecuteContext) {
log.Debugf("begin with %v", logger.FormatStruct(t.AgentCheckState))
defer log.Debugf("end")

mutex, err := reqbuilder.NewBuilder().
Metadata().
// Query and update the node state
Node().WriteOne(t.NodeID).
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err == sql.ErrNoRows {
return
@@ -77,19 +64,7 @@ func (t *AgentCheckState) Execute(execCtx ExecuteContext) {
err := execCtx.Args.DB.Node().UpdateState(execCtx.Args.DB.SQLCtx(), t.NodeID, consts.NodeStateUnavailable)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("set node state failed, err: %s", err.Error())
return
}
/*
caches, err := execCtx.Args.DB.Cache().GetNodeCaches(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("get node caches failed, err: %s", err.Error())
return
}

// Top up the replica count
execCtx.Executor.Post(NewCheckRepCount(lo.Map(caches, func(ch model.Cache, index int) string { return ch.FileHash })))
*/
return
}
@@ -113,5 +88,5 @@ func (t *AgentCheckState) Execute(execCtx ExecuteContext) {
}

func init() {
RegisterMessageConvertor(NewAgentCheckState)
}

+ 46
- 110
scanner/internal/event/agent_check_storage.go

@@ -4,14 +4,13 @@ import (
"database/sql"
"time"

"github.com/samber/lo"
"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)
@@ -20,9 +19,9 @@ type AgentCheckStorage struct {
*scevt.AgentCheckStorage
}

func NewAgentCheckStorage(evt *scevt.AgentCheckStorage) *AgentCheckStorage {
return &AgentCheckStorage{
AgentCheckStorage: evt,
}
}

@@ -36,13 +35,6 @@ func (t *AgentCheckStorage) TryMerge(other Event) bool {
return false
}

// A nil (not merely empty) PackageIDs means a full check
if event.PackageIDs == nil {
t.PackageIDs = nil
} else if t.PackageIDs != nil {
t.PackageIDs = lo.Union(t.PackageIDs, event.PackageIDs)
}

return true
}

@@ -69,132 +61,76 @@ func (t *AgentCheckStorage) Execute(execCtx ExecuteContext) {
return
}

// TODO: Should tasks still be dispatched to unavailable nodes?
if node.State != consts.NodeStateNormal {
return
}

agtCli, err := stgglb.AgentMQPool.Acquire(stg.NodeID)
if err != nil {
log.WithField("NodeID", stg.NodeID).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer stgglb.AgentMQPool.Release(agtCli)

checkResp, err := agtCli.StorageCheck(agtmq.NewStorageCheck(stg.StorageID, stg.Directory), mq.RequestOption{Timeout: time.Minute})
if err != nil {
log.WithField("NodeID", stg.NodeID).Warnf("checking storage: %s", err.Error())
return
}

realPkgs := make(map[cdssdk.UserID]map[cdssdk.PackageID]bool)
for _, pkg := range checkResp.Packages {
pkgs, ok := realPkgs[pkg.UserID]
if !ok {
pkgs = make(map[cdssdk.PackageID]bool)
realPkgs[pkg.UserID] = pkgs
}
pkgs[pkg.PackageID] = true
}

execCtx.Args.DB.DoTx(sql.LevelLinearizable, func(tx *sqlx.Tx) error {
packages, err := execCtx.Args.DB.StoragePackage().GetAllByStorageID(execCtx.Args.DB.SQLCtx(), t.StorageID)
if err != nil {
log.Warnf("getting storage packages: %s", err.Error())
return nil
}

var rms []model.StoragePackage
for _, pkg := range packages {
pkgMap, ok := realPkgs[pkg.UserID]
if !ok {
rms = append(rms, pkg)
continue
}
if !pkgMap[pkg.PackageID] {
rms = append(rms, pkg)
}
}

rmdPkgIDs := make(map[cdssdk.PackageID]bool)
for _, rm := range rms {
err := execCtx.Args.DB.StoragePackage().Delete(tx, rm.StorageID, rm.PackageID, rm.UserID)
if err != nil {
log.Warnf("deleting storage package: %s", err.Error())
continue
}
rmdPkgIDs[rm.PackageID] = true
}

// Permanently delete Packages that are already in the Deleted state and no longer referenced
for pkgID := range rmdPkgIDs {
err := execCtx.Args.DB.Package().DeleteUnused(tx, pkgID)
if err != nil {
log.Warnf("deleting unused package: %s", err.Error())
continue
}
}

return nil
})
}

func init() {
RegisterMessageConvertor(NewAgentCheckStorage)
}

+ 83
- 0
scanner/internal/event/agent_storage_gc.go

@@ -0,0 +1,83 @@
package event

import (
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"

agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)

type AgentStorageGC struct {
*scevt.AgentStorageGC
}

func NewAgentStorageGC(evt *scevt.AgentStorageGC) *AgentStorageGC {
return &AgentStorageGC{
AgentStorageGC: evt,
}
}

func (t *AgentStorageGC) TryMerge(other Event) bool {
event, ok := other.(*AgentStorageGC)
if !ok {
return false
}

if event.StorageID != t.StorageID {
return false
}

return true
}

func (t *AgentStorageGC) Execute(execCtx ExecuteContext) {
log := logger.WithType[AgentStorageGC]("Event")
log.Debugf("begin with %v", logger.FormatStruct(t.AgentStorageGC))
defer log.Debugf("end")

// TODO: Should tasks still be dispatched to unavailable nodes?

mutex, err := reqbuilder.NewBuilder().
// Lock for the GC run
Storage().GC(t.StorageID).
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

getStg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.SQLCtx(), t.StorageID)
if err != nil {
log.WithField("StorageID", t.StorageID).Warnf("getting storage: %s", err.Error())
return
}

stgPkgs, err := execCtx.Args.DB.StoragePackage().GetAllByStorageID(execCtx.Args.DB.SQLCtx(), t.StorageID)
if err != nil {
log.WithField("StorageID", t.StorageID).Warnf("getting storage packages: %s", err.Error())
return
}

agtCli, err := stgglb.AgentMQPool.Acquire(getStg.NodeID)
if err != nil {
log.WithField("NodeID", getStg.NodeID).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer stgglb.AgentMQPool.Release(agtCli)

_, err = agtCli.StorageGC(agtmq.ReqStorageGC(t.StorageID, getStg.Directory, stgPkgs), mq.RequestOption{Timeout: time.Minute})
if err != nil {
log.WithField("StorageID", t.StorageID).Warnf("storage gc: %s", err.Error())
return
}
}

func init() {
RegisterMessageConvertor(NewAgentStorageGC)
}
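TryMerge is how the scanner's event executor deduplicates queued work: two AgentStorageGC events for the same StorageID collapse into one queue entry. A toy sketch of such a coalescing queue (hypothetical Post method and gcEvent type, not the repo's actual executor):

package main

import "fmt"

// Event mirrors the TryMerge contract used above.
type Event interface {
	TryMerge(other Event) bool
}

// Queue coalesces duplicate events instead of enqueueing them twice.
type Queue struct {
	events []Event
}

func (q *Queue) Post(evt Event) {
	for _, e := range q.events {
		if e.TryMerge(evt) {
			return // merged into an existing event
		}
	}
	q.events = append(q.events, evt)
}

// gcEvent is a stand-in for AgentStorageGC, keyed by storage ID.
type gcEvent struct{ storageID int }

func (g *gcEvent) TryMerge(other Event) bool {
	o, ok := other.(*gcEvent)
	return ok && o.storageID == g.storageID
}

func main() {
	q := &Queue{}
	q.Post(&gcEvent{storageID: 1})
	q.Post(&gcEvent{storageID: 1}) // merged, queue stays at one event
	q.Post(&gcEvent{storageID: 2})
	fmt.Println(len(q.events)) // 2
}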

+ 0  - 83   scanner/internal/event/check_cache.go

@@ -1,83 +0,0 @@
package event

import (
"database/sql"

"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/consts"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)

type CheckCache struct {
*scevt.CheckCache
}

func NewCheckCache(nodeID cdssdk.NodeID) *CheckCache {
return &CheckCache{
CheckCache: scevt.NewCheckCache(nodeID),
}
}

func (t *CheckCache) TryMerge(other Event) bool {
event, ok := other.(*CheckCache)
if !ok {
return false
}
if event.NodeID != t.NodeID {
return false
}

return true
}

func (t *CheckCache) Execute(execCtx ExecuteContext) {
log := logger.WithType[CheckCache]("Event")
log.Debugf("begin with %v", logger.FormatStruct(t.CheckCache))
defer log.Debugf("end")

mutex, err := reqbuilder.NewBuilder().
Metadata().
// Query the node's state
Node().ReadOne(t.NodeID).
// Delete all of the node's Cache records
Cache().WriteAny().
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err == sql.ErrNoRows {
return
}
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("get node failed, err: %s", err.Error())
return
}

if node.State != consts.NodeStateUnavailable {
return
}
/*
caches, err := execCtx.Args.DB.Cache().GetNodeCaches(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("get node caches failed, err: %s", err.Error())
return
}

err = execCtx.Args.DB.Cache().DeleteNodeAll(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("delete node all caches failed, err: %s", err.Error())
return
}
*/
//execCtx.Executor.Post(NewCheckRepCount(lo.Map(caches, func(ch model.Cache, index int) string { return ch.FileHash })))
}

func init() {
RegisterMessageConvertor(func(msg *scevt.CheckCache) Event { return NewCheckCache(msg.NodeID) })
}

+ 3  - 17   scanner/internal/event/check_package.go

@@ -3,8 +3,6 @@ package event
import (
"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)

@@ -12,9 +10,9 @@ type CheckPackage struct {
*scevt.CheckPackage
}

func NewCheckPackage(pkgIDs []cdssdk.PackageID) *CheckPackage {
func NewCheckPackage(evt *scevt.CheckPackage) *CheckPackage {
return &CheckPackage{
CheckPackage: scevt.NewCheckPackage(pkgIDs),
CheckPackage: evt,
}
}

@@ -33,18 +31,6 @@ func (t *CheckPackage) Execute(execCtx ExecuteContext) {
log.Debugf("begin with %v", logger.FormatStruct(t.CheckPackage))
defer log.Debugf("end")

// Checking whether an object is no longer referenced requires reading the StoragePackage table
builder := reqbuilder.NewBuilder().Metadata().StoragePackage().ReadAny()
for _, objID := range t.PackageIDs {
builder.Metadata().Package().WriteOne(objID)
}
mutex, err := builder.MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

for _, objID := range t.PackageIDs {
err := execCtx.Args.DB.Package().DeleteUnused(execCtx.Args.DB.SQLCtx(), objID)
if err != nil {
@@ -54,5 +40,5 @@ func (t *CheckPackage) Execute(execCtx ExecuteContext) {
}

func init() {
RegisterMessageConvertor(func(msg *scevt.CheckPackage) Event { return NewCheckPackage(msg.PackageIDs) })
RegisterMessageConvertor(NewCheckPackage)
}

+ 0  - 218   scanner/internal/event/check_rep_count.go

@@ -1,218 +0,0 @@
package event

/*
// TODO Kept as a reference for the new logic
import (
"fmt"
"math"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
mymath "gitlink.org.cn/cloudream/common/utils/math"
mysort "gitlink.org.cn/cloudream/common/utils/sort"
"gitlink.org.cn/cloudream/storage/common/consts"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/scanner/internal/config"

"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)

type CheckRepCount struct {
*scevt.CheckRepCount
}

func NewCheckRepCount(fileHashes []string) *CheckRepCount {
return &CheckRepCount{
CheckRepCount: scevt.NewCheckRepCount(fileHashes),
}
}

func (t *CheckRepCount) TryMerge(other Event) bool {
event, ok := other.(*CheckRepCount)
if !ok {
return false
}

t.FileHashes = lo.Union(t.FileHashes, event.FileHashes)
return true
}

func (t *CheckRepCount) Execute(execCtx ExecuteContext) {
log := logger.WithType[CheckRepCount]("Event")
log.Debugf("begin with %v", logger.FormatStruct(t.CheckRepCount))
defer log.Debugf("end")

mutex, err := reqbuilder.NewBuilder().
Metadata().
// Read the replica-count setting of a FileHash
ObjectRep().ReadAny().
// Check whether a FileHash is referenced by any Block
ObjectBlock().ReadAny().
// Get all available nodes
Node().ReadAny().
// Create or update the Cache records associated with a FileHash
Cache().WriteAny().
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

updatedNodeAndHashes := make(map[int64][]string)

for _, fileHash := range t.FileHashes {
updatedNodeIDs, err := t.checkOneRepCount(fileHash, execCtx)
if err != nil {
log.WithField("FileHash", fileHash).Warnf("check file rep count failed, err: %s", err.Error())
continue
}

for _, id := range updatedNodeIDs {
hashes := updatedNodeAndHashes[id]
updatedNodeAndHashes[id] = append(hashes, fileHash)
}
}

for nodeID, hashes := range updatedNodeAndHashes {
// The new task inherits this task's execution options (an emergency task stays an emergency task)
execCtx.Executor.Post(NewAgentCheckCache(nodeID, hashes), execCtx.Option)
}
}

func (t *CheckRepCount) checkOneRepCount(fileHash string, execCtx ExecuteContext) ([]int64, error) {
log := logger.WithType[CheckRepCount]("Event")
sqlCtx := execCtx.Args.DB.SQLCtx()

var updatedNodeIDs []int64
// Compute the minimum required replica count:
// 1. the maximum expected replica count in ObjectRep;
// 2. at least 1 if any ObjectBlock references this file.
// E.g. repMaxCnt=3, blkCnt=5 gives max(3, min(1, 5)) = 3.
repMaxCnt, err := execCtx.Args.DB.ObjectRep().GetFileMaxRepCount(sqlCtx, fileHash)
if err != nil {
return nil, fmt.Errorf("get file max rep count failed, err: %w", err)
}

blkCnt, err := execCtx.Args.DB.ObjectBlock().CountBlockWithHash(sqlCtx, fileHash)
if err != nil {
return nil, fmt.Errorf("count block with hash failed, err: %w", err)
}

needRepCount := mymath.Max(repMaxCnt, mymath.Min(1, blkCnt))

repNodes, err := execCtx.Args.DB.Cache().GetCachingFileNodes(sqlCtx, fileHash)
if err != nil {
return nil, fmt.Errorf("get caching file nodes failed, err: %w", err)
}

allNodes, err := execCtx.Args.DB.Node().GetAllNodes(sqlCtx)
if err != nil {
return nil, fmt.Errorf("get all nodes failed, err: %w", err)
}

var normalNodes, unavaiNodes []model.Node
for _, node := range repNodes {
if node.State == consts.NodeStateNormal {
normalNodes = append(normalNodes, node)
} else if node.State == consts.NodeStateUnavailable {
unavaiNodes = append(unavaiNodes, node)
}
}

// If the number of available replicas exceeds the expected count, let some nodes drop out
if len(normalNodes) > needRepCount {
delNodes := chooseDeleteAvaiRepNodes(allNodes, normalNodes, len(normalNodes)-needRepCount)
for _, node := range delNodes {
err := execCtx.Args.DB.Cache().SetTemp(sqlCtx, fileHash, node.NodeID)
if err != nil {
return nil, fmt.Errorf("change cache state failed, err: %w", err)
}
updatedNodeIDs = append(updatedNodeIDs, node.NodeID)
}
return updatedNodeIDs, nil
}

// Replicas that must be added because the total replica count is insufficient
add1 := mymath.Max(0, needRepCount-len(repNodes))

// Replicas that must be added because the proportion of available replicas is too low
minAvaiNodeCnt := int(math.Ceil(float64(config.Cfg().MinAvailableRepProportion) * float64(needRepCount)))
add2 := mymath.Max(0, minAvaiNodeCnt-len(normalNodes))

// The final number of replicas to add is the maximum of the two cases above
finalAddCount := mymath.Max(add1, add2)

if finalAddCount > 0 {
newNodes := chooseNewRepNodes(allNodes, repNodes, finalAddCount)
if len(newNodes) < finalAddCount {
log.WithField("FileHash", fileHash).Warnf("need %d more rep nodes, but get only %d nodes", finalAddCount, len(newNodes))
// TODO Not enough nodes; raise an alert
}

for _, node := range newNodes {
err := execCtx.Args.DB.Cache().CreatePinned(sqlCtx, fileHash, node.NodeID, 0)
if err != nil {
return nil, fmt.Errorf("create cache failed, err: %w", err)
}
updatedNodeIDs = append(updatedNodeIDs, node.NodeID)
}
}

return updatedNodeIDs, err
}

func chooseNewRepNodes(allNodes []model.Node, curRepNodes []model.Node, newCount int) []model.Node {
noRepNodes := lo.Reject(allNodes, func(node model.Node, index int) bool {
return lo.ContainsBy(curRepNodes, func(n model.Node) bool { return node.NodeID == n.NodeID }) ||
node.State != consts.NodeStateNormal
})

repNodeLocationIDs := make(map[int64]bool)
for _, node := range curRepNodes {
repNodeLocationIDs[node.LocationID] = true
}

mysort.Sort(noRepNodes, func(l, r model.Node) int {
// A LocationID missing from the map yields false, and false - true < 0, so nodes whose LocationID is not among the replicas sort first
return mysort.CmpBool(repNodeLocationIDs[l.LocationID], repNodeLocationIDs[r.LocationID])
})

return noRepNodes[:mymath.Min(newCount, len(noRepNodes))]
}

func chooseDeleteAvaiRepNodes(allNodes []model.Node, curAvaiRepNodes []model.Node, delCount int) []model.Node {
// Group nodes by location ID
locationGroupedNodes := make(map[int64][]model.Node)
for _, node := range curAvaiRepNodes {
nodes := locationGroupedNodes[node.LocationID]
nodes = append(nodes, node)
locationGroupedNodes[node.LocationID] = nodes
}

// Each round takes one element from every group, appends it to the result, and removes it from that group.
// The result is therefore interleaved by location, e.g. ABCABCBCC; elements in the later rounds all come from the groups with more members.
// Reversing the result (implemented here by filling it in reverse order) moves the larger groups to the front, so the nodes to delete can be taken straight from the head.
alternatedNodes := make([]model.Node, len(curAvaiRepNodes))
for i := len(curAvaiRepNodes) - 1; i >= 0; {
for id, nodes := range locationGroupedNodes {
alternatedNodes[i] = nodes[0]

if len(nodes) == 1 {
delete(locationGroupedNodes, id)
} else {
locationGroupedNodes[id] = nodes[1:]
}

// Move the write position after placing each element
i--
}
}

return alternatedNodes[:mymath.Min(delCount, len(alternatedNodes))]
}

func init() {
RegisterMessageConvertor(func(msg *scevt.CheckRepCount) Event { return NewCheckRepCount(msg.FileHashes) })
}
*/
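The backwards-filling loop is the subtle part of chooseDeleteAvaiRepNodes. A self-contained sketch of the same idea, with hypothetical names and plain strings instead of the repo's model.Node:

package main

import "fmt"

// interleaveDesc empties the groups round by round, filling the result
// from the back, so elements of the larger groups end up at the front.
// Taking deletions from the head then removes items from the
// most-duplicated groups first.
func interleaveDesc(groups map[int][]string) []string {
	total := 0
	for _, g := range groups {
		total += len(g)
	}

	out := make([]string, total)
	for i := total - 1; i >= 0; {
		for key, g := range groups {
			out[i] = g[0]
			if len(g) == 1 {
				delete(groups, key) // group exhausted
			} else {
				groups[key] = g[1:]
			}
			i--
		}
	}
	return out
}

func main() {
	groups := map[int][]string{
		1: {"a1", "a2", "a3"},
		2: {"b1"},
	}
	// One possible output: [a3 a2 b1 a1]; the head always comes from
	// the largest group.
	fmt.Println(interleaveDesc(groups))
}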

+ 0  - 158   scanner/internal/event/check_rep_count_test.go

@@ -1,158 +0,0 @@
package event

/*
import (
"testing"

"github.com/samber/lo"
. "github.com/smartystreets/goconvey/convey"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/sort"
"gitlink.org.cn/cloudream/storage/common/consts"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
)

func Test_chooseNewRepNodes(t *testing.T) {
testcases := []struct {
title string
allNodes []model.Node
curRepNodes []model.Node
newCount int
wantNodeIDs []cdssdk.NodeID
}{
{
title: "优先选择不同地域的节点",
allNodes: []model.Node{
{
NodeID: 1,
LocationID: 1,
State: consts.NodeStateNormal,
},
{
NodeID: 2,
LocationID: 1,
State: consts.NodeStateNormal,
},
{
NodeID: 3,
LocationID: 2,
State: consts.NodeStateNormal,
},
{
NodeID: 4,
LocationID: 3,
State: consts.NodeStateNormal,
},
},
curRepNodes: []model.Node{
{
NodeID: 1,
LocationID: 1,
},
},
newCount: 2,
wantNodeIDs: []cdssdk.NodeID{3, 4},
},
{
title: "就算节点数不足,也不能选择重复节点",
allNodes: []model.Node{
{
NodeID: 1,
LocationID: 1,
State: consts.NodeStateNormal,
},
{
NodeID: 2,
LocationID: 1,
State: consts.NodeStateNormal,
},
},
curRepNodes: []model.Node{
{
NodeID: 1,
LocationID: 1,
},
},
newCount: 2,
wantNodeIDs: []cdssdk.NodeID{2},
},
{
title: "就算节点数不足,也不能选择状态unavailable的节点",
allNodes: []model.Node{
{
NodeID: 1,
LocationID: 1,
State: consts.NodeStateUnavailable,
},
{
NodeID: 2,
LocationID: 1,
State: consts.NodeStateNormal,
},
},
curRepNodes: []model.Node{
{
NodeID: 3,
LocationID: 1,
},
},
newCount: 2,
wantNodeIDs: []cdssdk.NodeID{2},
},
}

for _, test := range testcases {
Convey(test.title, t, func() {
chooseNodes := chooseNewRepNodes(test.allNodes, test.curRepNodes, test.newCount)
chooseNodeIDs := lo.Map(chooseNodes, func(node model.Node, index int) cdssdk.NodeID { return node.NodeID })

sort.Sort(chooseNodeIDs, sort.Cmp[cdssdk.NodeID])

So(chooseNodeIDs, ShouldResemble, test.wantNodeIDs)
})
}
}

func Test_chooseDeleteAvaiRepNodes(t *testing.T) {
testcases := []struct {
title string
allNodes []model.Node
curRepNodes []model.Node
delCount int
wantNodeLocationIDs []cdssdk.LocationID
}{
{
title: "优先选择地域重复的节点",
allNodes: []model.Node{},
curRepNodes: []model.Node{
{NodeID: 1, LocationID: 1}, {NodeID: 2, LocationID: 1},
{NodeID: 3, LocationID: 2}, {NodeID: 4, LocationID: 2},
{NodeID: 5, LocationID: 3}, {NodeID: 6, LocationID: 3}, {NodeID: 7, LocationID: 3},
{NodeID: 8, LocationID: 4},
},
delCount: 4,
wantNodeLocationIDs: []cdssdk.LocationID{1, 2, 3, 3},
},
{
title: "节点不够删",
allNodes: []model.Node{},
curRepNodes: []model.Node{
{NodeID: 1, LocationID: 1},
},
delCount: 2,
wantNodeLocationIDs: []cdssdk.LocationID{1},
},
}

for _, test := range testcases {
Convey(test.title, t, func() {
chooseNodes := chooseDeleteAvaiRepNodes(test.allNodes, test.curRepNodes, test.delCount)
chooseNodeLocationIDs := lo.Map(chooseNodes, func(node model.Node, index int) cdssdk.LocationID { return node.LocationID })

sort.Sort(chooseNodeLocationIDs, sort.Cmp[cdssdk.LocationID])

So(chooseNodeLocationIDs, ShouldResemble, test.wantNodeLocationIDs)
})
}
}
*/

+ 2  - 1   scanner/internal/tickevent/batch_all_agent_check_cache.go

@@ -5,6 +5,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
"gitlink.org.cn/cloudream/storage/scanner/internal/event"
)

@@ -38,7 +39,7 @@ func (e *BatchAllAgentCheckCache) Execute(ctx ExecuteContext) {
checkedCnt := 0
for ; checkedCnt < len(e.nodeIDs) && checkedCnt < AGENT_CHECK_CACHE_BATCH_SIZE; checkedCnt++ {
// nil means a full check
ctx.Args.EventExecutor.Post(event.NewAgentCheckCache(e.nodeIDs[checkedCnt], nil))
ctx.Args.EventExecutor.Post(event.NewAgentCheckCache(scevt.NewAgentCheckCache(e.nodeIDs[checkedCnt])))
}
e.nodeIDs = e.nodeIDs[checkedCnt:]
}

+ 2  - 3   scanner/internal/tickevent/batch_check_all_package.go

@@ -2,11 +2,10 @@ package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
"gitlink.org.cn/cloudream/storage/scanner/internal/event"
)

const CheckPackageBatchSize = 100

type BatchCheckAllPackage struct {
lastCheckStart int
}
@@ -26,7 +25,7 @@ func (e *BatchCheckAllPackage) Execute(ctx ExecuteContext) {
return
}

ctx.Args.EventExecutor.Post(event.NewCheckPackage(packageIDs))
ctx.Args.EventExecutor.Post(event.NewCheckPackage(scevt.NewCheckPackage(packageIDs)))

// If the batch is shorter than expected, everything has been scanned; start from the beginning next time
if len(packageIDs) < CheckPackageBatchSize {
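Several tick events here share the cursor-paging pattern that the comment above describes: fetch one batch starting at lastCheckStart, advance the cursor, and reset it to zero once a short batch shows the whole set has been scanned. A standalone sketch of the pattern (hypothetical fetch callback, not the repo's DB API):

package main

import "fmt"

// batchCursor pages through an ID space in fixed-size batches and wraps
// around once a short batch indicates the end has been reached.
type batchCursor struct {
	start, size int
}

func (c *batchCursor) next(fetch func(start, limit int) []int) []int {
	batch := fetch(c.start, c.size)
	if len(batch) < c.size {
		c.start = 0 // everything scanned; start over on the next tick
	} else {
		c.start += c.size
	}
	return batch
}

func main() {
	ids := make([]int, 250)
	for i := range ids {
		ids[i] = i
	}
	fetch := func(start, limit int) []int {
		if start >= len(ids) {
			return nil
		}
		end := start + limit
		if end > len(ids) {
			end = len(ids)
		}
		return ids[start:end]
	}

	c := &batchCursor{size: 100}
	for i := 0; i < 3; i++ {
		fmt.Println(len(c.next(fetch))) // 100, 100, 50; then the cursor resets
	}
}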


+ 0  - 41   scanner/internal/tickevent/batch_check_all_rep_count.go

@@ -1,41 +0,0 @@
package tickevent

/*
import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage/scanner/internal/event"
)

const CHECK_CACHE_BATCH_SIZE = 100

type BatchCheckAllRepCount struct {
lastCheckStart int
}

func NewBatchCheckAllRepCount() *BatchCheckAllRepCount {
return &BatchCheckAllRepCount{}
}

func (e *BatchCheckAllRepCount) Execute(ctx ExecuteContext) {
log := logger.WithType[BatchCheckAllRepCount]("TickEvent")
log.Debugf("begin")
defer log.Debugf("end")

fileHashes, err := ctx.Args.DB.Cache().BatchGetAllFileHashes(ctx.Args.DB.SQLCtx(), e.lastCheckStart, CHECK_CACHE_BATCH_SIZE)
if err != nil {
log.Warnf("batch get file hashes failed, err: %s", err.Error())
return
}

ctx.Args.EventExecutor.Post(event.NewCheckRepCount(fileHashes))

// If the batch is shorter than expected, everything has been scanned; start from the beginning next time
if len(fileHashes) < CHECK_CACHE_BATCH_SIZE {
e.lastCheckStart = 0
log.Debugf("all rep count checked, next time will start check at 0")

} else {
e.lastCheckStart += CHECK_CACHE_BATCH_SIZE
}
}
*/

+ 2  - 1   scanner/internal/tickevent/batch_check_all_storage.go

@@ -2,6 +2,7 @@ package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
"gitlink.org.cn/cloudream/storage/scanner/internal/event"
)

@@ -28,7 +29,7 @@ func (e *BatchCheckAllStorage) Execute(ctx ExecuteContext) {

for _, stgID := range storageIDs {
// Passing nil means a full check
ctx.Args.EventExecutor.Post(event.NewAgentCheckStorage(stgID, nil))
ctx.Args.EventExecutor.Post(event.NewAgentCheckStorage(scevt.NewAgentCheckStorage(stgID)))
}

// If the batch is shorter than expected, everything has been scanned; start from the beginning next time


+ 4  - 0   scanner/internal/tickevent/batch_check_package_redudancy.go

@@ -8,6 +8,10 @@ import (
evt "gitlink.org.cn/cloudream/storage/scanner/internal/event"
)

const (
CheckPackageBatchSize = 100
)

type BatchCheckPackageRedundancy struct {
lastCheckStart int
}


+ 2  - 1   scanner/internal/tickevent/check_agent_state.go

@@ -2,6 +2,7 @@ package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
"gitlink.org.cn/cloudream/storage/scanner/internal/event"
)

@@ -24,7 +25,7 @@ func (e *CheckAgentState) Execute(ctx ExecuteContext) {
}

for _, node := range nodes {
ctx.Args.EventExecutor.Post(event.NewAgentCheckState(node.NodeID), event.ExecuteOption{
ctx.Args.EventExecutor.Post(event.NewAgentCheckState(scevt.NewAgentCheckState(node.NodeID)), event.ExecuteOption{
IsEmergency: true,
DontMerge: true,
})


+ 0  - 29   scanner/internal/tickevent/check_cache.go

@@ -1,29 +0,0 @@
package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage/scanner/internal/event"
)

type CheckCache struct {
}

func NewCheckCache() *CheckCache {
return &CheckCache{}
}

func (e *CheckCache) Execute(ctx ExecuteContext) {
log := logger.WithType[CheckCache]("TickEvent")
log.Debugf("begin")
defer log.Debugf("end")

nodes, err := ctx.Args.DB.Node().GetAllNodes(ctx.Args.DB.SQLCtx())
if err != nil {
log.Warnf("get all nodes failed, err: %s", err.Error())
return
}

for _, node := range nodes {
ctx.Args.EventExecutor.Post(event.NewCheckCache(node.NodeID))
}
}

+ 0  - 2   scanner/main.go

@@ -125,7 +125,5 @@ func startTickEvent(tickExecutor *tickevent.Executor) {

tickExecutor.Start(tickevent.NewCheckAgentState(), 5*60*1000, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewCheckCache(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewBatchCheckPackageRedundancy(), interval, tickevent.StartOption{RandomStartDelayMs: 10 * 60 * 1000})
}
