Browse Source

Merge branch 'master' of https://gitlink.org.cn/cloudream/storage into feature_zqj

# Conflicts:
#	go.mod
#	go.sum
gitlink
songjc 10 months ago
parent
commit
6678562a05
100 changed files with 3330 additions and 2766 deletions
  1. +25
    -13
      agent/internal/cmd/serve.go
  2. +12
    -10
      agent/internal/config/config.go
  3. +1
    -1
      agent/internal/grpc/io.go
  4. +4
    -4
      agent/internal/grpc/service.go
  5. +1
    -1
      agent/internal/http/hub_io.go
  6. +4
    -4
      agent/internal/http/service.go
  7. +2
    -2
      agent/internal/mq/cache.go
  8. +4
    -4
      agent/internal/mq/service.go
  9. +0
    -93
      agent/internal/mq/storage.go
  10. +1
    -1
      agent/internal/task/cache_move_package.go
  11. +1
    -1
      agent/internal/task/create_package.go
  12. +0
    -339
      agent/internal/task/storage_load_package.go
  13. +4
    -4
      agent/internal/task/task.go
  14. +1
    -1
      client/internal/cmdline/getp.go
  15. +9
    -20
      client/internal/cmdline/load.go
  16. +1
    -1
      client/internal/cmdline/lsp.go
  17. +15
    -7
      client/internal/cmdline/newloadp.go
  18. +1
    -1
      client/internal/cmdline/object.go
  19. +0
    -23
      client/internal/cmdline/package.go
  20. +2
    -2
      client/internal/cmdline/put.go
  21. +7
    -1
      client/internal/cmdline/serve.go
  22. +0
    -39
      client/internal/cmdline/storage.go
  23. +25
    -24
      client/internal/cmdline/test.go
  24. +14
    -9
      client/internal/config/config.go
  25. +197
    -0
      client/internal/http/aws_auth.go
  26. +50
    -5
      client/internal/http/object.go
  27. +35
    -30
      client/internal/http/package.go
  28. +48
    -4
      client/internal/http/server.go
  29. +4
    -29
      client/internal/http/storage.go
  30. +2
    -1
      client/internal/services/cache.go
  31. +1
    -1
      client/internal/services/hub.go
  32. +30
    -0
      client/internal/services/object.go
  33. +16
    -18
      client/internal/services/package.go
  34. +25
    -11
      client/internal/services/service.go
  35. +69
    -45
      client/internal/services/storage.go
  36. +4
    -4
      client/internal/task/task.go
  37. +20
    -10
      client/main.go
  38. +3
    -1
      common/assets/confs/agent.config.json
  39. +7
    -2
      common/assets/confs/client.config.json
  40. +7
    -7
      common/assets/confs/scanner.config.json
  41. +2
    -2
      common/globals/pools.go
  42. +13
    -24
      common/pkgs/connectivity/collector.go
  43. +1
    -22
      common/pkgs/db2/bucket.go
  44. +0
    -18
      common/pkgs/db2/model/model.go
  45. +38
    -3
      common/pkgs/db2/object.go
  46. +10
    -0
      common/pkgs/db2/object_block.go
  47. +25
    -30
      common/pkgs/db2/package.go
  48. +8
    -4
      common/pkgs/db2/pinned_object.go
  49. +0
    -83
      common/pkgs/db2/storage_package.go
  50. +7
    -3
      common/pkgs/db2/user_bucket.go
  51. +0
    -24
      common/pkgs/distlock/reqbuilder/metadata_storage_package.go
  52. +2
    -3
      common/pkgs/distlock/service.go
  53. +0
    -2
      common/pkgs/downloader/config.go
  54. +13
    -10
      common/pkgs/downloader/downloader.go
  55. +95
    -304
      common/pkgs/downloader/iterator.go
  56. +17
    -31
      common/pkgs/downloader/lrc.go
  57. +5
    -4
      common/pkgs/downloader/lrc_strip_iterator.go
  58. +6
    -0
      common/pkgs/downloader/strategy/config.go
  59. +337
    -0
      common/pkgs/downloader/strategy/selector.go
  60. +7
    -6
      common/pkgs/downloader/strip_iterator.go
  61. +21
    -24
      common/pkgs/ioswitch2/fromto.go
  62. +100
    -12
      common/pkgs/ioswitch2/ops2/bypass.go
  63. +4
    -1
      common/pkgs/ioswitch2/ops2/chunked.go
  64. +2
    -1
      common/pkgs/ioswitch2/ops2/driver.go
  65. +49
    -9
      common/pkgs/ioswitch2/ops2/ec.go
  66. +6
    -4
      common/pkgs/ioswitch2/ops2/faas.go
  67. +30
    -18
      common/pkgs/ioswitch2/ops2/multipart.go
  68. +0
    -68
      common/pkgs/ioswitch2/ops2/ops.go
  69. +2
    -2
      common/pkgs/ioswitch2/ops2/range.go
  70. +115
    -0
      common/pkgs/ioswitch2/ops2/s2s.go
  71. +5
    -5
      common/pkgs/ioswitch2/ops2/shard_store.go
  72. +19
    -43
      common/pkgs/ioswitch2/ops2/shared_store.go
  73. +488
    -0
      common/pkgs/ioswitch2/parser/gen/generator.go
  74. +98
    -0
      common/pkgs/ioswitch2/parser/opt/chunked.go
  75. +38
    -0
      common/pkgs/ioswitch2/parser/opt/ec.go
  76. +154
    -0
      common/pkgs/ioswitch2/parser/opt/misc.go
  77. +105
    -0
      common/pkgs/ioswitch2/parser/opt/multipart.go
  78. +69
    -0
      common/pkgs/ioswitch2/parser/opt/pin.go
  79. +131
    -0
      common/pkgs/ioswitch2/parser/opt/s2s.go
  80. +98
    -0
      common/pkgs/ioswitch2/parser/opt/segment.go
  81. +26
    -1046
      common/pkgs/ioswitch2/parser/parser.go
  82. +35
    -0
      common/pkgs/ioswitch2/parser/state/state.go
  83. +9
    -8
      common/pkgs/ioswitchlrc/fromto.go
  84. +4
    -1
      common/pkgs/ioswitchlrc/ops2/chunked.go
  85. +2
    -2
      common/pkgs/ioswitchlrc/ops2/range.go
  86. +5
    -5
      common/pkgs/ioswitchlrc/ops2/shard_store.go
  87. +5
    -4
      common/pkgs/ioswitchlrc/parser/generator.go
  88. +5
    -6
      common/pkgs/ioswitchlrc/parser/passes.go
  89. +96
    -0
      common/pkgs/metacache/connectivity.go
  90. +27
    -0
      common/pkgs/metacache/host.go
  91. +75
    -0
      common/pkgs/metacache/hubmeta.go
  92. +121
    -0
      common/pkgs/metacache/simple.go
  93. +76
    -0
      common/pkgs/metacache/storagemeta.go
  94. +4
    -4
      common/pkgs/mq/agent/client.go
  95. +2
    -3
      common/pkgs/mq/agent/server.go
  96. +0
    -128
      common/pkgs/mq/agent/storage.go
  97. +0
    -19
      common/pkgs/mq/config.go
  98. +4
    -4
      common/pkgs/mq/coordinator/client.go
  99. +3
    -3
      common/pkgs/mq/coordinator/hub.go
  100. +59
    -0
      common/pkgs/mq/coordinator/object.go

+ 25
- 13
agent/internal/cmd/serve.go View File

@@ -19,8 +19,10 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/uploader"

"google.golang.org/grpc"
@@ -46,16 +48,16 @@ func serve(configPath string) {
}

stgglb.InitLocal(&config.Cfg().Local)
stgglb.InitMQPool(&config.Cfg().RabbitMQ)
stgglb.InitMQPool(config.Cfg().RabbitMQ)
stgglb.InitAgentRPCPool(&agtrpc.PoolConfig{})

// 获取Hub配置
hubCfg := downloadHubConfig()

// 初始化存储服务管理器
stgMgr := svcmgr.NewManager()
stgAgts := agtpool.NewPool()
for _, stg := range hubCfg.Storages {
err := stgMgr.CreateService(stg)
err := stgAgts.SetupAgent(stg)
if err != nil {
fmt.Printf("init storage %v: %v", stg.Storage.String(), err)
os.Exit(1)
@@ -66,7 +68,7 @@ func serve(configPath string) {
worker := exec.NewWorker()

// 初始化HTTP服务
httpSvr, err := http.NewServer(config.Cfg().ListenAddr, http.NewService(&worker, stgMgr))
httpSvr, err := http.NewServer(config.Cfg().ListenAddr, http.NewService(&worker, stgAgts))
if err != nil {
logger.Fatalf("new http server failed, err: %s", err.Error())
}
@@ -87,15 +89,15 @@ func serve(configPath string) {
hubCons := make([]cdssdk.HubConnectivity, 0, len(cons))
for _, con := range cons {
var delay *float32
if con.Delay != nil {
v := float32(con.Delay.Microseconds()) / 1000
if con.Latency != nil {
v := float32(con.Latency.Microseconds()) / 1000
delay = &v
}

hubCons = append(hubCons, cdssdk.HubConnectivity{
FromHubID: *stgglb.Local.HubID,
ToHubID: con.ToHubID,
Delay: delay,
Latency: delay,
TestTime: con.TestTime,
})
}
@@ -107,6 +109,13 @@ func serve(configPath string) {
})
conCol.CollectInPlace()

// 初始化元数据缓存服务
metacacheHost := metacache.NewHost()
go metacacheHost.Serve()
stgMeta := metacacheHost.AddStorageMeta()
hubMeta := metacacheHost.AddHubMeta()
conMeta := metacacheHost.AddConnectivity()

// 启动访问统计服务
acStat := accessstat.NewAccessStat(accessstat.Config{
// TODO 考虑放到配置里
@@ -120,18 +129,21 @@ func serve(configPath string) {
logger.Fatalf("new ipfs failed, err: %s", err.Error())
}

// 初始化下载策略选择器
strgSel := strategy.NewSelector(config.Cfg().DownloadStrategy, stgMeta, hubMeta, conMeta)

// 初始化下载器
dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgMgr)
dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgAgts, strgSel)

// 初始化上传器
uploader := uploader.NewUploader(distlock, &conCol, stgMgr)
uploader := uploader.NewUploader(distlock, &conCol, stgAgts, stgMeta)

// 初始化任务管理器
taskMgr := task.NewManager(distlock, &conCol, &dlder, acStat, stgMgr, uploader)
taskMgr := task.NewManager(distlock, &conCol, &dlder, acStat, stgAgts, uploader)

// 启动命令服务器
// TODO 需要设计AgentID持久化机制
agtSvr, err := agtmq.NewServer(cmdsvc.NewService(&taskMgr, stgMgr), config.Cfg().ID, &config.Cfg().RabbitMQ)
agtSvr, err := agtmq.NewServer(cmdsvc.NewService(&taskMgr, stgAgts), config.Cfg().ID, config.Cfg().RabbitMQ)
if err != nil {
logger.Fatalf("new agent server failed, err: %s", err.Error())
}
@@ -147,7 +159,7 @@ func serve(configPath string) {
logger.Fatalf("listen on %s failed, err: %s", listenAddr, err.Error())
}
s := grpc.NewServer()
agtrpc.RegisterAgentServer(s, grpcsvc.NewService(&worker, stgMgr))
agtrpc.RegisterAgentServer(s, grpcsvc.NewService(&worker, stgAgts))
go serveGRPC(s, lis)

go serveDistLock(distlock)


+ 12
- 10
agent/internal/config/config.go View File

@@ -3,25 +3,27 @@ package config
import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
log "gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
c "gitlink.org.cn/cloudream/common/utils/config"
stgmodels "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
"gitlink.org.cn/cloudream/storage/common/pkgs/grpc"
stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq"
)

type Config struct {
ID cdssdk.HubID `json:"id"`
ListenAddr string `json:"listenAddr"`
Local stgmodels.LocalMachineInfo `json:"local"`
GRPC *grpc.Config `json:"grpc"`
Logger log.Config `json:"logger"`
RabbitMQ stgmq.Config `json:"rabbitMQ"`
DistLock distlock.Config `json:"distlock"`
Connectivity connectivity.Config `json:"connectivity"`
Downloader downloader.Config `json:"downloader"`
ID cdssdk.HubID `json:"id"`
ListenAddr string `json:"listenAddr"`
Local stgmodels.LocalMachineInfo `json:"local"`
GRPC *grpc.Config `json:"grpc"`
Logger log.Config `json:"logger"`
RabbitMQ mq.Config `json:"rabbitMQ"`
DistLock distlock.Config `json:"distlock"`
Connectivity connectivity.Config `json:"connectivity"`
Downloader downloader.Config `json:"downloader"`
DownloadStrategy strategy.Config `json:"downloadStrategy"`
}

var cfg Config


+ 1
- 1
agent/internal/grpc/io.go View File

@@ -29,7 +29,7 @@ func (s *Service) ExecuteIOPlan(ctx context.Context, req *agtrpc.ExecuteIOPlanRe
defer s.swWorker.Remove(sw)

execCtx := exec.NewWithContext(ctx)
exec.SetValueByType(execCtx, s.stgMgr)
exec.SetValueByType(execCtx, s.stgAgts)
_, err = sw.Run(execCtx)
if err != nil {
log.Warnf("running io plan: %v", err)


+ 4
- 4
agent/internal/grpc/service.go View File

@@ -3,18 +3,18 @@ package grpc
import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
agentserver "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
)

type Service struct {
agentserver.AgentServer
swWorker *exec.Worker
stgMgr *svcmgr.Manager
stgAgts *agtpool.AgentPool
}

func NewService(swWorker *exec.Worker, stgMgr *svcmgr.Manager) *Service {
func NewService(swWorker *exec.Worker, stgAgts *agtpool.AgentPool) *Service {
return &Service{
swWorker: swWorker,
stgMgr: stgMgr,
stgAgts: stgAgts,
}
}

+ 1
- 1
agent/internal/http/hub_io.go View File

@@ -162,7 +162,7 @@ func (s *IOService) ExecuteIOPlan(ctx *gin.Context) {
defer s.svc.swWorker.Remove(sw)

execCtx := exec.NewWithContext(ctx.Request.Context())
exec.SetValueByType(execCtx, s.svc.stgMgr)
exec.SetValueByType(execCtx, s.svc.stgAgts)
_, err = sw.Run(execCtx)
if err != nil {
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("executing plan: %v", err)))


+ 4
- 4
agent/internal/http/service.go View File

@@ -2,17 +2,17 @@ package http

import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
)

type Service struct {
swWorker *exec.Worker
stgMgr *svcmgr.Manager
stgAgts *agtpool.AgentPool
}

func NewService(swWorker *exec.Worker, stgMgr *svcmgr.Manager) *Service {
func NewService(swWorker *exec.Worker, stgAgts *agtpool.AgentPool) *Service {
return &Service{
swWorker: swWorker,
stgMgr: stgMgr,
stgAgts: stgAgts,
}
}

+ 2
- 2
agent/internal/mq/cache.go View File

@@ -12,7 +12,7 @@ import (
)

func (svc *Service) CheckCache(msg *agtmq.CheckCache) (*agtmq.CheckCacheResp, *mq.CodeMessage) {
store, err := svc.stgMgr.GetShardStore(msg.StorageID)
store, err := svc.stgAgts.GetShardStore(msg.StorageID)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("get shard store of storage %v: %v", msg.StorageID, err))
}
@@ -31,7 +31,7 @@ func (svc *Service) CheckCache(msg *agtmq.CheckCache) (*agtmq.CheckCacheResp, *m
}

func (svc *Service) CacheGC(msg *agtmq.CacheGC) (*agtmq.CacheGCResp, *mq.CodeMessage) {
store, err := svc.stgMgr.GetShardStore(msg.StorageID)
store, err := svc.stgAgts.GetShardStore(msg.StorageID)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("get shard store of storage %v: %v", msg.StorageID, err))
}


+ 4
- 4
agent/internal/mq/service.go View File

@@ -2,17 +2,17 @@ package mq

import (
"gitlink.org.cn/cloudream/storage/agent/internal/task"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
)

type Service struct {
taskManager *task.Manager
stgMgr *svcmgr.Manager
stgAgts *agtpool.AgentPool
}

func NewService(taskMgr *task.Manager, stgMgr *svcmgr.Manager) *Service {
func NewService(taskMgr *task.Manager, stgAgts *agtpool.AgentPool) *Service {
return &Service{
taskManager: taskMgr,
stgMgr: stgMgr,
stgAgts: stgAgts,
}
}

+ 0
- 93
agent/internal/mq/storage.go View File

@@ -4,104 +4,11 @@ import (
"time"

"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
mytask "gitlink.org.cn/cloudream/storage/agent/internal/task"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
)

func (svc *Service) StartStorageLoadPackage(msg *agtmq.StartStorageLoadPackage) (*agtmq.StartStorageLoadPackageResp, *mq.CodeMessage) {
tsk := svc.taskManager.StartNew(mytask.NewStorageLoadPackage(msg.UserID, msg.PackageID, msg.StorageID))
return mq.ReplyOK(agtmq.NewStartStorageLoadPackageResp(tsk.ID()))
}

func (svc *Service) WaitStorageLoadPackage(msg *agtmq.WaitStorageLoadPackage) (*agtmq.WaitStorageLoadPackageResp, *mq.CodeMessage) {
logger.WithField("TaskID", msg.TaskID).Debugf("wait loading package")

tsk := svc.taskManager.FindByID(msg.TaskID)
if tsk == nil {
return nil, mq.Failed(errorcode.TaskNotFound, "task not found")
}

if msg.WaitTimeoutMs == 0 {
tsk.Wait()

errMsg := ""
if tsk.Error() != nil {
errMsg = tsk.Error().Error()
}

loadTsk := tsk.Body().(*mytask.StorageLoadPackage)

return mq.ReplyOK(agtmq.NewWaitStorageLoadPackageResp(true, errMsg, loadTsk.PackagePath, loadTsk.LocalBase, loadTsk.RemoteBase))

} else {
if tsk.WaitTimeout(time.Duration(msg.WaitTimeoutMs) * time.Millisecond) {

errMsg := ""
if tsk.Error() != nil {
errMsg = tsk.Error().Error()
}

loadTsk := tsk.Body().(*mytask.StorageLoadPackage)

return mq.ReplyOK(agtmq.NewWaitStorageLoadPackageResp(true, errMsg, loadTsk.PackagePath, loadTsk.LocalBase, loadTsk.RemoteBase))
}

return mq.ReplyOK(agtmq.NewWaitStorageLoadPackageResp(false, "", "", "", ""))
}
}

func (svc *Service) StorageCheck(msg *agtmq.StorageCheck) (*agtmq.StorageCheckResp, *mq.CodeMessage) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

shared, err := svc.stgMgr.GetSharedStore(msg.StorageID)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

loaded, err := shared.ListLoadedPackages()
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

return mq.ReplyOK(agtmq.NewStorageCheckResp(loaded))
}

func (svc *Service) StorageGC(msg *agtmq.StorageGC) (*agtmq.StorageGCResp, *mq.CodeMessage) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

shared, err := svc.stgMgr.GetSharedStore(msg.StorageID)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

var loadeds []stgmod.LoadedPackageID
for _, pkg := range msg.Packages {
loadeds = append(loadeds, stgmod.LoadedPackageID{
UserID: pkg.UserID,
PackageID: pkg.PackageID,
})
}

err = shared.PackageGC(loadeds)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

return mq.ReplyOK(agtmq.RespStorageGC())
}

func (svc *Service) StartStorageCreatePackage(msg *agtmq.StartStorageCreatePackage) (*agtmq.StartStorageCreatePackageResp, *mq.CodeMessage) {
return nil, mq.Failed(errorcode.OperationFailed, "not implemented")
// coorCli, err := stgglb.CoordinatorMQPool.Acquire()


+ 1
- 1
agent/internal/task/cache_move_package.go View File

@@ -39,7 +39,7 @@ func (t *CacheMovePackage) do(ctx TaskContext) error {
log.Debugf("begin with %v", logger.FormatStruct(t))
defer log.Debugf("end")

store, err := ctx.stgMgr.GetShardStore(t.storageID)
store, err := ctx.stgAgts.GetShardStore(t.storageID)
if err != nil {
return fmt.Errorf("get shard store of storage %v: %w", t.storageID, err)
}


+ 1
- 1
agent/internal/task/create_package.go View File

@@ -85,7 +85,7 @@ func (t *CreatePackage) Execute(task *task.Task[TaskContext], ctx TaskContext, c
return
}

up, err := ctx.uploader.BeginUpdate(t.userID, createResp.Package.PackageID, t.stgAffinity)
up, err := ctx.uploader.BeginUpdate(t.userID, createResp.Package.PackageID, t.stgAffinity, nil, nil)
if err != nil {
err = fmt.Errorf("begin update: %w", err)
log.Error(err.Error())


+ 0
- 339
agent/internal/task/storage_load_package.go View File

@@ -1,339 +0,0 @@
package task

import (
"fmt"
"io"
"math"
"time"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/bitmap"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/task"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/reflect2"
"gitlink.org.cn/cloudream/common/utils/sort2"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/ec"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/utils"
)

type StorageLoadPackage struct {
PackagePath string
LocalBase string
RemoteBase string

userID cdssdk.UserID
packageID cdssdk.PackageID
storageID cdssdk.StorageID
pinnedBlocks []stgmod.ObjectBlock
}

func NewStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) *StorageLoadPackage {
return &StorageLoadPackage{
userID: userID,
packageID: packageID,
storageID: storageID,
}
}
func (t *StorageLoadPackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) {
startTime := time.Now()
log := logger.WithType[StorageLoadPackage]("Task")
log.WithField("TaskID", task.ID()).
Infof("begin to load package %v to %v", t.packageID, t.storageID)

err := t.do(task, ctx)
if err == nil {
log.WithField("TaskID", task.ID()).
Infof("loading success, cost: %v", time.Since(startTime))
} else {
log.WithField("TaskID", task.ID()).
Warnf("loading package: %v, cost: %v", err, time.Since(startTime))
}

complete(err, CompleteOption{
RemovingDelay: time.Minute,
})
}

func (t *StorageLoadPackage) do(task *task.Task[TaskContext], ctx TaskContext) error {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return fmt.Errorf("new coordinator client: %w", err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

shared, err := ctx.stgMgr.GetSharedStore(t.storageID)
if err != nil {
return fmt.Errorf("get shared store of storage %v: %w", t.storageID, err)
}
t.PackagePath = utils.MakeLoadedPackagePath(t.userID, t.packageID)

getObjectDetails, err := coorCli.GetPackageObjectDetails(coormq.ReqGetPackageObjectDetails(t.packageID))
if err != nil {
return fmt.Errorf("getting package object details: %w", err)
}

shardstore, err := ctx.stgMgr.GetShardStore(t.storageID)
if err != nil {
return fmt.Errorf("get shard store of storage %v: %w", t.storageID, err)
}

mutex, err := reqbuilder.NewBuilder().
// 提前占位
Metadata().StoragePackage().CreateOne(t.userID, t.storageID, t.packageID).
// 保护在storage目录中下载的文件
Storage().Buzy(t.storageID).
// 保护下载文件时同时保存到IPFS的文件
Shard().Buzy(t.storageID).
MutexLock(ctx.distlock)
if err != nil {
return fmt.Errorf("acquire locks failed, err: %w", err)
}
defer mutex.Unlock()

for _, obj := range getObjectDetails.Objects {
err := t.downloadOne(coorCli, shardstore, shared, obj)
if err != nil {
return err
}
ctx.accessStat.AddAccessCounter(obj.Object.ObjectID, t.packageID, t.storageID, 1)
}

_, err = coorCli.StoragePackageLoaded(coormq.NewStoragePackageLoaded(t.userID, t.storageID, t.packageID, t.pinnedBlocks))
if err != nil {
return fmt.Errorf("loading package to storage: %w", err)
}

// TODO 要防止下载的临时文件被删除
return err
}

func (t *StorageLoadPackage) downloadOne(coorCli *coormq.Client, shardStore types.ShardStore, shared types.SharedStore, obj stgmod.ObjectDetail) error {
var file io.ReadCloser

switch red := obj.Object.Redundancy.(type) {
case *cdssdk.NoneRedundancy:
reader, err := t.downloadNoneOrRepObject(shardStore, obj)
if err != nil {
return fmt.Errorf("downloading object: %w", err)
}
file = reader

case *cdssdk.RepRedundancy:
reader, err := t.downloadNoneOrRepObject(shardStore, obj)
if err != nil {
return fmt.Errorf("downloading rep object: %w", err)
}
file = reader

case *cdssdk.ECRedundancy:
reader, pinnedBlocks, err := t.downloadECObject(coorCli, shardStore, obj, red)
if err != nil {
return fmt.Errorf("downloading ec object: %w", err)
}
file = reader
t.pinnedBlocks = append(t.pinnedBlocks, pinnedBlocks...)

default:
return fmt.Errorf("unknow redundancy type: %v", reflect2.TypeOfValue(obj.Object.Redundancy))
}
defer file.Close()

if _, err := shared.WritePackageObject(t.userID, t.packageID, obj.Object.Path, file); err != nil {
return fmt.Errorf("writting object to file: %w", err)
}

return nil
}

func (t *StorageLoadPackage) downloadNoneOrRepObject(shardStore types.ShardStore, obj stgmod.ObjectDetail) (io.ReadCloser, error) {
if len(obj.Blocks) == 0 && len(obj.PinnedAt) == 0 {
return nil, fmt.Errorf("no storage has this object")
}

file, err := shardStore.Open(types.NewOpen(obj.Object.FileHash))
if err != nil {
return nil, err
}

return file, nil
}

func (t *StorageLoadPackage) downloadECObject(coorCli *coormq.Client, shardStore types.ShardStore, obj stgmod.ObjectDetail, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, []stgmod.ObjectBlock, error) {
allStorages, err := t.sortDownloadStorages(coorCli, obj)
if err != nil {
return nil, nil, err
}
bsc, blocks := t.getMinReadingBlockSolution(allStorages, ecRed.K)
osc, _ := t.getMinReadingObjectSolution(allStorages, ecRed.K)
if bsc < osc {
var fileStrs []io.ReadCloser

rs, err := ec.NewStreamRs(ecRed.K, ecRed.N, ecRed.ChunkSize)
if err != nil {
return nil, nil, fmt.Errorf("new rs: %w", err)
}

for i := range blocks {
str, err := shardStore.Open(types.NewOpen(blocks[i].Block.FileHash))
if err != nil {
for i -= 1; i >= 0; i-- {
fileStrs[i].Close()
}
return nil, nil, fmt.Errorf("donwloading file: %w", err)
}

fileStrs = append(fileStrs, str)
}

fileReaders, filesCloser := io2.ToReaders(fileStrs)

var indexes []int
for _, b := range blocks {
indexes = append(indexes, b.Block.Index)
}

outputs, outputsCloser := io2.ToReaders(rs.ReconstructData(fileReaders, indexes))
return io2.AfterReadClosed(io2.Length(io2.ChunkedJoin(outputs, int(ecRed.ChunkSize)), obj.Object.Size), func(c io.ReadCloser) {
filesCloser()
outputsCloser()
}), nil, nil
}

// bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件
if osc == math.MaxFloat64 {
return nil, nil, fmt.Errorf("no enough blocks to reconstruct the file, want %d, get only %d", ecRed.K, len(blocks))
}

// 如果是直接读取的文件,那么就不需要Pin文件块
str, err := shardStore.Open(types.NewOpen(obj.Object.FileHash))
return str, nil, err
}

type downloadStorageInfo struct {
Storage stgmod.StorageDetail
ObjectPinned bool
Blocks []stgmod.ObjectBlock
Distance float64
}

func (t *StorageLoadPackage) sortDownloadStorages(coorCli *coormq.Client, obj stgmod.ObjectDetail) ([]*downloadStorageInfo, error) {
var stgIDs []cdssdk.StorageID
for _, id := range obj.PinnedAt {
if !lo.Contains(stgIDs, id) {
stgIDs = append(stgIDs, id)
}
}
for _, b := range obj.Blocks {
if !lo.Contains(stgIDs, b.StorageID) {
stgIDs = append(stgIDs, b.StorageID)
}
}

getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(stgIDs))
if err != nil {
return nil, fmt.Errorf("getting storage details: %w", err)
}
allStgs := make(map[cdssdk.StorageID]stgmod.StorageDetail)
for _, stg := range getStgs.Storages {
allStgs[stg.Storage.StorageID] = *stg
}

downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo)
for _, id := range obj.PinnedAt {
storage, ok := downloadStorageMap[id]
if !ok {
mod := allStgs[id]
storage = &downloadStorageInfo{
Storage: mod,
ObjectPinned: true,
Distance: t.getStorageDistance(mod),
}
downloadStorageMap[id] = storage
}

storage.ObjectPinned = true
}

for _, b := range obj.Blocks {
storage, ok := downloadStorageMap[b.StorageID]
if !ok {
mod := allStgs[b.StorageID]
storage = &downloadStorageInfo{
Storage: mod,
Distance: t.getStorageDistance(mod),
}
downloadStorageMap[b.StorageID] = storage
}

storage.Blocks = append(storage.Blocks, b)
}

return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int {
return sort2.Cmp(left.Distance, right.Distance)
}), nil
}

type downloadBlock struct {
Storage stgmod.StorageDetail
Block stgmod.ObjectBlock
}

func (t *StorageLoadPackage) getMinReadingBlockSolution(sortedStorages []*downloadStorageInfo, k int) (float64, []downloadBlock) {
gotBlocksMap := bitmap.Bitmap64(0)
var gotBlocks []downloadBlock
dist := float64(0.0)
for _, n := range sortedStorages {
for _, b := range n.Blocks {
if !gotBlocksMap.Get(b.Index) {
gotBlocks = append(gotBlocks, downloadBlock{
Storage: n.Storage,
Block: b,
})
gotBlocksMap.Set(b.Index, true)
dist += n.Distance
}

if len(gotBlocks) >= k {
return dist, gotBlocks
}
}
}

return math.MaxFloat64, gotBlocks
}

func (t *StorageLoadPackage) getMinReadingObjectSolution(sortedStorages []*downloadStorageInfo, k int) (float64, *stgmod.StorageDetail) {
dist := math.MaxFloat64
var downloadStg *stgmod.StorageDetail
for _, n := range sortedStorages {
if n.ObjectPinned && float64(k)*n.Distance < dist {
dist = float64(k) * n.Distance
stg := n.Storage
downloadStg = &stg
}
}

return dist, downloadStg
}

func (t *StorageLoadPackage) getStorageDistance(stg stgmod.StorageDetail) float64 {
if stgglb.Local.HubID != nil {
if stg.MasterHub.HubID == *stgglb.Local.HubID {
return consts.StorageDistanceSameStorage
}
}

if stg.MasterHub.LocationID == stgglb.Local.LocationID {
return consts.StorageDistanceSameLocation
}

return consts.StorageDistanceOther
}

+ 4
- 4
agent/internal/task/task.go View File

@@ -6,7 +6,7 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/accessstat"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/uploader"
)

@@ -16,7 +16,7 @@ type TaskContext struct {
connectivity *connectivity.Collector
downloader *downloader.Downloader
accessStat *accessstat.AccessStat
stgMgr *svcmgr.Manager
stgAgts *agtpool.AgentPool
uploader *uploader.Uploader
}

@@ -35,13 +35,13 @@ type Task = task.Task[TaskContext]
// CompleteOption 类型定义了任务完成时的选项,可用于定制化任务完成的处理方式
type CompleteOption = task.CompleteOption

func NewManager(distlock *distlock.Service, connectivity *connectivity.Collector, downloader *downloader.Downloader, accessStat *accessstat.AccessStat, stgMgr *svcmgr.Manager, uploader *uploader.Uploader) Manager {
func NewManager(distlock *distlock.Service, connectivity *connectivity.Collector, downloader *downloader.Downloader, accessStat *accessstat.AccessStat, stgAgts *agtpool.AgentPool, uploader *uploader.Uploader) Manager {
return task.NewManager(TaskContext{
distlock: distlock,
connectivity: connectivity,
downloader: downloader,
accessStat: accessStat,
stgMgr: stgMgr,
stgAgts: stgAgts,
uploader: uploader,
})
}

+ 1
- 1
client/internal/cmdline/getp.go View File

@@ -52,7 +52,7 @@ func getpByPath(cmdCtx *CommandContext, path string, output string) {
return
}

pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByName(userID, comps[0], comps[1])
pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1])
if err != nil {
fmt.Println(err)
return


+ 9
- 20
client/internal/cmdline/load.go View File

@@ -15,7 +15,7 @@ func init() {
cmd := cobra.Command{
Use: "load",
Short: "Load data from CDS to a storage service",
Args: cobra.ExactArgs(2),
Args: cobra.ExactArgs(3),
Run: func(cmd *cobra.Command, args []string) {
cmdCtx := GetCmdCtx(cmd)

@@ -30,9 +30,9 @@ func init() {
fmt.Printf("Invalid storage ID: %s\n", args[1])
}

loadByID(cmdCtx, cdssdk.PackageID(pkgID), cdssdk.StorageID(stgID))
loadByID(cmdCtx, cdssdk.PackageID(pkgID), cdssdk.StorageID(stgID), args[2])
} else {
loadByPath(cmdCtx, args[0], args[1])
loadByPath(cmdCtx, args[0], args[1], args[2])
}
},
}
@@ -40,7 +40,7 @@ func init() {
rootCmd.AddCommand(&cmd)
}

func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string) {
func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string, rootPath string) {
userID := cdssdk.UserID(1)

comps := strings.Split(strings.Trim(pkgPath, cdssdk.ObjectPathSeparator), cdssdk.ObjectPathSeparator)
@@ -49,7 +49,7 @@ func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string) {
return
}

pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByName(userID, comps[0], comps[1])
pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1])
if err != nil {
fmt.Println(err)
return
@@ -61,29 +61,18 @@ func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string) {
return
}

loadByID(cmdCtx, pkg.PackageID, stg.StorageID)
loadByID(cmdCtx, pkg.PackageID, stg.StorageID, rootPath)
}

func loadByID(cmdCtx *CommandContext, pkgID cdssdk.PackageID, stgID cdssdk.StorageID) {
func loadByID(cmdCtx *CommandContext, pkgID cdssdk.PackageID, stgID cdssdk.StorageID, rootPath string) {
userID := cdssdk.UserID(1)
startTime := time.Now()

hubID, taskID, err := cmdCtx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(userID, pkgID, stgID)
err := cmdCtx.Cmdline.Svc.StorageSvc().LoadPackage(userID, pkgID, stgID, rootPath)
if err != nil {
fmt.Println(err)
return
}

for {
complete, fullPath, err := cmdCtx.Cmdline.Svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10)
if err != nil {
fmt.Println(err)
return
}

if complete {
fmt.Printf("Package loaded to: %s in %v\n", fullPath, time.Since(startTime))
break
}
}
fmt.Printf("Package loaded to: %v:%v in %v\n", stgID, rootPath, time.Since(startTime))
}

+ 1
- 1
client/internal/cmdline/lsp.go View File

@@ -46,7 +46,7 @@ func lspByPath(cmdCtx *CommandContext, path string) {
return
}

pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByName(userID, comps[0], comps[1])
pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1])
if err != nil {
fmt.Println(err)
return


+ 15
- 7
client/internal/cmdline/newloadp.go View File

@@ -29,26 +29,34 @@ func init() {

packageName := args[2]
storageIDs := make([]cdssdk.StorageID, 0)
for _, sID := range args[3:] {
sID, err := strconv.ParseInt(sID, 10, 64)
rootPathes := make([]string, 0)
for _, dst := range args[3:] {
comps := strings.Split(dst, ":")
if len(comps) != 2 {
fmt.Println("invalid storage destination: ", dst)
return
}

sID, err := strconv.ParseInt(comps[0], 10, 64)
if err != nil {
fmt.Println(err)
return
}
storageIDs = append(storageIDs, cdssdk.StorageID(sID))
rootPathes = append(rootPathes, comps[1])
}

newloadp(cmdCtx, localPath, cdssdk.BucketID(bktID), packageName, storageIDs)
newloadp(cmdCtx, localPath, cdssdk.BucketID(bktID), packageName, storageIDs, rootPathes)
},
}

rootCmd.AddCommand(cmd)
}

func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, packageName string, storageIDs []cdssdk.StorageID) {
func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, packageName string, storageIDs []cdssdk.StorageID, rootPathes []string) {
userID := cdssdk.UserID(1)

up, err := cmdCtx.Cmdline.Svc.Uploader.BeginCreateLoad(userID, bucketID, packageName, storageIDs)
up, err := cmdCtx.Cmdline.Svc.Uploader.BeginCreateLoad(userID, bucketID, packageName, storageIDs, rootPathes)
if err != nil {
fmt.Println(err)
return
@@ -94,7 +102,7 @@ func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, pac
}

wr := table.NewWriter()
wr.AppendHeader(table.Row{"ID", "Name", "FileCount", "TotalSize", "LoadedDirs"})
wr.AppendRow(table.Row{ret.Package.PackageID, ret.Package.Name, fileCount, totalSize, strings.Join(ret.LoadedDirs, "\n")})
wr.AppendHeader(table.Row{"ID", "Name", "FileCount", "TotalSize"})
wr.AppendRow(table.Row{ret.Package.PackageID, ret.Package.Name, fileCount, totalSize})
fmt.Println(wr.Render())
}

+ 1
- 1
client/internal/cmdline/object.go View File

@@ -33,7 +33,7 @@ var _ = MustAddCmd(func(ctx CommandContext, packageID cdssdk.PackageID, rootPath
storageAff = storageAffinity[0]
}

up, err := ctx.Cmdline.Svc.Uploader.BeginUpdate(userID, packageID, storageAff)
up, err := ctx.Cmdline.Svc.Uploader.BeginUpdate(userID, packageID, storageAff, nil, nil)
if err != nil {
return fmt.Errorf("begin updating package: %w", err)
}


+ 0
- 23
client/internal/cmdline/package.go View File

@@ -181,26 +181,6 @@ func PackageGetCachedStorages(ctx CommandContext, packageID cdssdk.PackageID) er
return nil
}

// PackageGetLoadedStorages 获取指定包裹的已加载节点信息。
//
// 参数:
//
// ctx - 命令上下文。
// packageID - 包裹ID。
//
// 返回值:
//
// error - 操作过程中发生的任何错误。
func PackageGetLoadedStorages(ctx CommandContext, packageID cdssdk.PackageID) error {
userID := cdssdk.UserID(1)
hubIDs, err := ctx.Cmdline.Svc.PackageSvc().GetLoadedStorages(userID, packageID)
fmt.Printf("hubIDs: %v\n", hubIDs)
if err != nil {
return fmt.Errorf("get package %d loaded storages failed, err: %w", packageID, err)
}
return nil
}

// 初始化命令行工具的包相关命令。
func init() {
commands.MustAdd(PackageListBucketPackages, "pkg", "ls")
@@ -213,7 +193,4 @@ func init() {

// 查询package缓存到哪些节点
commands.MustAdd(PackageGetCachedStorages, "pkg", "cached")

// 查询package调度到哪些节点
commands.MustAdd(PackageGetLoadedStorages, "pkg", "loaded")
}

+ 2
- 2
client/internal/cmdline/put.go View File

@@ -48,7 +48,7 @@ func init() {
return
}

pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByName(userID, comps[0], comps[1])
pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1])
if err != nil {
if codeMsg, ok := err.(*mq.CodeMessageError); ok && codeMsg.Code == errorcode.DataNotFound {
pkg2, err := cmdCtx.Cmdline.Svc.PackageSvc().Create(userID, bkt.BucketID, comps[1])
@@ -68,7 +68,7 @@ func init() {
storageAff = cdssdk.StorageID(stgID)
}

up, err := cmdCtx.Cmdline.Svc.Uploader.BeginUpdate(userID, pkg.PackageID, storageAff)
up, err := cmdCtx.Cmdline.Svc.Uploader.BeginUpdate(userID, pkg.PackageID, storageAff, nil, nil)
if err != nil {
fmt.Printf("begin updating package: %v\n", err)
return


+ 7
- 1
client/internal/cmdline/serve.go View File

@@ -3,6 +3,7 @@ package cmdline
import (
"fmt"

"gitlink.org.cn/cloudream/storage/client/internal/config"
"gitlink.org.cn/cloudream/storage/client/internal/http"
)

@@ -17,8 +18,13 @@ func ServeHTTP(ctx CommandContext, args []string) error {
listenAddr = args[0]
}

awsAuth, err := http.NewAWSAuth(config.Cfg().AuthAccessKey, config.Cfg().AuthSecretKey)
if err != nil {
return fmt.Errorf("new aws auth: %w", err)
}

// 创建一个新的HTTP服务器实例。
httpSvr, err := http.NewServer(listenAddr, ctx.Cmdline.Svc)
httpSvr, err := http.NewServer(listenAddr, ctx.Cmdline.Svc, awsAuth)
if err != nil {
return fmt.Errorf("new http server: %w", err)
}


+ 0
- 39
client/internal/cmdline/storage.go View File

@@ -7,42 +7,6 @@ import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
)

// StorageLoadPackage 加载指定的包到存储系统中。
// ctx: 命令上下文,提供必要的服务和环境配置。
// packageID: 需要加载的包的唯一标识。
// storageID: 目标存储系统的唯一标识。
// 返回值: 执行过程中遇到的任何错误。
func StorageLoadPackage(ctx CommandContext, packageID cdssdk.PackageID, storageID cdssdk.StorageID) error {
startTime := time.Now()
defer func() {
// 打印函数执行时间
fmt.Printf("%v\n", time.Since(startTime).Seconds())
}()

// 开始加载包到存储系统
hubID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(1, packageID, storageID)
if err != nil {
return fmt.Errorf("start loading package to storage: %w", err)
}

// 循环等待加载完成
for {
complete, fullPath, err := ctx.Cmdline.Svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10)
if complete {
if err != nil {
return fmt.Errorf("moving complete with: %w", err)
}

fmt.Printf("Load To: %s\n", fullPath)
return nil
}

if err != nil {
return fmt.Errorf("wait moving: %w", err)
}
}
}

// StorageCreatePackage 创建一个新的包并上传到指定的存储系统。
// ctx: 命令上下文,提供必要的服务和环境配置。
// bucketID: 存储桶的唯一标识,包将被上传到这个存储桶中。
@@ -83,9 +47,6 @@ func StorageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name str

// 初始化函数,注册加载包和创建包的命令到命令行解析器。
func init() {
// 注册加载包命令
commands.MustAdd(StorageLoadPackage, "stg", "pkg", "load")

// 注册创建包命令
commands.MustAdd(StorageCreatePackage, "stg", "pkg", "new")
}

+ 25
- 24
client/internal/cmdline/test.go View File

@@ -9,6 +9,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/future"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser"
@@ -26,20 +27,20 @@ func init() {
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

stgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{1, 2, 3}))
stgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{1, 2, 3, 4}))
if err != nil {
panic(err)
}

ft := ioswitch2.NewFromTo()
ft.SegmentParam = cdssdk.NewSegmentRedundancy(1024*100*3, 3)
ft.AddFrom(ioswitch2.NewFromShardstore("FullE58B075E9F7C5744CB1C2CBBECC30F163DE699DCDA94641DDA34A0C2EB01E240", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0)))
ft.AddFrom(ioswitch2.NewFromShardstore("FullEA14D17544786427C3A766F0C5E6DEB221D00D3DE1875BBE3BD0AD5C8118C1A0", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(1)))
ft.AddFrom(ioswitch2.NewFromShardstore("Full4D142C458F2399175232D5636235B09A84664D60869E925EB20FFBE931045BDD", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(2)))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[2].MasterHub, *stgs.Storages[2], ioswitch2.RawStream(), "0"))
// ft.AddFrom(ioswitch2.NewFromShardstore("CA56E5934859E0220D1F3B848F41619D937D7B874D4EBF63A6CC98D2D8E3280F", *stgs.Storages[0].MasterHub, stgs.Storages[0].Storage, ioswitch2.RawStream()))
// ft.AddFrom(ioswitch2.NewFromShardstore("FullE58B075E9F7C5744CB1C2CBBECC30F163DE699DCDA94641DDA34A0C2EB01E240", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(0)))
// ft.AddFrom(ioswitch2.NewFromShardstore("FullEA14D17544786427C3A766F0C5E6DEB221D00D3DE1875BBE3BD0AD5C8118C1A0", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1)))
// ft.AddFrom(ioswitch2.NewFromShardstore("Full4D142C458F2399175232D5636235B09A84664D60869E925EB20FFBE931045BDD", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2)))
ft.AddFrom(ioswitch2.NewFromShardstore("Full03B5CF4B57251D7BB4308FE5C81AF5A21E2B28994CC7CB1FB37698DAE271DC22", *stgs.Storages[2].MasterHub, *stgs.Storages[2], ioswitch2.RawStream()))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[3].MasterHub, *stgs.Storages[3], ioswitch2.RawStream(), "0"))
// ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0), "0"))
// ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1", exec.Range{Offset: 1}))
// ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1", math2.Range{Offset: 1}))
// ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(0), "0"))
// ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1"))
// ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2), "2"))
@@ -86,9 +87,9 @@ func init() {

ft := ioswitch2.NewFromTo()
ft.SegmentParam = cdssdk.NewSegmentRedundancy(1293, 3)
ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[0].MasterHub, stgs.Storages[0].Storage, ioswitch2.RawStream()))
ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.RawStream()))
// ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0), "0"))
ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1", exec.Range{Offset: 1}))
ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1", math2.Range{Offset: 1}))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2), "2"))

plans := exec.NewPlanBuilder()
@@ -133,15 +134,15 @@ func init() {
ft := ioswitch2.NewFromTo()
ft.SegmentParam = cdssdk.NewSegmentRedundancy(1293, 3)
ft.ECParam = &cdssdk.DefaultECRedundancy
ft.AddFrom(ioswitch2.NewFromShardstore("22CC59CE3297F78F2D20DC1E33181B77F21E6782097C94E1664F99F129834069", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0)))
ft.AddFrom(ioswitch2.NewFromShardstore("5EAC20EB3EBC7B5FA176C5BD1C01041FB2A6D14C35D6A232CA83D7F1E4B01ADE", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(1)))
ft.AddFrom(ioswitch2.NewFromShardstore("A9BC1802F37100C80C72A1D6E8F53C0E0B73F85F99153D8C78FB01CEC9D8D903", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(2)))
ft.AddFrom(ioswitch2.NewFromShardstore("22CC59CE3297F78F2D20DC1E33181B77F21E6782097C94E1664F99F129834069", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(0)))
ft.AddFrom(ioswitch2.NewFromShardstore("5EAC20EB3EBC7B5FA176C5BD1C01041FB2A6D14C35D6A232CA83D7F1E4B01ADE", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1)))
ft.AddFrom(ioswitch2.NewFromShardstore("A9BC1802F37100C80C72A1D6E8F53C0E0B73F85F99153D8C78FB01CEC9D8D903", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2)))

toDrv, drvStr := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), exec.NewRange(0, 1293))
toDrv, drvStr := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), math2.NewRange(0, 1293))
ft.AddTo(toDrv)
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(0), "EC0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(1), "EC1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(2), "EC2"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(0), "EC0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(1), "EC1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(2), "EC2"))

plans := exec.NewPlanBuilder()
err = parser.Parse(ft, plans)
@@ -202,10 +203,10 @@ func init() {

ft := ioswitch2.NewFromTo()
ft.ECParam = &cdssdk.DefaultECRedundancy
ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.RawStream()))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(0), "EC0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(1), "EC1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(2), "EC2"))
ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.RawStream()))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(0), "EC0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(1), "EC1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(2), "EC2"))

plans := exec.NewPlanBuilder()
err = parser.Parse(ft, plans)
@@ -253,10 +254,10 @@ func init() {
ft := ioswitch2.NewFromTo()
ft.SegmentParam = cdssdk.NewSegmentRedundancy(1293, 3)
ft.ECParam = &cdssdk.DefaultECRedundancy
ft.AddFrom(ioswitch2.NewFromShardstore("22CC59CE3297F78F2D20DC1E33181B77F21E6782097C94E1664F99F129834069", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0)))
ft.AddFrom(ioswitch2.NewFromShardstore("5EAC20EB3EBC7B5FA176C5BD1C01041FB2A6D14C35D6A232CA83D7F1E4B01ADE", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(1)))
ft.AddFrom(ioswitch2.NewFromShardstore("A9BC1802F37100C80C72A1D6E8F53C0E0B73F85F99153D8C78FB01CEC9D8D903", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(2)))
ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.RawStream(), "raw", exec.NewRange(10, 645)))
ft.AddFrom(ioswitch2.NewFromShardstore("22CC59CE3297F78F2D20DC1E33181B77F21E6782097C94E1664F99F129834069", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(0)))
ft.AddFrom(ioswitch2.NewFromShardstore("5EAC20EB3EBC7B5FA176C5BD1C01041FB2A6D14C35D6A232CA83D7F1E4B01ADE", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1)))
ft.AddFrom(ioswitch2.NewFromShardstore("A9BC1802F37100C80C72A1D6E8F53C0E0B73F85F99153D8C78FB01CEC9D8D903", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2)))
ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.RawStream(), "raw", math2.NewRange(10, 645)))

plans := exec.NewPlanBuilder()
err = parser.Parse(ft, plans)


+ 14
- 9
client/internal/config/config.go View File

@@ -3,24 +3,29 @@ package config
import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/config"
stgmodels "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq"
)

type Config struct {
Local stgmodels.LocalMachineInfo `json:"local"`
AgentGRPC agtrpc.PoolConfig `json:"agentGRPC"`
Logger logger.Config `json:"logger"`
RabbitMQ stgmq.Config `json:"rabbitMQ"`
DistLock distlock.Config `json:"distlock"`
Connectivity connectivity.Config `json:"connectivity"`
Downloader downloader.Config `json:"downloader"`
StorageID cdssdk.StorageID `json:"storageID"` // TODO 进行访问量统计时,当前客户端所属的存储ID。临时解决方案。
Local stgmodels.LocalMachineInfo `json:"local"`
AgentGRPC agtrpc.PoolConfig `json:"agentGRPC"`
Logger logger.Config `json:"logger"`
RabbitMQ mq.Config `json:"rabbitMQ"`
DistLock distlock.Config `json:"distlock"`
Connectivity connectivity.Config `json:"connectivity"`
Downloader downloader.Config `json:"downloader"`
DownloadStrategy strategy.Config `json:"downloadStrategy"`
StorageID cdssdk.StorageID `json:"storageID"` // TODO 进行访问量统计时,当前客户端所属的存储ID。临时解决方案。
AuthAccessKey string `json:"authAccessKey"` // TODO 临时办法
AuthSecretKey string `json:"authSecretKey"`
MaxHTTPBodySize int64 `json:"maxHttpBodySize"`
}

var cfg Config


+ 197
- 0
client/internal/http/aws_auth.go View File

@@ -0,0 +1,197 @@
package http

import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"net/http"
"strings"
"time"

"github.com/aws/aws-sdk-go-v2/aws"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/gin-gonic/gin"
"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage/client/internal/config"
)

const (
	// AuthRegion is the region string used for SigV4 signing. The value is a
	// placeholder ("any") — this service does not use real AWS regions.
	AuthRegion = "any"
	// AuthService is the service name used in the SigV4 credential scope.
	AuthService = "jcs"
	// AuthorizationHeader is the HTTP header carrying the SigV4 signature.
	AuthorizationHeader = "Authorization"
)

// AWSAuth verifies AWS Signature Version 4 signed requests by re-signing each
// incoming request with the server's own static credentials and comparing
// signatures. It is used as gin middleware (see Auth / AuthWithoutBody).
type AWSAuth struct {
	cred   aws.Credentials // resolved static credentials (access/secret key)
	signer *v4.Signer      // shared SigV4 signer; note Auth builds its own per call
}

// NewAWSAuth builds an AWSAuth middleware from a static access/secret key
// pair. The credentials are resolved once up front so request handling only
// has to sign, never to fetch credentials.
func NewAWSAuth(accessKey string, secretKey string) (*AWSAuth, error) {
	provider := credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")

	cred, err := provider.Retrieve(context.TODO())
	if err != nil {
		return nil, err
	}

	auth := AWSAuth{
		cred:   cred,
		signer: v4.NewSigner(),
	}
	return &auth, nil
}

// Auth is gin middleware that verifies a SigV4-signed request that carries a
// body. It reads and hashes the full body, re-signs an equivalent request with
// the server's credentials, and compares the resulting signature against the
// one the client sent. On success the body is restored so downstream handlers
// can read it again.
func (a *AWSAuth) Auth(c *gin.Context) {
	authorizationHeader := c.GetHeader(AuthorizationHeader)
	if authorizationHeader == "" {
		c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.Unauthorized, "authorization header is missing"))
		return
	}

	// Only the signed-headers list and the signature are needed for
	// verification; the Credential scope is ignored here.
	_, headers, reqSig, err := parseAuthorizationHeader(authorizationHeader)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.Unauthorized, "invalid Authorization header format"))
		return
	}

	// Limit the request body size, then read it fully: the SigV4 payload hash
	// covers the entire body.
	rd := io.LimitReader(c.Request.Body, config.Cfg().MaxHTTPBodySize)
	body, err := io.ReadAll(rd)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "read request body failed"))
		return
	}

	payloadHash := sha256.Sum256(body)
	hexPayloadHash := hex.EncodeToString(payloadHash[:])

	// Build a fresh request for verification, copying only the headers the
	// client declared as signed, so our signature is computed over the same
	// canonical request the client signed.
	verifyReq, err := http.NewRequest(c.Request.Method, c.Request.URL.String(), nil)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.OperationFailed, err.Error()))
		return
	}
	for _, h := range headers {
		verifyReq.Header.Add(h, c.Request.Header.Get(h))
	}
	verifyReq.Host = c.Request.Host

	// X-Amz-Date uses the compact ISO-8601 form, e.g. 20130524T000000Z.
	timestamp, err := time.Parse("20060102T150405Z", c.GetHeader("X-Amz-Date"))
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "invalid X-Amz-Date header format"))
		return
	}

	// NOTE(review): a fresh signer is built here while AuthWithoutBody reuses
	// a.signer — presumably both are safe; confirm and unify.
	signer := v4.NewSigner()
	err = signer.SignHTTP(context.TODO(), a.cred, verifyReq, hexPayloadHash, AuthService, AuthRegion, timestamp)
	if err != nil {
		logger.Warnf("sign request: %v", err)
		c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.OperationFailed, "sign request failed"))
		return
	}

	// Reject when our computed signature differs from the client's.
	verifySig := a.getSignature(verifyReq)
	if !strings.EqualFold(verifySig, reqSig) {
		c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.Unauthorized, "signature mismatch"))
		return
	}

	// The body was consumed above; put a replayable copy back for handlers.
	c.Request.Body = io.NopCloser(bytes.NewReader(body))

	c.Next()
}

// AuthWithoutBody is gin middleware that verifies a SigV4-signed request
// whose body is not covered by the signature (e.g. streaming uploads). It
// re-signs an equivalent bodiless request with the server's credentials and
// compares the resulting signature against the one the client sent.
func (a *AWSAuth) AuthWithoutBody(c *gin.Context) {
	authorizationHeader := c.GetHeader(AuthorizationHeader)
	if authorizationHeader == "" {
		c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.Unauthorized, "authorization header is missing"))
		return
	}

	// Only the signed-headers list and the signature are needed here; the
	// Credential scope is ignored.
	_, headers, reqSig, err := parseAuthorizationHeader(authorizationHeader)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.Unauthorized, "invalid Authorization header format"))
		return
	}

	// Build a fresh request for verification, copying only the headers the
	// client declared as signed.
	verifyReq, err := http.NewRequest(c.Request.Method, c.Request.URL.String(), nil)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.OperationFailed, err.Error()))
		return
	}
	for _, h := range headers {
		verifyReq.Header.Add(h, c.Request.Header.Get(h))
	}
	verifyReq.Host = c.Request.Host

	// X-Amz-Date uses the compact ISO-8601 form, e.g. 20130524T000000Z.
	timestamp, err := time.Parse("20060102T150405Z", c.GetHeader("X-Amz-Date"))
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "invalid X-Amz-Date header format"))
		return
	}

	// NOTE(review): the payload hash is passed as "" rather than the SHA-256
	// of an empty body or UNSIGNED-PAYLOAD — confirm clients sign the same way.
	err = a.signer.SignHTTP(context.TODO(), a.cred, verifyReq, "", AuthService, AuthRegion, timestamp)
	if err != nil {
		logger.Warnf("sign request: %v", err)
		c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.OperationFailed, "sign request failed"))
		return
	}

	// BUG FIX: the comparison was inverted — the request was rejected exactly
	// when the signatures matched (and accepted when they differed). Reject
	// only on mismatch, consistent with Auth above.
	verifySig := a.getSignature(verifyReq)
	if !strings.EqualFold(verifySig, reqSig) {
		c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.Unauthorized, "signature mismatch"))
		return
	}

	c.Next()
}

// 解析 Authorization 头部
func parseAuthorizationHeader(authorizationHeader string) (string, []string, string, error) {
if !strings.HasPrefix(authorizationHeader, "AWS4-HMAC-SHA256 ") {
return "", nil, "", fmt.Errorf("invalid Authorization header format")
}

authorizationHeader = strings.TrimPrefix(authorizationHeader, "AWS4-HMAC-SHA256")

parts := strings.Split(authorizationHeader, ",")
if len(parts) != 3 {
return "", nil, "", fmt.Errorf("invalid Authorization header format")
}

var credential, signedHeaders, signature string
for _, part := range parts {
part = strings.TrimSpace(part)

if strings.HasPrefix(part, "Credential=") {
credential = strings.TrimPrefix(part, "Credential=")
}
if strings.HasPrefix(part, "SignedHeaders=") {
signedHeaders = strings.TrimPrefix(part, "SignedHeaders=")
}
if strings.HasPrefix(part, "Signature=") {
signature = strings.TrimPrefix(part, "Signature=")
}
}

if credential == "" || signedHeaders == "" || signature == "" {
return "", nil, "", fmt.Errorf("missing necessary parts in Authorization header")
}

headers := strings.Split(signedHeaders, ";")
return credential, headers, signature, nil
}

// getSignature extracts the Signature= value from req's Authorization header,
// i.e. the signature most recently written onto req by the signer. It returns
// "" when no Signature= marker is present.
func (a *AWSAuth) getSignature(req *http.Request) string {
	const marker = "Signature="
	auth := req.Header.Get(AuthorizationHeader)
	if idx := strings.Index(auth, marker); idx >= 0 {
		return auth[idx+len(marker):]
	}
	return ""
}

+ 50
- 5
client/internal/http/object.go View File

@@ -28,10 +28,10 @@ func (s *Server) Object() *ObjectService {
}
}

func (s *ObjectService) List(ctx *gin.Context) {
log := logger.WithField("HTTP", "Object.List")
func (s *ObjectService) ListByPath(ctx *gin.Context) {
log := logger.WithField("HTTP", "Object.ListByPath")

var req cdsapi.ObjectList
var req cdsapi.ObjectListByPath
if err := ctx.ShouldBindQuery(&req); err != nil {
log.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
@@ -45,7 +45,27 @@ func (s *ObjectService) List(ctx *gin.Context) {
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.ObjectListResp{Objects: objs}))
ctx.JSON(http.StatusOK, OK(cdsapi.ObjectListByPathResp{Objects: objs}))
}

func (s *ObjectService) ListByIDs(ctx *gin.Context) {
log := logger.WithField("HTTP", "Object.ListByIDs")

var req cdsapi.ObjectListByIDs
if err := ctx.ShouldBindJSON(&req); err != nil {
log.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

objs, err := s.svc.ObjectSvc().GetByIDs(req.UserID, req.ObjectIDs)
if err != nil {
log.Warnf("listing objects: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("listing objects: %v", err)))
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.ObjectListByIDsResp{Objects: objs}))
}

type ObjectUploadReq struct {
@@ -63,7 +83,7 @@ func (s *ObjectService) Upload(ctx *gin.Context) {
return
}

up, err := s.svc.Uploader.BeginUpdate(req.Info.UserID, req.Info.PackageID, req.Info.Affinity)
up, err := s.svc.Uploader.BeginUpdate(req.Info.UserID, req.Info.PackageID, req.Info.Affinity, req.Info.LoadTo, req.Info.LoadToPath)
if err != nil {
log.Warnf("begin update: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("begin update: %v", err)))
@@ -138,6 +158,11 @@ func (s *ObjectService) Download(ctx *gin.Context) {
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "download object failed"))
return
}
if file.File == nil {
log.Warnf("object not found: %d", req.ObjectID)
ctx.JSON(http.StatusOK, Failed(errorcode.DataNotFound, "object not found"))
return
}
defer file.File.Close()

ctx.Header("Content-Disposition", "attachment; filename="+url.PathEscape(path.Base(file.Object.Path)))
@@ -338,6 +363,26 @@ func (s *ObjectService) DeleteByPath(ctx *gin.Context) {
ctx.JSON(http.StatusOK, OK(nil))
}

func (s *ObjectService) Clone(ctx *gin.Context) {
log := logger.WithField("HTTP", "Object.Clone")

var req cdsapi.ObjectClone
if err := ctx.ShouldBindJSON(&req); err != nil {
log.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

objs, err := s.svc.ObjectSvc().Clone(req.UserID, req.Clonings)
if err != nil {
log.Warnf("cloning object: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "clone object failed"))
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.ObjectCloneResp{Objects: objs}))
}

func (s *ObjectService) GetPackageObjects(ctx *gin.Context) {
log := logger.WithField("HTTP", "Object.GetPackageObjects")



+ 35
- 30
client/internal/http/package.go View File

@@ -46,24 +46,24 @@ func (s *PackageService) Get(ctx *gin.Context) {
ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetResp{Package: *pkg}))
}

func (s *PackageService) GetByName(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.GetByName")
func (s *PackageService) GetByFullName(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.GetByFullName")

var req cdsapi.PackageGetByName
var req cdsapi.PackageGetByFullName
if err := ctx.ShouldBindQuery(&req); err != nil {
log.Warnf("binding query: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

pkg, err := s.svc.PackageSvc().GetByName(req.UserID, req.BucketName, req.PackageName)
pkg, err := s.svc.PackageSvc().GetByFullName(req.UserID, req.BucketName, req.PackageName)
if err != nil {
log.Warnf("getting package by name: %s", err.Error())
ctx.JSON(http.StatusOK, FailedError(err))
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetByNameResp{Package: *pkg}))
ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetByFullNameResp{Package: *pkg}))
}

// Create 处理创建新包的HTTP请求。
@@ -103,7 +103,13 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) {
return
}

up, err := s.svc.Uploader.BeginCreateLoad(req.Info.UserID, req.Info.BucketID, req.Info.Name, req.Info.LoadTo)
if len(req.Info.LoadTo) != len(req.Info.LoadToPath) {
log.Warnf("load to and load to path count not match")
ctx.JSON(http.StatusOK, Failed(errorcode.BadArgument, "load to and load to path count not match"))
return
}

up, err := s.svc.Uploader.BeginCreateLoad(req.Info.UserID, req.Info.BucketID, req.Info.Name, req.Info.LoadTo, req.Info.LoadToPath)
if err != nil {
log.Warnf("begin package create load: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("begin package create load: %v", err)))
@@ -149,7 +155,7 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) {
objs[i] = ret.Objects[pathes[i]]
}

ctx.JSON(http.StatusOK, OK(cdsapi.PackageCreateLoadResp{Package: ret.Package, Objects: objs, LoadedDirs: ret.LoadedDirs}))
ctx.JSON(http.StatusOK, OK(cdsapi.PackageCreateLoadResp{Package: ret.Package, Objects: objs}))

}
func (s *PackageService) Delete(ctx *gin.Context) {
@@ -172,6 +178,28 @@ func (s *PackageService) Delete(ctx *gin.Context) {
ctx.JSON(http.StatusOK, OK(nil))
}

func (s *PackageService) Clone(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.Clone")

var req cdsapi.PackageClone
if err := ctx.ShouldBindJSON(&req); err != nil {
log.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

pkg, err := s.svc.PackageSvc().Clone(req.UserID, req.PackageID, req.BucketID, req.Name)
if err != nil {
log.Warnf("cloning package: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "clone package failed"))
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.PackageCloneResp{
Package: pkg,
}))
}

func (s *PackageService) ListBucketPackages(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.ListBucketPackages")

@@ -214,26 +242,3 @@ func (s *PackageService) GetCachedStorages(ctx *gin.Context) {

ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetCachedStoragesResp{PackageCachingInfo: resp}))
}

// GetLoadedStorages 处理获取包的加载节点的HTTP请求。
func (s *PackageService) GetLoadedStorages(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.GetLoadedStorages")

var req cdsapi.PackageGetLoadedStoragesReq
if err := ctx.ShouldBindQuery(&req); err != nil {
log.Warnf("binding query: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

stgIDs, err := s.svc.PackageSvc().GetLoadedStorages(req.UserID, req.PackageID)
if err != nil {
log.Warnf("get package loaded storages failed: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package loaded storages failed"))
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetLoadedStoragesResp{
StorageIDs: stgIDs,
}))
}

+ 48
- 4
client/internal/http/server.go View File

@@ -11,15 +11,17 @@ type Server struct {
engine *gin.Engine
listenAddr string
svc *services.Service
awsAuth *AWSAuth
}

func NewServer(listenAddr string, svc *services.Service) (*Server, error) {
func NewServer(listenAddr string, svc *services.Service, awsAuth *AWSAuth) (*Server, error) {
engine := gin.New()

return &Server{
engine: engine,
listenAddr: listenAddr,
svc: svc,
awsAuth: awsAuth,
}, nil
}

@@ -43,7 +45,10 @@ func (s *Server) initRouters() {

// initTemp(rt, s)

rt.GET(cdsapi.ObjectListPath, s.Object().List)
s.routeV1(s.engine)

rt.GET(cdsapi.ObjectListPathByPath, s.Object().ListByPath)
rt.POST(cdsapi.ObjectListByIDsPath, s.Object().ListByIDs)
rt.GET(cdsapi.ObjectDownloadPath, s.Object().Download)
rt.GET(cdsapi.ObjectDownloadByPathPath, s.Object().DownloadByPath)
rt.POST(cdsapi.ObjectUploadPath, s.Object().Upload)
@@ -53,15 +58,16 @@ func (s *Server) initRouters() {
rt.POST(cdsapi.ObjectMovePath, s.Object().Move)
rt.POST(cdsapi.ObjectDeletePath, s.Object().Delete)
rt.POST(cdsapi.ObjectDeleteByPathPath, s.Object().DeleteByPath)
rt.POST(cdsapi.ObjectClonePath, s.Object().Clone)

rt.GET(cdsapi.PackageGetPath, s.Package().Get)
rt.GET(cdsapi.PackageGetByNamePath, s.Package().GetByName)
rt.GET(cdsapi.PackageGetByFullNamePath, s.Package().GetByFullName)
rt.POST(cdsapi.PackageCreatePath, s.Package().Create)
rt.POST(cdsapi.PackageCreateLoadPath, s.Package().CreateLoad)
rt.POST(cdsapi.PackageDeletePath, s.Package().Delete)
rt.POST(cdsapi.PackageClonePath, s.Package().Clone)
rt.GET(cdsapi.PackageListBucketPackagesPath, s.Package().ListBucketPackages)
rt.GET(cdsapi.PackageGetCachedStoragesPath, s.Package().GetCachedStorages)
rt.GET(cdsapi.PackageGetLoadedStoragesPath, s.Package().GetLoadedStorages)

rt.POST(cdsapi.StorageLoadPackagePath, s.Storage().LoadPackage)
rt.POST(cdsapi.StorageCreatePackagePath, s.Storage().CreatePackage)
@@ -73,4 +79,42 @@ func (s *Server) initRouters() {
rt.POST(cdsapi.BucketCreatePath, s.Bucket().Create)
rt.POST(cdsapi.BucketDeletePath, s.Bucket().Delete)
rt.GET(cdsapi.BucketListUserBucketsPath, s.Bucket().ListUserBuckets)

}

// routeV1 registers the "/v1" API group. Every route in the group passes
// through the AWS-signature authentication middleware; the object-upload
// route uses the body-less variant (NOTE(review): presumably so the streamed
// upload body is not consumed during signature verification — confirm
// against the AWSAuth implementation).
func (s *Server) routeV1(eg *gin.Engine) {
	grp := eg.Group("/v1")

	auth := s.awsAuth.Auth
	authNoBody := s.awsAuth.AuthWithoutBody

	// Object routes.
	grp.GET(cdsapi.ObjectListPathByPath, auth, s.Object().ListByPath)
	grp.POST(cdsapi.ObjectListByIDsPath, auth, s.Object().ListByIDs)
	grp.GET(cdsapi.ObjectDownloadPath, auth, s.Object().Download)
	grp.GET(cdsapi.ObjectDownloadByPathPath, auth, s.Object().DownloadByPath)
	grp.POST(cdsapi.ObjectUploadPath, authNoBody, s.Object().Upload)
	grp.GET(cdsapi.ObjectGetPackageObjectsPath, auth, s.Object().GetPackageObjects)
	grp.POST(cdsapi.ObjectUpdateInfoPath, auth, s.Object().UpdateInfo)
	grp.POST(cdsapi.ObjectUpdateInfoByPathPath, auth, s.Object().UpdateInfoByPath)
	grp.POST(cdsapi.ObjectMovePath, auth, s.Object().Move)
	grp.POST(cdsapi.ObjectDeletePath, auth, s.Object().Delete)
	grp.POST(cdsapi.ObjectDeleteByPathPath, auth, s.Object().DeleteByPath)
	grp.POST(cdsapi.ObjectClonePath, auth, s.Object().Clone)

	// Package routes.
	grp.GET(cdsapi.PackageGetPath, auth, s.Package().Get)
	grp.GET(cdsapi.PackageGetByFullNamePath, auth, s.Package().GetByFullName)
	grp.POST(cdsapi.PackageCreatePath, auth, s.Package().Create)
	grp.POST(cdsapi.PackageCreateLoadPath, auth, s.Package().CreateLoad)
	grp.POST(cdsapi.PackageDeletePath, auth, s.Package().Delete)
	grp.POST(cdsapi.PackageClonePath, auth, s.Package().Clone)
	grp.GET(cdsapi.PackageListBucketPackagesPath, auth, s.Package().ListBucketPackages)
	grp.GET(cdsapi.PackageGetCachedStoragesPath, auth, s.Package().GetCachedStorages)

	// Storage routes.
	grp.POST(cdsapi.StorageLoadPackagePath, auth, s.Storage().LoadPackage)
	grp.POST(cdsapi.StorageCreatePackagePath, auth, s.Storage().CreatePackage)
	grp.GET(cdsapi.StorageGetPath, auth, s.Storage().Get)

	// Cache routes.
	grp.POST(cdsapi.CacheMovePackagePath, auth, s.Cache().MovePackage)

	// Bucket routes.
	grp.GET(cdsapi.BucketGetByNamePath, auth, s.Bucket().GetByName)
	grp.POST(cdsapi.BucketCreatePath, auth, s.Bucket().Create)
	grp.POST(cdsapi.BucketDeletePath, auth, s.Bucket().Delete)
	grp.GET(cdsapi.BucketListUserBucketsPath, auth, s.Bucket().ListUserBuckets)
}

+ 4
- 29
client/internal/http/storage.go View File

@@ -1,9 +1,7 @@
package http

import (
"fmt"
"net/http"
"path/filepath"
"time"

"github.com/gin-gonic/gin"
@@ -32,37 +30,14 @@ func (s *StorageService) LoadPackage(ctx *gin.Context) {
return
}

hubID, taskID, err := s.svc.StorageSvc().StartStorageLoadPackage(req.UserID, req.PackageID, req.StorageID)
err := s.svc.StorageSvc().LoadPackage(req.UserID, req.PackageID, req.StorageID, req.RootPath)
if err != nil {
log.Warnf("start storage load package: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("start loading: %v", err)))
log.Warnf("loading package: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "loading package failed"))
return
}

for {
complete, ret, err := s.svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10)
if complete {
if err != nil {
log.Warnf("loading complete with: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("loading complete with: %v", err)))
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.StorageLoadPackageResp{
FullPath: filepath.Join(ret.RemoteBase, ret.PackagePath),
PackagePath: ret.PackagePath,
LocalBase: ret.LocalBase,
RemoteBase: ret.RemoteBase,
}))
return
}

if err != nil {
log.Warnf("wait loadding: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("wait loading: %v", err)))
return
}
}
ctx.JSON(http.StatusOK, OK(cdsapi.StorageLoadPackageResp{}))
}

func (s *StorageService) CreatePackage(ctx *gin.Context) {


+ 2
- 1
client/internal/services/cache.go View File

@@ -9,6 +9,7 @@ import (
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory"
)

type CacheService struct {
@@ -31,7 +32,7 @@ func (svc *CacheService) StartCacheMovePackage(userID cdssdk.UserID, packageID c
return 0, "", fmt.Errorf("get storage detail: %w", err)
}

if getStg.Storages[0].Storage.ShardStore == nil {
if !factory.GetBuilder(*getStg.Storages[0]).ShardStoreDesc().Enabled() {
return 0, "", fmt.Errorf("shard storage is not enabled")
}



+ 1
- 1
client/internal/services/hub.go View File

@@ -26,7 +26,7 @@ func (svc *Service) HubSvc() *HubService {
//
// []cdssdk.Hub - 获取到的节点信息列表
// error - 如果过程中发生错误,则返回错误信息
func (svc *HubService) GetHubs(hubIDs []cdssdk.HubID) ([]cdssdk.Hub, error) {
func (svc *HubService) GetHubs(hubIDs []cdssdk.HubID) ([]*cdssdk.Hub, error) {
// 从协调器MQ池中获取一个客户端实例
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {


+ 30
- 0
client/internal/services/object.go View File

@@ -37,6 +37,21 @@ func (svc *ObjectService) GetByPath(userID cdssdk.UserID, pkgID cdssdk.PackageID
return listResp.Objects, nil
}

// GetByIDs fetches the objects with the given IDs via the coordinator.
// The returned slice is the coordinator's response as-is (NOTE(review):
// entries for missing IDs may be nil — confirm against the coordinator's
// GetObjects implementation).
func (svc *ObjectService) GetByIDs(userID cdssdk.UserID, objectIDs []cdssdk.ObjectID) ([]*cdssdk.Object, error) {
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	listResp, err := coorCli.GetObjects(coormq.ReqGetObjects(userID, objectIDs))
	if err != nil {
		// Fixed misspellings in the original message ("requsting to coodinator").
		return nil, fmt.Errorf("requesting to coordinator: %w", err)
	}

	return listResp.Objects, nil
}

func (svc *ObjectService) UpdateInfo(userID cdssdk.UserID, updatings []cdsapi.UpdatingObject) ([]cdssdk.ObjectID, error) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
@@ -98,6 +113,21 @@ func (svc *ObjectService) Delete(userID cdssdk.UserID, objectIDs []cdssdk.Object
return nil
}

// Clone asks the coordinator to clone the given objects and returns the
// newly created object records.
func (svc *ObjectService) Clone(userID cdssdk.UserID, clonings []cdsapi.CloningObject) ([]*cdssdk.Object, error) {
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	resp, err := coorCli.CloneObjects(coormq.ReqCloneObjects(userID, clonings))
	if err != nil {
		// Fixed misspellings in the original message ("requsting to coodinator").
		return nil, fmt.Errorf("requesting to coordinator: %w", err)
	}

	return resp.Objects, nil
}

// GetPackageObjects 获取包中的对象列表。
// userID: 用户ID。
// packageID: 包ID。


+ 16
- 18
client/internal/services/package.go View File

@@ -36,7 +36,7 @@ func (svc *PackageService) Get(userID cdssdk.UserID, packageID cdssdk.PackageID)
return &getResp.Package, nil
}

func (svc *PackageService) GetByName(userID cdssdk.UserID, bucketName string, packageName string) (*cdssdk.Package, error) {
func (svc *PackageService) GetByFullName(userID cdssdk.UserID, bucketName string, packageName string) (*cdssdk.Package, error) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return nil, fmt.Errorf("new coordinator client: %w", err)
@@ -106,6 +106,21 @@ func (svc *PackageService) DeletePackage(userID cdssdk.UserID, packageID cdssdk.
return nil
}

// Clone asks the coordinator to clone a whole package into the given bucket
// under a new name, returning the newly created package record.
func (svc *PackageService) Clone(userID cdssdk.UserID, packageID cdssdk.PackageID, bucketID cdssdk.BucketID, name string) (cdssdk.Package, error) {
	var none cdssdk.Package

	cli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return none, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(cli)

	cloneResp, err := cli.ClonePackage(coormq.ReqClonePackage(userID, packageID, bucketID, name))
	if err != nil {
		return none, fmt.Errorf("cloning package: %w", err)
	}

	return cloneResp.Package, nil
}

// GetCachedStorages 获取指定包的缓存节点信息
func (svc *PackageService) GetCachedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) (cdssdk.PackageCachingInfo, error) {
// 从协调器MQ池中获取客户端
@@ -128,20 +143,3 @@ func (svc *PackageService) GetCachedStorages(userID cdssdk.UserID, packageID cds
}
return tmp, nil
}

// GetLoadedStorages returns the IDs of the storages that the given package
// has been loaded to.
func (svc *PackageService) GetLoadedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) ([]cdssdk.StorageID, error) {
	// Borrow a client from the coordinator MQ pool for the duration of the call.
	cli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(cli)

	// Ask the coordinator which storages currently hold this package.
	resp, err := cli.GetPackageLoadedStorages(coormq.ReqGetPackageLoadedStorages(userID, packageID))
	if err != nil {
		return nil, fmt.Errorf("get package loaded storages: %w", err)
	}

	return resp.StorageIDs, nil
}

+ 25
- 11
client/internal/services/service.go View File

@@ -7,24 +7,38 @@ import (
"gitlink.org.cn/cloudream/storage/client/internal/task"
"gitlink.org.cn/cloudream/storage/common/pkgs/accessstat"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache"
"gitlink.org.cn/cloudream/storage/common/pkgs/uploader"
)

// Service 结构体封装了分布锁服务和任务管理服务。
type Service struct {
DistLock *distlock.Service
TaskMgr *task.Manager
Downloader *downloader.Downloader
AccessStat *accessstat.AccessStat
Uploader *uploader.Uploader
DistLock *distlock.Service
TaskMgr *task.Manager
Downloader *downloader.Downloader
AccessStat *accessstat.AccessStat
Uploader *uploader.Uploader
StrategySelector *strategy.Selector
StorageMeta *metacache.StorageMeta
}

func NewService(distlock *distlock.Service, taskMgr *task.Manager, downloader *downloader.Downloader, accStat *accessstat.AccessStat, uploder *uploader.Uploader) (*Service, error) {
func NewService(
distlock *distlock.Service,
taskMgr *task.Manager,
downloader *downloader.Downloader,
accStat *accessstat.AccessStat,
uploder *uploader.Uploader,
strategySelector *strategy.Selector,
storageMeta *metacache.StorageMeta,
) (*Service, error) {
return &Service{
DistLock: distlock,
TaskMgr: taskMgr,
Downloader: downloader,
AccessStat: accStat,
Uploader: uploder,
DistLock: distlock,
TaskMgr: taskMgr,
Downloader: downloader,
AccessStat: accStat,
Uploader: uploder,
StrategySelector: strategySelector,
StorageMeta: storageMeta,
}, nil
}

+ 69
- 45
client/internal/services/storage.go View File

@@ -1,15 +1,23 @@
package services

import (
"context"
"fmt"
"path"
"time"

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory"
)

type StorageService struct {
@@ -50,74 +58,90 @@ func (svc *StorageService) GetByName(userID cdssdk.UserID, name string) (*model.
return &getResp.Storage, nil
}

func (svc *StorageService) StartStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) (cdssdk.HubID, string, error) {
func (svc *StorageService) LoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID, rootPath string) error {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return 0, "", fmt.Errorf("new coordinator client: %w", err)
return fmt.Errorf("new coordinator client: %w", err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

stgResp, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{storageID}))
if err != nil {
return 0, "", fmt.Errorf("getting storage info: %w", err)
destStg := svc.StorageMeta.Get(storageID)
if destStg == nil {
return fmt.Errorf("storage not found: %d", storageID)
}

if stgResp.Storages[0].Storage.ShardStore == nil {
return 0, "", fmt.Errorf("shard storage is not enabled")
if destStg.MasterHub == nil {
return fmt.Errorf("storage %v has no master hub", storageID)
}

agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storages[0].MasterHub.HubID)
details, err := coorCli.GetPackageObjectDetails(coormq.ReqGetPackageObjectDetails(packageID))
if err != nil {
return 0, "", fmt.Errorf("new agent client: %w", err)
return err
}
defer stgglb.AgentMQPool.Release(agentCli)

startResp, err := agentCli.StartStorageLoadPackage(agtmq.NewStartStorageLoadPackage(userID, packageID, storageID))
if err != nil {
return 0, "", fmt.Errorf("start storage load package: %w", err)
}
var pinned []cdssdk.ObjectID
plans := exec.NewPlanBuilder()
for _, obj := range details.Objects {
strg, err := svc.StrategySelector.Select(strategy.Request{
Detail: obj,
DestHub: destStg.MasterHub.HubID,
})
if err != nil {
return fmt.Errorf("select download strategy: %w", err)
}

return stgResp.Storages[0].MasterHub.HubID, startResp.TaskID, nil
}
ft := ioswitch2.NewFromTo()
switch strg := strg.(type) {
case *strategy.DirectStrategy:
ft.AddFrom(ioswitch2.NewFromShardstore(strg.Detail.Object.FileHash, *strg.Storage.MasterHub, strg.Storage, ioswitch2.RawStream()))

type StorageLoadPackageResult struct {
PackagePath string
LocalBase string
RemoteBase string
}
case *strategy.ECReconstructStrategy:
for i, b := range strg.Blocks {
ft.AddFrom(ioswitch2.NewFromShardstore(b.FileHash, *strg.Storages[i].MasterHub, strg.Storages[i], ioswitch2.ECStream(b.Index)))
ft.ECParam = &strg.Redundancy
}
default:
return fmt.Errorf("unsupported download strategy: %T", strg)
}

func (svc *StorageService) WaitStorageLoadPackage(hubID cdssdk.HubID, taskID string, waitTimeout time.Duration) (bool, *StorageLoadPackageResult, error) {
agentCli, err := stgglb.AgentMQPool.Acquire(hubID)
if err != nil {
// TODO 失败是否要当做任务已经结束?
return true, nil, fmt.Errorf("new agent client: %w", err)
ft.AddTo(ioswitch2.NewLoadToShared(*destStg.MasterHub, *destStg, path.Join(rootPath, obj.Object.Path)))
// 顺便保存到同存储服务的分片存储中
if factory.GetBuilder(*destStg).ShardStoreDesc().Enabled() {
ft.AddTo(ioswitch2.NewToShardStore(*destStg.MasterHub, *destStg, ioswitch2.RawStream(), ""))
pinned = append(pinned, obj.Object.ObjectID)
}

err = parser.Parse(ft, plans)
if err != nil {
return fmt.Errorf("parse plan: %w", err)
}
}
defer stgglb.AgentMQPool.Release(agentCli)

waitResp, err := agentCli.WaitStorageLoadPackage(agtmq.NewWaitStorageLoadPackage(taskID, waitTimeout.Milliseconds()))
mutex, err := reqbuilder.NewBuilder().
// 保护在storage目录中下载的文件
Storage().Buzy(storageID).
// 保护下载文件时同时保存到IPFS的文件
Shard().Buzy(storageID).
MutexLock(svc.DistLock)
if err != nil {
// TODO 请求失败是否要当做任务已经结束?
return true, nil, fmt.Errorf("wait storage load package: %w", err)
return fmt.Errorf("acquire locks failed, err: %w", err)
}

if !waitResp.IsComplete {
return false, nil, nil
// 记录访问统计
for _, obj := range details.Objects {
svc.AccessStat.AddAccessCounter(obj.Object.ObjectID, packageID, storageID, 1)
}

if waitResp.Error != "" {
return true, nil, fmt.Errorf("%s", waitResp.Error)
}
defer mutex.Unlock()

return true, &StorageLoadPackageResult{
PackagePath: waitResp.PackagePath,
LocalBase: waitResp.LocalBase,
RemoteBase: waitResp.RemoteBase,
}, nil
}
drv := plans.Execute(exec.NewExecContext())
_, err = drv.Wait(context.Background())
if err != nil {
return err
}

func (svc *StorageService) DeleteStoragePackage(userID int64, packageID int64, storageID int64) error {
// TODO
panic("not implement yet")
// 失败也没关系
coorCli.StoragePackageLoaded(coormq.ReqStoragePackageLoaded(userID, storageID, packageID, rootPath, pinned))
return nil
}

// 请求节点启动从Storage中上传文件的任务。会返回节点ID和任务ID


+ 4
- 4
client/internal/task/task.go View File

@@ -4,14 +4,14 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/distlock" // 引入分布式锁服务
"gitlink.org.cn/cloudream/common/pkgs/task" // 引入任务处理相关的包
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity" // 引入网络连接状态收集器
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
)

// TaskContext 定义了任务执行的上下文环境,包含分布式锁服务和网络连接状态收集器
type TaskContext struct {
distlock *distlock.Service
connectivity *connectivity.Collector
stgMgr *svcmgr.Manager
stgAgts *agtpool.AgentPool
}

// CompleteFn 类型定义了任务完成时的回调函数,用于设置任务的执行结果
@@ -31,10 +31,10 @@ type CompleteOption = task.CompleteOption

// NewManager 创建一个新的任务管理器实例,接受一个分布式锁服务和一个网络连接状态收集器作为参数
// 返回一个初始化好的任务管理器实例
func NewManager(distlock *distlock.Service, connectivity *connectivity.Collector, stgMgr *svcmgr.Manager) Manager {
func NewManager(distlock *distlock.Service, connectivity *connectivity.Collector, stgAgts *agtpool.AgentPool) Manager {
return task.NewManager(TaskContext{
distlock: distlock,
connectivity: connectivity,
stgMgr: stgMgr,
stgAgts: stgAgts,
})
}

+ 20
- 10
client/main.go View File

@@ -18,8 +18,10 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/uploader"
)

@@ -37,7 +39,7 @@ func main() {
}

stgglb.InitLocal(&config.Cfg().Local)
stgglb.InitMQPool(&config.Cfg().RabbitMQ)
stgglb.InitMQPool(config.Cfg().RabbitMQ)
stgglb.InitAgentRPCPool(&config.Cfg().AgentGRPC)

// 连接性信息收集
@@ -57,13 +59,13 @@ func main() {
consMap := make(map[cdssdk.HubID]connectivity.Connectivity)
for _, con := range getCons.Connectivities {
var delay *time.Duration
if con.Delay != nil {
d := time.Duration(*con.Delay * float32(time.Millisecond))
if con.Latency != nil {
d := time.Duration(*con.Latency * float32(time.Millisecond))
delay = &d
}
consMap[con.FromHubID] = connectivity.Connectivity{
ToHubID: con.ToHubID,
Delay: delay,
Latency: delay,
}
}
conCol = connectivity.NewCollectorWithInitData(&config.Cfg().Connectivity, nil, consMap)
@@ -75,6 +77,12 @@ func main() {
conCol.CollectInPlace()
}

metaCacheHost := metacache.NewHost()
go metaCacheHost.Serve()
stgMeta := metaCacheHost.AddStorageMeta()
hubMeta := metaCacheHost.AddHubMeta()
conMeta := metaCacheHost.AddConnectivity()

// 分布式锁
distlockSvc, err := distlock.NewService(&config.Cfg().DistLock)
if err != nil {
@@ -91,18 +99,20 @@ func main() {
go serveAccessStat(acStat)

// 存储管理器
stgMgr := svcmgr.NewManager()
stgAgts := agtpool.NewPool()

// 任务管理器
taskMgr := task.NewManager(distlockSvc, &conCol, stgMgr)
taskMgr := task.NewManager(distlockSvc, &conCol, stgAgts)

strgSel := strategy.NewSelector(config.Cfg().DownloadStrategy, stgMeta, hubMeta, conMeta)

// 下载器
dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgMgr)
dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgAgts, strgSel)

// 上传器
uploader := uploader.NewUploader(distlockSvc, &conCol, stgMgr)
uploader := uploader.NewUploader(distlockSvc, &conCol, stgAgts, stgMeta)

svc, err := services.NewService(distlockSvc, &taskMgr, &dlder, acStat, uploader)
svc, err := services.NewService(distlockSvc, &taskMgr, &dlder, acStat, uploader, strgSel, stgMeta)
if err != nil {
logger.Warnf("new services failed, err: %s", err.Error())
os.Exit(1)


+ 3
- 1
common/assets/confs/agent.config.json View File

@@ -39,7 +39,9 @@
},
"downloader": {
"maxStripCacheCount": 100,
"highLatencyHub": 35,
"ecStripPrefetchCount": 1
},
"downloadStrategy": {
"highLatencyHub": 35
}
}

+ 7
- 2
common/assets/confs/client.config.json View File

@@ -34,8 +34,13 @@
},
"downloader": {
"maxStripCacheCount": 100,
"highLatencyHub": 35,
"ecStripPrefetchCount": 1
},
"storageID": 0
"downloadStrategy": {
"highLatencyHub": 35
},
"storageID": 0,
"authAccessKey": "",
"authSecretKey": "",
"maxHttpBodySize": 5242880
}

+ 7
- 7
common/assets/confs/scanner.config.json View File

@@ -9,15 +9,15 @@
"level": "debug"
},
"db": {
"address": "106.75.6.194:3306",
"account": "root",
"password": "cloudream123456",
"address": "127.0.0.1:3306",
"account": "",
"password": "",
"databaseName": "cloudream"
},
"rabbitMQ": {
"address": "106.75.6.194:5672",
"account": "cloudream",
"password": "cloudream123456",
"address": "127.0.0.1:5672",
"account": "",
"password": "",
"vhost": "/",
"param": {
"retryNum": 5,
@@ -25,7 +25,7 @@
}
},
"distlock": {
"etcdAddress": "106.75.6.194:2379",
"etcdAddress": "127.0.0.1:2379",
"etcdUsername": "",
"etcdPassword": "",
"etcdLockLeaseTimeSec": 5,


+ 2
- 2
common/globals/pools.go View File

@@ -1,8 +1,8 @@
package stgglb

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
scmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner"
@@ -18,7 +18,7 @@ var ScannerMQPool scmq.Pool
//
// @Description: 初始化MQ连接池
// @param cfg
func InitMQPool(cfg *stgmq.Config) {
func InitMQPool(cfg mq.Config) {
AgentMQPool = agtmq.NewPool(cfg)

CoordinatorMQPool = coormq.NewPool(cfg)


+ 13
- 24
common/pkgs/connectivity/collector.go View File

@@ -13,7 +13,7 @@ import (

type Connectivity struct {
ToHubID cdssdk.HubID
Delay *time.Duration
Latency *time.Duration
TestTime time.Time
}

@@ -52,17 +52,6 @@ func NewCollectorWithInitData(cfg *Config, onCollected func(collector *Collector
return rpt
}

// Get returns the recorded connectivity to the given hub, or nil when no
// measurement for that hub exists yet.
func (r *Collector) Get(hubID cdssdk.HubID) *Connectivity {
	r.lock.RLock()
	defer r.lock.RUnlock()

	if con, ok := r.connectivities[hubID]; ok {
		// Return a pointer to the local copy so callers cannot mutate the
		// map entry through it.
		return &con
	}
	return nil
}
func (r *Collector) GetAll() map[cdssdk.HubID]Connectivity {
r.lock.RLock()
defer r.lock.RUnlock()
@@ -101,8 +90,8 @@ func (r *Collector) serve() {

// 为了防止同时启动的节点会集中进行Ping,所以第一次上报间隔为0-TestInterval秒之间随机
startup := true
firstReportDelay := time.Duration(float64(r.cfg.TestInterval) * float64(time.Second) * rand.Float64())
ticker := time.NewTicker(firstReportDelay)
firstReportLatency := time.Duration(float64(r.cfg.TestInterval) * float64(time.Second) * rand.Float64())
ticker := time.NewTicker(firstReportLatency)

loop:
for {
@@ -150,7 +139,7 @@ func (r *Collector) testing() {
wg.Add(1)
go func() {
defer wg.Done()
cons[tmpIdx] = r.ping(tmpHub)
cons[tmpIdx] = r.ping(*tmpHub)
}()
}

@@ -190,7 +179,7 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity {

return Connectivity{
ToHubID: hub.HubID,
Delay: nil,
Latency: nil,
TestTime: time.Now(),
}
}
@@ -200,7 +189,7 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity {
log.Warnf("new agent %v:%v rpc client: %w", ip, port, err)
return Connectivity{
ToHubID: hub.HubID,
Delay: nil,
Latency: nil,
TestTime: time.Now(),
}
}
@@ -212,13 +201,13 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity {
log.Warnf("pre ping: %v", err)
return Connectivity{
ToHubID: hub.HubID,
Delay: nil,
Latency: nil,
TestTime: time.Now(),
}
}

// 后几次ping计算延迟
var avgDelay time.Duration
var avgLatency time.Duration
for i := 0; i < 3; i++ {
start := time.Now()
err = agtCli.Ping()
@@ -226,22 +215,22 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity {
log.Warnf("ping: %v", err)
return Connectivity{
ToHubID: hub.HubID,
Delay: nil,
Latency: nil,
TestTime: time.Now(),
}
}

delay := time.Since(start)
avgDelay += delay
latency := time.Since(start)
avgLatency += latency

// 每次ping之间间隔1秒
<-time.After(time.Second)
}
delay := avgDelay / 3
latency := avgLatency / 3

return Connectivity{
ToHubID: hub.HubID,
Delay: &delay,
Latency: &latency,
TestTime: time.Now(),
}
}

+ 1
- 22
common/pkgs/db2/bucket.go View File

@@ -113,26 +113,5 @@ func (db *BucketDB) Create(ctx SQLContext, userID cdssdk.UserID, bucketName stri
}

func (db *BucketDB) Delete(ctx SQLContext, bucketID cdssdk.BucketID) error {
if err := ctx.Exec("DELETE FROM UserBucket WHERE BucketID = ?", bucketID).Error; err != nil {
return fmt.Errorf("delete user bucket failed, err: %w", err)
}

if err := ctx.Exec("DELETE FROM Bucket WHERE BucketID = ?", bucketID).Error; err != nil {
return fmt.Errorf("delete bucket failed, err: %w", err)
}

var pkgIDs []cdssdk.PackageID
if err := ctx.Table("Package").Select("PackageID").Where("BucketID = ?", bucketID).Find(&pkgIDs).Error; err != nil {
return fmt.Errorf("query package failed, err: %w", err)
}

for _, pkgID := range pkgIDs {
if err := db.Package().SoftDelete(ctx, pkgID); err != nil {
return fmt.Errorf("set package selected failed, err: %w", err)
}

// 失败也没关系,会有定时任务再次尝试
db.Package().DeleteUnused(ctx, pkgID)
}
return nil
return ctx.Delete(&cdssdk.Bucket{}, "BucketID = ?", bucketID).Error
}

+ 0
- 18
common/pkgs/db2/model/model.go View File

@@ -67,24 +67,6 @@ func (Cache) TableName() string {
return "Cache"
}

const (
StoragePackageStateNormal = "Normal"
StoragePackageStateDeleted = "Deleted"
StoragePackageStateOutdated = "Outdated"
)

// Storage当前加载的Package
type StoragePackage struct {
StorageID cdssdk.StorageID `gorm:"column:StorageID; primaryKey; type:bigint" json:"storageID"`
PackageID cdssdk.PackageID `gorm:"column:PackageID; primaryKey; type:bigint" json:"packageID"`
UserID cdssdk.UserID `gorm:"column:UserID; primaryKey; type:bigint" json:"userID"`
State string `gorm:"column:State; type:varchar(255); not null" json:"state"`
}

func (StoragePackage) TableName() string {
return "StoragePackage"
}

type Location struct {
LocationID cdssdk.LocationID `gorm:"column:LocationID; primaryKey; type:bigint; autoIncrement" json:"locationID"`
Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"`


+ 38
- 3
common/pkgs/db2/object.go View File

@@ -20,7 +20,7 @@ func (db *DB) Object() *ObjectDB {
return &ObjectDB{DB: db}
}

func (db *ObjectDB) GetByID(ctx SQLContext, objectID cdssdk.ObjectID) (model.Object, error) {
func (db *ObjectDB) GetByID(ctx SQLContext, objectID cdssdk.ObjectID) (cdssdk.Object, error) {
var ret cdssdk.Object
err := ctx.Table("Object").Where("ObjectID = ?", objectID).First(&ret).Error
return ret, err
@@ -57,7 +57,7 @@ func (db *ObjectDB) BatchTestObjectID(ctx SQLContext, objectIDs []cdssdk.ObjectI
return avaiIDMap, nil
}

func (db *ObjectDB) BatchGet(ctx SQLContext, objectIDs []cdssdk.ObjectID) ([]model.Object, error) {
func (db *ObjectDB) BatchGet(ctx SQLContext, objectIDs []cdssdk.ObjectID) ([]cdssdk.Object, error) {
if len(objectIDs) == 0 {
return nil, nil
}
@@ -85,6 +85,41 @@ func (db *ObjectDB) BatchGetByPackagePath(ctx SQLContext, pkgID cdssdk.PackageID
return objs, nil
}

// BatchGetDetails returns the full detail (object record plus its blocks and
// pinned locations) for each of the given object IDs. Only objects that were
// actually found are returned; missing IDs are silently skipped.
func (db *ObjectDB) BatchGetDetails(ctx SQLContext, objectIDs []cdssdk.ObjectID) ([]stgmod.ObjectDetail, error) {
	// Consistent with BatchGet: an empty ID list has no results and should
	// not reach the database (an empty `IN` clause is wasteful and may error).
	if len(objectIDs) == 0 {
		return nil, nil
	}

	var objs []cdssdk.Object
	err := ctx.Table("Object").Where("ObjectID IN ?", objectIDs).Order("ObjectID ASC").Find(&objs).Error
	if err != nil {
		return nil, err
	}

	// Fetch every block of the requested objects, ordered so they can be
	// merged into the per-object details below.
	var allBlocks []stgmod.ObjectBlock
	err = ctx.Table("ObjectBlock").Where("ObjectID IN ?", objectIDs).Order("ObjectID, `Index` ASC").Find(&allBlocks).Error
	if err != nil {
		return nil, err
	}

	// Fetch the pinned locations of the requested objects.
	var allPinnedObjs []cdssdk.PinnedObject
	err = ctx.Table("PinnedObject").Where("ObjectID IN ?", objectIDs).Order("ObjectID ASC").Find(&allPinnedObjs).Error
	if err != nil {
		return nil, err
	}

	details := make([]stgmod.ObjectDetail, len(objs))
	for i, obj := range objs {
		details[i] = stgmod.ObjectDetail{
			Object: obj,
		}
	}

	stgmod.DetailsFillObjectBlocks(details, allBlocks)
	stgmod.DetailsFillPinnedAt(details, allPinnedObjs)
	return details, nil
}

func (db *ObjectDB) Create(ctx SQLContext, obj cdssdk.Object) (cdssdk.ObjectID, error) {
err := ctx.Table("Object").Create(&obj).Error
if err != nil {
@@ -128,7 +163,7 @@ func (db *ObjectDB) BatchUpdateColumns(ctx SQLContext, objs []cdssdk.Object, col
}).Create(objs).Error
}

func (db *ObjectDB) GetPackageObjects(ctx SQLContext, packageID cdssdk.PackageID) ([]model.Object, error) {
func (db *ObjectDB) GetPackageObjects(ctx SQLContext, packageID cdssdk.PackageID) ([]cdssdk.Object, error) {
var ret []cdssdk.Object
err := ctx.Table("Object").Where("PackageID = ?", packageID).Order("ObjectID ASC").Find(&ret).Error
return ret, err


+ 10
- 0
common/pkgs/db2/object_block.go View File

@@ -33,6 +33,16 @@ func (db *ObjectBlockDB) BatchGetByObjectID(ctx SQLContext, objectIDs []cdssdk.O
return blocks, err
}

// GetInPackageID returns all object blocks belonging to objects of the given
// package, ordered by object ID and block index.
func (*ObjectBlockDB) GetInPackageID(ctx SQLContext, packageID cdssdk.PackageID) ([]stgmod.ObjectBlock, error) {
	var blocks []stgmod.ObjectBlock
	query := ctx.Table("ObjectBlock").
		Joins("INNER JOIN Object ON ObjectBlock.ObjectID = Object.ObjectID").
		Where("Object.PackageID = ?", packageID).
		Order("ObjectBlock.ObjectID, ObjectBlock.`Index` ASC")
	err := query.Find(&blocks).Error
	return blocks, err
}

func (db *ObjectBlockDB) Create(ctx SQLContext, objectID cdssdk.ObjectID, index int, stgID cdssdk.StorageID, fileHash cdssdk.FileHash) error {
block := stgmod.ObjectBlock{ObjectID: objectID, Index: index, StorageID: stgID, FileHash: fileHash}
return ctx.Table("ObjectBlock").Create(&block).Error


+ 25
- 30
common/pkgs/db2/package.go View File

@@ -57,7 +57,7 @@ func (*PackageDB) BatchGetAllPackageIDs(ctx SQLContext, start int, count int) ([
return ret, err
}

func (db *PackageDB) GetBucketPackages(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) ([]model.Package, error) {
func (db *PackageDB) GetUserBucketPackages(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) ([]model.Package, error) {
var ret []model.Package
err := ctx.Table("UserBucket").
Select("Package.*").
@@ -67,6 +67,15 @@ func (db *PackageDB) GetBucketPackages(ctx SQLContext, userID cdssdk.UserID, buc
return ret, err
}

// GetBucketPackages lists every package stored in the given bucket.
func (db *PackageDB) GetBucketPackages(ctx SQLContext, bucketID cdssdk.BucketID) ([]model.Package, error) {
	var pkgs []model.Package
	query := ctx.Table("Package").
		Select("Package.*").
		Where("BucketID = ?", bucketID)
	err := query.Find(&pkgs).Error
	return pkgs, err
}

// IsAvailable 判断一个用户是否拥有指定对象
func (db *PackageDB) IsAvailable(ctx SQLContext, userID cdssdk.UserID, packageID cdssdk.PackageID) (bool, error) {
var pkgID cdssdk.PackageID
@@ -110,7 +119,7 @@ func (*PackageDB) GetUserPackageByName(ctx SQLContext, userID cdssdk.UserID, buc
return ret, err
}

func (db *PackageDB) Create(ctx SQLContext, bucketID cdssdk.BucketID, name string) (cdssdk.PackageID, error) {
func (db *PackageDB) Create(ctx SQLContext, bucketID cdssdk.BucketID, name string) (cdssdk.Package, error) {
var packageID int64
err := ctx.Table("Package").
Select("PackageID").
@@ -118,33 +127,29 @@ func (db *PackageDB) Create(ctx SQLContext, bucketID cdssdk.BucketID, name strin
Scan(&packageID).Error

if err != nil {
return 0, err
return cdssdk.Package{}, err
}
if packageID != 0 {
return 0, gorm.ErrDuplicatedKey
return cdssdk.Package{}, gorm.ErrDuplicatedKey
}

newPackage := cdssdk.Package{Name: name, BucketID: bucketID, State: cdssdk.PackageStateNormal}
if err := ctx.Create(&newPackage).Error; err != nil {
return 0, fmt.Errorf("insert package failed, err: %w", err)
return cdssdk.Package{}, fmt.Errorf("insert package failed, err: %w", err)
}

return newPackage.PackageID, nil
return newPackage, nil
}

// SoftDelete 设置一个对象被删除,并将相关数据删除
func (db *PackageDB) SoftDelete(ctx SQLContext, packageID cdssdk.PackageID) error {
obj, err := db.GetByID(ctx, packageID)
if err != nil {
return fmt.Errorf("get package failed, err: %w", err)
}

if obj.State != cdssdk.PackageStateNormal {
return nil
}
func (*PackageDB) Delete(ctx SQLContext, packageID cdssdk.PackageID) error {
err := ctx.Delete(&model.Package{}, "PackageID = ?", packageID).Error
return err
}

if err := db.ChangeState(ctx, packageID, cdssdk.PackageStateDeleted); err != nil {
return fmt.Errorf("change package state failed, err: %w", err)
// 删除与Package相关的所有数据
func (db *PackageDB) DeleteComplete(ctx SQLContext, packageID cdssdk.PackageID) error {
if err := db.Package().Delete(ctx, packageID); err != nil {
return fmt.Errorf("delete package state: %w", err)
}

if err := db.ObjectAccessStat().DeleteInPackage(ctx, packageID); err != nil {
@@ -163,23 +168,13 @@ func (db *PackageDB) SoftDelete(ctx SQLContext, packageID cdssdk.PackageID) erro
return fmt.Errorf("deleting objects in package: %w", err)
}

if _, err := db.StoragePackage().SetAllPackageDeleted(ctx, packageID); err != nil {
return fmt.Errorf("set storage package deleted failed, err: %w", err)
if err := db.PackageAccessStat().DeleteByPackageID(ctx, packageID); err != nil {
return fmt.Errorf("deleting package access stat: %w", err)
}

return nil
}

// DeleteUnused 删除一个已经是Deleted状态,且不再被使用的对象
func (PackageDB) DeleteUnused(ctx SQLContext, packageID cdssdk.PackageID) error {
err := ctx.Exec("DELETE FROM Package WHERE PackageID = ? AND State = ? AND NOT EXISTS (SELECT StorageID FROM StoragePackage WHERE PackageID = ?)",
packageID,
cdssdk.PackageStateDeleted,
packageID,
).Error
return err
}

func (*PackageDB) ChangeState(ctx SQLContext, packageID cdssdk.PackageID, state string) error {
err := ctx.Exec("UPDATE Package SET State = ? WHERE PackageID = ?", state, packageID).Error
return err


+ 8
- 4
common/pkgs/db2/pinned_object.go View File

@@ -42,8 +42,10 @@ func (*PinnedObjectDB) BatchGetByObjectID(ctx SQLContext, objectIDs []cdssdk.Obj
}

func (*PinnedObjectDB) TryCreate(ctx SQLContext, stgID cdssdk.StorageID, objectID cdssdk.ObjectID, createTime time.Time) error {
err := ctx.Clauses(clause.Insert{Modifier: "ignore"}).Table("PinnedObject").Create(&cdssdk.PinnedObject{StorageID: stgID, ObjectID: objectID, CreateTime: createTime}).Error
return err
return ctx.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "ObjectID"}, {Name: "StorageID"}},
DoUpdates: clause.AssignmentColumns([]string{"CreateTime"}),
}).Create(&cdssdk.PinnedObject{StorageID: stgID, ObjectID: objectID, CreateTime: createTime}).Error
}

func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []cdssdk.PinnedObject) error {
@@ -51,8 +53,10 @@ func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []cdssdk.PinnedObj
return nil
}

err := ctx.Clauses(clause.Insert{Modifier: "ignore"}).Table("PinnedObject").Create(pinneds).Error
return err
return ctx.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "ObjectID"}, {Name: "StorageID"}},
DoUpdates: clause.AssignmentColumns([]string{"CreateTime"}),
}).Create(&pinneds).Error
}

func (*PinnedObjectDB) CreateFromPackage(ctx SQLContext, packageID cdssdk.PackageID, stgID cdssdk.StorageID) error {


+ 0
- 83
common/pkgs/db2/storage_package.go View File

@@ -1,83 +0,0 @@
package db2

import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type StoragePackageDB struct {
*DB
}

func (db *DB) StoragePackage() *StoragePackageDB {
return &StoragePackageDB{DB: db}
}

func (*StoragePackageDB) Get(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) (model.StoragePackage, error) {
var ret model.StoragePackage
err := ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ?", storageID, packageID, userID).First(&ret).Error
return ret, err
}

func (*StoragePackageDB) GetAllByStorageAndPackageID(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID) ([]model.StoragePackage, error) {
var ret []model.StoragePackage
err := ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ?", storageID, packageID).Find(&ret).Error
return ret, err
}

func (*StoragePackageDB) GetAllByStorageID(ctx SQLContext, storageID cdssdk.StorageID) ([]model.StoragePackage, error) {
var ret []model.StoragePackage
err := ctx.Table("StoragePackage").Where("StorageID = ?", storageID).Find(&ret).Error
return ret, err
}

func (*StoragePackageDB) CreateOrUpdate(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error {
sql := "INSERT INTO StoragePackage (StorageID, PackageID, UserID, State) VALUES (?, ?, ?, ?) " +
"ON DUPLICATE KEY UPDATE State = VALUES(State)"
return ctx.Exec(sql, storageID, packageID, userID, model.StoragePackageStateNormal).Error
}

func (*StoragePackageDB) ChangeState(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID, state string) error {
return ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ?", storageID, packageID, userID).Update("State", state).Error
}

// SetStateNormal 将状态设置为Normal,如果记录状态是Deleted,则不进行操作
func (*StoragePackageDB) SetStateNormal(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error {
return ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ? AND State <> ?",
storageID, packageID, userID, model.StoragePackageStateDeleted).Update("State", model.StoragePackageStateNormal).Error
}

func (*StoragePackageDB) SetAllPackageState(ctx SQLContext, packageID cdssdk.PackageID, state string) (int64, error) {
ret := ctx.Table("StoragePackage").Where("PackageID = ?", packageID).Update("State", state)
if err := ret.Error; err != nil {
return 0, err
}
return ret.RowsAffected, nil
}

// SetAllPackageOutdated 将Storage中指定对象设置为已过期。只会设置Normal状态的对象
func (*StoragePackageDB) SetAllPackageOutdated(ctx SQLContext, packageID cdssdk.PackageID) (int64, error) {
ret := ctx.Table("StoragePackage").Where("State = ? AND PackageID = ?", model.StoragePackageStateNormal, packageID).Update("State", model.StoragePackageStateOutdated)
if err := ret.Error; err != nil {
return 0, err
}
return ret.RowsAffected, nil
}

func (db *StoragePackageDB) SetAllPackageDeleted(ctx SQLContext, packageID cdssdk.PackageID) (int64, error) {
return db.SetAllPackageState(ctx, packageID, model.StoragePackageStateDeleted)
}

func (*StoragePackageDB) Delete(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error {
return ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ?", storageID, packageID, userID).Delete(&model.StoragePackage{}).Error
}

// FindPackageStorages 查询存储了指定对象的Storage
func (*StoragePackageDB) FindPackageStorages(ctx SQLContext, packageID cdssdk.PackageID) ([]model.Storage, error) {
var ret []model.Storage
err := ctx.Table("StoragePackage").Select("Storage.*").
Joins("JOIN Storage ON StoragePackage.StorageID = Storage.StorageID").
Where("PackageID = ?", packageID).
Scan(&ret).Error
return ret, err
}

+ 7
- 3
common/pkgs/db2/user_bucket.go View File

@@ -13,10 +13,14 @@ func (db *DB) UserBucket() *UserBucketDB {
return &UserBucketDB{DB: db}
}

func (*UserBucketDB) Create(ctx SQLContext, userID int64, bucketID int64) error {
func (*UserBucketDB) Create(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) error {
userBucket := model.UserBucket{
UserID: cdssdk.UserID(userID),
BucketID: cdssdk.BucketID(bucketID),
UserID: userID,
BucketID: bucketID,
}
return ctx.Table("UserBucket").Create(&userBucket).Error
}

func (*UserBucketDB) DeleteByBucketID(ctx SQLContext, bucketID cdssdk.BucketID) error {
return ctx.Table("UserBucket").Where("BucketID = ?", bucketID).Delete(&model.UserBucket{}).Error
}

+ 0
- 24
common/pkgs/distlock/reqbuilder/metadata_storage_package.go View File

@@ -1,24 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type MetadataStoragePackageLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) StoragePackage() *MetadataStoragePackageLockReqBuilder {
return &MetadataStoragePackageLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataStoragePackageLockReqBuilder) CreateOne(userID cdssdk.UserID, storageID cdssdk.StorageID, packageID cdssdk.PackageID) *MetadataStoragePackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("StoragePackage"),
Name: lockprovider.MetadataCreateLock,
Target: *lockprovider.NewStringLockTarget().Add(userID, storageID, packageID),
})
return b
}

+ 2
- 3
common/pkgs/distlock/service.go View File

@@ -24,7 +24,7 @@ func initProviders() []distlock.PathProvider {

provs = append(provs, initMetadataLockProviders()...)

provs = append(provs, initIPFSLockProviders()...)
provs = append(provs, initShardLockProviders()...)

provs = append(provs, initStorageLockProviders()...)

@@ -45,12 +45,11 @@ func initMetadataLockProviders() []distlock.PathProvider {
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "ObjectRep"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "ObjectBlock"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Cache"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "StoragePackage"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Location"),
}
}

func initIPFSLockProviders() []distlock.PathProvider {
func initShardLockProviders() []distlock.PathProvider {
return []distlock.PathProvider{
distlock.NewPathProvider(lockprovider.NewShardStoreLock(), lockprovider.ShardStoreLockPathPrefix, trie.WORD_ANY),
}


+ 0
- 2
common/pkgs/downloader/config.go View File

@@ -3,8 +3,6 @@ package downloader
type Config struct {
// EC模式的Object的条带缓存数量
MaxStripCacheCount int `json:"maxStripCacheCount"`
// 当到下载节点的延迟高于这个值时,该节点在评估时会有更高的分数惩罚,单位:ms
HighLatencyHubMs float64 `json:"highLatencyHubMs"`
// EC模式下,每个Object的条带的预取数量,最少为1
ECStripPrefetchCount int `json:"ecStripPrefetchCount"`
}

+ 13
- 10
common/pkgs/downloader/downloader.go View File

@@ -10,8 +10,9 @@ import (
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
)

const (
@@ -38,23 +39,25 @@ type Downloading struct {
}

type Downloader struct {
strips *StripCache
cfg Config
conn *connectivity.Collector
stgMgr *svcmgr.Manager
strips *StripCache
cfg Config
conn *connectivity.Collector
stgAgts *agtpool.AgentPool
selector *strategy.Selector
}

func NewDownloader(cfg Config, conn *connectivity.Collector, stgMgr *svcmgr.Manager) Downloader {
func NewDownloader(cfg Config, conn *connectivity.Collector, stgAgts *agtpool.AgentPool, sel *strategy.Selector) Downloader {
if cfg.MaxStripCacheCount == 0 {
cfg.MaxStripCacheCount = DefaultMaxStripCacheCount
}

ch, _ := lru.New[ECStripKey, ObjectECStrip](cfg.MaxStripCacheCount)
return Downloader{
strips: ch,
cfg: cfg,
conn: conn,
stgMgr: stgMgr,
strips: ch,
cfg: cfg,
conn: conn,
stgAgts: stgAgts,
selector: sel,
}
}



+ 95
- 304
common/pkgs/downloader/iterator.go View File

@@ -4,28 +4,21 @@ import (
"context"
"fmt"
"io"
"math"
"reflect"
"time"

"github.com/samber/lo"

"gitlink.org.cn/cloudream/common/pkgs/bitmap"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/common/utils/sort2"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

type downloadStorageInfo struct {
@@ -39,15 +32,10 @@ type DownloadContext struct {
Distlock *distlock.Service
}
type DownloadObjectIterator struct {
OnClosing func()

OnClosing func()
downloader *Downloader
reqs []downloadReqeust2
currentIndex int
inited bool

coorCli *coormq.Client
allStorages map[cdssdk.StorageID]stgmod.StorageDetail
}

func NewDownloadObjectIterator(downloader *Downloader, downloadObjs []downloadReqeust2) *DownloadObjectIterator {
@@ -58,68 +46,11 @@ func NewDownloadObjectIterator(downloader *Downloader, downloadObjs []downloadRe
}

func (i *DownloadObjectIterator) MoveNext() (*Downloading, error) {
if !i.inited {
if err := i.init(); err != nil {
return nil, err
}

i.inited = true
}

if i.currentIndex >= len(i.reqs) {
return nil, iterator.ErrNoMoreItem
}

item, err := i.doMove()
i.currentIndex++
return item, err
}

func (i *DownloadObjectIterator) init() error {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return fmt.Errorf("new coordinator client: %w", err)
}
i.coorCli = coorCli

allStgIDsMp := make(map[cdssdk.StorageID]bool)
for _, obj := range i.reqs {
if obj.Detail == nil {
continue
}

for _, p := range obj.Detail.PinnedAt {
allStgIDsMp[p] = true
}

for _, b := range obj.Detail.Blocks {
allStgIDsMp[b.StorageID] = true
}
}

stgIDs := lo.Keys(allStgIDsMp)
getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(stgIDs))
if err != nil {
return fmt.Errorf("getting storage details: %w", err)
}

i.allStorages = make(map[cdssdk.StorageID]stgmod.StorageDetail)
for idx, s := range getStgs.Storages {
if s == nil {
return fmt.Errorf("storage %v not found", stgIDs[idx])
}
if s.Storage.ShardStore == nil {
return fmt.Errorf("storage %v has no shard store", stgIDs[idx])
}

i.allStorages[s.Storage.StorageID] = *s
}

return nil
}

func (iter *DownloadObjectIterator) doMove() (*Downloading, error) {
req := iter.reqs[iter.currentIndex]
req := i.reqs[i.currentIndex]
if req.Detail == nil {
return &Downloading{
Object: nil,
@@ -128,57 +59,51 @@ func (iter *DownloadObjectIterator) doMove() (*Downloading, error) {
}, nil
}

switch red := req.Detail.Object.Redundancy.(type) {
case *cdssdk.NoneRedundancy:
reader, err := iter.downloadNoneOrRepObject(req)
if err != nil {
return nil, fmt.Errorf("downloading object %v: %w", req.Raw.ObjectID, err)
}
destHub := cdssdk.HubID(0)
if stgglb.Local.HubID != nil {
destHub = *stgglb.Local.HubID
}

return &Downloading{
Object: &req.Detail.Object,
File: reader,
Request: req.Raw,
}, nil
strg, err := i.downloader.selector.Select(strategy.Request{
Detail: *req.Detail,
Range: math2.NewRange(req.Raw.Offset, req.Raw.Length),
DestHub: destHub,
DestLocation: stgglb.Local.LocationID,
})
if err != nil {
return nil, fmt.Errorf("selecting download strategy: %w", err)
}

case *cdssdk.RepRedundancy:
reader, err := iter.downloadNoneOrRepObject(req)
var reader io.ReadCloser
switch strg := strg.(type) {
case *strategy.DirectStrategy:
reader, err = i.downloadDirect(req, *strg)
if err != nil {
return nil, fmt.Errorf("downloading rep object %v: %w", req.Raw.ObjectID, err)
return nil, fmt.Errorf("downloading object %v: %w", req.Raw.ObjectID, err)
}

return &Downloading{
Object: &req.Detail.Object,
File: reader,
Request: req.Raw,
}, nil

case *cdssdk.ECRedundancy:
reader, err := iter.downloadECObject(req, red)
case *strategy.ECReconstructStrategy:
reader, err = i.downloadECReconstruct(req, *strg)
if err != nil {
return nil, fmt.Errorf("downloading ec object %v: %w", req.Raw.ObjectID, err)
}

return &Downloading{
Object: &req.Detail.Object,
File: reader,
Request: req.Raw,
}, nil

case *cdssdk.LRCRedundancy:
reader, err := iter.downloadLRCObject(req, red)
case *strategy.LRCReconstructStrategy:
reader, err = i.downloadLRCReconstruct(req, *strg)
if err != nil {
return nil, fmt.Errorf("downloading lrc object %v: %w", req.Raw.ObjectID, err)
}

return &Downloading{
Object: &req.Detail.Object,
File: reader,
Request: req.Raw,
}, nil
default:
return nil, fmt.Errorf("unsupported strategy type: %v", reflect.TypeOf(strg))
}

return nil, fmt.Errorf("unsupported redundancy type: %v of object %v", reflect.TypeOf(req.Detail.Object.Redundancy), req.Raw.ObjectID)
i.currentIndex++
return &Downloading{
Object: &req.Detail.Object,
File: reader,
Request: req.Raw,
}, nil
}

func (i *DownloadObjectIterator) Close() {
@@ -187,227 +112,93 @@ func (i *DownloadObjectIterator) Close() {
}
}

func (iter *DownloadObjectIterator) downloadNoneOrRepObject(obj downloadReqeust2) (io.ReadCloser, error) {
allStgs, err := iter.sortDownloadStorages(obj)
if err != nil {
return nil, err
}
func (i *DownloadObjectIterator) downloadDirect(req downloadReqeust2, strg strategy.DirectStrategy) (io.ReadCloser, error) {
logger.Debugf("downloading object %v from storage %v", req.Raw.ObjectID, strg.Storage.Storage.String())

bsc, blocks := iter.getMinReadingBlockSolution(allStgs, 1)
osc, stg := iter.getMinReadingObjectSolution(allStgs, 1)
if bsc < osc {
logger.Debugf("downloading object %v from storage %v", obj.Raw.ObjectID, blocks[0].Storage.Storage.String())
return iter.downloadFromStorage(&blocks[0].Storage, obj)
}
var strHandle *exec.DriverReadStream
ft := ioswitch2.NewFromTo()

if osc == math.MaxFloat64 {
// bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件
return nil, fmt.Errorf("no storage has this object")
toExec, handle := ioswitch2.NewToDriver(ioswitch2.RawStream())
toExec.Range = math2.Range{
Offset: req.Raw.Offset,
}

logger.Debugf("downloading object %v from storage %v", obj.Raw.ObjectID, stg.Storage.String())
return iter.downloadFromStorage(stg, obj)
}

func (iter *DownloadObjectIterator) downloadECObject(req downloadReqeust2, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, error) {
allStorages, err := iter.sortDownloadStorages(req)
if err != nil {
return nil, err
if req.Raw.Length != -1 {
len := req.Raw.Length
toExec.Range.Length = &len
}

bsc, blocks := iter.getMinReadingBlockSolution(allStorages, ecRed.K)
osc, stg := iter.getMinReadingObjectSolution(allStorages, ecRed.K)

if bsc < osc {
var logStrs []any = []any{fmt.Sprintf("downloading ec object %v from blocks: ", req.Raw.ObjectID)}
for i, b := range blocks {
if i > 0 {
logStrs = append(logStrs, ", ")
}
logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Block.Index, b.Storage.Storage.String()))
}
logger.Debug(logStrs...)

pr, pw := io.Pipe()
go func() {
readPos := req.Raw.Offset
totalReadLen := req.Detail.Object.Size - req.Raw.Offset
if req.Raw.Length >= 0 {
totalReadLen = math2.Min(req.Raw.Length, totalReadLen)
}

firstStripIndex := readPos / ecRed.StripSize()
stripIter := NewStripIterator(iter.downloader, req.Detail.Object, blocks, ecRed, firstStripIndex, iter.downloader.strips, iter.downloader.cfg.ECStripPrefetchCount)
defer stripIter.Close()

for totalReadLen > 0 {
strip, err := stripIter.MoveNext()
if err == iterator.ErrNoMoreItem {
pw.CloseWithError(io.ErrUnexpectedEOF)
return
}
if err != nil {
pw.CloseWithError(err)
return
}

readRelativePos := readPos - strip.Position
curReadLen := math2.Min(totalReadLen, ecRed.StripSize()-readRelativePos)

err = io2.WriteAll(pw, strip.Data[readRelativePos:readRelativePos+curReadLen])
if err != nil {
pw.CloseWithError(err)
return
}

totalReadLen -= curReadLen
readPos += curReadLen
}
pw.Close()
}()
ft.AddFrom(ioswitch2.NewFromShardstore(req.Detail.Object.FileHash, *strg.Storage.MasterHub, strg.Storage, ioswitch2.RawStream())).AddTo(toExec)
strHandle = handle

return pr, nil
plans := exec.NewPlanBuilder()
if err := parser.Parse(ft, plans); err != nil {
return nil, fmt.Errorf("parsing plan: %w", err)
}

// bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件
if osc == math.MaxFloat64 {
return nil, fmt.Errorf("no enough blocks to reconstruct the object %v , want %d, get only %d", req.Raw.ObjectID, ecRed.K, len(blocks))
}
exeCtx := exec.NewExecContext()
exec.SetValueByType(exeCtx, i.downloader.stgAgts)
exec := plans.Execute(exeCtx)
go exec.Wait(context.TODO())

logger.Debugf("downloading ec object %v from storage %v", req.Raw.ObjectID, stg.Storage.String())
return iter.downloadFromStorage(stg, req)
return exec.BeginRead(strHandle)
}

func (iter *DownloadObjectIterator) sortDownloadStorages(req downloadReqeust2) ([]*downloadStorageInfo, error) {
var stgIDs []cdssdk.StorageID
for _, id := range req.Detail.PinnedAt {
if !lo.Contains(stgIDs, id) {
stgIDs = append(stgIDs, id)
}
}
for _, b := range req.Detail.Blocks {
if !lo.Contains(stgIDs, b.StorageID) {
stgIDs = append(stgIDs, b.StorageID)
func (i *DownloadObjectIterator) downloadECReconstruct(req downloadReqeust2, strg strategy.ECReconstructStrategy) (io.ReadCloser, error) {
var logStrs []any = []any{fmt.Sprintf("downloading ec object %v from: ", req.Raw.ObjectID)}
for i, b := range strg.Blocks {
if i > 0 {
logStrs = append(logStrs, ", ")
}

logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Index, strg.Storages[i].Storage.String()))
}
logger.Debug(logStrs...)

downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo)
for _, id := range req.Detail.PinnedAt {
storage, ok := downloadStorageMap[id]
if !ok {
mod := iter.allStorages[id]
storage = &downloadStorageInfo{
Storage: mod,
ObjectPinned: true,
Distance: iter.getStorageDistance(mod),
}
downloadStorageMap[id] = storage
downloadBlks := make([]downloadBlock, len(strg.Blocks))
for i, b := range strg.Blocks {
downloadBlks[i] = downloadBlock{
Block: b,
Storage: strg.Storages[i],
}

storage.ObjectPinned = true
}

for _, b := range req.Detail.Blocks {
storage, ok := downloadStorageMap[b.StorageID]
if !ok {
mod := iter.allStorages[b.StorageID]
storage = &downloadStorageInfo{
Storage: mod,
Distance: iter.getStorageDistance(mod),
}
downloadStorageMap[b.StorageID] = storage
pr, pw := io.Pipe()
go func() {
readPos := req.Raw.Offset
totalReadLen := req.Detail.Object.Size - req.Raw.Offset
if req.Raw.Length >= 0 {
totalReadLen = math2.Min(req.Raw.Length, totalReadLen)
}

storage.Blocks = append(storage.Blocks, b)
}

return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int {
return sort2.Cmp(left.Distance, right.Distance)
}), nil
}
firstStripIndex := readPos / strg.Redundancy.StripSize()
stripIter := NewStripIterator(i.downloader, req.Detail.Object, downloadBlks, strg.Redundancy, firstStripIndex, i.downloader.strips, i.downloader.cfg.ECStripPrefetchCount)
defer stripIter.Close()

func (iter *DownloadObjectIterator) getMinReadingBlockSolution(sortedStgs []*downloadStorageInfo, k int) (float64, []downloadBlock) {
gotBlocksMap := bitmap.Bitmap64(0)
var gotBlocks []downloadBlock
dist := float64(0.0)
for _, n := range sortedStgs {
for _, b := range n.Blocks {
if !gotBlocksMap.Get(b.Index) {
gotBlocks = append(gotBlocks, downloadBlock{
Storage: n.Storage,
Block: b,
})
gotBlocksMap.Set(b.Index, true)
dist += n.Distance
for totalReadLen > 0 {
strip, err := stripIter.MoveNext()
if err == iterator.ErrNoMoreItem {
pw.CloseWithError(io.ErrUnexpectedEOF)
return
}
if len(gotBlocks) >= k {
return dist, gotBlocks
if err != nil {
pw.CloseWithError(err)
return
}
}
}

return math.MaxFloat64, gotBlocks
}

func (iter *DownloadObjectIterator) getMinReadingObjectSolution(sortedStgs []*downloadStorageInfo, k int) (float64, *stgmod.StorageDetail) {
dist := math.MaxFloat64
var downloadStg *stgmod.StorageDetail
for _, n := range sortedStgs {
if n.ObjectPinned && float64(k)*n.Distance < dist {
dist = float64(k) * n.Distance
stg := n.Storage
downloadStg = &stg
}
}
readRelativePos := readPos - strip.Position
curReadLen := math2.Min(totalReadLen, strg.Redundancy.StripSize()-readRelativePos)

return dist, downloadStg
}
err = io2.WriteAll(pw, strip.Data[readRelativePos:readRelativePos+curReadLen])
if err != nil {
pw.CloseWithError(err)
return
}

func (iter *DownloadObjectIterator) getStorageDistance(stg stgmod.StorageDetail) float64 {
if stgglb.Local.HubID != nil {
if stg.MasterHub.HubID == *stgglb.Local.HubID {
return consts.StorageDistanceSameStorage
totalReadLen -= curReadLen
readPos += curReadLen
}
}

if stg.MasterHub.LocationID == stgglb.Local.LocationID {
return consts.StorageDistanceSameLocation
}

c := iter.downloader.conn.Get(stg.MasterHub.HubID)
if c == nil || c.Delay == nil || *c.Delay > time.Duration(float64(time.Millisecond)*iter.downloader.cfg.HighLatencyHubMs) {
return consts.HubDistanceHighLatencyHub
}

return consts.StorageDistanceOther
}

func (iter *DownloadObjectIterator) downloadFromStorage(stg *stgmod.StorageDetail, req downloadReqeust2) (io.ReadCloser, error) {
var strHandle *exec.DriverReadStream
ft := ioswitch2.NewFromTo()

toExec, handle := ioswitch2.NewToDriver(ioswitch2.RawStream())
toExec.Range = exec.Range{
Offset: req.Raw.Offset,
}
if req.Raw.Length != -1 {
len := req.Raw.Length
toExec.Range.Length = &len
}

ft.AddFrom(ioswitch2.NewFromShardstore(req.Detail.Object.FileHash, *stg.MasterHub, stg.Storage, ioswitch2.RawStream())).AddTo(toExec)
strHandle = handle

plans := exec.NewPlanBuilder()
if err := parser.Parse(ft, plans); err != nil {
return nil, fmt.Errorf("parsing plan: %w", err)
}

exeCtx := exec.NewExecContext()
exec.SetValueByType(exeCtx, iter.downloader.stgMgr)
exec := plans.Execute(exeCtx)
go exec.Wait(context.TODO())
pw.Close()
}()

return exec.BeginRead(strHandle)
return pr, nil
}

+ 17
- 31
common/pkgs/downloader/lrc.go View File

@@ -6,44 +6,30 @@ import (

"gitlink.org.cn/cloudream/common/pkgs/iterator"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
)

func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red *cdssdk.LRCRedundancy) (io.ReadCloser, error) {
allStgs, err := iter.sortDownloadStorages(req)
if err != nil {
return nil, err
}

var blocks []downloadBlock
selectedBlkIdx := make(map[int]bool)
for _, stg := range allStgs {
for _, b := range stg.Blocks {
if b.Index >= red.M() || selectedBlkIdx[b.Index] {
continue
}
blocks = append(blocks, downloadBlock{
Storage: stg.Storage,
Block: b,
})
selectedBlkIdx[b.Index] = true
}
}
if len(blocks) < red.K {
return nil, fmt.Errorf("not enough blocks to download lrc object")
}

var logStrs []any = []any{"downloading lrc object from blocks: "}
for i, b := range blocks {
func (iter *DownloadObjectIterator) downloadLRCReconstruct(req downloadReqeust2, strg strategy.LRCReconstructStrategy) (io.ReadCloser, error) {
var logStrs []any = []any{fmt.Sprintf("downloading lrc object %v from: ", req.Raw.ObjectID)}
for i, b := range strg.Blocks {
if i > 0 {
logStrs = append(logStrs, ", ")
}
logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Block.Index, b.Storage.Storage.String()))

logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Index, strg.Storages[i].Storage.String()))
}
logger.Debug(logStrs...)

downloadBlks := make([]downloadBlock, len(strg.Blocks))
for i, b := range strg.Blocks {
downloadBlks[i] = downloadBlock{
Block: b,
Storage: strg.Storages[i],
}
}

pr, pw := io.Pipe()
go func() {
readPos := req.Raw.Offset
@@ -52,8 +38,8 @@ func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red
totalReadLen = math2.Min(req.Raw.Length, totalReadLen)
}

firstStripIndex := readPos / int64(red.K) / int64(red.ChunkSize)
stripIter := NewLRCStripIterator(iter.downloader, req.Detail.Object, blocks, red, firstStripIndex, iter.downloader.strips, iter.downloader.cfg.ECStripPrefetchCount)
firstStripIndex := readPos / int64(strg.Redundancy.K) / int64(strg.Redundancy.ChunkSize)
stripIter := NewLRCStripIterator(iter.downloader, req.Detail.Object, downloadBlks, strg.Redundancy, firstStripIndex, iter.downloader.strips, iter.downloader.cfg.ECStripPrefetchCount)
defer stripIter.Close()

for totalReadLen > 0 {
@@ -68,7 +54,7 @@ func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red
}

readRelativePos := readPos - strip.Position
nextStripPos := strip.Position + int64(red.K)*int64(red.ChunkSize)
nextStripPos := strip.Position + int64(strg.Redundancy.K)*int64(strg.Redundancy.ChunkSize)
curReadLen := math2.Min(totalReadLen, nextStripPos-readPos)

err = io2.WriteAll(pw, strip.Data[readRelativePos:readRelativePos+curReadLen])


+ 5
- 4
common/pkgs/downloader/lrc_strip_iterator.go View File

@@ -9,6 +9,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/iterator"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc/parser"
)
@@ -17,7 +18,7 @@ type LRCStripIterator struct {
downloder *Downloader
object cdssdk.Object
blocks []downloadBlock
red *cdssdk.LRCRedundancy
red cdssdk.LRCRedundancy
curStripIndex int64
cache *StripCache
dataChan chan dataChanEntry
@@ -26,7 +27,7 @@ type LRCStripIterator struct {
inited bool
}

func NewLRCStripIterator(downloder *Downloader, object cdssdk.Object, blocks []downloadBlock, red *cdssdk.LRCRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *LRCStripIterator {
func NewLRCStripIterator(downloder *Downloader, object cdssdk.Object, blocks []downloadBlock, red cdssdk.LRCRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *LRCStripIterator {
if maxPrefetch <= 0 {
maxPrefetch = 1
}
@@ -101,7 +102,7 @@ func (s *LRCStripIterator) downloading() {
froms = append(froms, ioswitchlrc.NewFromStorage(b.Block.FileHash, *stg.MasterHub, stg.Storage, b.Block.Index))
}

toExec, hd := ioswitchlrc.NewToDriverWithRange(-1, exec.Range{
toExec, hd := ioswitchlrc.NewToDriverWithRange(-1, math2.Range{
Offset: s.curStripIndex * int64(s.red.ChunkSize*s.red.K),
})

@@ -113,7 +114,7 @@ func (s *LRCStripIterator) downloading() {
}

exeCtx := exec.NewExecContext()
exec.SetValueByType(exeCtx, s.downloder.stgMgr)
exec.SetValueByType(exeCtx, s.downloder.stgAgts)

exec := plans.Execute(exeCtx)



+ 6
- 0
common/pkgs/downloader/strategy/config.go View File

@@ -0,0 +1,6 @@
package strategy

type Config struct {
// 当到下载节点的延迟高于这个值时,该节点在评估时会有更高的分数惩罚,单位:ms
HighLatencyHubMs float64 `json:"highLatencyHubMs"`
}

+ 337
- 0
common/pkgs/downloader/strategy/selector.go View File

@@ -0,0 +1,337 @@
package strategy

import (
"fmt"
"math"
"reflect"
"time"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/bitmap"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/common/utils/sort2"
"gitlink.org.cn/cloudream/storage/common/consts"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache"
)

// Request describes a single download request to be planned by the Selector.
type Request struct {
	Detail stgmod.ObjectDetail // full metadata of the object to download
	Range  math2.Range         // byte range of the object to read
	// DestHub may be 0. When it is non-zero, DestLocation is ignored.
	DestHub cdssdk.HubID
	// DestLocation may be 0.
	DestLocation cdssdk.LocationID
}

// Strategy is the result of a selection: a concrete plan describing how the
// object should be downloaded. All strategies expose the object's metadata.
type Strategy interface {
	GetDetail() stgmod.ObjectDetail
}

// DirectStrategy downloads the complete object directly from a single storage.
type DirectStrategy struct {
	Detail  stgmod.ObjectDetail
	Storage stgmod.StorageDetail
}

// GetDetail returns the object metadata this strategy was built for.
func (s *DirectStrategy) GetDetail() stgmod.ObjectDetail {
	return s.Detail
}

// ECReconstructStrategy reconstructs the object from erasure-coded blocks.
// Blocks and Storages are parallel slices: Blocks[i] is read from Storages[i].
type ECReconstructStrategy struct {
	Detail     stgmod.ObjectDetail
	Redundancy cdssdk.ECRedundancy
	Blocks     []stgmod.ObjectBlock
	Storages   []stgmod.StorageDetail
}

// GetDetail returns the object metadata this strategy was built for.
func (s *ECReconstructStrategy) GetDetail() stgmod.ObjectDetail {
	return s.Detail
}

// LRCReconstructStrategy reconstructs the object from LRC-coded blocks.
// Blocks and Storages are parallel slices: Blocks[i] is read from Storages[i].
type LRCReconstructStrategy struct {
	Detail     stgmod.ObjectDetail
	Redundancy cdssdk.LRCRedundancy
	Blocks     []stgmod.ObjectBlock
	Storages   []stgmod.StorageDetail
}

// GetDetail returns the object metadata this strategy was built for.
func (s *LRCReconstructStrategy) GetDetail() stgmod.ObjectDetail {
	return s.Detail
}

// Selector picks download strategies for objects using cached storage, hub
// and connectivity metadata.
type Selector struct {
	cfg          Config
	storageMeta  *metacache.StorageMeta
	hubMeta      *metacache.HubMeta
	connectivity *metacache.Connectivity
}

// NewSelector creates a Selector backed by the given config and metadata caches.
func NewSelector(cfg Config, storageMeta *metacache.StorageMeta, hubMeta *metacache.HubMeta, connectivity *metacache.Connectivity) *Selector {
	return &Selector{
		cfg:          cfg,
		storageMeta:  storageMeta,
		hubMeta:      hubMeta,
		connectivity: connectivity,
	}
}

// Select picks a download strategy for the request, dispatching on the
// object's redundancy type. It returns an error for unknown redundancy kinds.
func (s *Selector) Select(req Request) (Strategy, error) {
	internal := request2{
		Detail:       req.Detail,
		Range:        req.Range,
		DestLocation: req.DestLocation,
	}
	// Resolve the destination hub's metadata only when a hub was specified.
	if req.DestHub != 0 {
		internal.DestHub = s.hubMeta.Get(req.DestHub)
	}

	switch red := req.Detail.Object.Redundancy.(type) {
	case *cdssdk.NoneRedundancy, *cdssdk.RepRedundancy:
		// Both cases read a complete copy; no redundancy parameters needed.
		return s.selectForNoneOrRep(internal)

	case *cdssdk.ECRedundancy:
		return s.selectForEC(internal, *red)

	case *cdssdk.LRCRedundancy:
		return s.selectForLRC(internal, *red)

	default:
		return nil, fmt.Errorf("unsupported redundancy type: %v of object %v", reflect.TypeOf(req.Detail.Object.Redundancy), req.Detail.Object.ObjectID)
	}
}

// downloadStorageInfo aggregates what a single storage can offer for a
// download: whether the full object is pinned there, which blocks it holds,
// and its distance (read cost) from the requester.
type downloadStorageInfo struct {
	Storage      stgmod.StorageDetail
	ObjectPinned bool
	Blocks       []stgmod.ObjectBlock
	Distance     float64
}

// downloadBlock pairs an object block with the storage it will be read from.
type downloadBlock struct {
	Storage stgmod.StorageDetail
	Block   stgmod.ObjectBlock
}

// request2 is the internal form of Request with the destination hub resolved
// to its metadata (nil when the request did not specify a hub).
type request2 struct {
	Detail       stgmod.ObjectDetail
	Range        math2.Range
	DestHub      *cdssdk.Hub
	DestLocation cdssdk.LocationID
}

// selectForNoneOrRep plans the download of an object stored without erasure
// coding (none or replica redundancy): read the whole object from the
// nearest storage.
func (s *Selector) selectForNoneOrRep(req request2) (Strategy, error) {
	sortedStgs := s.sortDownloadStorages(req)
	if len(sortedStgs) == 0 {
		return nil, fmt.Errorf("no storage available for download")
	}

	// Require at least one readable block/copy before committing to a plan.
	_, blks := s.getMinReadingBlockSolution(sortedStgs, 1)
	if len(blks) == 0 {
		return nil, fmt.Errorf("no block available for download")
	}

	// NOTE(review): the strategy reads from sortedStgs[0] (the nearest
	// storage), which is not necessarily the storage that provided blks[0]
	// above — confirm this is intentional for replica objects.
	return &DirectStrategy{
		Detail:  req.Detail,
		Storage: sortedStgs[0].Storage,
	}, nil
}

// selectForEC plans the download of an erasure-coded object. It compares the
// cost of reconstructing from K blocks (bsc) against the cost of reading a
// complete pinned copy (osc), and picks the cheaper option.
func (s *Selector) selectForEC(req request2, red cdssdk.ECRedundancy) (Strategy, error) {
	sortedStgs := s.sortDownloadStorages(req)
	if len(sortedStgs) == 0 {
		return nil, fmt.Errorf("no storage available for download")
	}

	bsc, blocks := s.getMinReadingBlockSolution(sortedStgs, red.K)
	osc, stg := s.getMinReadingObjectSolution(sortedStgs, red.K)

	// Reconstructing from blocks is strictly cheaper than reading a full copy.
	if bsc < osc {
		bs := make([]stgmod.ObjectBlock, len(blocks))
		ss := make([]stgmod.StorageDetail, len(blocks))
		for i, b := range blocks {
			bs[i] = b.Block
			ss[i] = b.Storage
		}

		return &ECReconstructStrategy{
			Detail:     req.Detail,
			Redundancy: red,
			Blocks:     bs,
			Storages:   ss,
		}, nil
	}

	// bsc >= osc here. If osc is MaxFloat64 then bsc must be too, which
	// means there are not enough blocks (nor a full copy) to recover the file.
	if osc == math.MaxFloat64 {
		return nil, fmt.Errorf("no enough blocks to reconstruct the object %v , want %d, get only %d", req.Detail.Object.ObjectID, red.K, len(blocks))
	}

	return &DirectStrategy{
		Detail:  req.Detail,
		Storage: stg,
	}, nil
}

// selectForLRC builds a reconstruction strategy for an LRC-coded object by
// collecting, from the nearest storages first, one copy of every distinct
// block whose index is below red.M().
func (s *Selector) selectForLRC(req request2, red cdssdk.LRCRedundancy) (Strategy, error) {
	sortedStgs := s.sortDownloadStorages(req)
	if len(sortedStgs) == 0 {
		return nil, fmt.Errorf("no storage available for download")
	}

	var picked []downloadBlock
	seen := make(map[int]bool)
	for _, stg := range sortedStgs {
		for _, blk := range stg.Blocks {
			// Skip blocks outside the reconstructable range and duplicates.
			if blk.Index >= red.M() || seen[blk.Index] {
				continue
			}
			seen[blk.Index] = true
			picked = append(picked, downloadBlock{
				Storage: stg.Storage,
				Block:   blk,
			})
		}
	}
	if len(picked) < red.K {
		return nil, fmt.Errorf("not enough blocks to download lrc object")
	}

	blockList := make([]stgmod.ObjectBlock, len(picked))
	stgList := make([]stgmod.StorageDetail, len(picked))
	for i, p := range picked {
		blockList[i] = p.Block
		stgList[i] = p.Storage
	}

	return &LRCReconstructStrategy{
		Detail:     req.Detail,
		Redundancy: red,
		Blocks:     blockList,
		Storages:   stgList,
	}, nil
}

// sortDownloadStorages gathers every storage that either pins the object or
// holds one of its blocks, and returns them sorted by ascending distance
// (cheapest to read from first). Storages with missing metadata or without a
// master hub are skipped, since they cannot serve downloads.
//
// Fix: the original built a stgIDs slice with two O(n^2) lo.Contains scans
// over PinnedAt and Blocks, but never read it afterwards — dead work, removed.
func (s *Selector) sortDownloadStorages(req request2) []*downloadStorageInfo {
	downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo)

	for _, id := range req.Detail.PinnedAt {
		storage, ok := downloadStorageMap[id]
		if !ok {
			mod := s.storageMeta.Get(id)
			if mod == nil || mod.MasterHub == nil {
				continue
			}

			storage = &downloadStorageInfo{
				Storage:      *mod,
				ObjectPinned: true,
				Distance:     s.getStorageDistance(req, *mod),
			}
			downloadStorageMap[id] = storage
		}

		storage.ObjectPinned = true
	}

	for _, b := range req.Detail.Blocks {
		storage, ok := downloadStorageMap[b.StorageID]
		if !ok {
			mod := s.storageMeta.Get(b.StorageID)
			if mod == nil || mod.MasterHub == nil {
				continue
			}

			storage = &downloadStorageInfo{
				Storage:  *mod,
				Distance: s.getStorageDistance(req, *mod),
			}
			downloadStorageMap[b.StorageID] = storage
		}

		storage.Blocks = append(storage.Blocks, b)
	}

	return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int {
		return sort2.Cmp(left.Distance, right.Distance)
	})
}

// getStorageDistance scores how "far" a candidate source storage is from the
// request's destination; smaller is closer/cheaper. Distance values come from
// the consts package.
func (s *Selector) getStorageDistance(req request2, src stgmod.StorageDetail) float64 {
	if req.DestHub != nil {
		// Source's master hub is the destination hub itself.
		if src.MasterHub.HubID == req.DestHub.HubID {
			return consts.StorageDistanceSameStorage
		}

		// Same physical location as the destination hub.
		if src.MasterHub.LocationID == req.DestHub.LocationID {
			return consts.StorageDistanceSameLocation
		}

		// Unknown connectivity or latency above the configured threshold is
		// penalized with the high-latency distance.
		latency := s.connectivity.Get(src.MasterHub.HubID, req.DestHub.HubID)
		if latency == nil || *latency > time.Duration(float64(time.Millisecond)*s.cfg.HighLatencyHubMs) {
			return consts.HubDistanceHighLatencyHub
		}

		return consts.StorageDistanceOther
	}

	// No destination hub: fall back to comparing locations when given.
	if req.DestLocation != 0 {
		if src.MasterHub.LocationID == req.DestLocation {
			return consts.StorageDistanceSameLocation
		}
	}

	return consts.StorageDistanceOther
}

// getMinReadingBlockSolution greedily picks k distinct blocks from the
// distance-sorted storage list and returns the accumulated distance cost plus
// the chosen blocks. If fewer than k distinct blocks exist, the cost is
// math.MaxFloat64 and the partial pick is returned.
func (s *Selector) getMinReadingBlockSolution(sortedStgs []*downloadStorageInfo, k int) (float64, []downloadBlock) {
	seen := bitmap.Bitmap64(0)
	var picked []downloadBlock
	totalDist := 0.0

	for _, info := range sortedStgs {
		for _, blk := range info.Blocks {
			if seen.Get(blk.Index) {
				continue
			}
			seen.Set(blk.Index, true)
			picked = append(picked, downloadBlock{
				Storage: info.Storage,
				Block:   blk,
			})
			totalDist += info.Distance

			if len(picked) >= k {
				return totalDist, picked
			}
		}
	}

	return math.MaxFloat64, picked
}

// getMinReadingObjectSolution finds the pinned full copy whose cost of reading
// the whole object (k times its distance) is lowest. It returns
// math.MaxFloat64 and a zero StorageDetail when no storage pins the object.
func (s *Selector) getMinReadingObjectSolution(sortedStgs []*downloadStorageInfo, k int) (float64, stgmod.StorageDetail) {
	best := math.MaxFloat64
	var bestStg stgmod.StorageDetail

	for _, info := range sortedStgs {
		if !info.ObjectPinned {
			continue
		}
		if cost := float64(k) * info.Distance; cost < best {
			best = cost
			bestStg = info.Storage
		}
	}

	return best, bestStg
}

+ 7
- 6
common/pkgs/downloader/strip_iterator.go View File

@@ -9,6 +9,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/iterator"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser"
@@ -28,7 +29,7 @@ type StripIterator struct {
downloader *Downloader
object cdssdk.Object
blocks []downloadBlock
red *cdssdk.ECRedundancy
red cdssdk.ECRedundancy
curStripIndex int64
cache *StripCache
dataChan chan dataChanEntry
@@ -46,7 +47,7 @@ type dataChanEntry struct {
Error error
}

func NewStripIterator(downloader *Downloader, object cdssdk.Object, blocks []downloadBlock, red *cdssdk.ECRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *StripIterator {
func NewStripIterator(downloader *Downloader, object cdssdk.Object, blocks []downloadBlock, red cdssdk.ECRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *StripIterator {
if maxPrefetch <= 0 {
maxPrefetch = 1
}
@@ -199,13 +200,13 @@ func (s *StripIterator) readStrip(stripIndex int64, buf []byte) (int, error) {
}

ft := ioswitch2.NewFromTo()
ft.ECParam = s.red
ft.ECParam = &s.red
for _, b := range s.blocks {
stg := b.Storage
ft.AddFrom(ioswitch2.NewFromShardstore(b.Block.FileHash, *stg.MasterHub, stg.Storage, ioswitch2.ECSrteam(b.Block.Index)))
ft.AddFrom(ioswitch2.NewFromShardstore(b.Block.FileHash, *stg.MasterHub, stg, ioswitch2.ECStream(b.Block.Index)))
}

toExec, hd := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), exec.Range{
toExec, hd := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), math2.Range{
Offset: stripIndex * s.red.StripSize(),
})
ft.AddTo(toExec)
@@ -217,7 +218,7 @@ func (s *StripIterator) readStrip(stripIndex int64, buf []byte) (int, error) {
}

exeCtx := exec.NewExecContext()
exec.SetValueByType(exeCtx, s.downloader.stgMgr)
exec.SetValueByType(exeCtx, s.downloader.stgAgts)
exec := plans.Execute(exeCtx)

ctx, cancel := context.WithCancel(context.Background())


+ 21
- 24
common/pkgs/ioswitch2/fromto.go View File

@@ -3,6 +3,7 @@ package ioswitch2
import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
)

@@ -14,7 +15,7 @@ type To interface {
// To所需要的文件流的范围。具体含义与DataIndex有关系:
// 如果DataIndex == -1,则表示在整个文件的范围。
// 如果DataIndex >= 0,则表示在文件的某个分片的范围。
GetRange() exec.Range
GetRange() math2.Range
GetStreamIndex() StreamIndex
}

@@ -38,7 +39,7 @@ func RawStream() StreamIndex {
}
}

func ECSrteam(index int) StreamIndex {
func ECStream(index int) StreamIndex {
return StreamIndex{
Type: StreamIndexEC,
Index: index,
@@ -96,7 +97,7 @@ type FromDriver struct {

func NewFromDriver(strIdx StreamIndex) (*FromDriver, *exec.DriverWriteStream) {
handle := &exec.DriverWriteStream{
RangeHint: &exec.Range{},
RangeHint: &math2.Range{},
}
return &FromDriver{
Handle: handle,
@@ -111,11 +112,11 @@ func (f *FromDriver) GetStreamIndex() StreamIndex {
type FromShardstore struct {
FileHash cdssdk.FileHash
Hub cdssdk.Hub
Storage cdssdk.Storage
Storage stgmod.StorageDetail
StreamIndex StreamIndex
}

func NewFromShardstore(fileHash cdssdk.FileHash, hub cdssdk.Hub, storage cdssdk.Storage, strIdx StreamIndex) *FromShardstore {
func NewFromShardstore(fileHash cdssdk.FileHash, hub cdssdk.Hub, storage stgmod.StorageDetail, strIdx StreamIndex) *FromShardstore {
return &FromShardstore{
FileHash: fileHash,
Hub: hub,
@@ -131,7 +132,7 @@ func (f *FromShardstore) GetStreamIndex() StreamIndex {
type ToDriver struct {
Handle *exec.DriverReadStream
StreamIndex StreamIndex
Range exec.Range
Range math2.Range
}

func NewToDriver(strIdx StreamIndex) (*ToDriver, *exec.DriverReadStream) {
@@ -142,7 +143,7 @@ func NewToDriver(strIdx StreamIndex) (*ToDriver, *exec.DriverReadStream) {
}, &str
}

func NewToDriverWithRange(strIdx StreamIndex, rng exec.Range) (*ToDriver, *exec.DriverReadStream) {
func NewToDriverWithRange(strIdx StreamIndex, rng math2.Range) (*ToDriver, *exec.DriverReadStream) {
str := exec.DriverReadStream{}
return &ToDriver{
Handle: &str,
@@ -155,7 +156,7 @@ func (t *ToDriver) GetStreamIndex() StreamIndex {
return t.StreamIndex
}

func (t *ToDriver) GetRange() exec.Range {
func (t *ToDriver) GetRange() math2.Range {
return t.Range
}

@@ -163,7 +164,7 @@ type ToShardStore struct {
Hub cdssdk.Hub
Storage stgmod.StorageDetail
StreamIndex StreamIndex
Range exec.Range
Range math2.Range
FileHashStoreKey string
}

@@ -176,7 +177,7 @@ func NewToShardStore(hub cdssdk.Hub, stg stgmod.StorageDetail, strIdx StreamInde
}
}

func NewToShardStoreWithRange(hub cdssdk.Hub, stg stgmod.StorageDetail, streamIndex StreamIndex, fileHashStoreKey string, rng exec.Range) *ToShardStore {
func NewToShardStoreWithRange(hub cdssdk.Hub, stg stgmod.StorageDetail, streamIndex StreamIndex, fileHashStoreKey string, rng math2.Range) *ToShardStore {
return &ToShardStore{
Hub: hub,
Storage: stg,
@@ -190,25 +191,21 @@ func (t *ToShardStore) GetStreamIndex() StreamIndex {
return t.StreamIndex
}

func (t *ToShardStore) GetRange() exec.Range {
func (t *ToShardStore) GetRange() math2.Range {
return t.Range
}

type LoadToShared struct {
Hub cdssdk.Hub
Storage cdssdk.Storage
UserID cdssdk.UserID
PackageID cdssdk.PackageID
Path string
Hub cdssdk.Hub
Storage stgmod.StorageDetail
ObjectPath string
}

func NewLoadToShared(hub cdssdk.Hub, storage cdssdk.Storage, userID cdssdk.UserID, packageID cdssdk.PackageID, path string) *LoadToShared {
func NewLoadToShared(hub cdssdk.Hub, storage stgmod.StorageDetail, objectPath string) *LoadToShared {
return &LoadToShared{
Hub: hub,
Storage: storage,
UserID: userID,
PackageID: packageID,
Path: path,
Hub: hub,
Storage: storage,
ObjectPath: objectPath,
}
}

@@ -218,6 +215,6 @@ func (t *LoadToShared) GetStreamIndex() StreamIndex {
}
}

func (t *LoadToShared) GetRange() exec.Range {
return exec.Range{}
func (t *LoadToShared) GetRange() math2.Range {
return math2.Range{}
}

+ 100
- 12
common/pkgs/ioswitch2/ops2/bypass.go View File

@@ -6,7 +6,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
)

@@ -14,6 +14,9 @@ func init() {
exec.UseOp[*BypassToShardStore]()
exec.UseVarValue[*BypassFileInfoValue]()
exec.UseVarValue[*BypassHandleResultValue]()

exec.UseOp[*BypassFromShardStore]()
exec.UseVarValue[*BypassFilePathValue]()
}

type BypassFileInfoValue struct {
@@ -44,19 +47,19 @@ type BypassToShardStore struct {
}

func (o *BypassToShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
svcMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx)
stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx)
if err != nil {
return err
}

shardStore, err := svcMgr.GetShardStore(o.StorageID)
shardStore, err := stgAgts.GetShardStore(o.StorageID)
if err != nil {
return err
}

notifier, ok := shardStore.(types.BypassNotifier)
br, ok := shardStore.(types.BypassWrite)
if !ok {
return fmt.Errorf("shard store %v not support bypass", o.StorageID)
return fmt.Errorf("shard store %v not support bypass write", o.StorageID)
}

fileInfo, err := exec.BindVar[*BypassFileInfoValue](e, ctx.Context, o.BypassFileInfo)
@@ -64,7 +67,7 @@ func (o *BypassToShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor) er
return err
}

err = notifier.BypassUploaded(fileInfo.BypassFileInfo)
err = br.BypassUploaded(fileInfo.BypassFileInfo)
if err != nil {
return err
}
@@ -78,6 +81,52 @@ func (o *BypassToShardStore) String() string {
return fmt.Sprintf("BypassToShardStore[StorageID:%v] Info: %v, Callback: %v", o.StorageID, o.BypassFileInfo, o.BypassCallback)
}

type BypassFilePathValue struct {
types.BypassFilePath
}

func (v *BypassFilePathValue) Clone() exec.VarValue {
return &BypassFilePathValue{
BypassFilePath: v.BypassFilePath,
}
}

type BypassFromShardStore struct {
StorageID cdssdk.StorageID
FileHash cdssdk.FileHash
Output exec.VarID
}

func (o *BypassFromShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx)
if err != nil {
return err
}

shardStore, err := stgAgts.GetShardStore(o.StorageID)
if err != nil {
return err
}

br, ok := shardStore.(types.BypassRead)
if !ok {
return fmt.Errorf("shard store %v not support bypass read", o.StorageID)
}

path, err := br.BypassRead(o.FileHash)
if err != nil {
return err
}

e.PutVar(o.Output, &BypassFilePathValue{BypassFilePath: path})
return nil
}

func (o *BypassFromShardStore) String() string {
return fmt.Sprintf("BypassFromShardStore[StorageID:%v] FileHash: %v, Output: %v", o.StorageID, o.FileHash, o.Output)
}

// 旁路写入
type BypassToShardStoreNode struct {
dag.NodeBase
StorageID cdssdk.StorageID
@@ -103,19 +152,58 @@ func (n *BypassToShardStoreNode) BypassFileInfoSlot() dag.ValueInputSlot {
}
}

func (n *BypassToShardStoreNode) BypassCallbackVar() *dag.ValueVar {
return n.OutputValues().Get(0)
func (n *BypassToShardStoreNode) BypassCallbackVar() dag.ValueOutputSlot {
return dag.ValueOutputSlot{
Node: n,
Index: 0,
}
}

func (n *BypassToShardStoreNode) FileHashVar() *dag.ValueVar {
return n.OutputValues().Get(1)
func (n *BypassToShardStoreNode) FileHashVar() dag.ValueOutputSlot {
return dag.ValueOutputSlot{
Node: n,
Index: 1,
}
}

func (t *BypassToShardStoreNode) GenerateOp() (exec.Op, error) {
return &BypassToShardStore{
StorageID: t.StorageID,
BypassFileInfo: t.BypassFileInfoSlot().Var().VarID,
BypassCallback: t.BypassCallbackVar().VarID,
FileHash: t.FileHashVar().VarID,
BypassCallback: t.BypassCallbackVar().Var().VarID,
FileHash: t.FileHashVar().Var().VarID,
}, nil
}

// 旁路读取
type BypassFromShardStoreNode struct {
dag.NodeBase
StorageID cdssdk.StorageID
FileHash cdssdk.FileHash
}

func (b *GraphNodeBuilder) NewBypassFromShardStore(storageID cdssdk.StorageID, fileHash cdssdk.FileHash) *BypassFromShardStoreNode {
node := &BypassFromShardStoreNode{
StorageID: storageID,
FileHash: fileHash,
}
b.AddNode(node)

node.OutputValues().Init(node, 1)
return node
}

func (n *BypassFromShardStoreNode) FilePathVar() dag.ValueOutputSlot {
return dag.ValueOutputSlot{
Node: n,
Index: 0,
}
}

func (n *BypassFromShardStoreNode) GenerateOp() (exec.Op, error) {
return &BypassFromShardStore{
StorageID: n.StorageID,
FileHash: n.FileHash,
Output: n.FilePathVar().Var().VarID,
}, nil
}

+ 4
- 1
common/pkgs/ioswitch2/ops2/chunked.go View File

@@ -37,7 +37,10 @@ func (o *ChunkedSplit) Execute(ctx *exec.ExecContext, e *exec.Executor) error {

sem := semaphore.NewWeighted(int64(len(outputs)))
for i := range outputs {
sem.Acquire(ctx.Context, 1)
err = sem.Acquire(ctx.Context, 1)
if err != nil {
return err
}

e.PutVar(o.Outputs[i], &exec.StreamValue{
Stream: io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {


+ 2
- 1
common/pkgs/ioswitch2/ops2/driver.go View File

@@ -3,6 +3,7 @@ package ops2
import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
)

@@ -45,7 +46,7 @@ type ToDriverNode struct {
dag.NodeBase
To ioswitch2.To
Handle *exec.DriverReadStream
Range exec.Range
Range math2.Range
}

func (b *GraphNodeBuilder) NewToDriver(to ioswitch2.To, handle *exec.DriverReadStream) *ToDriverNode {


+ 49
- 9
common/pkgs/ioswitch2/ops2/ec.go View File

@@ -10,6 +10,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/utils"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/common/utils/sync2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ec"
)
@@ -45,20 +46,35 @@ func (o *ECMultiply) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
outputWrs[i] = wr
}

fut := future.NewSetVoid()
go func() {
mul := ec.GaloisMultiplier().BuildGalois()
inputChunks := make([][]byte, len(o.Inputs))
for i := range o.Inputs {
inputChunks[i] = make([]byte, math2.Min(o.ChunkSize, 64*1024))
}

inputChunks := make([][]byte, len(o.Inputs))
for i := range o.Inputs {
inputChunks[i] = make([]byte, o.ChunkSize)
}
// 输出用两个缓冲轮换
outputBufPool := sync2.NewBucketPool[[][]byte]()
for i := 0; i < 2; i++ {
outputChunks := make([][]byte, len(o.Outputs))
for i := range o.Outputs {
outputChunks[i] = make([]byte, o.ChunkSize)
outputChunks[i] = make([]byte, math2.Min(o.ChunkSize, 64*1024))
}
outputBufPool.PutEmpty(outputChunks)
}

fut := future.NewSetVoid()
go func() {
mul := ec.GaloisMultiplier().BuildGalois()
defer outputBufPool.WakeUpAll()

readLens := math2.SplitLessThan(o.ChunkSize, 64*1024)
readLenIdx := 0

for {
curReadLen := readLens[readLenIdx]
for i := range inputChunks {
inputChunks[i] = inputChunks[i][:curReadLen]
}

err := sync2.ParallelDo(inputs, func(s *exec.StreamValue, i int) error {
_, err := io.ReadFull(s.Stream, inputChunks[i])
return err
@@ -72,12 +88,34 @@ func (o *ECMultiply) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
return
}

err = mul.Multiply(o.Coef, inputChunks, outputChunks)
outputBuf, ok := outputBufPool.GetEmpty()
if !ok {
return
}
for i := range outputBuf {
outputBuf[i] = outputBuf[i][:curReadLen]
}

err = mul.Multiply(o.Coef, inputChunks, outputBuf)
if err != nil {
fut.SetError(err)
return
}

outputBufPool.PutFilled(outputBuf)
readLenIdx = (readLenIdx + 1) % len(readLens)
}
}()

go func() {
defer outputBufPool.WakeUpAll()

for {
outputChunks, ok := outputBufPool.GetFilled()
if !ok {
return
}

for i := range o.Outputs {
err := io2.WriteAll(outputWrs[i], outputChunks[i])
if err != nil {
@@ -85,6 +123,8 @@ func (o *ECMultiply) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
return
}
}

outputBufPool.PutEmpty(outputChunks)
}
}()



+ 6
- 4
common/pkgs/ioswitch2/ops2/faas.go View File

@@ -1,12 +1,13 @@
package ops2

/*
import (
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
)

@@ -19,17 +20,17 @@ type InternalFaaSGalMultiply struct {
}

func (o *InternalFaaSGalMultiply) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx)
stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx)
if err != nil {
return err
}

fass, err := svcmgr.GetComponent[types.InternalFaaSCall](stgMgr, o.StorageID)
fass, err := agtpool.GetComponent[types.InternalFaaSCall](stgAgts, o.StorageID)
if err != nil {
return fmt.Errorf("getting faas component: %w", err)
}

tmp, err := svcmgr.GetComponent[types.TempStore](stgMgr, o.StorageID)
tmp, err := agtpool.GetComponent[types.TempStore](stgAgts, o.StorageID)
if err != nil {
return fmt.Errorf("getting temp store component: %w", err)
}
@@ -58,3 +59,4 @@ func (o *InternalFaaSGalMultiply) Execute(ctx *exec.ExecContext, e *exec.Executo
exec.PutArray(e, o.OutputFilePathes, outputVars)
return nil
}
*/

+ 30
- 18
common/pkgs/ioswitch2/ops2/multipart.go View File

@@ -48,20 +48,22 @@ type MultipartInitiator struct {
}

func (o *MultipartInitiator) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
initiator, err := factory.CreateComponent[types.MultipartInitiator](o.Storage)
blder := factory.GetBuilder(o.Storage)
multi, err := blder.CreateMultiparter()
if err != nil {
return err
}
defer initiator.Abort()

// 启动一个新的上传任务
initState, err := initiator.Initiate(ctx.Context)
// 启动一个新的上传任务
multiTask, err := multi.Initiate(ctx.Context)
if err != nil {
return err
}
defer multiTask.Abort()

// 分发上传参数
e.PutVar(o.UploadArgs, &MultipartUploadArgsValue{
InitState: initState,
InitState: multiTask.InitState(),
})

// 收集分片上传结果
@@ -76,7 +78,7 @@ func (o *MultipartInitiator) Execute(ctx *exec.ExecContext, e *exec.Executor) er
}

// 合并分片
fileInfo, err := initiator.JoinParts(ctx.Context, partInfos)
fileInfo, err := multiTask.JoinParts(ctx.Context, partInfos)
if err != nil {
return fmt.Errorf("completing multipart upload: %v", err)
}
@@ -93,7 +95,7 @@ func (o *MultipartInitiator) Execute(ctx *exec.ExecContext, e *exec.Executor) er
}

if cb.Commited {
initiator.Complete()
multiTask.Complete()
}

return nil
@@ -113,6 +115,7 @@ type MultipartUpload struct {
}

func (o *MultipartUpload) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
blder := factory.GetBuilder(o.Storage)
uploadArgs, err := exec.BindVar[*MultipartUploadArgsValue](e, ctx.Context, o.UploadArgs)
if err != nil {
return err
@@ -124,13 +127,13 @@ func (o *MultipartUpload) Execute(ctx *exec.ExecContext, e *exec.Executor) error
}
defer partStr.Stream.Close()

uploader, err := factory.CreateComponent[types.MultipartUploader](o.Storage)
multi, err := blder.CreateMultiparter()
if err != nil {
return err
}

startTime := time.Now()
uploadedInfo, err := uploader.UploadPart(ctx.Context, uploadArgs.InitState, o.PartSize, o.PartNumber, partStr.Stream)
uploadedInfo, err := multi.UploadPart(ctx.Context, uploadArgs.InitState, o.PartSize, o.PartNumber, partStr.Stream)
if err != nil {
return err
}
@@ -163,12 +166,18 @@ func (b *GraphNodeBuilder) NewMultipartInitiator(storage stgmod.StorageDetail) *
return node
}

func (n *MultipartInitiatorNode) UploadArgsVar() *dag.ValueVar {
return n.OutputValues().Get(0)
func (n *MultipartInitiatorNode) UploadArgsVar() dag.ValueOutputSlot {
return dag.ValueOutputSlot{
Node: n,
Index: 0,
}
}

func (n *MultipartInitiatorNode) BypassFileInfoVar() *dag.ValueVar {
return n.OutputValues().Get(1)
func (n *MultipartInitiatorNode) BypassFileInfoVar() dag.ValueOutputSlot {
return dag.ValueOutputSlot{
Node: n,
Index: 1,
}
}

func (n *MultipartInitiatorNode) BypassCallbackSlot() dag.ValueInputSlot {
@@ -188,9 +197,9 @@ func (n *MultipartInitiatorNode) AppendPartInfoSlot() dag.ValueInputSlot {
func (n *MultipartInitiatorNode) GenerateOp() (exec.Op, error) {
return &MultipartInitiator{
Storage: n.Storage,
UploadArgs: n.UploadArgsVar().VarID,
UploadArgs: n.UploadArgsVar().Var().VarID,
UploadedParts: n.InputValues().GetVarIDsStart(1),
BypassFileOutput: n.BypassFileInfoVar().VarID,
BypassFileOutput: n.BypassFileInfoVar().Var().VarID,
BypassCallback: n.BypassCallbackSlot().Var().VarID,
}, nil
}
@@ -223,8 +232,11 @@ func (n *MultipartUploadNode) UploadArgsSlot() dag.ValueInputSlot {
}
}

func (n *MultipartUploadNode) UploadResultVar() *dag.ValueVar {
return n.OutputValues().Get(0)
func (n *MultipartUploadNode) UploadResultVar() dag.ValueOutputSlot {
return dag.ValueOutputSlot{
Node: n,
Index: 0,
}
}

func (n *MultipartUploadNode) PartStreamSlot() dag.StreamInputSlot {
@@ -238,7 +250,7 @@ func (n *MultipartUploadNode) GenerateOp() (exec.Op, error) {
return &MultipartUpload{
Storage: n.Storage,
UploadArgs: n.UploadArgsSlot().Var().VarID,
UploadResult: n.UploadResultVar().VarID,
UploadResult: n.UploadResultVar().Var().VarID,
PartStream: n.PartStreamSlot().Var().VarID,
PartNumber: n.PartNumber,
PartSize: n.PartSize,


+ 0
- 68
common/pkgs/ioswitch2/ops2/ops.go View File

@@ -26,71 +26,3 @@ type ToNode interface {
Input() dag.StreamInputSlot
SetInput(input *dag.StreamVar)
}

// func formatStreamIO(node *dag.Node) string {
// is := ""
// for i, in := range node.InputStreams {
// if i > 0 {
// is += ","
// }

// if in == nil {
// is += "."
// } else {
// is += fmt.Sprintf("%v", in.ID)
// }
// }

// os := ""
// for i, out := range node.OutputStreams {
// if i > 0
// os += ","
// }

// if out == nil {
// os += "."
// } else {
// os += fmt.Sprintf("%v", out.ID)
// }
// }

// if is == "" && os == "" {
// return ""
// }

// return fmt.Sprintf("S{%s>%s}", is, os)
// }

// func formatValueIO(node *dag.Node) string {
// is := ""
// for i, in := range node.InputValues {
// if i > 0 {
// is += ","
// }

// if in == nil {
// is += "."
// } else {
// is += fmt.Sprintf("%v", in.ID)
// }
// }

// os := ""
// for i, out := range node.OutputValues {
// if i > 0 {
// os += ","
// }

// if out == nil {
// os += "."
// } else {
// os += fmt.Sprintf("%v", out.ID)
// }
// }

// if is == "" && os == "" {
// return ""
// }

// return fmt.Sprintf("V{%s>%s}", is, os)
// }

+ 2
- 2
common/pkgs/ioswitch2/ops2/range.go View File

@@ -81,7 +81,7 @@ func (o *Range) String() string {

type RangeNode struct {
dag.NodeBase
Range exec.Range
Range math2.Range
}

func (b *GraphNodeBuilder) NewRange() *RangeNode {
@@ -93,7 +93,7 @@ func (b *GraphNodeBuilder) NewRange() *RangeNode {
return node
}

func (t *RangeNode) RangeStream(input *dag.StreamVar, rng exec.Range) *dag.StreamVar {
func (t *RangeNode) RangeStream(input *dag.StreamVar, rng math2.Range) *dag.StreamVar {
input.To(t, 0)
t.Range = rng
return t.OutputStreams().Get(0)


+ 115
- 0
common/pkgs/ioswitch2/ops2/s2s.go View File

@@ -0,0 +1,115 @@
package ops2

import (
"fmt"

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
)

// Register the op type so the executor can (de)serialize it.
func init() {
	exec.UseOp[*S2STransfer]()
}

// S2STransfer copies a file directly between two storages (server-to-server),
// bypassing the normal streaming data path.
type S2STransfer struct {
	Src            stgmod.StorageDetail // source storage holding the file
	SrcPath        exec.VarID           // input var: bypass path of the source file
	Dst            stgmod.StorageDetail // destination storage
	Output         exec.VarID           // output var: info about the transferred temp file
	BypassCallback exec.VarID           // input var: result of downstream bypass handling
}

// Execute performs the storage-to-storage transfer: it resolves the source
// file path, transfers the file to the destination storage, publishes the
// resulting temp-file info for downstream ops, then commits or aborts the
// transfer depending on the bypass callback result.
//
// Fix: the original deferred s2s.Abort() only AFTER a successful Transfer, so
// a failed Transfer returned without ever aborting the session. The defer now
// runs as soon as the transfer session is created (same pattern as the
// multipart initiator op, which defers Abort before Complete).
func (o *S2STransfer) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
	srcPath, err := exec.BindVar[*BypassFilePathValue](e, ctx.Context, o.SrcPath)
	if err != nil {
		return err
	}

	s2s, err := factory.GetBuilder(o.Dst).CreateS2STransfer()
	if err != nil {
		return err
	}
	// Always clean up the transfer session, even when Transfer or a later
	// step fails. Abort after Complete is expected to be a no-op.
	defer s2s.Abort()

	// Transfer the file.
	dstPath, err := s2s.Transfer(ctx.Context, o.Src, srcPath.Path)
	if err != nil {
		return err
	}

	// Tell downstream ops where the temporary file is.
	e.PutVar(o.Output, &BypassFileInfoValue{BypassFileInfo: types.BypassFileInfo{
		TempFilePath: dstPath,
		FileHash:     srcPath.Info.Hash,
		Size:         srcPath.Info.Size,
	}})

	// Wait for downstream ops to finish handling the temporary file.
	cb, err := exec.BindVar[*BypassHandleResultValue](e, ctx.Context, o.BypassCallback)
	if err != nil {
		return fmt.Errorf("getting temp file callback: %v", err)
	}

	if cb.Commited {
		s2s.Complete()
	}

	return nil
}

// String describes the transfer endpoints and variable IDs for logging.
func (o *S2STransfer) String() string {
	return fmt.Sprintf("S2STransfer %v:%v -> %v:%v", o.Src.Storage.String(), o.SrcPath, o.Dst.Storage.String(), o.Output)
}

// S2STransferNode is the DAG node that generates an S2STransfer op.
type S2STransferNode struct {
	dag.NodeBase
	Src stgmod.StorageDetail
	Dst stgmod.StorageDetail
}

// NewS2STransfer adds a server-to-server transfer node (src -> dst) to the
// graph. The node has two value inputs (source path, bypass callback) and one
// value output (transferred file info).
func (b *GraphNodeBuilder) NewS2STransfer(src stgmod.StorageDetail, dst stgmod.StorageDetail) *S2STransferNode {
	n := &S2STransferNode{
		Src: src,
		Dst: dst,
	}
	b.AddNode(n)

	n.OutputValues().Init(n, 1)
	n.InputValues().Init(2)

	return n
}

// SrcPathSlot is the input slot carrying the source file's bypass path.
func (n *S2STransferNode) SrcPathSlot() dag.ValueInputSlot {
	return dag.ValueInputSlot{
		Node:  n,
		Index: 0,
	}
}

// BypassCallbackSlot is the input slot carrying the downstream bypass result.
func (n *S2STransferNode) BypassCallbackSlot() dag.ValueInputSlot {
	return dag.ValueInputSlot{
		Node:  n,
		Index: 1,
	}
}

// BypassFileInfoVar is the output slot publishing the transferred file info.
func (n *S2STransferNode) BypassFileInfoVar() dag.ValueOutputSlot {
	return dag.ValueOutputSlot{
		Node:  n,
		Index: 0,
	}
}

// GenerateOp materializes this node into an executable S2STransfer op,
// wiring the node's slots to executor variable IDs.
func (n *S2STransferNode) GenerateOp() (exec.Op, error) {
	return &S2STransfer{
		Src:            n.Src,
		SrcPath:        n.SrcPathSlot().Var().VarID,
		Dst:            n.Dst,
		Output:         n.BypassFileInfoVar().Var().VarID,
		BypassCallback: n.BypassCallbackSlot().Var().VarID,
	}, nil
}

+ 5
- 5
common/pkgs/ioswitch2/ops2/shard_store.go View File

@@ -12,7 +12,7 @@ import (
"gitlink.org.cn/cloudream/common/utils/io2"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
)

@@ -42,12 +42,12 @@ func (o *ShardRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
Debugf("reading from shard store")
defer logger.Debugf("reading from shard store finished")

stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx)
stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx)
if err != nil {
return fmt.Errorf("getting storage manager: %w", err)
}

store, err := stgMgr.GetShardStore(o.StorageID)
store, err := stgAgts.GetShardStore(o.StorageID)
if err != nil {
return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err)
}
@@ -84,12 +84,12 @@ func (o *ShardWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
Debugf("writting file to shard store")
defer logger.Debugf("write to shard store finished")

stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx)
stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx)
if err != nil {
return fmt.Errorf("getting storage manager: %w", err)
}

store, err := stgMgr.GetShardStore(o.StorageID)
store, err := stgAgts.GetShardStore(o.StorageID)
if err != nil {
return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err)
}


+ 19
- 43
common/pkgs/ioswitch2/ops2/shared_store.go View File

@@ -7,8 +7,9 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
)

func init() {
@@ -16,12 +17,9 @@ func init() {
}

type SharedLoad struct {
Input exec.VarID `json:"input"`
StorageID cdssdk.StorageID `json:"storageID"`
UserID cdssdk.UserID `json:"userID"`
PackageID cdssdk.PackageID `json:"packageID"`
Path string `json:"path"`
FullPathOutput exec.VarID `json:"fullPathOutput"`
Input exec.VarID
StorageID cdssdk.StorageID
ObjectPath string
}

func (o *SharedLoad) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
@@ -30,12 +28,12 @@ func (o *SharedLoad) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
Debugf("load file to shared store")
defer logger.Debugf("load file to shared store finished")

stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx)
stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx)
if err != nil {
return fmt.Errorf("getting storage manager: %w", err)
}

store, err := stgMgr.GetSharedStore(o.StorageID)
store, err := stgAgts.GetSharedStore(o.StorageID)
if err != nil {
return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err)
}
@@ -46,44 +44,29 @@ func (o *SharedLoad) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
}
defer input.Stream.Close()

fullPath, err := store.WritePackageObject(o.UserID, o.PackageID, o.Path, input.Stream)
if err != nil {
return fmt.Errorf("writing file to shard store: %w", err)
}

if o.FullPathOutput > 0 {
e.PutVar(o.FullPathOutput, &exec.StringValue{
Value: fullPath,
})
}
return nil
return store.Write(o.ObjectPath, input.Stream)
}

func (o *SharedLoad) String() string {
return fmt.Sprintf("SharedLoad %v -> %v:%v/%v/%v", o.Input, o.StorageID, o.UserID, o.PackageID, o.Path)
return fmt.Sprintf("SharedLoad %v -> %v:%v", o.Input, o.StorageID, o.ObjectPath)
}

type SharedLoadNode struct {
dag.NodeBase
To ioswitch2.To
StorageID cdssdk.StorageID
UserID cdssdk.UserID
PackageID cdssdk.PackageID
Path string
To ioswitch2.To
Storage stgmod.StorageDetail
ObjectPath string
}

func (b *GraphNodeBuilder) NewSharedLoad(to ioswitch2.To, stgID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID, path string) *SharedLoadNode {
func (b *GraphNodeBuilder) NewSharedLoad(to ioswitch2.To, stg stgmod.StorageDetail, objPath string) *SharedLoadNode {
node := &SharedLoadNode{
To: to,
StorageID: stgID,
UserID: userID,
PackageID: packageID,
Path: path,
To: to,
Storage: stg,
ObjectPath: objPath,
}
b.AddNode(node)

node.InputStreams().Init(1)
node.OutputValues().Init(node, 1)
return node
}

@@ -102,17 +85,10 @@ func (t *SharedLoadNode) Input() dag.StreamInputSlot {
}
}

func (t *SharedLoadNode) FullPathVar() *dag.ValueVar {
return t.OutputValues().Get(0)
}

func (t *SharedLoadNode) GenerateOp() (exec.Op, error) {
return &SharedLoad{
Input: t.InputStreams().Get(0).VarID,
StorageID: t.StorageID,
UserID: t.UserID,
PackageID: t.PackageID,
Path: t.Path,
FullPathOutput: t.OutputValues().Get(0).VarID,
Input: t.InputStreams().Get(0).VarID,
StorageID: t.Storage.Storage.StorageID,
ObjectPath: t.ObjectPath,
}, nil
}

+ 488
- 0
common/pkgs/ioswitch2/parser/gen/generator.go View File

@@ -0,0 +1,488 @@
package gen

import (
"fmt"
"math"

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/lo2"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
)

// CheckEncodingParams verifies that the parameters required by the
// encodings referenced in the flow table are present, and records in ctx
// which encodings (EC / segment) are actually in use.
func CheckEncodingParams(ctx *state.GenerateState) error {
	// Froms and Toes previously duplicated the same per-index check verbatim;
	// it is extracted into checkStreamIndexParams.
	for _, f := range ctx.Ft.Froms {
		if err := checkStreamIndexParams(ctx, f.GetStreamIndex()); err != nil {
			return err
		}
	}

	for _, t := range ctx.Ft.Toes {
		if err := checkStreamIndexParams(ctx, t.GetStreamIndex()); err != nil {
			return err
		}
	}

	return nil
}

// checkStreamIndexParams marks the encoding used by idx in ctx and reports
// an error if the corresponding parameters are missing from the flow table.
func checkStreamIndexParams(ctx *state.GenerateState, idx ioswitch2.StreamIndex) error {
	if idx.IsEC() {
		ctx.UseEC = true
		if ctx.Ft.ECParam == nil {
			return fmt.Errorf("EC encoding parameters not set")
		}
	}

	if idx.IsSegment() {
		ctx.UseSegment = true
		if ctx.Ft.SegmentParam == nil {
			return fmt.Errorf("segment parameters not set")
		}
	}

	return nil
}

// CalcStreamRange computes the open range of the input stream. If any From
// or To carries an EC stream, the range is widened to a whole multiple of
// the stripe size.
func CalcStreamRange(ctx *state.GenerateState) {
	// Start from an empty range: offset at max, zero length; each To extends it.
	rng := math2.NewRange(math.MaxInt64, 0)

	for _, to := range ctx.Ft.Toes {
		strIdx := to.GetStreamIndex()
		if strIdx.IsRaw() {
			toRng := to.GetRange()
			rng.ExtendStart(toRng.Offset)
			if toRng.Length != nil {
				rng.ExtendEnd(toRng.Offset + *toRng.Length)
			} else {
				// An open-ended To makes the whole range open-ended.
				rng.Length = nil
			}
		} else if strIdx.IsEC() {
			toRng := to.GetRange()
			stripSize := ctx.Ft.ECParam.StripSize()
			// Convert the EC-block-local range into raw-stream stripe coordinates.
			blkStartIndex := math2.FloorDiv(toRng.Offset, int64(ctx.Ft.ECParam.ChunkSize))
			rng.ExtendStart(blkStartIndex * stripSize)
			if toRng.Length != nil {
				blkEndIndex := math2.CeilDiv(toRng.Offset+*toRng.Length, int64(ctx.Ft.ECParam.ChunkSize))
				rng.ExtendEnd(blkEndIndex * stripSize)
			} else {
				rng.Length = nil
			}

		} else if strIdx.IsSegment() {
			// A segment To's range is relative to its own segment, so the
			// segment's start position must be added to get stream coordinates.
			toRng := to.GetRange()

			segStart := ctx.Ft.SegmentParam.CalcSegmentStart(strIdx.Index)

			offset := toRng.Offset + segStart

			rng.ExtendStart(offset)
			if toRng.Length != nil {
				rng.ExtendEnd(offset + *toRng.Length)
			} else {
				rng.Length = nil
			}
		}
	}

	if ctx.UseEC {
		// Align the final range to whole stripes so EC blocks can be decoded.
		stripSize := ctx.Ft.ECParam.StripSize()
		rng.ExtendStart(math2.Floor(rng.Offset, stripSize))
		if rng.Length != nil {
			rng.ExtendEnd(math2.Ceil(rng.Offset+*rng.Length, stripSize))
		}
	}

	ctx.StreamRange = rng
}

// Extend builds a From node for every From in the flow table, derives all
// streams obtainable from them (EC blocks, segments, and their joins), and
// finally builds a To node for every To, connecting a matching stream to it.
func Extend(ctx *state.GenerateState) error {
	for _, fr := range ctx.Ft.Froms {
		frNode, err := buildFromNode(ctx, fr)
		if err != nil {
			return err
		}
		ctx.FromNodes[fr] = frNode

		ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{
			Stream:      frNode.Output().Var(),
			StreamIndex: fr.GetStreamIndex(),
		})

		// For a From that provides the complete file, generate Split instructions.
		if fr.GetStreamIndex().IsRaw() {
			// Only generate EC-related instructions when EC blocks are needed
			// as input or output.
			if ctx.UseEC {
				splitNode := ctx.DAG.NewChunkedSplit(ctx.Ft.ECParam.ChunkSize, ctx.Ft.ECParam.K)
				splitNode.Split(frNode.Output().Var())
				for i := 0; i < ctx.Ft.ECParam.K; i++ {
					ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{
						Stream:      splitNode.SubStream(i),
						StreamIndex: ioswitch2.ECStream(i),
					})
				}
			}

			// Same as above, but for segment streams.
			if ctx.UseSegment {
				splitNode := ctx.DAG.NewSegmentSplit(ctx.Ft.SegmentParam.Segments)
				frNode.Output().Var().ToSlot(splitNode.InputSlot())
				for i := 0; i < len(ctx.Ft.SegmentParam.Segments); i++ {
					ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{
						Stream:      splitNode.Segment(i),
						StreamIndex: ioswitch2.SegmentStream(i),
					})
				}
			}
		}
	}

	if ctx.UseEC {
		// If K distinct block streams exist, generate a Multiply instruction,
		// plus a Join instruction for the streams it produces.
		ecInputStrs := make(map[int]*dag.StreamVar)
		for _, s := range ctx.IndexedStreams {
			if s.StreamIndex.IsEC() && ecInputStrs[s.StreamIndex.Index] == nil {
				ecInputStrs[s.StreamIndex.Index] = s.Stream
				if len(ecInputStrs) == ctx.Ft.ECParam.K {
					break
				}
			}
		}

		if len(ecInputStrs) == ctx.Ft.ECParam.K {
			mulNode := ctx.DAG.NewECMultiply(*ctx.Ft.ECParam)

			for i, s := range ecInputStrs {
				mulNode.AddInput(s, i)
			}
			for i := 0; i < ctx.Ft.ECParam.N; i++ {
				ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{
					Stream:      mulNode.NewOutput(i),
					StreamIndex: ioswitch2.ECStream(i),
				})
			}

			joinNode := ctx.DAG.NewChunkedJoin(ctx.Ft.ECParam.ChunkSize)
			for i := 0; i < ctx.Ft.ECParam.K; i++ {
				// The stream is guaranteed to exist at this point.
				joinNode.AddInput(findOutputStream(ctx, ioswitch2.ECStream(i)))
			}
			ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{
				Stream:      joinNode.Joined(),
				StreamIndex: ioswitch2.RawStream(),
			})
		}
	}

	if ctx.UseSegment {
		// Assume for now that all sequential segments exist and generate a Join;
		// whether any stream is actually missing is computed later from the Range.
		joinNode := ctx.DAG.NewSegmentJoin(ctx.Ft.SegmentParam.Segments)
		for i := 0; i < ctx.Ft.SegmentParam.SegmentCount(); i++ {
			str := findOutputStream(ctx, ioswitch2.SegmentStream(i))
			if str != nil {
				str.ToSlot(joinNode.InputSlot(i))
			}
		}
		ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{
			Stream:      joinNode.Joined(),
			StreamIndex: ioswitch2.RawStream(),
		})

		// The stream joined from segments can in turn be used to produce EC blocks.
		if ctx.UseEC {
			splitNode := ctx.DAG.NewChunkedSplit(ctx.Ft.ECParam.ChunkSize, ctx.Ft.ECParam.K)
			splitNode.Split(joinNode.Joined())

			mulNode := ctx.DAG.NewECMultiply(*ctx.Ft.ECParam)

			for i := 0; i < ctx.Ft.ECParam.K; i++ {
				mulNode.AddInput(splitNode.SubStream(i), i)
				ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{
					Stream:      splitNode.SubStream(i),
					StreamIndex: ioswitch2.ECStream(i),
				})
			}

			for i := 0; i < ctx.Ft.ECParam.N; i++ {
				ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{
					Stream:      mulNode.NewOutput(i),
					StreamIndex: ioswitch2.ECStream(i),
				})
			}
		}
	}

	// Find an input stream for every To.
	for _, to := range ctx.Ft.Toes {
		toNode, err := buildToNode(ctx, to)
		if err != nil {
			return err
		}
		ctx.ToNodes[to] = toNode

		str := findOutputStream(ctx, to.GetStreamIndex())
		if str == nil {
			return fmt.Errorf("no output stream found for data index %d", to.GetStreamIndex())
		}

		toNode.SetInput(str)
	}

	return nil
}

// buildFromNode creates the DAG node that produces the stream described by
// f and fixes it to the hub (or driver) that will execute it.
//
// repRange is the stream range in raw-stream coordinates; blkRange is the
// same range converted to EC-block coordinates.
func buildFromNode(ctx *state.GenerateState, f ioswitch2.From) (ops2.FromNode, error) {
	var repRange math2.Range
	repRange.Offset = ctx.StreamRange.Offset
	if ctx.StreamRange.Length != nil {
		repRngLen := *ctx.StreamRange.Length
		repRange.Length = &repRngLen
	}

	var blkRange math2.Range
	if ctx.UseEC {
		blkRange.Offset = ctx.StreamRange.Offset / int64(ctx.Ft.ECParam.ChunkSize*ctx.Ft.ECParam.K) * int64(ctx.Ft.ECParam.ChunkSize)
		if ctx.StreamRange.Length != nil {
			blkRngLen := *ctx.StreamRange.Length / int64(ctx.Ft.ECParam.ChunkSize*ctx.Ft.ECParam.K) * int64(ctx.Ft.ECParam.ChunkSize)
			blkRange.Length = &blkRngLen
		}
	}

	switch f := f.(type) {
	case *ioswitch2.FromShardstore:
		t := ctx.DAG.NewShardRead(f, f.Storage.Storage.StorageID, types.NewOpen(f.FileHash))

		if f.StreamIndex.IsRaw() {
			t.Open.WithNullableLength(repRange.Offset, repRange.Length)
		} else if f.StreamIndex.IsEC() {
			t.Open.WithNullableLength(blkRange.Offset, blkRange.Length)
		} else if f.StreamIndex.IsSegment() {
			segStart := ctx.Ft.SegmentParam.CalcSegmentStart(f.StreamIndex.Index)
			segLen := ctx.Ft.SegmentParam.Segments[f.StreamIndex.Index]
			segEnd := segStart + segLen

			// The opened range must not exceed this segment's own range.
			openOff := ctx.StreamRange.Offset - segStart
			openOff = math2.Clamp(openOff, 0, segLen)

			openLen := segLen

			if ctx.StreamRange.Length != nil {
				strEnd := ctx.StreamRange.Offset + *ctx.StreamRange.Length
				openEnd := math2.Min(strEnd, segEnd)
				openLen = openEnd - segStart - openOff
			}

			t.Open.WithNullableLength(openOff, &openLen)
		}

		switch addr := f.Hub.Address.(type) {
		case *cdssdk.HttpAddressInfo:
			t.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: f.Hub})
			t.Env().Pinned = true

		case *cdssdk.GRPCAddressInfo:
			t.Env().ToEnvWorker(&ioswitch2.AgentWorker{Hub: f.Hub, Address: *addr})
			t.Env().Pinned = true

		default:
			return nil, fmt.Errorf("unsupported node address type %T", addr)
		}

		return t, nil

	case *ioswitch2.FromDriver:
		n := ctx.DAG.NewFromDriver(f, f.Handle)
		n.Env().ToEnvDriver()
		n.Env().Pinned = true

		if f.StreamIndex.IsRaw() {
			f.Handle.RangeHint.Offset = repRange.Offset
			f.Handle.RangeHint.Length = repRange.Length
		} else if f.StreamIndex.IsEC() {
			f.Handle.RangeHint.Offset = blkRange.Offset
			f.Handle.RangeHint.Length = blkRange.Length
		} else if f.StreamIndex.IsSegment() {
			segStart := ctx.Ft.SegmentParam.CalcSegmentStart(f.StreamIndex.Index)
			segLen := ctx.Ft.SegmentParam.Segments[f.StreamIndex.Index]
			segEnd := segStart + segLen

			// The opened range must not exceed this segment's own range.
			openOff := repRange.Offset - segStart
			openOff = math2.Clamp(openOff, 0, segLen)

			openLen := segLen

			if repRange.Length != nil {
				repEnd := repRange.Offset + *repRange.Length
				openEnd := math2.Min(repEnd, segEnd)
				// Bug fix: openEnd is an absolute stream position; it must be
				// converted to a segment-relative position (- segStart) before
				// subtracting openOff, matching the FromShardstore branch above.
				// The previous `openEnd - openOff` overstated the length.
				openLen = openEnd - segStart - openOff
			}

			f.Handle.RangeHint.Offset = openOff
			f.Handle.RangeHint.Length = &openLen
		}

		return n, nil

	default:
		return nil, fmt.Errorf("unsupported from type %T", f)
	}
}

// buildToNode creates the DAG node that consumes a stream for the given To
// and pins it to the hub (or driver) where the write must happen.
func buildToNode(ctx *state.GenerateState, t ioswitch2.To) (ops2.ToNode, error) {
	switch t := t.(type) {
	case *ioswitch2.ToShardStore:
		n := ctx.DAG.NewShardWrite(t, t.Storage, t.FileHashStoreKey)

		if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil {
			return nil, err
		}

		n.Env().Pinned = true

		return n, nil

	case *ioswitch2.ToDriver:
		n := ctx.DAG.NewToDriver(t, t.Handle)
		n.Env().ToEnvDriver()
		n.Env().Pinned = true

		return n, nil

	case *ioswitch2.LoadToShared:
		n := ctx.DAG.NewSharedLoad(t, t.Storage, t.ObjectPath)

		if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil {
			return nil, err
		}

		n.Env().Pinned = true

		return n, nil

	default:
		return nil, fmt.Errorf("unsupported to type %T", t)
	}
}

// setEnvByAddress binds node n to the worker matching the hub's address
// type (HTTP or gRPC). An unknown address type is an error.
func setEnvByAddress(n dag.Node, hub cdssdk.Hub, addr cdssdk.HubAddressInfo) error {
	if _, ok := addr.(*cdssdk.HttpAddressInfo); ok {
		n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: hub})
		return nil
	}

	if grpcAddr, ok := addr.(*cdssdk.GRPCAddressInfo); ok {
		n.Env().ToEnvWorker(&ioswitch2.AgentWorker{Hub: hub, Address: *grpcAddr})
		return nil
	}

	return fmt.Errorf("unsupported node address type %T", addr)
}

// findOutputStream returns the first indexed stream in ctx whose index
// equals streamIndex, or nil if no such stream has been produced yet.
func findOutputStream(ctx *state.GenerateState, streamIndex ioswitch2.StreamIndex) *dag.StreamVar {
	for _, s := range ctx.IndexedStreams {
		if s.StreamIndex == streamIndex {
			return s.Stream
		}
	}
	return nil
}

// FixSegmentSplit adjusts the number and size of the segments of every
// SegmentSplit node according to StreamRange.
// NOTE(review): err is declared but never assigned inside the walk, so this
// function currently always returns nil — confirm whether an error path was
// intended here (compare FixSegmentJoin, which does set err).
func FixSegmentSplit(ctx *state.GenerateState) error {
	var err error
	dag.WalkOnlyType[*ops2.SegmentSplitNode](ctx.DAG.Graph, func(node *ops2.SegmentSplitNode) bool {
		var strEnd *int64
		if ctx.StreamRange.Length != nil {
			e := ctx.StreamRange.Offset + *ctx.StreamRange.Length
			strEnd = &e
		}

		startSeg, endSeg := ctx.Ft.SegmentParam.CalcSegmentRange(ctx.StreamRange.Offset, strEnd)

		// Close the segments that fall outside the range.
		for i := endSeg; i < len(node.Segments); i++ {
			node.OutputStreams().Get(i).ClearAllDst()
		}
		node.OutputStreams().Slots.RemoveRange(endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg)
		node.Segments = lo2.RemoveRange(node.Segments, endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg)

		for i := 0; i < startSeg; i++ {
			node.OutputStreams().Get(i).ClearAllDst()
		}
		node.OutputStreams().Slots.RemoveRange(0, startSeg)
		node.Segments = lo2.RemoveRange(node.Segments, 0, startSeg)

		// StreamRange may start in the middle of a segment; that segment's
		// size then equals the distance from the stream start to the segment end.
		startSegStart := ctx.Ft.SegmentParam.CalcSegmentStart(startSeg)
		node.Segments[0] -= ctx.StreamRange.Offset - startSegStart

		// StreamRange may also end in the middle of a segment; that segment's
		// size then equals the distance from the segment start to the stream end.
		if strEnd != nil {
			endSegStart := ctx.Ft.SegmentParam.CalcSegmentStart(endSeg - 1)
			node.Segments[len(node.Segments)-1] = *strEnd - endSegStart
		}
		return true
	})

	return err
}

// FixSegmentJoin removes from every SegmentJoin node the segments that fall
// outside StreamRange, then verifies that all remaining required segments
// actually have an input stream attached.
func FixSegmentJoin(ctx *state.GenerateState) error {
	var err error
	dag.WalkOnlyType[*ops2.SegmentJoinNode](ctx.DAG.Graph, func(node *ops2.SegmentJoinNode) bool {
		start := ctx.StreamRange.Offset
		var end *int64
		if ctx.StreamRange.Length != nil {
			e := ctx.StreamRange.Offset + *ctx.StreamRange.Length
			end = &e
		}

		startSeg, endSeg := ctx.Ft.SegmentParam.CalcSegmentRange(start, end)

		// Close the segments that fall outside the range.
		for i := endSeg; i < len(node.Segments); i++ {
			node.InputStreams().Get(i).NotTo(node)
		}
		node.InputStreams().Slots.RemoveRange(endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg)
		node.Segments = lo2.RemoveRange(node.Segments, endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg)

		for i := 0; i < startSeg; i++ {
			node.InputStreams().Get(i).NotTo(node)
		}
		node.InputStreams().Slots.RemoveRange(0, startSeg)
		node.Segments = lo2.RemoveRange(node.Segments, 0, startSeg)

		// StreamRange may start in the middle of a segment; that segment's
		// size then equals the distance from the stream start to the segment end.
		startSegStart := ctx.Ft.SegmentParam.CalcSegmentStart(startSeg)
		node.Segments[0] -= ctx.StreamRange.Offset - startSegStart

		// Check that every required segment was joined to a stream.
		for i := 0; i < node.InputStreams().Len(); i++ {
			if node.InputStreams().Get(i) == nil {
				err = fmt.Errorf("segment %v missed to join an raw stream", i+startSeg)
				return false
			}
		}

		return true
	})

	return err
}

+ 98
- 0
common/pkgs/ioswitch2/parser/opt/chunked.go View File

@@ -0,0 +1,98 @@
package opt

import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state"
)

// RemoveUnusedJoin deletes ChunkedJoin nodes whose joined output stream has
// no consumers, and reports whether the DAG was modified.
func RemoveUnusedJoin(ctx *state.GenerateState) bool {
	changed := false

	dag.WalkOnlyType[*ops2.ChunkedJoinNode](ctx.DAG.Graph, func(node *ops2.ChunkedJoinNode) bool {
		if node.Joined().Dst.Len() > 0 {
			return true
		}

		node.RemoveAllInputs()
		ctx.DAG.RemoveNode(node)
		// Bug fix: `changed` was previously never set, so callers running the
		// optimizations to a fixed point could not see that the graph changed.
		changed = true
		return true
	})

	return changed
}

// RemoveUnusedSplit deletes ChunkedSplit nodes none of whose output streams
// is consumed, and reports whether the DAG was modified.
func RemoveUnusedSplit(ctx *state.GenerateState) bool {
	changed := false
	dag.WalkOnlyType[*ops2.ChunkedSplitNode](ctx.DAG.Graph, func(node *ops2.ChunkedSplitNode) bool {
		// The split may only be deleted when every one of its outputs is unused.
		anyUsed := false
		for _, out := range node.OutputStreams().Slots.RawArray() {
			if out.Dst.Len() > 0 {
				anyUsed = true
				break
			}
		}
		if anyUsed {
			return true
		}

		node.RemoveAllStream()
		ctx.DAG.RemoveNode(node)
		changed = true
		return true
	})

	return changed
}

// OmitSplitJoin elides a ChunkedSplit/ChunkedJoin pair when the split's
// outputs are consumed exclusively and completely by that join, rewiring
// F->Split->Join->T into F->T. Reports whether the DAG was modified.
func OmitSplitJoin(ctx *state.GenerateState) bool {
	changed := false

	dag.WalkOnlyType[*ops2.ChunkedSplitNode](ctx.DAG.Graph, func(splitNode *ops2.ChunkedSplitNode) bool {
		// Every output of the Split must have exactly one destination,
		// and all outputs must share the same destination node.
		var dstNode dag.Node
		for _, out := range splitNode.OutputStreams().Slots.RawArray() {
			if out.Dst.Len() != 1 {
				return true
			}

			if dstNode == nil {
				dstNode = out.Dst.Get(0)
			} else if dstNode != out.Dst.Get(0) {
				return true
			}
		}

		if dstNode == nil {
			return true
		}

		// And that destination must be a Join instruction.
		joinNode, ok := dstNode.(*ops2.ChunkedJoinNode)
		if !ok {
			return true
		}

		// The Join's inputs must all come from this Split's outputs. Since we
		// already know every Split output goes to the same node, comparing the
		// input and output counts is sufficient.
		if joinNode.InputStreams().Len() != splitNode.OutputStreams().Len() {
			return true
		}

		// All conditions hold; replace the inputs of the Join's destinations
		// with the Split's input stream:
		// F->Split->Join->T becomes F->T
		splitInput := splitNode.InputStreams().Get(0)
		for _, to := range joinNode.Joined().Dst.RawArray() {
			splitInput.To(to, to.InputStreams().IndexOf(joinNode.Joined()))
		}
		splitInput.NotTo(splitNode)

		// Then delete both instructions.
		ctx.DAG.RemoveNode(joinNode)
		ctx.DAG.RemoveNode(splitNode)

		changed = true
		return true
	})

	return changed
}

+ 38
- 0
common/pkgs/ioswitch2/parser/opt/ec.go View File

@@ -0,0 +1,38 @@
package opt

import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/utils/lo2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state"
)

// RemoveUnusedMultiplyOutput drops the output streams of Multiply
// instructions that have no consumers; if an instruction loses all of its
// outputs, the instruction itself is removed. Reports whether the DAG was
// modified.
func RemoveUnusedMultiplyOutput(ctx *state.GenerateState) bool {
	changed := false
	dag.WalkOnlyType[*ops2.ECMultiplyNode](ctx.DAG.Graph, func(node *ops2.ECMultiplyNode) bool {
		outArr := node.OutputStreams().Slots.RawArray()
		for i2, out := range outArr {
			if out.Dst.Len() > 0 {
				continue
			}

			// Mark both the slot and its block index for removal; -2 appears to
			// be a sentinel that cannot collide with a real block index.
			outArr[i2] = nil
			node.OutputIndexes[i2] = -2
			changed = true
		}

		node.OutputStreams().Slots.SetRawArray(lo2.RemoveAllDefault(outArr))
		node.OutputIndexes = lo2.RemoveAll(node.OutputIndexes, -2)

		// If every output stream was removed, delete the instruction itself.
		if node.OutputStreams().Len() == 0 {
			node.RemoveAllInputs()
			ctx.DAG.RemoveNode(node)
			changed = true
		}

		return true
	})
	return changed
}

+ 154
- 0
common/pkgs/ioswitch2/parser/opt/misc.go View File

@@ -0,0 +1,154 @@
package opt

import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state"
)

// RemoveUnusedFromNode removes From nodes whose output stream is never
// consumed. FromDriver nodes are always kept.
func RemoveUnusedFromNode(ctx *state.GenerateState) {
	dag.WalkOnlyType[ops2.FromNode](ctx.DAG.Graph, func(node ops2.FromNode) bool {
		// Driver-originated inputs must survive even when unused.
		switch node.(type) {
		case *ops2.FromDriverNode:
			return true
		}

		if node.Output().Var().Dst.Len() == 0 {
			ctx.DAG.RemoveNode(node)
		}
		return true
	})
}

// DropUnused appends a Drop instruction for every stream that has no
// consumer, so unused data is actively discarded where it is produced.
func DropUnused(ctx *state.GenerateState) {
	ctx.DAG.Walk(func(node dag.Node) bool {
		for _, out := range node.OutputStreams().Slots.RawArray() {
			if out.Dst.Len() > 0 {
				continue
			}

			dropNode := ctx.DAG.NewDropStream()
			// Run the drop in the same environment that produces the stream.
			*dropNode.Env() = *node.Env()
			dropNode.SetInput(out)
		}
		return true
	})
}

// StoreShardWriteResult stores the file hash produced by shard-write (and
// bypass-to-shard-store) instructions under their FileHashStoreKey, so the
// driver can read the result after the plan finishes.
func StoreShardWriteResult(ctx *state.GenerateState) {
	dag.WalkOnlyType[*ops2.ShardWriteNode](ctx.DAG.Graph, func(n *ops2.ShardWriteNode) bool {
		if n.FileHashStoreKey == "" {
			return true
		}

		// The Store instruction runs on the driver, which owns the result map.
		storeNode := ctx.DAG.NewStore()
		storeNode.Env().ToEnvDriver()

		storeNode.Store(n.FileHashStoreKey, n.FileHashVar())
		return true
	})

	dag.WalkOnlyType[*ops2.BypassToShardStoreNode](ctx.DAG.Graph, func(n *ops2.BypassToShardStoreNode) bool {
		if n.FileHashStoreKey == "" {
			return true
		}

		storeNode := ctx.DAG.NewStore()
		storeNode.Env().ToEnvDriver()

		storeNode.Store(n.FileHashStoreKey, n.FileHashVar().Var())
		return true
	})
}

// GenerateRange inserts Range instructions so each To receives exactly its
// requested range. StreamRange may exceed the file size, but the Range
// instruction tolerates short data instead of failing.
func GenerateRange(ctx *state.GenerateState) {
	for to, toNode := range ctx.ToNodes {
		toStrIdx := to.GetStreamIndex()
		toRng := to.GetRange()

		if toStrIdx.IsRaw() {
			n := ctx.DAG.NewRange()
			toInput := toNode.Input()
			// The Range runs in the environment that produces the stream.
			*n.Env() = *toInput.Var().Src.Env()
			rnged := n.RangeStream(toInput.Var(), math2.Range{
				Offset: toRng.Offset - ctx.StreamRange.Offset,
				Length: toRng.Length,
			})
			toInput.Var().NotTo(toNode)
			toNode.SetInput(rnged)

		} else if toStrIdx.IsEC() {
			stripSize := int64(ctx.Ft.ECParam.ChunkSize * ctx.Ft.ECParam.K)
			blkStartIdx := ctx.StreamRange.Offset / stripSize

			// Convert the stream-level offset into this EC block's coordinates.
			blkStart := blkStartIdx * int64(ctx.Ft.ECParam.ChunkSize)

			n := ctx.DAG.NewRange()
			toInput := toNode.Input()
			*n.Env() = *toInput.Var().Src.Env()
			rnged := n.RangeStream(toInput.Var(), math2.Range{
				Offset: toRng.Offset - blkStart,
				Length: toRng.Length,
			})
			toInput.Var().NotTo(toNode)
			toNode.SetInput(rnged)
		} else if toStrIdx.IsSegment() {
			// Segment destinations currently get no extra Range instruction;
			// the draft below is kept for reference.
			// if frNode, ok := toNode.Input().Var().From().Node.(ops2.FromNode); ok {
			// 	// Currently only a segment To can be fed by a segment-providing
			// 	// From, in which case no Range instruction is needed.
			// 	if frNode.GetFrom().GetStreamIndex().IsSegment() {
			// 		continue
			// 	}
			// }

			// segStart := ctx.Ft.SegmentParam.CalcSegmentStart(toStrIdx.Index)
			// strStart := segStart + toRng.Offset

			// n := ctx.DAG.NewRange()
			// toInput := toNode.Input()
			// *n.Env() = *toInput.Var().From().Node.Env()
			// rnged := n.RangeStream(toInput.Var(), exec.Range{
			// 	Offset: strStart - ctx.StreamRange.Offset,
			// 	Length: toRng.Length,
			// })
			// toInput.Var().NotTo(toNode, toInput.Index)
			// toNode.SetInput(rnged)
		}
	}
}

// GenerateClone inserts Clone instructions wherever a stream or value
// output has more than one consumer, so each consumer receives its own copy.
func GenerateClone(ctx *state.GenerateState) {
	ctx.DAG.Walk(func(node dag.Node) bool {
		for _, outVar := range node.OutputStreams().Slots.RawArray() {
			if outVar.Dst.Len() <= 1 {
				continue
			}

			// The clone runs where the stream is produced.
			c := ctx.DAG.NewCloneStream()
			*c.Env() = *node.Env()
			for _, dst := range outVar.Dst.RawArray() {
				c.NewOutput().To(dst, dst.InputStreams().IndexOf(outVar))
			}
			outVar.Dst.Resize(0)
			c.SetInput(outVar)
		}

		for _, outVar := range node.OutputValues().Slots.RawArray() {
			if outVar.Dst.Len() <= 1 {
				continue
			}

			t := ctx.DAG.NewCloneValue()
			*t.Env() = *node.Env()
			for _, dst := range outVar.Dst.RawArray() {
				t.NewOutput().To(dst, dst.InputValues().IndexOf(outVar))
			}
			outVar.Dst.Resize(0)
			t.SetInput(outVar)
		}

		return true
	})
}

+ 105
- 0
common/pkgs/ioswitch2/parser/opt/multipart.go View File

@@ -0,0 +1,105 @@
package opt

import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory"
)

// UseMultipartUploadToShardStore replaces a SegmentJoin whose only consumer
// is a ShardWrite with multipart-upload instructions, so the segments are
// uploaded as parts instead of first being joined into a single stream.
func UseMultipartUploadToShardStore(ctx *state.GenerateState) {
	dag.WalkOnlyType[*ops2.SegmentJoinNode](ctx.DAG.Graph, func(joinNode *ops2.SegmentJoinNode) bool {
		if joinNode.Joined().Dst.Len() != 1 {
			return true
		}

		joinDst := joinNode.Joined().Dst.Get(0)
		shardNode, ok := joinDst.(*ops2.ShardWriteNode)
		if !ok {
			return true
		}

		// The range of the SegmentJoin's output must equal the range of the
		// ToShardStore's input. It could be made equal by adjusting the join's
		// inputs, but that is too complex and not supported yet.
		toStrIdx := shardNode.GetTo().GetStreamIndex()
		toStrRng := shardNode.GetTo().GetRange()
		if toStrIdx.IsRaw() {
			if !toStrRng.Equals(ctx.StreamRange) {
				return true
			}
		} else {
			return true
		}

		// The join's destination must support multipart upload for the
		// replacement to be possible at all.
		multiUpload, err := factory.GetBuilder(shardNode.Storage).CreateMultiparter()
		if err != nil {
			return true
		}

		// Every segment must exceed the minimum part size. Splitting a stream
		// that exceeds the maximum part size is supported, but merging several
		// small segments to reach the minimum part size is not.
		for _, size := range joinNode.Segments {
			if size < multiUpload.MinPartSize() {
				return true
			}
		}

		initNode := ctx.DAG.NewMultipartInitiator(shardNode.Storage)
		initNode.Env().CopyFrom(shardNode.Env())

		partNumber := 1
		for i, size := range joinNode.Segments {
			joinInput := joinNode.InputSlot(i)

			if size > multiUpload.MaxPartSize() {
				// A segment larger than the maximum part size is split into
				// several smaller parts; the split and the upload instructions
				// run directly on the node that produces the stream.
				splits := math2.SplitLessThan(size, multiUpload.MaxPartSize())
				splitNode := ctx.DAG.NewSegmentSplit(splits)
				splitNode.Env().CopyFrom(joinInput.Var().Src.Env())

				joinInput.Var().ToSlot(splitNode.InputSlot())

				for i2 := 0; i2 < len(splits); i2++ {
					uploadNode := ctx.DAG.NewMultipartUpload(shardNode.Storage, partNumber, splits[i2])
					uploadNode.Env().CopyFrom(joinInput.Var().Src.Env())

					initNode.UploadArgsVar().ToSlot(uploadNode.UploadArgsSlot())
					splitNode.SegmentVar(i2).ToSlot(uploadNode.PartStreamSlot())
					uploadNode.UploadResultVar().ToSlot(initNode.AppendPartInfoSlot())

					partNumber++
				}
			} else {
				// Otherwise upload the whole segment as a single part.
				uploadNode := ctx.DAG.NewMultipartUpload(shardNode.Storage, partNumber, size)
				// The upload instruction runs on the node that produces the stream.
				uploadNode.Env().CopyFrom(joinInput.Var().Src.Env())

				initNode.UploadArgsVar().ToSlot(uploadNode.UploadArgsSlot())
				joinInput.Var().ToSlot(uploadNode.PartStreamSlot())
				uploadNode.UploadResultVar().ToSlot(initNode.AppendPartInfoSlot())

				partNumber++
			}

			joinInput.Var().NotTo(joinNode)
		}

		bypassNode := ctx.DAG.NewBypassToShardStore(shardNode.Storage.Storage.StorageID, shardNode.FileHashStoreKey)
		bypassNode.Env().CopyFrom(shardNode.Env())

		// The multipart result flows to bypassNode, and bypassNode's own result
		// flows back to the multipart initiator as a completion callback.
		initNode.BypassFileInfoVar().ToSlot(bypassNode.BypassFileInfoSlot())
		bypassNode.BypassCallbackVar().ToSlot(initNode.BypassCallbackSlot())

		// Finally delete the Join and ToShardStore instructions.
		ctx.DAG.RemoveNode(joinNode)
		ctx.DAG.RemoveNode(shardNode)
		delete(ctx.ToNodes, shardNode.GetTo())
		return true
	})
}

+ 69
- 0
common/pkgs/ioswitch2/parser/opt/pin.go View File

@@ -0,0 +1,69 @@
package opt

import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state"
)

// Pin determines each instruction's execution location from the locations
// of its input and output streams. To-type instructions have fixed
// locations, which spread through the DAG as pinning iterates, so in theory
// no instruction's location stays forever undecided.
// Returns whether any node's environment changed.
func Pin(ctx *state.GenerateState) bool {
	changed := false
	ctx.DAG.Walk(func(node dag.Node) bool {
		if node.Env().Pinned {
			return true
		}

		// First try to fix the node where its consumers run. All known
		// destination environments must agree; on any conflict fall through to
		// the input-based rule.
		var toEnv *dag.NodeEnv
	outputs:
		for _, out := range node.OutputStreams().Slots.RawArray() {
			for _, to := range out.Dst.RawArray() {
				if to.Env().Type == dag.EnvUnknown {
					continue
				}

				if toEnv == nil {
					toEnv = to.Env()
				} else if !toEnv.Equals(to.Env()) {
					// Bug fix: the plain `break` here only left the inner loop,
					// so a destination of a later output could silently
					// re-assign toEnv and hide the conflict. Leave both loops
					// on the first disagreement.
					toEnv = nil
					break outputs
				}
			}
		}

		if toEnv != nil {
			if !node.Env().Equals(toEnv) {
				changed = true
			}

			*node.Env() = *toEnv
			return true
		}

		// Otherwise fix the node where its input streams originate.
		var fromEnv *dag.NodeEnv
		for _, in := range node.InputStreams().Slots.RawArray() {
			if in.Src.Env().Type == dag.EnvUnknown {
				continue
			}

			if fromEnv == nil {
				fromEnv = in.Src.Env()
			} else if !fromEnv.Equals(in.Src.Env()) {
				fromEnv = nil
				break
			}
		}

		if fromEnv != nil {
			if !node.Env().Equals(fromEnv) {
				changed = true
			}

			*node.Env() = *fromEnv
		}
		return true
	})

	return changed
}

+ 131
- 0
common/pkgs/ioswitch2/parser/opt/s2s.go View File

@@ -0,0 +1,131 @@
package opt

import (
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory"
)

// UseS2STransfer replaces a read-from-one-storage / write-to-another-storage
// path with a direct storage-to-storage (S2S) transfer when both sides
// support bypassing the normal stream path.
func UseS2STransfer(ctx *state.GenerateState) {
	// S2S transfer does not yet support transferring only part of a file.
	if ctx.StreamRange.Offset != 0 || ctx.StreamRange.Length != nil {
		return
	}

	for fr, frNode := range ctx.FromNodes {
		fromShard, ok := fr.(*ioswitch2.FromShardstore)
		if !ok {
			continue
		}

		fromStgBld := factory.GetBuilder(fromShard.Storage)
		if !fromStgBld.ShardStoreDesc().HasBypassRead() {
			continue
		}

		s2s, err := fromStgBld.CreateS2STransfer()
		if err != nil {
			continue
		}

		// Every destination of this output stream must support S2S transfer.
		outVar := frNode.Output().Var()
		if outVar.Dst.Len() == 0 {
			continue
		}

		failed := false
		var toShards []*ops2.ShardWriteNode
		// var toShareds []*ops2.SharedLoadNode

	loop:
		for i := 0; i < outVar.Dst.Len(); i++ {
			dstNode := outVar.Dst.Get(i)

			switch dstNode := dstNode.(type) {
			case *ops2.ShardWriteNode:
				dstStgBld := factory.GetBuilder(dstNode.Storage)
				if !dstStgBld.ShardStoreDesc().HasBypassWrite() {
					failed = true
					break
				}

				if !s2s.CanTransfer(dstNode.Storage) {
					failed = true
					break
				}

				toShards = append(toShards, dstNode)

			/* TODO shared storage services are not supported yet
			case *ops2.SharedLoadNode:
				if !s2s.CanTransfer(to.Storage) {
					failed = true
					break
				}
				toShareds = append(toShareds, to)
			*/
			default:
				failed = true
				break loop
			}
		}
		if failed {
			continue
		}

		for _, toShard := range toShards {
			s2sNode := ctx.DAG.NewS2STransfer(fromShard.Storage, toShard.Storage)
			// The direct-transfer instruction runs on the destination hub.
			s2sNode.Env().CopyFrom(toShard.Env())

			// First fetch the source file path and feed it to the S2S node.
			brNode := ctx.DAG.NewBypassFromShardStore(fromShard.Storage.Storage.StorageID, fromShard.FileHash)
			brNode.Env().CopyFrom(frNode.Env())
			brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot())

			// Notify the destination node of the transfer result.
			to := toShard.To.(*ioswitch2.ToShardStore)
			bwNode := ctx.DAG.NewBypassToShardStore(toShard.Storage.Storage.StorageID, to.FileHashStoreKey)
			bwNode.Env().CopyFrom(toShard.Env())

			s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot())
			bwNode.BypassCallbackVar().ToSlot(s2sNode.BypassCallbackSlot())

			// Remove the destination node from the plan.
			ctx.DAG.RemoveNode(toShard)
			delete(ctx.ToNodes, toShard.To)
		}

		/* Draft for shared-store destinations, kept for reference.
		NOTE(review): it still references toShard from the loop above; fix
		before enabling.
			for _, toShared := range toShareds {
				s2sNode := ctx.DAG.NewS2STransfer(fromShard.Storage, toShared.Storage)
				// The direct-transfer instruction runs on the destination hub.
				s2sNode.Env().CopyFrom(toShared.Env())

				// First fetch the source file path and feed it to the S2S node.
				brNode := ctx.DAG.NewBypassFromShardStore(fromShard.Storage.Storage.StorageID, fromShard.FileHash)
				brNode.Env().CopyFrom(toShared.Env())
				brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot())

				// Notify the destination node of the transfer result.
				to := toShared.To.(*ioswitch2.LoadToShared)
				bwNode := ctx.DAG.NewBypassToShardStore(toShard.Storage.Storage.StorageID, to.FileHashStoreKey)
				bwNode.Env().CopyFrom(toShard.Env())

				s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot())
				bwNode.BypassCallbackVar().ToSlot(s2sNode.BypassCallbackSlot())

				// Remove the destination node from the plan.
				ctx.DAG.RemoveNode(toShared)
				delete(ctx.ToNodes, toShared.To)
			}
		*/

		// Remove the source node from the plan.
		ctx.DAG.RemoveNode(frNode)
		delete(ctx.FromNodes, fr)
	}
}

+ 98
- 0
common/pkgs/ioswitch2/parser/opt/segment.go View File

@@ -0,0 +1,98 @@
package opt

import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state"
)

// RemoveUnusedSegmentJoin deletes SegmentJoin nodes whose joined output has
// no consumers, and reports whether the DAG was modified.
func RemoveUnusedSegmentJoin(ctx *state.GenerateState) bool {
	changed := false

	dag.WalkOnlyType[*ops2.SegmentJoinNode](ctx.DAG.Graph, func(node *ops2.SegmentJoinNode) bool {
		if node.Joined().Dst.Len() > 0 {
			return true
		}

		node.RemoveAllInputs()
		ctx.DAG.RemoveNode(node)
		// Bug fix: `changed` was previously never set, so callers running the
		// optimizations to a fixed point could not see that the graph changed.
		changed = true
		return true
	})

	return changed
}

// RemoveUnusedSegmentSplit removes every SegmentSplit node none of whose
// output streams is consumed. Reports whether the graph was changed.
func RemoveUnusedSegmentSplit(ctx *state.GenerateState) bool {
	changed := false

	// anyOutputUsed reports whether at least one split output still has a consumer.
	anyOutputUsed := func(node *ops2.SegmentSplitNode) bool {
		for _, out := range node.OutputStreams().Slots.RawArray() {
			if out.Dst.Len() > 0 {
				return true
			}
		}
		return false
	}

	dag.WalkOnlyType[*ops2.SegmentSplitNode](ctx.DAG.Graph, func(node *ops2.SegmentSplitNode) bool {
		// The node may only be deleted when every one of its split streams is unused.
		if anyOutputUsed(node) {
			return true
		}

		node.RemoveAllStream()
		ctx.DAG.RemoveNode(node)
		changed = true
		return true
	})

	return changed
}

// OmitSegmentSplitJoin elides a SegmentSplit/SegmentJoin pair when the split's
// outputs feed, in order and exclusively, that single join: the join's
// consumers are rewired to read the split's input directly, so
// F->Split->Join->T becomes F->T. Reports whether the graph was changed.
func OmitSegmentSplitJoin(ctx *state.GenerateState) bool {
	changed := false

	dag.WalkOnlyType[*ops2.SegmentSplitNode](ctx.DAG.Graph, func(splitNode *ops2.SegmentSplitNode) bool {
		// Pick any output stream and look at where it goes.
		splitOut := splitNode.OutputStreams().Get(0)
		if splitOut.Dst.Len() != 1 {
			return true
		}
		dstNode := splitOut.Dst.Get(0)

		// The destination must be a SegmentJoin node.
		joinNode, ok := dstNode.(*ops2.SegmentJoinNode)
		if !ok {
			return true
		}

		// NOTE(review): this compares the split's output count with the number
		// of consumers of the join's *output* stream; comparing against
		// joinNode.InputStreams().Len() may have been intended — confirm.
		if splitNode.OutputStreams().Len() != joinNode.Joined().Dst.Len() {
			return true
		}

		// Every input of the join must come from the split's output at the same
		// position, and each split output may have no consumer other than the join.
		for i := 0; i < splitNode.OutputStreams().Len(); i++ {
			splitOut := splitNode.OutputStreams().Get(i)
			joinIn := joinNode.InputStreams().Get(i)
			if splitOut != joinIn {
				return true
			}

			if splitOut != nil && splitOut.Dst.Len() != 1 {
				return true
			}
		}

		// All conditions hold: replace the input of each consumer of the joined
		// stream with the split's input stream (F->Split->Join->T => F->T).
		splitInput := splitNode.InputStreams().Get(0)
		for _, to := range joinNode.Joined().Dst.RawArray() {
			splitInput.To(to, to.InputStreams().IndexOf(joinNode.Joined()))
		}
		splitInput.NotTo(splitNode)

		// Then drop both instructions from the plan.
		ctx.DAG.RemoveNode(joinNode)
		ctx.DAG.RemoveNode(splitNode)

		changed = true
		return true
	})

	return changed
}

+ 26
- 1046
common/pkgs/ioswitch2/parser/parser.go
File diff suppressed because it is too large
View File


+ 35
- 0
common/pkgs/ioswitch2/parser/state/state.go View File

@@ -0,0 +1,35 @@
package state

import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
)

// IndexedStream pairs a stream variable with the index identifying which
// part of the data it carries.
type IndexedStream struct {
	Stream      *dag.StreamVar
	StreamIndex ioswitch2.StreamIndex
}

// GenerateState carries the intermediate data of one plan-generation run.
type GenerateState struct {
	Ft  ioswitch2.FromTo
	DAG *ops2.GraphNodeBuilder
	// Nodes produced for each To / From of Ft.
	ToNodes        map[ioswitch2.To]ops2.ToNode
	FromNodes      map[ioswitch2.From]ops2.FromNode
	IndexedStreams []IndexedStream
	// The range a From must open in order to produce the data required by all
	// Tos. It is relative to the whole file, and both bounds are rounded to a
	// multiple of the stripe size, so the upper bound may exceed the file size.
	StreamRange math2.Range
	UseEC       bool // whether erasure coding is involved
	UseSegment  bool // whether segmenting is involved
}

// InitGenerateState creates a GenerateState for the given FromTo, with an
// empty DAG builder and empty node maps.
func InitGenerateState(ft ioswitch2.FromTo) *GenerateState {
	toNodes := make(map[ioswitch2.To]ops2.ToNode)
	fromNodes := make(map[ioswitch2.From]ops2.FromNode)

	return &GenerateState{
		Ft:        ft,
		DAG:       ops2.NewGraphNodeBuilder(),
		ToNodes:   toNodes,
		FromNodes: fromNodes,
	}
}

+ 9
- 8
common/pkgs/ioswitchlrc/fromto.go View File

@@ -3,6 +3,7 @@ package ioswitchlrc
import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
)

type From interface {
@@ -13,7 +14,7 @@ type To interface {
// To所需要的文件流的范围。具体含义与DataIndex有关系:
// 如果DataIndex == -1,则表示在整个文件的范围。
// 如果DataIndex >= 0,则表示在文件的某个分片的范围。
GetRange() exec.Range
GetRange() math2.Range
GetDataIndex() int
}

@@ -24,7 +25,7 @@ type FromDriver struct {

func NewFromDriver(dataIndex int) (*FromDriver, *exec.DriverWriteStream) {
handle := &exec.DriverWriteStream{
RangeHint: &exec.Range{},
RangeHint: &math2.Range{},
}
return &FromDriver{
Handle: handle,
@@ -58,7 +59,7 @@ func (f *FromNode) GetDataIndex() int {
type ToDriver struct {
Handle *exec.DriverReadStream
DataIndex int
Range exec.Range
Range math2.Range
}

func NewToDriver(dataIndex int) (*ToDriver, *exec.DriverReadStream) {
@@ -69,7 +70,7 @@ func NewToDriver(dataIndex int) (*ToDriver, *exec.DriverReadStream) {
}, &str
}

func NewToDriverWithRange(dataIndex int, rng exec.Range) (*ToDriver, *exec.DriverReadStream) {
func NewToDriverWithRange(dataIndex int, rng math2.Range) (*ToDriver, *exec.DriverReadStream) {
str := exec.DriverReadStream{}
return &ToDriver{
Handle: &str,
@@ -82,7 +83,7 @@ func (t *ToDriver) GetDataIndex() int {
return t.DataIndex
}

func (t *ToDriver) GetRange() exec.Range {
func (t *ToDriver) GetRange() math2.Range {
return t.Range
}

@@ -90,7 +91,7 @@ type ToNode struct {
Hub cdssdk.Hub
Storage cdssdk.Storage
DataIndex int
Range exec.Range
Range math2.Range
FileHashStoreKey string
}

@@ -103,7 +104,7 @@ func NewToStorage(hub cdssdk.Hub, stg cdssdk.Storage, dataIndex int, fileHashSto
}
}

func NewToStorageWithRange(hub cdssdk.Hub, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string, rng exec.Range) *ToNode {
func NewToStorageWithRange(hub cdssdk.Hub, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string, rng math2.Range) *ToNode {
return &ToNode{
Hub: hub,
Storage: stg,
@@ -117,7 +118,7 @@ func (t *ToNode) GetDataIndex() int {
return t.DataIndex
}

func (t *ToNode) GetRange() exec.Range {
func (t *ToNode) GetRange() math2.Range {
return t.Range
}



+ 4
- 1
common/pkgs/ioswitchlrc/ops2/chunked.go View File

@@ -37,7 +37,10 @@ func (o *ChunkedSplit) Execute(ctx *exec.ExecContext, e *exec.Executor) error {

sem := semaphore.NewWeighted(int64(len(outputs)))
for i := range outputs {
sem.Acquire(ctx.Context, 1)
err = sem.Acquire(ctx.Context, 1)
if err != nil {
return err
}

e.PutVar(o.Outputs[i], &exec.StreamValue{
Stream: io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {


+ 2
- 2
common/pkgs/ioswitchlrc/ops2/range.go View File

@@ -81,7 +81,7 @@ func (o *Range) String() string {

type RangeNode struct {
dag.NodeBase
Range exec.Range
Range math2.Range
}

func (b *GraphNodeBuilder) NewRange() *RangeNode {
@@ -93,7 +93,7 @@ func (b *GraphNodeBuilder) NewRange() *RangeNode {
return node
}

func (t *RangeNode) RangeStream(input *dag.StreamVar, rng exec.Range) *dag.StreamVar {
func (t *RangeNode) RangeStream(input *dag.StreamVar, rng math2.Range) *dag.StreamVar {
input.To(t, 0)
t.Range = rng
return t.OutputStreams().Get(0)


+ 5
- 5
common/pkgs/ioswitchlrc/ops2/shard_store.go View File

@@ -11,7 +11,7 @@ import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
)

@@ -41,12 +41,12 @@ func (o *ShardRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
Debugf("reading from shard store")
defer logger.Debugf("reading from shard store finished")

stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx)
stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx)
if err != nil {
return fmt.Errorf("getting storage manager: %w", err)
}

store, err := stgMgr.GetShardStore(o.StorageID)
store, err := stgAgts.GetShardStore(o.StorageID)
if err != nil {
return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err)
}
@@ -83,12 +83,12 @@ func (o *ShardWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
Debugf("writting file to shard store")
defer logger.Debugf("write to shard store finished")

stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx)
stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx)
if err != nil {
return fmt.Errorf("getting storage manager: %w", err)
}

store, err := stgMgr.GetShardStore(o.StorageID)
store, err := stgAgts.GetShardStore(o.StorageID)
if err != nil {
return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err)
}


+ 5
- 4
common/pkgs/ioswitchlrc/parser/generator.go View File

@@ -7,6 +7,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/plan"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc/ops2"
)
@@ -16,7 +17,7 @@ type GenerateContext struct {
DAG *ops2.GraphNodeBuilder
To []ioswitchlrc.To
ToNodes map[ioswitchlrc.To]ops2.ToNode
StreamRange exec.Range
StreamRange math2.Range
}

// 输入一个完整文件,从这个完整文件产生任意文件块(也可再产生完整文件)。
@@ -48,7 +49,7 @@ func Encode(fr ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec.PlanBuilder)
generateClone(&ctx)
generateRange(&ctx)

return plan.Generate(ctx.DAG.Graph, blder)
return plan.Compile(ctx.DAG.Graph, blder)
}

func buildDAGEncode(ctx *GenerateContext, fr ioswitchlrc.From, toes []ioswitchlrc.To) error {
@@ -145,7 +146,7 @@ func ReconstructAny(frs []ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec.P
generateClone(&ctx)
generateRange(&ctx)

return plan.Generate(ctx.DAG.Graph, blder)
return plan.Compile(ctx.DAG.Graph, blder)
}

func buildDAGReconstructAny(ctx *GenerateContext, frs []ioswitchlrc.From, toes []ioswitchlrc.To) error {
@@ -266,7 +267,7 @@ func ReconstructGroup(frs []ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec
generateClone(&ctx)
generateRange(&ctx)

return plan.Generate(ctx.DAG.Graph, blder)
return plan.Compile(ctx.DAG.Graph, blder)
}

func buildDAGReconstructGroup(ctx *GenerateContext, frs []ioswitchlrc.From, toes []ioswitchlrc.To) error {


+ 5
- 6
common/pkgs/ioswitchlrc/parser/passes.go View File

@@ -5,7 +5,6 @@ import (
"math"

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc"
@@ -17,7 +16,7 @@ import (
func calcStreamRange(ctx *GenerateContext) {
stripSize := int64(ctx.LRC.ChunkSize * ctx.LRC.K)

rng := exec.Range{
rng := math2.Range{
Offset: math.MaxInt64,
}

@@ -49,8 +48,8 @@ func calcStreamRange(ctx *GenerateContext) {
}

func buildFromNode(ctx *GenerateContext, f ioswitchlrc.From) (ops2.FromNode, error) {
var repRange exec.Range
var blkRange exec.Range
var repRange math2.Range
var blkRange math2.Range

repRange.Offset = ctx.StreamRange.Offset
blkRange.Offset = ctx.StreamRange.Offset / int64(ctx.LRC.ChunkSize*ctx.LRC.K) * int64(ctx.LRC.ChunkSize)
@@ -234,7 +233,7 @@ func generateRange(ctx *GenerateContext) {
n := ctx.DAG.NewRange()
toInput := toNode.Input()
*n.Env() = *toInput.Var().Src.Env()
rnged := n.RangeStream(toInput.Var(), exec.Range{
rnged := n.RangeStream(toInput.Var(), math2.Range{
Offset: toRng.Offset - ctx.StreamRange.Offset,
Length: toRng.Length,
})
@@ -250,7 +249,7 @@ func generateRange(ctx *GenerateContext) {
n := ctx.DAG.NewRange()
toInput := toNode.Input()
*n.Env() = *toInput.Var().Src.Env()
rnged := n.RangeStream(toInput.Var(), exec.Range{
rnged := n.RangeStream(toInput.Var(), math2.Range{
Offset: toRng.Offset - blkStart,
Length: toRng.Length,
})


+ 96
- 0
common/pkgs/metacache/connectivity.go View File

@@ -0,0 +1,96 @@
package metacache

import (
"sync"
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

// AddConnectivity creates a hub-to-hub connectivity cache and registers it
// with the host's maintenance loop.
func (m *MetaCacheHost) AddConnectivity() *Connectivity {
	c := &Connectivity{
		entries: make(map[cdssdk.HubID]*ConnectivityEntry),
	}
	m.caches = append(m.caches, c)
	return c
}

// Connectivity caches hub-to-hub connectivity measurements, keyed by the
// origin hub. Entries are loaded on demand from the coordinator.
type Connectivity struct {
	lock    sync.RWMutex
	entries map[cdssdk.HubID]*ConnectivityEntry // origin hub -> its measurements
}

// Get returns the measured latency (in wall-clock duration) from hub `from`
// to hub `to`, or nil when it is unknown. On a cache miss it synchronously
// fetches the connectivity data of `from` from the coordinator and retries.
func (c *Connectivity) Get(from cdssdk.HubID, to cdssdk.HubID) *time.Duration {
	// At most two attempts: a pure cache lookup, then one more after load().
	for i := 0; i < 2; i++ {
		c.lock.RLock()
		entry, ok := c.entries[from]
		if ok {
			con, ok := entry.To[to]
			if ok {
				c.lock.RUnlock()

				// The pair is cached but has no latency value recorded;
				// report it as unknown.
				if con.Latency == nil {
					return nil
				}
				// Stored latency is in milliseconds.
				l := time.Millisecond * time.Duration(*con.Latency)
				return &l
			}
		}
		c.lock.RUnlock()

		// Miss: refresh this hub's entry, then loop to look again.
		// NOTE(review): when the second lookup also misses, load runs one
		// extra time and its result is never read — harmless but wasted work.
		c.load(from)
	}

	return nil
}

// ClearOutdated drops every cached entry older than five minutes.
func (c *Connectivity) ClearOutdated() {
	c.lock.Lock()
	defer c.lock.Unlock()

	const maxAge = 5 * time.Minute
	for id, entry := range c.entries {
		if time.Since(entry.UpdateTime) > maxAge {
			delete(c.entries, id)
		}
	}
}

// load fetches the connectivity measurements originating from hubID from the
// coordinator and replaces the cached entry for that hub. On failure it logs
// a warning and leaves the cache unchanged.
func (c *Connectivity) load(hubID cdssdk.HubID) {
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		logger.Warnf("new coordinator client: %v", err)
		return
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	get, err := coorCli.GetHubConnectivities(coormq.ReqGetHubConnectivities([]cdssdk.HubID{hubID}))
	if err != nil {
		logger.Warnf("get hub connectivities: %v", err)
		return
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	// Rebuild the entry from scratch so destinations that disappeared from
	// the coordinator's answer are dropped as well.
	ce := &ConnectivityEntry{
		From:       hubID,
		To:         make(map[cdssdk.HubID]cdssdk.HubConnectivity),
		UpdateTime: time.Now(),
	}

	for _, conn := range get.Connectivities {
		ce.To[conn.ToHubID] = conn
	}

	c.entries[hubID] = ce
}

// ConnectivityEntry holds all measurements that originate from one hub.
type ConnectivityEntry struct {
	From       cdssdk.HubID
	To         map[cdssdk.HubID]cdssdk.HubConnectivity // destination hub -> measurement
	UpdateTime time.Time                               // when this entry was fetched
}

+ 27
- 0
common/pkgs/metacache/host.go View File

@@ -0,0 +1,27 @@
package metacache

import "time"

// MetaCache is implemented by every cache registered with a MetaCacheHost;
// ClearOutdated is invoked periodically to evict stale entries.
type MetaCache interface {
	ClearOutdated()
}

// MetaCacheHost owns a set of MetaCache instances and periodically clears
// their outdated entries (see Serve).
type MetaCacheHost struct {
	caches []MetaCache
}

// NewHost creates an empty MetaCacheHost.
func NewHost() *MetaCacheHost {
	return &MetaCacheHost{}
}

// Serve runs the maintenance loop, invoking ClearOutdated on every registered
// cache once per minute. It never returns.
func (m *MetaCacheHost) Serve() {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()

	// Ranging over the ticker channel replaces the previous
	// `for { select { case <-ticker.C: } }` single-case select (staticcheck S1000).
	for range ticker.C {
		for _, cache := range m.caches {
			cache.ClearOutdated()
		}
	}
}

+ 75
- 0
common/pkgs/metacache/hubmeta.go View File

@@ -0,0 +1,75 @@
package metacache

import (
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

// AddHubMeta creates a hub metadata cache (5-minute expiry, entries loaded
// from the coordinator) and registers it with the host's maintenance loop.
func (m *MetaCacheHost) AddHubMeta() *HubMeta {
	meta := &HubMeta{}
	meta.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[cdssdk.HubID, cdssdk.Hub]{
		Getter: meta.load,
		Expire: time.Minute * 5,
	})

	m.caches = append(m.caches, meta)
	return meta
}

// HubMeta caches hub metadata keyed by hub ID.
type HubMeta struct {
	cache *SimpleMetaCache[cdssdk.HubID, cdssdk.Hub]
}

// Get returns the hub with the given ID, or nil when it cannot be found.
func (h *HubMeta) Get(hubID cdssdk.HubID) *cdssdk.Hub {
	if hub, ok := h.cache.Get(hubID); ok {
		return &hub
	}
	return nil
}

// GetMany resolves a batch of hub IDs; the result slice is parallel to the
// input, with nil for every hub that could not be found.
func (h *HubMeta) GetMany(hubIDs []cdssdk.HubID) []*cdssdk.Hub {
	vs, oks := h.cache.GetMany(hubIDs)
	ret := make([]*cdssdk.Hub, len(vs))
	for i, ok := range oks {
		if ok {
			ret[i] = &vs[i]
		}
	}
	return ret
}

// ClearOutdated evicts expired entries; called by the MetaCacheHost loop.
func (h *HubMeta) ClearOutdated() {
	h.cache.ClearOutdated()
}

// load fetches hub metadata for keys from the coordinator. It returns a value
// slice and a parallel found-flag slice (both len(keys)); on any failure every
// flag is false and a warning is logged.
func (h *HubMeta) load(keys []cdssdk.HubID) ([]cdssdk.Hub, []bool) {
	vs := make([]cdssdk.Hub, len(keys))
	oks := make([]bool, len(keys))

	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		logger.Warnf("new coordinator client: %v", err)
		return vs, oks
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	get, err := coorCli.GetHubs(coormq.NewGetHubs(keys))
	if err != nil {
		logger.Warnf("get hubs: %v", err)
		return vs, oks
	}

	// Guard against a malformed response that is shorter than the request;
	// indexing it blindly would panic.
	if len(get.Hubs) < len(keys) {
		logger.Warnf("get hubs: got %d results for %d keys", len(get.Hubs), len(keys))
		return vs, oks
	}

	for i := range keys {
		if get.Hubs[i] != nil {
			vs[i] = *get.Hubs[i]
			oks[i] = true
		}
	}

	return vs, oks
}

+ 121
- 0
common/pkgs/metacache/simple.go View File

@@ -0,0 +1,121 @@
package metacache

import (
"sync"
"time"
)

// SimpleMetaCacheConfig configures a SimpleMetaCache.
type SimpleMetaCacheConfig[K comparable, V any] struct {
	Getter Getter[K, V]  // batch loader invoked on cache misses
	Expire time.Duration // entries older than this are dropped by ClearOutdated
}

// Getter loads the values for a batch of keys. It returns a value slice and a
// parallel found-flag slice, both the same length as keys.
type Getter[K comparable, V any] func(keys []K) ([]V, []bool)

// SimpleMetaCache is a thread-safe read-through cache: lookups that miss are
// filled by calling the configured Getter, and entries older than cfg.Expire
// are evicted whenever ClearOutdated is called.
type SimpleMetaCache[K comparable, V any] struct {
	lock  sync.RWMutex
	cache map[K]*CacheEntry[K, V]
	cfg   SimpleMetaCacheConfig[K, V]
}

// NewSimpleMetaCache creates an empty cache with the given configuration.
func NewSimpleMetaCache[K comparable, V any](cfg SimpleMetaCacheConfig[K, V]) *SimpleMetaCache[K, V] {
	return &SimpleMetaCache[K, V]{
		cache: make(map[K]*CacheEntry[K, V]),
		cfg:   cfg,
	}
}

// Get returns the cached value for key, loading it through the Getter on a
// miss. The second result reports whether a value was found.
func (mc *SimpleMetaCache[K, V]) Get(key K) (V, bool) {
	// Fast path: already cached.
	if v, ok := mc.peek(key); ok {
		return v, true
	}

	// Miss: load once, then re-check. (The previous implementation iterated
	// twice and issued a second, wasted Getter call — whose result was never
	// read — whenever the key could not be loaded at all.)
	mc.load([]K{key})
	return mc.peek(key)
}

// peek looks key up in the cache without triggering a load.
func (mc *SimpleMetaCache[K, V]) peek(key K) (V, bool) {
	mc.lock.RLock()
	defer mc.lock.RUnlock()

	entry, ok := mc.cache[key]
	if !ok {
		var zero V
		return zero, false
	}
	return entry.Data, true
}

// GetMany resolves a batch of keys, loading missing ones through the Getter.
// The returned slices are parallel to keys.
func (mc *SimpleMetaCache[K, V]) GetMany(keys []K) ([]V, []bool) {
	result := make([]V, len(keys))
	oks := make([]bool, len(keys))

	if mc.peekMany(keys, result, oks) {
		return result, oks
	}

	// At least one key was missing: load the batch once and re-check.
	mc.load(keys)
	mc.peekMany(keys, result, oks)
	return result, oks
}

// peekMany fills result/oks for every cached key (skipping those already
// found) and reports whether all keys were resolved.
func (mc *SimpleMetaCache[K, V]) peekMany(keys []K, result []V, oks []bool) bool {
	mc.lock.RLock()
	defer mc.lock.RUnlock()

	all := true
	for i, key := range keys {
		if oks[i] {
			continue
		}
		if entry, ok := mc.cache[key]; ok {
			result[i] = entry.Data
			oks[i] = true
		} else {
			all = false
		}
	}
	return all
}

// load fetches values for keys via the Getter and inserts those not already
// present. Existing entries are kept: whatever is cached is assumed to be at
// least as fresh as the newly loaded data.
func (mc *SimpleMetaCache[K, V]) load(keys []K) {
	vs, getOks := mc.cfg.Getter(keys)

	mc.lock.Lock()
	defer mc.lock.Unlock()

	now := time.Now()
	for i, key := range keys {
		if !getOks[i] {
			continue
		}
		if _, ok := mc.cache[key]; ok {
			continue
		}

		mc.cache[key] = &CacheEntry[K, V]{
			Key:        key,
			Data:       vs[i],
			UpdateTime: now,
		}
	}
}

// ClearOutdated removes entries older than cfg.Expire, or with an UpdateTime
// in the future (which indicates a clock jump).
func (mc *SimpleMetaCache[K, V]) ClearOutdated() {
	mc.lock.Lock()
	defer mc.lock.Unlock()

	for key, entry := range mc.cache {
		dt := time.Since(entry.UpdateTime)
		if dt > mc.cfg.Expire || dt < 0 {
			delete(mc.cache, key)
		}
	}
}

// CacheEntry is one cached key/value pair together with its load time.
type CacheEntry[K comparable, V any] struct {
	Key        K
	Data       V
	UpdateTime time.Time
}

+ 76
- 0
common/pkgs/metacache/storagemeta.go View File

@@ -0,0 +1,76 @@
package metacache

import (
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

// AddStorageMeta creates a storage-detail cache (5-minute expiry, entries
// loaded from the coordinator) and registers it with the host's maintenance
// loop.
func (m *MetaCacheHost) AddStorageMeta() *StorageMeta {
	meta := &StorageMeta{}
	meta.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[cdssdk.StorageID, stgmod.StorageDetail]{
		Getter: meta.load,
		Expire: time.Minute * 5,
	})

	m.caches = append(m.caches, meta)
	return meta
}

// StorageMeta caches storage details keyed by storage ID.
type StorageMeta struct {
	cache *SimpleMetaCache[cdssdk.StorageID, stgmod.StorageDetail]
}

// Get returns the detail of the given storage, or nil when it cannot be found.
func (s *StorageMeta) Get(stgID cdssdk.StorageID) *stgmod.StorageDetail {
	if detail, ok := s.cache.Get(stgID); ok {
		return &detail
	}
	return nil
}

// GetMany resolves a batch of storage IDs; the result slice is parallel to
// the input, with nil for every storage that could not be found.
func (s *StorageMeta) GetMany(stgIDs []cdssdk.StorageID) []*stgmod.StorageDetail {
	vs, oks := s.cache.GetMany(stgIDs)
	ret := make([]*stgmod.StorageDetail, len(vs))
	for i, ok := range oks {
		if ok {
			ret[i] = &vs[i]
		}
	}
	return ret
}

// ClearOutdated evicts expired entries; called by the MetaCacheHost loop.
func (s *StorageMeta) ClearOutdated() {
	s.cache.ClearOutdated()
}

// load fetches storage details for keys from the coordinator. It returns a
// value slice and a parallel found-flag slice (both len(keys)); on any
// failure every flag is false and a warning is logged.
func (s *StorageMeta) load(keys []cdssdk.StorageID) ([]stgmod.StorageDetail, []bool) {
	vs := make([]stgmod.StorageDetail, len(keys))
	oks := make([]bool, len(keys))

	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		logger.Warnf("new coordinator client: %v", err)
		return vs, oks
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	get, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(keys))
	if err != nil {
		logger.Warnf("get storage details: %v", err)
		return vs, oks
	}

	// Guard against a malformed response that is shorter than the request;
	// indexing it blindly would panic.
	if len(get.Storages) < len(keys) {
		logger.Warnf("get storage details: got %d results for %d keys", len(get.Storages), len(keys))
		return vs, oks
	}

	for i := range keys {
		if get.Storages[i] != nil {
			vs[i] = *get.Storages[i]
			oks[i] = true
		}
	}

	return vs, oks
}

+ 4
- 4
common/pkgs/mq/agent/client.go View File

@@ -13,8 +13,8 @@ type Client struct {
id cdssdk.HubID
}

func NewClient(id cdssdk.HubID, cfg *stgmq.Config) (*Client, error) {
rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), stgmq.MakeAgentQueueName(int64(id)), "")
func NewClient(id cdssdk.HubID, cfg mq.Config) (*Client, error) {
rabbitCli, err := mq.NewRabbitMQTransport(cfg, stgmq.MakeAgentQueueName(int64(id)), "")
if err != nil {
return nil, err
}
@@ -35,12 +35,12 @@ type Pool interface {
}

type pool struct {
mqcfg *stgmq.Config
mqcfg mq.Config
shareds map[cdssdk.HubID]*Client
lock sync.Mutex
}

func NewPool(mqcfg *stgmq.Config) Pool {
func NewPool(mqcfg mq.Config) Pool {
return &pool{
mqcfg: mqcfg,
shareds: make(map[cdssdk.HubID]*Client),


+ 2
- 3
common/pkgs/mq/agent/server.go View File

@@ -20,18 +20,17 @@ type Server struct {
rabbitSvr mq.RabbitMQServer
}

func NewServer(svc Service, id cdssdk.HubID, cfg *mymq.Config) (*Server, error) {
func NewServer(svc Service, id cdssdk.HubID, cfg mq.Config) (*Server, error) {
srv := &Server{
service: svc,
}

rabbitSvr, err := mq.NewRabbitMQServer(
cfg.MakeConnectingURL(),
cfg,
mymq.MakeAgentQueueName(int64(id)),
func(msg *mq.Message) (*mq.Message, error) {
return msgDispatcher.Handle(srv.service, msg)
},
cfg.Param,
)
if err != nil {
return nil, err


+ 0
- 128
common/pkgs/mq/agent/storage.go View File

@@ -3,142 +3,14 @@ package agent
import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type StorageService interface {
StartStorageLoadPackage(msg *StartStorageLoadPackage) (*StartStorageLoadPackageResp, *mq.CodeMessage)

WaitStorageLoadPackage(msg *WaitStorageLoadPackage) (*WaitStorageLoadPackageResp, *mq.CodeMessage)

StorageCheck(msg *StorageCheck) (*StorageCheckResp, *mq.CodeMessage)

StorageGC(msg *StorageGC) (*StorageGCResp, *mq.CodeMessage)

StartStorageCreatePackage(msg *StartStorageCreatePackage) (*StartStorageCreatePackageResp, *mq.CodeMessage)

WaitStorageCreatePackage(msg *WaitStorageCreatePackage) (*WaitStorageCreatePackageResp, *mq.CodeMessage)
}

// 启动调度Package的任务
var _ = Register(Service.StartStorageLoadPackage)

type StartStorageLoadPackage struct {
mq.MessageBodyBase
UserID cdssdk.UserID `json:"userID"`
PackageID cdssdk.PackageID `json:"packageID"`
StorageID cdssdk.StorageID `json:"storageID"`
}
type StartStorageLoadPackageResp struct {
mq.MessageBodyBase
TaskID string `json:"taskID"`
}

func NewStartStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) *StartStorageLoadPackage {
return &StartStorageLoadPackage{
UserID: userID,
PackageID: packageID,
StorageID: storageID,
}
}
func NewStartStorageLoadPackageResp(taskID string) *StartStorageLoadPackageResp {
return &StartStorageLoadPackageResp{
TaskID: taskID,
}
}
func (client *Client) StartStorageLoadPackage(msg *StartStorageLoadPackage, opts ...mq.RequestOption) (*StartStorageLoadPackageResp, error) {
return mq.Request(Service.StartStorageLoadPackage, client.rabbitCli, msg, opts...)
}

// 等待调度Package的任务
var _ = Register(Service.WaitStorageLoadPackage)

type WaitStorageLoadPackage struct {
mq.MessageBodyBase
TaskID string `json:"taskID"`
WaitTimeoutMs int64 `json:"waitTimeout"`
}
type WaitStorageLoadPackageResp struct {
mq.MessageBodyBase
IsComplete bool `json:"isComplete"`
Error string `json:"error"`
PackagePath string `json:"packagePath"` // 加载后的Package的路径,相对于数据库中配置的Directory
LocalBase string `json:"localBase"` // 存储服务本地的目录,LocalBase + PackagePath = Package在代理节点上的完整路径
RemoteBase string `json:"remoteBase"` // 存储服务远程的目录,RemoteBase + PackagePath = Package在存储服务中的完整路径
}

func NewWaitStorageLoadPackage(taskID string, waitTimeoutMs int64) *WaitStorageLoadPackage {
return &WaitStorageLoadPackage{
TaskID: taskID,
WaitTimeoutMs: waitTimeoutMs,
}
}
func NewWaitStorageLoadPackageResp(isComplete bool, err string, packagePath string, localBase string, remoteBase string) *WaitStorageLoadPackageResp {
return &WaitStorageLoadPackageResp{
IsComplete: isComplete,
Error: err,
PackagePath: packagePath,
LocalBase: localBase,
RemoteBase: remoteBase,
}
}
func (client *Client) WaitStorageLoadPackage(msg *WaitStorageLoadPackage, opts ...mq.RequestOption) (*WaitStorageLoadPackageResp, error) {
return mq.Request(Service.WaitStorageLoadPackage, client.rabbitCli, msg, opts...)
}

// 检查Storage
var _ = Register(Service.StorageCheck)

type StorageCheck struct {
mq.MessageBodyBase
StorageID cdssdk.StorageID `json:"storageID"`
}
type StorageCheckResp struct {
mq.MessageBodyBase
Packages []stgmod.LoadedPackageID `json:"packages"`
}

func NewStorageCheck(storageID cdssdk.StorageID) *StorageCheck {
return &StorageCheck{
StorageID: storageID,
}
}
func NewStorageCheckResp(packages []stgmod.LoadedPackageID) *StorageCheckResp {
return &StorageCheckResp{
Packages: packages,
}
}
func (client *Client) StorageCheck(msg *StorageCheck, opts ...mq.RequestOption) (*StorageCheckResp, error) {
return mq.Request(Service.StorageCheck, client.rabbitCli, msg, opts...)
}

// 清理Cache中不用的文件
var _ = Register(Service.StorageGC)

type StorageGC struct {
mq.MessageBodyBase
StorageID cdssdk.StorageID `json:"storageID"`
Packages []model.StoragePackage `json:"packages"`
}
type StorageGCResp struct {
mq.MessageBodyBase
}

func ReqStorageGC(storageID cdssdk.StorageID, packages []model.StoragePackage) *StorageGC {
return &StorageGC{
StorageID: storageID,
Packages: packages,
}
}
func RespStorageGC() *StorageGCResp {
return &StorageGCResp{}
}
func (client *Client) StorageGC(msg *StorageGC, opts ...mq.RequestOption) (*StorageGCResp, error) {
return mq.Request(Service.StorageGC, client.rabbitCli, msg, opts...)
}

// 启动从Storage上传Package的任务
var _ = Register(Service.StartStorageCreatePackage)



+ 0
- 19
common/pkgs/mq/config.go View File

@@ -1,19 +0,0 @@
package mq

import (
"fmt"

"gitlink.org.cn/cloudream/common/pkgs/mq"
)

type Config struct {
Address string `json:"address"`
Account string `json:"account"`
Password string `json:"password"`
VHost string `json:"vhost"`
Param mq.RabbitMQParam `json:"param"`
}

func (cfg *Config) MakeConnectingURL() string {
return fmt.Sprintf("amqp://%s:%s@%s%s", cfg.Account, cfg.Password, cfg.Address, cfg.VHost)
}

+ 4
- 4
common/pkgs/mq/coordinator/client.go View File

@@ -11,8 +11,8 @@ type Client struct {
rabbitCli *mq.RabbitMQTransport
}

func NewClient(cfg *stgmq.Config) (*Client, error) {
rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), stgmq.COORDINATOR_QUEUE_NAME, "")
func NewClient(cfg mq.Config) (*Client, error) {
rabbitCli, err := mq.NewRabbitMQTransport(cfg, stgmq.COORDINATOR_QUEUE_NAME, "")
if err != nil {
return nil, err
}
@@ -32,12 +32,12 @@ type Pool interface {
}

type pool struct {
mqcfg *stgmq.Config
mqcfg mq.Config
shared *Client
lock sync.Mutex
}

func NewPool(mqcfg *stgmq.Config) Pool {
func NewPool(mqcfg mq.Config) Pool {
return &pool{
mqcfg: mqcfg,
}


+ 3
- 3
common/pkgs/mq/coordinator/hub.go View File

@@ -80,7 +80,7 @@ type GetHubs struct {
}
type GetHubsResp struct {
mq.MessageBodyBase
Hubs []cdssdk.Hub `json:"hubs"`
Hubs []*cdssdk.Hub `json:"hubs"`
}

func NewGetHubs(hubIDs []cdssdk.HubID) *GetHubs {
@@ -88,7 +88,7 @@ func NewGetHubs(hubIDs []cdssdk.HubID) *GetHubs {
HubIDs: hubIDs,
}
}
func NewGetHubsResp(hubs []cdssdk.Hub) *GetHubsResp {
func NewGetHubsResp(hubs []*cdssdk.Hub) *GetHubsResp {
return &GetHubsResp{
Hubs: hubs,
}
@@ -96,7 +96,7 @@ func NewGetHubsResp(hubs []cdssdk.Hub) *GetHubsResp {
func (r *GetHubsResp) GetHub(id cdssdk.HubID) *cdssdk.Hub {
for _, n := range r.Hubs {
if n.HubID == id {
return &n
return n
}
}



+ 59
- 0
common/pkgs/mq/coordinator/object.go View File

@@ -10,6 +10,8 @@ import (
)

type ObjectService interface {
GetObjects(msg *GetObjects) (*GetObjectsResp, *mq.CodeMessage)

GetObjectsByPath(msg *GetObjectsByPath) (*GetObjectsByPathResp, *mq.CodeMessage)

GetPackageObjects(msg *GetPackageObjects) (*GetPackageObjectsResp, *mq.CodeMessage)
@@ -26,11 +28,40 @@ type ObjectService interface {

DeleteObjects(msg *DeleteObjects) (*DeleteObjectsResp, *mq.CodeMessage)

CloneObjects(msg *CloneObjects) (*CloneObjectsResp, *mq.CodeMessage)

GetDatabaseAll(msg *GetDatabaseAll) (*GetDatabaseAllResp, *mq.CodeMessage)

AddAccessStat(msg *AddAccessStat)
}

var _ = Register(Service.GetObjects)

type GetObjects struct {
mq.MessageBodyBase
UserID cdssdk.UserID `json:"userID"`
ObjectIDs []cdssdk.ObjectID `json:"objectIDs"`
}
type GetObjectsResp struct {
mq.MessageBodyBase
Objects []*cdssdk.Object `json:"objects"`
}

func ReqGetObjects(userID cdssdk.UserID, objectIDs []cdssdk.ObjectID) *GetObjects {
return &GetObjects{
UserID: userID,
ObjectIDs: objectIDs,
}
}
func RespGetObjects(objects []*cdssdk.Object) *GetObjectsResp {
return &GetObjectsResp{
Objects: objects,
}
}
func (client *Client) GetObjects(msg *GetObjects) (*GetObjectsResp, error) {
return mq.Request(Service.GetObjects, client.rabbitCli, msg)
}

// 查询指定前缀的Object,返回的Objects会按照ObjectID升序
var _ = Register(Service.GetObjectsByPath)

@@ -256,6 +287,34 @@ func (client *Client) DeleteObjects(msg *DeleteObjects) (*DeleteObjectsResp, err
return mq.Request(Service.DeleteObjects, client.rabbitCli, msg)
}

// 克隆Object
var _ = Register(Service.CloneObjects)

type CloneObjects struct {
mq.MessageBodyBase
UserID cdssdk.UserID `json:"userID"`
Clonings []cdsapi.CloningObject `json:"clonings"`
}
type CloneObjectsResp struct {
mq.MessageBodyBase
Objects []*cdssdk.Object `json:"objects"`
}

func ReqCloneObjects(userID cdssdk.UserID, clonings []cdsapi.CloningObject) *CloneObjects {
return &CloneObjects{
UserID: userID,
Clonings: clonings,
}
}
func RespCloneObjects(objects []*cdssdk.Object) *CloneObjectsResp {
return &CloneObjectsResp{
Objects: objects,
}
}
func (client *Client) CloneObjects(msg *CloneObjects) (*CloneObjectsResp, error) {
return mq.Request(Service.CloneObjects, client.rabbitCli, msg)
}

// 增加访问计数
var _ = RegisterNoReply(Service.AddAccessStat)



Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save