diff --git a/agent/internal/cmd/serve.go b/agent/internal/cmd/serve.go index 83d9433..ca6b9cf 100644 --- a/agent/internal/cmd/serve.go +++ b/agent/internal/cmd/serve.go @@ -19,8 +19,10 @@ import ( "gitlink.org.cn/cloudream/storage/common/pkgs/connectivity" "gitlink.org.cn/cloudream/storage/common/pkgs/distlock" "gitlink.org.cn/cloudream/storage/common/pkgs/downloader" + "gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy" agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/metacache" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" "gitlink.org.cn/cloudream/storage/common/pkgs/uploader" "google.golang.org/grpc" @@ -46,16 +48,16 @@ func serve(configPath string) { } stgglb.InitLocal(&config.Cfg().Local) - stgglb.InitMQPool(&config.Cfg().RabbitMQ) + stgglb.InitMQPool(config.Cfg().RabbitMQ) stgglb.InitAgentRPCPool(&agtrpc.PoolConfig{}) // 获取Hub配置 hubCfg := downloadHubConfig() // 初始化存储服务管理器 - stgMgr := svcmgr.NewManager() + stgAgts := agtpool.NewPool() for _, stg := range hubCfg.Storages { - err := stgMgr.CreateService(stg) + err := stgAgts.SetupAgent(stg) if err != nil { fmt.Printf("init storage %v: %v", stg.Storage.String(), err) os.Exit(1) @@ -66,7 +68,7 @@ func serve(configPath string) { worker := exec.NewWorker() // 初始化HTTP服务 - httpSvr, err := http.NewServer(config.Cfg().ListenAddr, http.NewService(&worker, stgMgr)) + httpSvr, err := http.NewServer(config.Cfg().ListenAddr, http.NewService(&worker, stgAgts)) if err != nil { logger.Fatalf("new http server failed, err: %s", err.Error()) } @@ -87,15 +89,15 @@ func serve(configPath string) { hubCons := make([]cdssdk.HubConnectivity, 0, len(cons)) for _, con := range cons { var delay *float32 - if con.Delay != nil { - v := float32(con.Delay.Microseconds()) / 1000 + if con.Latency != nil { + v := float32(con.Latency.Microseconds()) / 1000 delay = &v } hubCons = 
append(hubCons, cdssdk.HubConnectivity{ FromHubID: *stgglb.Local.HubID, ToHubID: con.ToHubID, - Delay: delay, + Latency: delay, TestTime: con.TestTime, }) } @@ -107,6 +109,13 @@ func serve(configPath string) { }) conCol.CollectInPlace() + // 初始化元数据缓存服务 + metacacheHost := metacache.NewHost() + go metacacheHost.Serve() + stgMeta := metacacheHost.AddStorageMeta() + hubMeta := metacacheHost.AddHubMeta() + conMeta := metacacheHost.AddConnectivity() + // 启动访问统计服务 acStat := accessstat.NewAccessStat(accessstat.Config{ // TODO 考虑放到配置里 @@ -120,18 +129,21 @@ func serve(configPath string) { logger.Fatalf("new ipfs failed, err: %s", err.Error()) } + // 初始化下载策略选择器 + strgSel := strategy.NewSelector(config.Cfg().DownloadStrategy, stgMeta, hubMeta, conMeta) + // 初始化下载器 - dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgMgr) + dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgAgts, strgSel) // 初始化上传器 - uploader := uploader.NewUploader(distlock, &conCol, stgMgr) + uploader := uploader.NewUploader(distlock, &conCol, stgAgts, stgMeta) // 初始化任务管理器 - taskMgr := task.NewManager(distlock, &conCol, &dlder, acStat, stgMgr, uploader) + taskMgr := task.NewManager(distlock, &conCol, &dlder, acStat, stgAgts, uploader) // 启动命令服务器 // TODO 需要设计AgentID持久化机制 - agtSvr, err := agtmq.NewServer(cmdsvc.NewService(&taskMgr, stgMgr), config.Cfg().ID, &config.Cfg().RabbitMQ) + agtSvr, err := agtmq.NewServer(cmdsvc.NewService(&taskMgr, stgAgts), config.Cfg().ID, config.Cfg().RabbitMQ) if err != nil { logger.Fatalf("new agent server failed, err: %s", err.Error()) } @@ -147,7 +159,7 @@ func serve(configPath string) { logger.Fatalf("listen on %s failed, err: %s", listenAddr, err.Error()) } s := grpc.NewServer() - agtrpc.RegisterAgentServer(s, grpcsvc.NewService(&worker, stgMgr)) + agtrpc.RegisterAgentServer(s, grpcsvc.NewService(&worker, stgAgts)) go serveGRPC(s, lis) go serveDistLock(distlock) diff --git a/agent/internal/config/config.go 
b/agent/internal/config/config.go index e16235a..40b3f28 100644 --- a/agent/internal/config/config.go +++ b/agent/internal/config/config.go @@ -3,25 +3,27 @@ package config import ( "gitlink.org.cn/cloudream/common/pkgs/distlock" log "gitlink.org.cn/cloudream/common/pkgs/logger" + "gitlink.org.cn/cloudream/common/pkgs/mq" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" c "gitlink.org.cn/cloudream/common/utils/config" stgmodels "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/connectivity" "gitlink.org.cn/cloudream/storage/common/pkgs/downloader" + "gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy" "gitlink.org.cn/cloudream/storage/common/pkgs/grpc" - stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq" ) type Config struct { - ID cdssdk.HubID `json:"id"` - ListenAddr string `json:"listenAddr"` - Local stgmodels.LocalMachineInfo `json:"local"` - GRPC *grpc.Config `json:"grpc"` - Logger log.Config `json:"logger"` - RabbitMQ stgmq.Config `json:"rabbitMQ"` - DistLock distlock.Config `json:"distlock"` - Connectivity connectivity.Config `json:"connectivity"` - Downloader downloader.Config `json:"downloader"` + ID cdssdk.HubID `json:"id"` + ListenAddr string `json:"listenAddr"` + Local stgmodels.LocalMachineInfo `json:"local"` + GRPC *grpc.Config `json:"grpc"` + Logger log.Config `json:"logger"` + RabbitMQ mq.Config `json:"rabbitMQ"` + DistLock distlock.Config `json:"distlock"` + Connectivity connectivity.Config `json:"connectivity"` + Downloader downloader.Config `json:"downloader"` + DownloadStrategy strategy.Config `json:"downloadStrategy"` } var cfg Config diff --git a/agent/internal/grpc/io.go b/agent/internal/grpc/io.go index bef65f7..15c49a8 100644 --- a/agent/internal/grpc/io.go +++ b/agent/internal/grpc/io.go @@ -29,7 +29,7 @@ func (s *Service) ExecuteIOPlan(ctx context.Context, req *agtrpc.ExecuteIOPlanRe defer s.swWorker.Remove(sw) execCtx := exec.NewWithContext(ctx) - 
exec.SetValueByType(execCtx, s.stgMgr) + exec.SetValueByType(execCtx, s.stgAgts) _, err = sw.Run(execCtx) if err != nil { log.Warnf("running io plan: %v", err) diff --git a/agent/internal/grpc/service.go b/agent/internal/grpc/service.go index 5cc4a33..3dbbf85 100644 --- a/agent/internal/grpc/service.go +++ b/agent/internal/grpc/service.go @@ -3,18 +3,18 @@ package grpc import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" agentserver "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" ) type Service struct { agentserver.AgentServer swWorker *exec.Worker - stgMgr *svcmgr.Manager + stgAgts *agtpool.AgentPool } -func NewService(swWorker *exec.Worker, stgMgr *svcmgr.Manager) *Service { +func NewService(swWorker *exec.Worker, stgAgts *agtpool.AgentPool) *Service { return &Service{ swWorker: swWorker, - stgMgr: stgMgr, + stgAgts: stgAgts, } } diff --git a/agent/internal/http/hub_io.go b/agent/internal/http/hub_io.go index 36e4e5d..c03deae 100644 --- a/agent/internal/http/hub_io.go +++ b/agent/internal/http/hub_io.go @@ -162,7 +162,7 @@ func (s *IOService) ExecuteIOPlan(ctx *gin.Context) { defer s.svc.swWorker.Remove(sw) execCtx := exec.NewWithContext(ctx.Request.Context()) - exec.SetValueByType(execCtx, s.svc.stgMgr) + exec.SetValueByType(execCtx, s.svc.stgAgts) _, err = sw.Run(execCtx) if err != nil { ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("executing plan: %v", err))) diff --git a/agent/internal/http/service.go b/agent/internal/http/service.go index 62756e8..2c38d36 100644 --- a/agent/internal/http/service.go +++ b/agent/internal/http/service.go @@ -2,17 +2,17 @@ package http import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" ) type Service struct { swWorker 
*exec.Worker - stgMgr *svcmgr.Manager + stgAgts *agtpool.AgentPool } -func NewService(swWorker *exec.Worker, stgMgr *svcmgr.Manager) *Service { +func NewService(swWorker *exec.Worker, stgAgts *agtpool.AgentPool) *Service { return &Service{ swWorker: swWorker, - stgMgr: stgMgr, + stgAgts: stgAgts, } } diff --git a/agent/internal/mq/cache.go b/agent/internal/mq/cache.go index 19fe06d..735e1e3 100644 --- a/agent/internal/mq/cache.go +++ b/agent/internal/mq/cache.go @@ -12,7 +12,7 @@ import ( ) func (svc *Service) CheckCache(msg *agtmq.CheckCache) (*agtmq.CheckCacheResp, *mq.CodeMessage) { - store, err := svc.stgMgr.GetShardStore(msg.StorageID) + store, err := svc.stgAgts.GetShardStore(msg.StorageID) if err != nil { return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("get shard store of storage %v: %v", msg.StorageID, err)) } @@ -31,7 +31,7 @@ func (svc *Service) CheckCache(msg *agtmq.CheckCache) (*agtmq.CheckCacheResp, *m } func (svc *Service) CacheGC(msg *agtmq.CacheGC) (*agtmq.CacheGCResp, *mq.CodeMessage) { - store, err := svc.stgMgr.GetShardStore(msg.StorageID) + store, err := svc.stgAgts.GetShardStore(msg.StorageID) if err != nil { return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("get shard store of storage %v: %v", msg.StorageID, err)) } diff --git a/agent/internal/mq/service.go b/agent/internal/mq/service.go index 73ee256..b4688ed 100644 --- a/agent/internal/mq/service.go +++ b/agent/internal/mq/service.go @@ -2,17 +2,17 @@ package mq import ( "gitlink.org.cn/cloudream/storage/agent/internal/task" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" ) type Service struct { taskManager *task.Manager - stgMgr *svcmgr.Manager + stgAgts *agtpool.AgentPool } -func NewService(taskMgr *task.Manager, stgMgr *svcmgr.Manager) *Service { +func NewService(taskMgr *task.Manager, stgAgts *agtpool.AgentPool) *Service { return &Service{ taskManager: taskMgr, - stgMgr: stgMgr, + 
stgAgts: stgAgts, } } diff --git a/agent/internal/mq/storage.go b/agent/internal/mq/storage.go index dfd4161..ff5b4ff 100644 --- a/agent/internal/mq/storage.go +++ b/agent/internal/mq/storage.go @@ -4,104 +4,11 @@ import ( "time" "gitlink.org.cn/cloudream/common/consts/errorcode" - "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/mq" mytask "gitlink.org.cn/cloudream/storage/agent/internal/task" - stgglb "gitlink.org.cn/cloudream/storage/common/globals" - stgmod "gitlink.org.cn/cloudream/storage/common/models" agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" ) -func (svc *Service) StartStorageLoadPackage(msg *agtmq.StartStorageLoadPackage) (*agtmq.StartStorageLoadPackageResp, *mq.CodeMessage) { - tsk := svc.taskManager.StartNew(mytask.NewStorageLoadPackage(msg.UserID, msg.PackageID, msg.StorageID)) - return mq.ReplyOK(agtmq.NewStartStorageLoadPackageResp(tsk.ID())) -} - -func (svc *Service) WaitStorageLoadPackage(msg *agtmq.WaitStorageLoadPackage) (*agtmq.WaitStorageLoadPackageResp, *mq.CodeMessage) { - logger.WithField("TaskID", msg.TaskID).Debugf("wait loading package") - - tsk := svc.taskManager.FindByID(msg.TaskID) - if tsk == nil { - return nil, mq.Failed(errorcode.TaskNotFound, "task not found") - } - - if msg.WaitTimeoutMs == 0 { - tsk.Wait() - - errMsg := "" - if tsk.Error() != nil { - errMsg = tsk.Error().Error() - } - - loadTsk := tsk.Body().(*mytask.StorageLoadPackage) - - return mq.ReplyOK(agtmq.NewWaitStorageLoadPackageResp(true, errMsg, loadTsk.PackagePath, loadTsk.LocalBase, loadTsk.RemoteBase)) - - } else { - if tsk.WaitTimeout(time.Duration(msg.WaitTimeoutMs) * time.Millisecond) { - - errMsg := "" - if tsk.Error() != nil { - errMsg = tsk.Error().Error() - } - - loadTsk := tsk.Body().(*mytask.StorageLoadPackage) - - return mq.ReplyOK(agtmq.NewWaitStorageLoadPackageResp(true, errMsg, loadTsk.PackagePath, loadTsk.LocalBase, loadTsk.RemoteBase)) - } - - return 
mq.ReplyOK(agtmq.NewWaitStorageLoadPackageResp(false, "", "", "", "")) - } -} - -func (svc *Service) StorageCheck(msg *agtmq.StorageCheck) (*agtmq.StorageCheckResp, *mq.CodeMessage) { - coorCli, err := stgglb.CoordinatorMQPool.Acquire() - if err != nil { - return nil, mq.Failed(errorcode.OperationFailed, err.Error()) - } - defer stgglb.CoordinatorMQPool.Release(coorCli) - - shared, err := svc.stgMgr.GetSharedStore(msg.StorageID) - if err != nil { - return nil, mq.Failed(errorcode.OperationFailed, err.Error()) - } - - loaded, err := shared.ListLoadedPackages() - if err != nil { - return nil, mq.Failed(errorcode.OperationFailed, err.Error()) - } - - return mq.ReplyOK(agtmq.NewStorageCheckResp(loaded)) -} - -func (svc *Service) StorageGC(msg *agtmq.StorageGC) (*agtmq.StorageGCResp, *mq.CodeMessage) { - coorCli, err := stgglb.CoordinatorMQPool.Acquire() - if err != nil { - return nil, mq.Failed(errorcode.OperationFailed, err.Error()) - } - defer stgglb.CoordinatorMQPool.Release(coorCli) - - shared, err := svc.stgMgr.GetSharedStore(msg.StorageID) - if err != nil { - return nil, mq.Failed(errorcode.OperationFailed, err.Error()) - } - - var loadeds []stgmod.LoadedPackageID - for _, pkg := range msg.Packages { - loadeds = append(loadeds, stgmod.LoadedPackageID{ - UserID: pkg.UserID, - PackageID: pkg.PackageID, - }) - } - - err = shared.PackageGC(loadeds) - if err != nil { - return nil, mq.Failed(errorcode.OperationFailed, err.Error()) - } - - return mq.ReplyOK(agtmq.RespStorageGC()) -} - func (svc *Service) StartStorageCreatePackage(msg *agtmq.StartStorageCreatePackage) (*agtmq.StartStorageCreatePackageResp, *mq.CodeMessage) { return nil, mq.Failed(errorcode.OperationFailed, "not implemented") // coorCli, err := stgglb.CoordinatorMQPool.Acquire() diff --git a/agent/internal/task/cache_move_package.go b/agent/internal/task/cache_move_package.go index a6f51f8..404d7ec 100644 --- a/agent/internal/task/cache_move_package.go +++ b/agent/internal/task/cache_move_package.go @@ 
-39,7 +39,7 @@ func (t *CacheMovePackage) do(ctx TaskContext) error { log.Debugf("begin with %v", logger.FormatStruct(t)) defer log.Debugf("end") - store, err := ctx.stgMgr.GetShardStore(t.storageID) + store, err := ctx.stgAgts.GetShardStore(t.storageID) if err != nil { return fmt.Errorf("get shard store of storage %v: %w", t.storageID, err) } diff --git a/agent/internal/task/create_package.go b/agent/internal/task/create_package.go index 1ce6104..6354938 100644 --- a/agent/internal/task/create_package.go +++ b/agent/internal/task/create_package.go @@ -85,7 +85,7 @@ func (t *CreatePackage) Execute(task *task.Task[TaskContext], ctx TaskContext, c return } - up, err := ctx.uploader.BeginUpdate(t.userID, createResp.Package.PackageID, t.stgAffinity) + up, err := ctx.uploader.BeginUpdate(t.userID, createResp.Package.PackageID, t.stgAffinity, nil, nil) if err != nil { err = fmt.Errorf("begin update: %w", err) log.Error(err.Error()) diff --git a/agent/internal/task/storage_load_package.go b/agent/internal/task/storage_load_package.go deleted file mode 100644 index 02436d0..0000000 --- a/agent/internal/task/storage_load_package.go +++ /dev/null @@ -1,339 +0,0 @@ -package task - -import ( - "fmt" - "io" - "math" - "time" - - "github.com/samber/lo" - "gitlink.org.cn/cloudream/common/pkgs/bitmap" - "gitlink.org.cn/cloudream/common/pkgs/logger" - "gitlink.org.cn/cloudream/common/pkgs/task" - cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/common/utils/io2" - "gitlink.org.cn/cloudream/common/utils/reflect2" - "gitlink.org.cn/cloudream/common/utils/sort2" - "gitlink.org.cn/cloudream/storage/common/consts" - stgglb "gitlink.org.cn/cloudream/storage/common/globals" - stgmod "gitlink.org.cn/cloudream/storage/common/models" - "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" - "gitlink.org.cn/cloudream/storage/common/pkgs/ec" - coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" - 
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/utils" -) - -type StorageLoadPackage struct { - PackagePath string - LocalBase string - RemoteBase string - - userID cdssdk.UserID - packageID cdssdk.PackageID - storageID cdssdk.StorageID - pinnedBlocks []stgmod.ObjectBlock -} - -func NewStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) *StorageLoadPackage { - return &StorageLoadPackage{ - userID: userID, - packageID: packageID, - storageID: storageID, - } -} -func (t *StorageLoadPackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) { - startTime := time.Now() - log := logger.WithType[StorageLoadPackage]("Task") - log.WithField("TaskID", task.ID()). - Infof("begin to load package %v to %v", t.packageID, t.storageID) - - err := t.do(task, ctx) - if err == nil { - log.WithField("TaskID", task.ID()). - Infof("loading success, cost: %v", time.Since(startTime)) - } else { - log.WithField("TaskID", task.ID()). 
- Warnf("loading package: %v, cost: %v", err, time.Since(startTime)) - } - - complete(err, CompleteOption{ - RemovingDelay: time.Minute, - }) -} - -func (t *StorageLoadPackage) do(task *task.Task[TaskContext], ctx TaskContext) error { - coorCli, err := stgglb.CoordinatorMQPool.Acquire() - if err != nil { - return fmt.Errorf("new coordinator client: %w", err) - } - defer stgglb.CoordinatorMQPool.Release(coorCli) - - shared, err := ctx.stgMgr.GetSharedStore(t.storageID) - if err != nil { - return fmt.Errorf("get shared store of storage %v: %w", t.storageID, err) - } - t.PackagePath = utils.MakeLoadedPackagePath(t.userID, t.packageID) - - getObjectDetails, err := coorCli.GetPackageObjectDetails(coormq.ReqGetPackageObjectDetails(t.packageID)) - if err != nil { - return fmt.Errorf("getting package object details: %w", err) - } - - shardstore, err := ctx.stgMgr.GetShardStore(t.storageID) - if err != nil { - return fmt.Errorf("get shard store of storage %v: %w", t.storageID, err) - } - - mutex, err := reqbuilder.NewBuilder(). - // 提前占位 - Metadata().StoragePackage().CreateOne(t.userID, t.storageID, t.packageID). - // 保护在storage目录中下载的文件 - Storage().Buzy(t.storageID). - // 保护下载文件时同时保存到IPFS的文件 - Shard().Buzy(t.storageID). 
- MutexLock(ctx.distlock) - if err != nil { - return fmt.Errorf("acquire locks failed, err: %w", err) - } - defer mutex.Unlock() - - for _, obj := range getObjectDetails.Objects { - err := t.downloadOne(coorCli, shardstore, shared, obj) - if err != nil { - return err - } - ctx.accessStat.AddAccessCounter(obj.Object.ObjectID, t.packageID, t.storageID, 1) - } - - _, err = coorCli.StoragePackageLoaded(coormq.NewStoragePackageLoaded(t.userID, t.storageID, t.packageID, t.pinnedBlocks)) - if err != nil { - return fmt.Errorf("loading package to storage: %w", err) - } - - // TODO 要防止下载的临时文件被删除 - return err -} - -func (t *StorageLoadPackage) downloadOne(coorCli *coormq.Client, shardStore types.ShardStore, shared types.SharedStore, obj stgmod.ObjectDetail) error { - var file io.ReadCloser - - switch red := obj.Object.Redundancy.(type) { - case *cdssdk.NoneRedundancy: - reader, err := t.downloadNoneOrRepObject(shardStore, obj) - if err != nil { - return fmt.Errorf("downloading object: %w", err) - } - file = reader - - case *cdssdk.RepRedundancy: - reader, err := t.downloadNoneOrRepObject(shardStore, obj) - if err != nil { - return fmt.Errorf("downloading rep object: %w", err) - } - file = reader - - case *cdssdk.ECRedundancy: - reader, pinnedBlocks, err := t.downloadECObject(coorCli, shardStore, obj, red) - if err != nil { - return fmt.Errorf("downloading ec object: %w", err) - } - file = reader - t.pinnedBlocks = append(t.pinnedBlocks, pinnedBlocks...) 
- - default: - return fmt.Errorf("unknow redundancy type: %v", reflect2.TypeOfValue(obj.Object.Redundancy)) - } - defer file.Close() - - if _, err := shared.WritePackageObject(t.userID, t.packageID, obj.Object.Path, file); err != nil { - return fmt.Errorf("writting object to file: %w", err) - } - - return nil -} - -func (t *StorageLoadPackage) downloadNoneOrRepObject(shardStore types.ShardStore, obj stgmod.ObjectDetail) (io.ReadCloser, error) { - if len(obj.Blocks) == 0 && len(obj.PinnedAt) == 0 { - return nil, fmt.Errorf("no storage has this object") - } - - file, err := shardStore.Open(types.NewOpen(obj.Object.FileHash)) - if err != nil { - return nil, err - } - - return file, nil -} - -func (t *StorageLoadPackage) downloadECObject(coorCli *coormq.Client, shardStore types.ShardStore, obj stgmod.ObjectDetail, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, []stgmod.ObjectBlock, error) { - allStorages, err := t.sortDownloadStorages(coorCli, obj) - if err != nil { - return nil, nil, err - } - bsc, blocks := t.getMinReadingBlockSolution(allStorages, ecRed.K) - osc, _ := t.getMinReadingObjectSolution(allStorages, ecRed.K) - if bsc < osc { - var fileStrs []io.ReadCloser - - rs, err := ec.NewStreamRs(ecRed.K, ecRed.N, ecRed.ChunkSize) - if err != nil { - return nil, nil, fmt.Errorf("new rs: %w", err) - } - - for i := range blocks { - str, err := shardStore.Open(types.NewOpen(blocks[i].Block.FileHash)) - if err != nil { - for i -= 1; i >= 0; i-- { - fileStrs[i].Close() - } - return nil, nil, fmt.Errorf("donwloading file: %w", err) - } - - fileStrs = append(fileStrs, str) - } - - fileReaders, filesCloser := io2.ToReaders(fileStrs) - - var indexes []int - for _, b := range blocks { - indexes = append(indexes, b.Block.Index) - } - - outputs, outputsCloser := io2.ToReaders(rs.ReconstructData(fileReaders, indexes)) - return io2.AfterReadClosed(io2.Length(io2.ChunkedJoin(outputs, int(ecRed.ChunkSize)), obj.Object.Size), func(c io.ReadCloser) { - filesCloser() - outputsCloser() - 
}), nil, nil - } - - // bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件 - if osc == math.MaxFloat64 { - return nil, nil, fmt.Errorf("no enough blocks to reconstruct the file, want %d, get only %d", ecRed.K, len(blocks)) - } - - // 如果是直接读取的文件,那么就不需要Pin文件块 - str, err := shardStore.Open(types.NewOpen(obj.Object.FileHash)) - return str, nil, err -} - -type downloadStorageInfo struct { - Storage stgmod.StorageDetail - ObjectPinned bool - Blocks []stgmod.ObjectBlock - Distance float64 -} - -func (t *StorageLoadPackage) sortDownloadStorages(coorCli *coormq.Client, obj stgmod.ObjectDetail) ([]*downloadStorageInfo, error) { - var stgIDs []cdssdk.StorageID - for _, id := range obj.PinnedAt { - if !lo.Contains(stgIDs, id) { - stgIDs = append(stgIDs, id) - } - } - for _, b := range obj.Blocks { - if !lo.Contains(stgIDs, b.StorageID) { - stgIDs = append(stgIDs, b.StorageID) - } - } - - getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(stgIDs)) - if err != nil { - return nil, fmt.Errorf("getting storage details: %w", err) - } - allStgs := make(map[cdssdk.StorageID]stgmod.StorageDetail) - for _, stg := range getStgs.Storages { - allStgs[stg.Storage.StorageID] = *stg - } - - downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo) - for _, id := range obj.PinnedAt { - storage, ok := downloadStorageMap[id] - if !ok { - mod := allStgs[id] - storage = &downloadStorageInfo{ - Storage: mod, - ObjectPinned: true, - Distance: t.getStorageDistance(mod), - } - downloadStorageMap[id] = storage - } - - storage.ObjectPinned = true - } - - for _, b := range obj.Blocks { - storage, ok := downloadStorageMap[b.StorageID] - if !ok { - mod := allStgs[b.StorageID] - storage = &downloadStorageInfo{ - Storage: mod, - Distance: t.getStorageDistance(mod), - } - downloadStorageMap[b.StorageID] = storage - } - - storage.Blocks = append(storage.Blocks, b) - } - - return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int { - return 
sort2.Cmp(left.Distance, right.Distance) - }), nil -} - -type downloadBlock struct { - Storage stgmod.StorageDetail - Block stgmod.ObjectBlock -} - -func (t *StorageLoadPackage) getMinReadingBlockSolution(sortedStorages []*downloadStorageInfo, k int) (float64, []downloadBlock) { - gotBlocksMap := bitmap.Bitmap64(0) - var gotBlocks []downloadBlock - dist := float64(0.0) - for _, n := range sortedStorages { - for _, b := range n.Blocks { - if !gotBlocksMap.Get(b.Index) { - gotBlocks = append(gotBlocks, downloadBlock{ - Storage: n.Storage, - Block: b, - }) - gotBlocksMap.Set(b.Index, true) - dist += n.Distance - } - - if len(gotBlocks) >= k { - return dist, gotBlocks - } - } - } - - return math.MaxFloat64, gotBlocks -} - -func (t *StorageLoadPackage) getMinReadingObjectSolution(sortedStorages []*downloadStorageInfo, k int) (float64, *stgmod.StorageDetail) { - dist := math.MaxFloat64 - var downloadStg *stgmod.StorageDetail - for _, n := range sortedStorages { - if n.ObjectPinned && float64(k)*n.Distance < dist { - dist = float64(k) * n.Distance - stg := n.Storage - downloadStg = &stg - } - } - - return dist, downloadStg -} - -func (t *StorageLoadPackage) getStorageDistance(stg stgmod.StorageDetail) float64 { - if stgglb.Local.HubID != nil { - if stg.MasterHub.HubID == *stgglb.Local.HubID { - return consts.StorageDistanceSameStorage - } - } - - if stg.MasterHub.LocationID == stgglb.Local.LocationID { - return consts.StorageDistanceSameLocation - } - - return consts.StorageDistanceOther -} diff --git a/agent/internal/task/task.go b/agent/internal/task/task.go index 856ebb4..4080005 100644 --- a/agent/internal/task/task.go +++ b/agent/internal/task/task.go @@ -6,7 +6,7 @@ import ( "gitlink.org.cn/cloudream/storage/common/pkgs/accessstat" "gitlink.org.cn/cloudream/storage/common/pkgs/connectivity" "gitlink.org.cn/cloudream/storage/common/pkgs/downloader" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + 
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" "gitlink.org.cn/cloudream/storage/common/pkgs/uploader" ) @@ -16,7 +16,7 @@ type TaskContext struct { connectivity *connectivity.Collector downloader *downloader.Downloader accessStat *accessstat.AccessStat - stgMgr *svcmgr.Manager + stgAgts *agtpool.AgentPool uploader *uploader.Uploader } @@ -35,13 +35,13 @@ type Task = task.Task[TaskContext] // CompleteOption 类型定义了任务完成时的选项,可用于定制化任务完成的处理方式 type CompleteOption = task.CompleteOption -func NewManager(distlock *distlock.Service, connectivity *connectivity.Collector, downloader *downloader.Downloader, accessStat *accessstat.AccessStat, stgMgr *svcmgr.Manager, uploader *uploader.Uploader) Manager { +func NewManager(distlock *distlock.Service, connectivity *connectivity.Collector, downloader *downloader.Downloader, accessStat *accessstat.AccessStat, stgAgts *agtpool.AgentPool, uploader *uploader.Uploader) Manager { return task.NewManager(TaskContext{ distlock: distlock, connectivity: connectivity, downloader: downloader, accessStat: accessStat, - stgMgr: stgMgr, + stgAgts: stgAgts, uploader: uploader, }) } diff --git a/client/internal/cmdline/getp.go b/client/internal/cmdline/getp.go index d071373..ba2be44 100644 --- a/client/internal/cmdline/getp.go +++ b/client/internal/cmdline/getp.go @@ -52,7 +52,7 @@ func getpByPath(cmdCtx *CommandContext, path string, output string) { return } - pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByName(userID, comps[0], comps[1]) + pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1]) if err != nil { fmt.Println(err) return diff --git a/client/internal/cmdline/load.go b/client/internal/cmdline/load.go index 30942c4..4e976b5 100644 --- a/client/internal/cmdline/load.go +++ b/client/internal/cmdline/load.go @@ -15,7 +15,7 @@ func init() { cmd := cobra.Command{ Use: "load", Short: "Load data from CDS to a storage service", - Args: cobra.ExactArgs(2), + Args: cobra.ExactArgs(3), Run: func(cmd 
*cobra.Command, args []string) { cmdCtx := GetCmdCtx(cmd) @@ -30,9 +30,9 @@ func init() { fmt.Printf("Invalid storage ID: %s\n", args[1]) } - loadByID(cmdCtx, cdssdk.PackageID(pkgID), cdssdk.StorageID(stgID)) + loadByID(cmdCtx, cdssdk.PackageID(pkgID), cdssdk.StorageID(stgID), args[2]) } else { - loadByPath(cmdCtx, args[0], args[1]) + loadByPath(cmdCtx, args[0], args[1], args[2]) } }, } @@ -40,7 +40,7 @@ func init() { rootCmd.AddCommand(&cmd) } -func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string) { +func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string, rootPath string) { userID := cdssdk.UserID(1) comps := strings.Split(strings.Trim(pkgPath, cdssdk.ObjectPathSeparator), cdssdk.ObjectPathSeparator) @@ -49,7 +49,7 @@ func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string) { return } - pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByName(userID, comps[0], comps[1]) + pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1]) if err != nil { fmt.Println(err) return @@ -61,29 +61,18 @@ func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string) { return } - loadByID(cmdCtx, pkg.PackageID, stg.StorageID) + loadByID(cmdCtx, pkg.PackageID, stg.StorageID, rootPath) } -func loadByID(cmdCtx *CommandContext, pkgID cdssdk.PackageID, stgID cdssdk.StorageID) { +func loadByID(cmdCtx *CommandContext, pkgID cdssdk.PackageID, stgID cdssdk.StorageID, rootPath string) { userID := cdssdk.UserID(1) startTime := time.Now() - hubID, taskID, err := cmdCtx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(userID, pkgID, stgID) + err := cmdCtx.Cmdline.Svc.StorageSvc().LoadPackage(userID, pkgID, stgID, rootPath) if err != nil { fmt.Println(err) return } - for { - complete, fullPath, err := cmdCtx.Cmdline.Svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10) - if err != nil { - fmt.Println(err) - return - } - - if complete { - fmt.Printf("Package loaded to: %s in %v\n", fullPath, 
time.Since(startTime)) - break - } - } + fmt.Printf("Package loaded to: %v:%v in %v\n", stgID, rootPath, time.Since(startTime)) } diff --git a/client/internal/cmdline/lsp.go b/client/internal/cmdline/lsp.go index 9bcc3a3..3393cdc 100644 --- a/client/internal/cmdline/lsp.go +++ b/client/internal/cmdline/lsp.go @@ -46,7 +46,7 @@ func lspByPath(cmdCtx *CommandContext, path string) { return } - pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByName(userID, comps[0], comps[1]) + pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1]) if err != nil { fmt.Println(err) return diff --git a/client/internal/cmdline/newloadp.go b/client/internal/cmdline/newloadp.go index 08c48d3..005513f 100644 --- a/client/internal/cmdline/newloadp.go +++ b/client/internal/cmdline/newloadp.go @@ -29,26 +29,34 @@ func init() { packageName := args[2] storageIDs := make([]cdssdk.StorageID, 0) - for _, sID := range args[3:] { - sID, err := strconv.ParseInt(sID, 10, 64) + rootPathes := make([]string, 0) + for _, dst := range args[3:] { + comps := strings.Split(dst, ":") + if len(comps) != 2 { + fmt.Println("invalid storage destination: ", dst) + return + } + + sID, err := strconv.ParseInt(comps[0], 10, 64) if err != nil { fmt.Println(err) return } storageIDs = append(storageIDs, cdssdk.StorageID(sID)) + rootPathes = append(rootPathes, comps[1]) } - newloadp(cmdCtx, localPath, cdssdk.BucketID(bktID), packageName, storageIDs) + newloadp(cmdCtx, localPath, cdssdk.BucketID(bktID), packageName, storageIDs, rootPathes) }, } rootCmd.AddCommand(cmd) } -func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, packageName string, storageIDs []cdssdk.StorageID) { +func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, packageName string, storageIDs []cdssdk.StorageID, rootPathes []string) { userID := cdssdk.UserID(1) - up, err := cmdCtx.Cmdline.Svc.Uploader.BeginCreateLoad(userID, bucketID, packageName, storageIDs) + up, err := 
cmdCtx.Cmdline.Svc.Uploader.BeginCreateLoad(userID, bucketID, packageName, storageIDs, rootPathes) if err != nil { fmt.Println(err) return @@ -94,7 +102,7 @@ func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, pac } wr := table.NewWriter() - wr.AppendHeader(table.Row{"ID", "Name", "FileCount", "TotalSize", "LoadedDirs"}) - wr.AppendRow(table.Row{ret.Package.PackageID, ret.Package.Name, fileCount, totalSize, strings.Join(ret.LoadedDirs, "\n")}) + wr.AppendHeader(table.Row{"ID", "Name", "FileCount", "TotalSize"}) + wr.AppendRow(table.Row{ret.Package.PackageID, ret.Package.Name, fileCount, totalSize}) fmt.Println(wr.Render()) } diff --git a/client/internal/cmdline/object.go b/client/internal/cmdline/object.go index f0744ab..c3791d5 100644 --- a/client/internal/cmdline/object.go +++ b/client/internal/cmdline/object.go @@ -33,7 +33,7 @@ var _ = MustAddCmd(func(ctx CommandContext, packageID cdssdk.PackageID, rootPath storageAff = storageAffinity[0] } - up, err := ctx.Cmdline.Svc.Uploader.BeginUpdate(userID, packageID, storageAff) + up, err := ctx.Cmdline.Svc.Uploader.BeginUpdate(userID, packageID, storageAff, nil, nil) if err != nil { return fmt.Errorf("begin updating package: %w", err) } diff --git a/client/internal/cmdline/package.go b/client/internal/cmdline/package.go index a843cd0..32ed829 100644 --- a/client/internal/cmdline/package.go +++ b/client/internal/cmdline/package.go @@ -181,26 +181,6 @@ func PackageGetCachedStorages(ctx CommandContext, packageID cdssdk.PackageID) er return nil } -// PackageGetLoadedStorages 获取指定包裹的已加载节点信息。 -// -// 参数: -// -// ctx - 命令上下文。 -// packageID - 包裹ID。 -// -// 返回值: -// -// error - 操作过程中发生的任何错误。 -func PackageGetLoadedStorages(ctx CommandContext, packageID cdssdk.PackageID) error { - userID := cdssdk.UserID(1) - hubIDs, err := ctx.Cmdline.Svc.PackageSvc().GetLoadedStorages(userID, packageID) - fmt.Printf("hubIDs: %v\n", hubIDs) - if err != nil { - return fmt.Errorf("get package %d loaded storages failed, 
err: %w", packageID, err) - } - return nil -} - // 初始化命令行工具的包相关命令。 func init() { commands.MustAdd(PackageListBucketPackages, "pkg", "ls") @@ -213,7 +193,4 @@ func init() { // 查询package缓存到哪些节点 commands.MustAdd(PackageGetCachedStorages, "pkg", "cached") - - // 查询package调度到哪些节点 - commands.MustAdd(PackageGetLoadedStorages, "pkg", "loaded") } diff --git a/client/internal/cmdline/put.go b/client/internal/cmdline/put.go index bac7a36..a540518 100644 --- a/client/internal/cmdline/put.go +++ b/client/internal/cmdline/put.go @@ -48,7 +48,7 @@ func init() { return } - pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByName(userID, comps[0], comps[1]) + pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1]) if err != nil { if codeMsg, ok := err.(*mq.CodeMessageError); ok && codeMsg.Code == errorcode.DataNotFound { pkg2, err := cmdCtx.Cmdline.Svc.PackageSvc().Create(userID, bkt.BucketID, comps[1]) @@ -68,7 +68,7 @@ func init() { storageAff = cdssdk.StorageID(stgID) } - up, err := cmdCtx.Cmdline.Svc.Uploader.BeginUpdate(userID, pkg.PackageID, storageAff) + up, err := cmdCtx.Cmdline.Svc.Uploader.BeginUpdate(userID, pkg.PackageID, storageAff, nil, nil) if err != nil { fmt.Printf("begin updating package: %v\n", err) return diff --git a/client/internal/cmdline/serve.go b/client/internal/cmdline/serve.go index 2e27c59..95f8da0 100644 --- a/client/internal/cmdline/serve.go +++ b/client/internal/cmdline/serve.go @@ -3,6 +3,7 @@ package cmdline import ( "fmt" + "gitlink.org.cn/cloudream/storage/client/internal/config" "gitlink.org.cn/cloudream/storage/client/internal/http" ) @@ -17,8 +18,13 @@ func ServeHTTP(ctx CommandContext, args []string) error { listenAddr = args[0] } + awsAuth, err := http.NewAWSAuth(config.Cfg().AuthAccessKey, config.Cfg().AuthSecretKey) + if err != nil { + return fmt.Errorf("new aws auth: %w", err) + } + // 创建一个新的HTTP服务器实例。 - httpSvr, err := http.NewServer(listenAddr, ctx.Cmdline.Svc) + httpSvr, err := http.NewServer(listenAddr, 
ctx.Cmdline.Svc, awsAuth) if err != nil { return fmt.Errorf("new http server: %w", err) } diff --git a/client/internal/cmdline/storage.go b/client/internal/cmdline/storage.go index c1eadf9..bb6c09a 100644 --- a/client/internal/cmdline/storage.go +++ b/client/internal/cmdline/storage.go @@ -7,42 +7,6 @@ import ( cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" ) -// StorageLoadPackage 加载指定的包到存储系统中。 -// ctx: 命令上下文,提供必要的服务和环境配置。 -// packageID: 需要加载的包的唯一标识。 -// storageID: 目标存储系统的唯一标识。 -// 返回值: 执行过程中遇到的任何错误。 -func StorageLoadPackage(ctx CommandContext, packageID cdssdk.PackageID, storageID cdssdk.StorageID) error { - startTime := time.Now() - defer func() { - // 打印函数执行时间 - fmt.Printf("%v\n", time.Since(startTime).Seconds()) - }() - - // 开始加载包到存储系统 - hubID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(1, packageID, storageID) - if err != nil { - return fmt.Errorf("start loading package to storage: %w", err) - } - - // 循环等待加载完成 - for { - complete, fullPath, err := ctx.Cmdline.Svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10) - if complete { - if err != nil { - return fmt.Errorf("moving complete with: %w", err) - } - - fmt.Printf("Load To: %s\n", fullPath) - return nil - } - - if err != nil { - return fmt.Errorf("wait moving: %w", err) - } - } -} - // StorageCreatePackage 创建一个新的包并上传到指定的存储系统。 // ctx: 命令上下文,提供必要的服务和环境配置。 // bucketID: 存储桶的唯一标识,包将被上传到这个存储桶中。 @@ -83,9 +47,6 @@ func StorageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name str // 初始化函数,注册加载包和创建包的命令到命令行解析器。 func init() { - // 注册加载包命令 - commands.MustAdd(StorageLoadPackage, "stg", "pkg", "load") - // 注册创建包命令 commands.MustAdd(StorageCreatePackage, "stg", "pkg", "new") } diff --git a/client/internal/cmdline/test.go b/client/internal/cmdline/test.go index 3d80c83..5f7edb4 100644 --- a/client/internal/cmdline/test.go +++ b/client/internal/cmdline/test.go @@ -9,6 +9,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/future" 
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/math2" stgglb "gitlink.org.cn/cloudream/storage/common/globals" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser" @@ -26,20 +27,20 @@ func init() { } defer stgglb.CoordinatorMQPool.Release(coorCli) - stgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{1, 2, 3})) + stgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{1, 2, 3, 4})) if err != nil { panic(err) } ft := ioswitch2.NewFromTo() ft.SegmentParam = cdssdk.NewSegmentRedundancy(1024*100*3, 3) - ft.AddFrom(ioswitch2.NewFromShardstore("FullE58B075E9F7C5744CB1C2CBBECC30F163DE699DCDA94641DDA34A0C2EB01E240", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0))) - ft.AddFrom(ioswitch2.NewFromShardstore("FullEA14D17544786427C3A766F0C5E6DEB221D00D3DE1875BBE3BD0AD5C8118C1A0", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(1))) - ft.AddFrom(ioswitch2.NewFromShardstore("Full4D142C458F2399175232D5636235B09A84664D60869E925EB20FFBE931045BDD", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(2))) - ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[2].MasterHub, *stgs.Storages[2], ioswitch2.RawStream(), "0")) - // ft.AddFrom(ioswitch2.NewFromShardstore("CA56E5934859E0220D1F3B848F41619D937D7B874D4EBF63A6CC98D2D8E3280F", *stgs.Storages[0].MasterHub, stgs.Storages[0].Storage, ioswitch2.RawStream())) + // ft.AddFrom(ioswitch2.NewFromShardstore("FullE58B075E9F7C5744CB1C2CBBECC30F163DE699DCDA94641DDA34A0C2EB01E240", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(0))) + // ft.AddFrom(ioswitch2.NewFromShardstore("FullEA14D17544786427C3A766F0C5E6DEB221D00D3DE1875BBE3BD0AD5C8118C1A0", *stgs.Storages[1].MasterHub, *stgs.Storages[1], 
ioswitch2.SegmentStream(1))) + // ft.AddFrom(ioswitch2.NewFromShardstore("Full4D142C458F2399175232D5636235B09A84664D60869E925EB20FFBE931045BDD", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2))) + ft.AddFrom(ioswitch2.NewFromShardstore("Full03B5CF4B57251D7BB4308FE5C81AF5A21E2B28994CC7CB1FB37698DAE271DC22", *stgs.Storages[2].MasterHub, *stgs.Storages[2], ioswitch2.RawStream())) + ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[3].MasterHub, *stgs.Storages[3], ioswitch2.RawStream(), "0")) // ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0), "0")) - // ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1", exec.Range{Offset: 1})) + // ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1", math2.Range{Offset: 1})) // ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(0), "0")) // ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1")) // ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2), "2")) @@ -86,9 +87,9 @@ func init() { ft := ioswitch2.NewFromTo() ft.SegmentParam = cdssdk.NewSegmentRedundancy(1293, 3) - ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[0].MasterHub, stgs.Storages[0].Storage, ioswitch2.RawStream())) + ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.RawStream())) // ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0), "0")) - 
ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1", exec.Range{Offset: 1})) + ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1", math2.Range{Offset: 1})) ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2), "2")) plans := exec.NewPlanBuilder() @@ -133,15 +134,15 @@ func init() { ft := ioswitch2.NewFromTo() ft.SegmentParam = cdssdk.NewSegmentRedundancy(1293, 3) ft.ECParam = &cdssdk.DefaultECRedundancy - ft.AddFrom(ioswitch2.NewFromShardstore("22CC59CE3297F78F2D20DC1E33181B77F21E6782097C94E1664F99F129834069", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0))) - ft.AddFrom(ioswitch2.NewFromShardstore("5EAC20EB3EBC7B5FA176C5BD1C01041FB2A6D14C35D6A232CA83D7F1E4B01ADE", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(1))) - ft.AddFrom(ioswitch2.NewFromShardstore("A9BC1802F37100C80C72A1D6E8F53C0E0B73F85F99153D8C78FB01CEC9D8D903", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(2))) + ft.AddFrom(ioswitch2.NewFromShardstore("22CC59CE3297F78F2D20DC1E33181B77F21E6782097C94E1664F99F129834069", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(0))) + ft.AddFrom(ioswitch2.NewFromShardstore("5EAC20EB3EBC7B5FA176C5BD1C01041FB2A6D14C35D6A232CA83D7F1E4B01ADE", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1))) + ft.AddFrom(ioswitch2.NewFromShardstore("A9BC1802F37100C80C72A1D6E8F53C0E0B73F85F99153D8C78FB01CEC9D8D903", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2))) - toDrv, drvStr := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), exec.NewRange(0, 1293)) + toDrv, drvStr := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), math2.NewRange(0, 1293)) ft.AddTo(toDrv) - 
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(0), "EC0")) - ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(1), "EC1")) - ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(2), "EC2")) + ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(0), "EC0")) + ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(1), "EC1")) + ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(2), "EC2")) plans := exec.NewPlanBuilder() err = parser.Parse(ft, plans) @@ -202,10 +203,10 @@ func init() { ft := ioswitch2.NewFromTo() ft.ECParam = &cdssdk.DefaultECRedundancy - ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.RawStream())) - ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(0), "EC0")) - ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(1), "EC1")) - ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(2), "EC2")) + ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.RawStream())) + ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(0), "EC0")) + ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(1), "EC1")) + ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(2), "EC2")) plans := exec.NewPlanBuilder() err = parser.Parse(ft, plans) @@ -253,10 
+254,10 @@ func init() { ft := ioswitch2.NewFromTo() ft.SegmentParam = cdssdk.NewSegmentRedundancy(1293, 3) ft.ECParam = &cdssdk.DefaultECRedundancy - ft.AddFrom(ioswitch2.NewFromShardstore("22CC59CE3297F78F2D20DC1E33181B77F21E6782097C94E1664F99F129834069", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0))) - ft.AddFrom(ioswitch2.NewFromShardstore("5EAC20EB3EBC7B5FA176C5BD1C01041FB2A6D14C35D6A232CA83D7F1E4B01ADE", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(1))) - ft.AddFrom(ioswitch2.NewFromShardstore("A9BC1802F37100C80C72A1D6E8F53C0E0B73F85F99153D8C78FB01CEC9D8D903", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(2))) - ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.RawStream(), "raw", exec.NewRange(10, 645))) + ft.AddFrom(ioswitch2.NewFromShardstore("22CC59CE3297F78F2D20DC1E33181B77F21E6782097C94E1664F99F129834069", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(0))) + ft.AddFrom(ioswitch2.NewFromShardstore("5EAC20EB3EBC7B5FA176C5BD1C01041FB2A6D14C35D6A232CA83D7F1E4B01ADE", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1))) + ft.AddFrom(ioswitch2.NewFromShardstore("A9BC1802F37100C80C72A1D6E8F53C0E0B73F85F99153D8C78FB01CEC9D8D903", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2))) + ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.RawStream(), "raw", math2.NewRange(10, 645))) plans := exec.NewPlanBuilder() err = parser.Parse(ft, plans) diff --git a/client/internal/config/config.go b/client/internal/config/config.go index b0cbb24..9f556f5 100644 --- a/client/internal/config/config.go +++ b/client/internal/config/config.go @@ -3,24 +3,29 @@ package config import ( "gitlink.org.cn/cloudream/common/pkgs/distlock" "gitlink.org.cn/cloudream/common/pkgs/logger" + 
"gitlink.org.cn/cloudream/common/pkgs/mq" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" "gitlink.org.cn/cloudream/common/utils/config" stgmodels "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/connectivity" "gitlink.org.cn/cloudream/storage/common/pkgs/downloader" + "gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy" agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" - stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq" ) type Config struct { - Local stgmodels.LocalMachineInfo `json:"local"` - AgentGRPC agtrpc.PoolConfig `json:"agentGRPC"` - Logger logger.Config `json:"logger"` - RabbitMQ stgmq.Config `json:"rabbitMQ"` - DistLock distlock.Config `json:"distlock"` - Connectivity connectivity.Config `json:"connectivity"` - Downloader downloader.Config `json:"downloader"` - StorageID cdssdk.StorageID `json:"storageID"` // TODO 进行访问量统计时,当前客户端所属的存储ID。临时解决方案。 + Local stgmodels.LocalMachineInfo `json:"local"` + AgentGRPC agtrpc.PoolConfig `json:"agentGRPC"` + Logger logger.Config `json:"logger"` + RabbitMQ mq.Config `json:"rabbitMQ"` + DistLock distlock.Config `json:"distlock"` + Connectivity connectivity.Config `json:"connectivity"` + Downloader downloader.Config `json:"downloader"` + DownloadStrategy strategy.Config `json:"downloadStrategy"` + StorageID cdssdk.StorageID `json:"storageID"` // TODO 进行访问量统计时,当前客户端所属的存储ID。临时解决方案。 + AuthAccessKey string `json:"authAccessKey"` // TODO 临时办法 + AuthSecretKey string `json:"authSecretKey"` + MaxHTTPBodySize int64 `json:"maxHttpBodySize"` } var cfg Config diff --git a/client/internal/http/aws_auth.go b/client/internal/http/aws_auth.go new file mode 100644 index 0000000..4a6d283 --- /dev/null +++ b/client/internal/http/aws_auth.go @@ -0,0 +1,197 @@ +package http + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + v4 
"github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/gin-gonic/gin" + "gitlink.org.cn/cloudream/common/consts/errorcode" + "gitlink.org.cn/cloudream/common/pkgs/logger" + "gitlink.org.cn/cloudream/storage/client/internal/config" +) + +const ( + AuthRegion = "any" + AuthService = "jcs" + AuthorizationHeader = "Authorization" +) + +type AWSAuth struct { + cred aws.Credentials + signer *v4.Signer +} + +func NewAWSAuth(accessKey string, secretKey string) (*AWSAuth, error) { + prod := credentials.NewStaticCredentialsProvider(accessKey, secretKey, "") + cred, err := prod.Retrieve(context.TODO()) + if err != nil { + return nil, err + } + + return &AWSAuth{ + cred: cred, + signer: v4.NewSigner(), + }, nil +} + +func (a *AWSAuth) Auth(c *gin.Context) { + authorizationHeader := c.GetHeader(AuthorizationHeader) + if authorizationHeader == "" { + c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.Unauthorized, "authorization header is missing")) + return + } + + _, headers, reqSig, err := parseAuthorizationHeader(authorizationHeader) + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.Unauthorized, "invalid Authorization header format")) + return + } + + // 限制请求体大小 + rd := io.LimitReader(c.Request.Body, config.Cfg().MaxHTTPBodySize) + body, err := io.ReadAll(rd) + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "read request body failed")) + return + } + + payloadHash := sha256.Sum256(body) + hexPayloadHash := hex.EncodeToString(payloadHash[:]) + + // 构造验签用的请求 + verifyReq, err := http.NewRequest(c.Request.Method, c.Request.URL.String(), nil) + if err != nil { + c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.OperationFailed, err.Error())) + return + } + for _, h := range headers { + verifyReq.Header.Add(h, c.Request.Header.Get(h)) + } + verifyReq.Host = c.Request.Host + + timestamp, err := time.Parse("20060102T150405Z", 
c.GetHeader("X-Amz-Date")) + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "invalid X-Amz-Date header format")) + return + } + + signer := v4.NewSigner() + err = signer.SignHTTP(context.TODO(), a.cred, verifyReq, hexPayloadHash, AuthService, AuthRegion, timestamp) + if err != nil { + logger.Warnf("sign request: %v", err) + c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.OperationFailed, "sign request failed")) + return + } + + verifySig := a.getSignature(verifyReq) + if !strings.EqualFold(verifySig, reqSig) { + c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.Unauthorized, "signature mismatch")) + return + } + + c.Request.Body = io.NopCloser(bytes.NewReader(body)) + + c.Next() +} + +func (a *AWSAuth) AuthWithoutBody(c *gin.Context) { + authorizationHeader := c.GetHeader(AuthorizationHeader) + if authorizationHeader == "" { + c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.Unauthorized, "authorization header is missing")) + return + } + + _, headers, reqSig, err := parseAuthorizationHeader(authorizationHeader) + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.Unauthorized, "invalid Authorization header format")) + return + } + + // 构造验签用的请求 + verifyReq, err := http.NewRequest(c.Request.Method, c.Request.URL.String(), nil) + if err != nil { + c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.OperationFailed, err.Error())) + return + } + for _, h := range headers { + verifyReq.Header.Add(h, c.Request.Header.Get(h)) + } + verifyReq.Host = c.Request.Host + + timestamp, err := time.Parse("20060102T150405Z", c.GetHeader("X-Amz-Date")) + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "invalid X-Amz-Date header format")) + return + } + + err = a.signer.SignHTTP(context.TODO(), a.cred, verifyReq, "", AuthService, AuthRegion, timestamp) + if err != nil { + logger.Warnf("sign request: %v", err) + 
c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.OperationFailed, "sign request failed")) + return + } + + verifySig := a.getSignature(verifyReq) + if !strings.EqualFold(verifySig, reqSig) { + c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.Unauthorized, "signature mismatch")) + return + } + + c.Next() +} + +// 解析 Authorization 头部 +func parseAuthorizationHeader(authorizationHeader string) (string, []string, string, error) { + if !strings.HasPrefix(authorizationHeader, "AWS4-HMAC-SHA256 ") { + return "", nil, "", fmt.Errorf("invalid Authorization header format") + } + + authorizationHeader = strings.TrimPrefix(authorizationHeader, "AWS4-HMAC-SHA256") + + parts := strings.Split(authorizationHeader, ",") + if len(parts) != 3 { + return "", nil, "", fmt.Errorf("invalid Authorization header format") + } + + var credential, signedHeaders, signature string + for _, part := range parts { + part = strings.TrimSpace(part) + + if strings.HasPrefix(part, "Credential=") { + credential = strings.TrimPrefix(part, "Credential=") + } + if strings.HasPrefix(part, "SignedHeaders=") { + signedHeaders = strings.TrimPrefix(part, "SignedHeaders=") + } + if strings.HasPrefix(part, "Signature=") { + signature = strings.TrimPrefix(part, "Signature=") + } + } + + if credential == "" || signedHeaders == "" || signature == "" { + return "", nil, "", fmt.Errorf("missing necessary parts in Authorization header") + } + + headers := strings.Split(signedHeaders, ";") + return credential, headers, signature, nil +} + +func (a *AWSAuth) getSignature(req *http.Request) string { + auth := req.Header.Get(AuthorizationHeader) + idx := strings.Index(auth, "Signature=") + if idx == -1 { + return "" + } + + return auth[idx+len("Signature="):] +} diff --git a/client/internal/http/object.go b/client/internal/http/object.go index c535d5c..4f3ee37 100644 --- a/client/internal/http/object.go +++ b/client/internal/http/object.go @@ -28,10 +28,10 @@ func (s *Server) Object() *ObjectService { } } -func (s
*ObjectService) List(ctx *gin.Context) { - log := logger.WithField("HTTP", "Object.List") +func (s *ObjectService) ListByPath(ctx *gin.Context) { + log := logger.WithField("HTTP", "Object.ListByPath") - var req cdsapi.ObjectList + var req cdsapi.ObjectListByPath if err := ctx.ShouldBindQuery(&req); err != nil { log.Warnf("binding body: %s", err.Error()) ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) @@ -45,7 +45,27 @@ func (s *ObjectService) List(ctx *gin.Context) { return } - ctx.JSON(http.StatusOK, OK(cdsapi.ObjectListResp{Objects: objs})) + ctx.JSON(http.StatusOK, OK(cdsapi.ObjectListByPathResp{Objects: objs})) +} + +func (s *ObjectService) ListByIDs(ctx *gin.Context) { + log := logger.WithField("HTTP", "Object.ListByIDs") + + var req cdsapi.ObjectListByIDs + if err := ctx.ShouldBindJSON(&req); err != nil { + log.Warnf("binding body: %s", err.Error()) + ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) + return + } + + objs, err := s.svc.ObjectSvc().GetByIDs(req.UserID, req.ObjectIDs) + if err != nil { + log.Warnf("listing objects: %s", err.Error()) + ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("listing objects: %v", err))) + return + } + + ctx.JSON(http.StatusOK, OK(cdsapi.ObjectListByIDsResp{Objects: objs})) } type ObjectUploadReq struct { @@ -63,7 +83,7 @@ func (s *ObjectService) Upload(ctx *gin.Context) { return } - up, err := s.svc.Uploader.BeginUpdate(req.Info.UserID, req.Info.PackageID, req.Info.Affinity) + up, err := s.svc.Uploader.BeginUpdate(req.Info.UserID, req.Info.PackageID, req.Info.Affinity, req.Info.LoadTo, req.Info.LoadToPath) if err != nil { log.Warnf("begin update: %s", err.Error()) ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("begin update: %v", err))) @@ -138,6 +158,11 @@ func (s *ObjectService) Download(ctx *gin.Context) { ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, 
"download object failed")) return } + if file.File == nil { + log.Warnf("object not found: %d", req.ObjectID) + ctx.JSON(http.StatusOK, Failed(errorcode.DataNotFound, "object not found")) + return + } defer file.File.Close() ctx.Header("Content-Disposition", "attachment; filename="+url.PathEscape(path.Base(file.Object.Path))) @@ -338,6 +363,26 @@ func (s *ObjectService) DeleteByPath(ctx *gin.Context) { ctx.JSON(http.StatusOK, OK(nil)) } +func (s *ObjectService) Clone(ctx *gin.Context) { + log := logger.WithField("HTTP", "Object.Clone") + + var req cdsapi.ObjectClone + if err := ctx.ShouldBindJSON(&req); err != nil { + log.Warnf("binding body: %s", err.Error()) + ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) + return + } + + objs, err := s.svc.ObjectSvc().Clone(req.UserID, req.Clonings) + if err != nil { + log.Warnf("cloning object: %s", err.Error()) + ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "clone object failed")) + return + } + + ctx.JSON(http.StatusOK, OK(cdsapi.ObjectCloneResp{Objects: objs})) +} + func (s *ObjectService) GetPackageObjects(ctx *gin.Context) { log := logger.WithField("HTTP", "Object.GetPackageObjects") diff --git a/client/internal/http/package.go b/client/internal/http/package.go index 5b9aaf0..af3126c 100644 --- a/client/internal/http/package.go +++ b/client/internal/http/package.go @@ -46,24 +46,24 @@ func (s *PackageService) Get(ctx *gin.Context) { ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetResp{Package: *pkg})) } -func (s *PackageService) GetByName(ctx *gin.Context) { - log := logger.WithField("HTTP", "Package.GetByName") +func (s *PackageService) GetByFullName(ctx *gin.Context) { + log := logger.WithField("HTTP", "Package.GetByFullName") - var req cdsapi.PackageGetByName + var req cdsapi.PackageGetByFullName if err := ctx.ShouldBindQuery(&req); err != nil { log.Warnf("binding query: %s", err.Error()) ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, 
"missing argument or invalid argument")) return } - pkg, err := s.svc.PackageSvc().GetByName(req.UserID, req.BucketName, req.PackageName) + pkg, err := s.svc.PackageSvc().GetByFullName(req.UserID, req.BucketName, req.PackageName) if err != nil { log.Warnf("getting package by name: %s", err.Error()) ctx.JSON(http.StatusOK, FailedError(err)) return } - ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetByNameResp{Package: *pkg})) + ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetByFullNameResp{Package: *pkg})) } // Create 处理创建新包的HTTP请求。 @@ -103,7 +103,13 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) { return } - up, err := s.svc.Uploader.BeginCreateLoad(req.Info.UserID, req.Info.BucketID, req.Info.Name, req.Info.LoadTo) + if len(req.Info.LoadTo) != len(req.Info.LoadToPath) { + log.Warnf("load to and load to path count not match") + ctx.JSON(http.StatusOK, Failed(errorcode.BadArgument, "load to and load to path count not match")) + return + } + + up, err := s.svc.Uploader.BeginCreateLoad(req.Info.UserID, req.Info.BucketID, req.Info.Name, req.Info.LoadTo, req.Info.LoadToPath) if err != nil { log.Warnf("begin package create load: %s", err.Error()) ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("begin package create load: %v", err))) @@ -149,7 +155,7 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) { objs[i] = ret.Objects[pathes[i]] } - ctx.JSON(http.StatusOK, OK(cdsapi.PackageCreateLoadResp{Package: ret.Package, Objects: objs, LoadedDirs: ret.LoadedDirs})) + ctx.JSON(http.StatusOK, OK(cdsapi.PackageCreateLoadResp{Package: ret.Package, Objects: objs})) } func (s *PackageService) Delete(ctx *gin.Context) { @@ -172,6 +178,28 @@ func (s *PackageService) Delete(ctx *gin.Context) { ctx.JSON(http.StatusOK, OK(nil)) } +func (s *PackageService) Clone(ctx *gin.Context) { + log := logger.WithField("HTTP", "Package.Clone") + + var req cdsapi.PackageClone + if err := ctx.ShouldBindJSON(&req); err != nil { + log.Warnf("binding body: %s", err.Error()) + 
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) + return + } + + pkg, err := s.svc.PackageSvc().Clone(req.UserID, req.PackageID, req.BucketID, req.Name) + if err != nil { + log.Warnf("cloning package: %s", err.Error()) + ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "clone package failed")) + return + } + + ctx.JSON(http.StatusOK, OK(cdsapi.PackageCloneResp{ + Package: pkg, + })) +} + func (s *PackageService) ListBucketPackages(ctx *gin.Context) { log := logger.WithField("HTTP", "Package.ListBucketPackages") @@ -214,26 +242,3 @@ func (s *PackageService) GetCachedStorages(ctx *gin.Context) { ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetCachedStoragesResp{PackageCachingInfo: resp})) } - -// GetLoadedStorages 处理获取包的加载节点的HTTP请求。 -func (s *PackageService) GetLoadedStorages(ctx *gin.Context) { - log := logger.WithField("HTTP", "Package.GetLoadedStorages") - - var req cdsapi.PackageGetLoadedStoragesReq - if err := ctx.ShouldBindQuery(&req); err != nil { - log.Warnf("binding query: %s", err.Error()) - ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) - return - } - - stgIDs, err := s.svc.PackageSvc().GetLoadedStorages(req.UserID, req.PackageID) - if err != nil { - log.Warnf("get package loaded storages failed: %s", err.Error()) - ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package loaded storages failed")) - return - } - - ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetLoadedStoragesResp{ - StorageIDs: stgIDs, - })) -} diff --git a/client/internal/http/server.go b/client/internal/http/server.go index de73e52..4594910 100644 --- a/client/internal/http/server.go +++ b/client/internal/http/server.go @@ -11,15 +11,17 @@ type Server struct { engine *gin.Engine listenAddr string svc *services.Service + awsAuth *AWSAuth } -func NewServer(listenAddr string, svc *services.Service) (*Server, error) { +func NewServer(listenAddr string, svc 
*services.Service, awsAuth *AWSAuth) (*Server, error) { engine := gin.New() return &Server{ engine: engine, listenAddr: listenAddr, svc: svc, + awsAuth: awsAuth, }, nil } @@ -43,7 +45,10 @@ func (s *Server) initRouters() { // initTemp(rt, s) - rt.GET(cdsapi.ObjectListPath, s.Object().List) + s.routeV1(s.engine) + + rt.GET(cdsapi.ObjectListPathByPath, s.Object().ListByPath) + rt.POST(cdsapi.ObjectListByIDsPath, s.Object().ListByIDs) rt.GET(cdsapi.ObjectDownloadPath, s.Object().Download) rt.GET(cdsapi.ObjectDownloadByPathPath, s.Object().DownloadByPath) rt.POST(cdsapi.ObjectUploadPath, s.Object().Upload) @@ -53,15 +58,16 @@ func (s *Server) initRouters() { rt.POST(cdsapi.ObjectMovePath, s.Object().Move) rt.POST(cdsapi.ObjectDeletePath, s.Object().Delete) rt.POST(cdsapi.ObjectDeleteByPathPath, s.Object().DeleteByPath) + rt.POST(cdsapi.ObjectClonePath, s.Object().Clone) rt.GET(cdsapi.PackageGetPath, s.Package().Get) - rt.GET(cdsapi.PackageGetByNamePath, s.Package().GetByName) + rt.GET(cdsapi.PackageGetByFullNamePath, s.Package().GetByFullName) rt.POST(cdsapi.PackageCreatePath, s.Package().Create) rt.POST(cdsapi.PackageCreateLoadPath, s.Package().CreateLoad) rt.POST(cdsapi.PackageDeletePath, s.Package().Delete) + rt.POST(cdsapi.PackageClonePath, s.Package().Clone) rt.GET(cdsapi.PackageListBucketPackagesPath, s.Package().ListBucketPackages) rt.GET(cdsapi.PackageGetCachedStoragesPath, s.Package().GetCachedStorages) - rt.GET(cdsapi.PackageGetLoadedStoragesPath, s.Package().GetLoadedStorages) rt.POST(cdsapi.StorageLoadPackagePath, s.Storage().LoadPackage) rt.POST(cdsapi.StorageCreatePackagePath, s.Storage().CreatePackage) @@ -73,4 +79,42 @@ func (s *Server) initRouters() { rt.POST(cdsapi.BucketCreatePath, s.Bucket().Create) rt.POST(cdsapi.BucketDeletePath, s.Bucket().Delete) rt.GET(cdsapi.BucketListUserBucketsPath, s.Bucket().ListUserBuckets) + +} + +func (s *Server) routeV1(eg *gin.Engine) { + v1 := eg.Group("/v1") + + v1.GET(cdsapi.ObjectListPathByPath, s.awsAuth.Auth, 
s.Object().ListByPath) + v1.POST(cdsapi.ObjectListByIDsPath, s.awsAuth.Auth, s.Object().ListByIDs) + v1.GET(cdsapi.ObjectDownloadPath, s.awsAuth.Auth, s.Object().Download) + v1.GET(cdsapi.ObjectDownloadByPathPath, s.awsAuth.Auth, s.Object().DownloadByPath) + v1.POST(cdsapi.ObjectUploadPath, s.awsAuth.AuthWithoutBody, s.Object().Upload) + v1.GET(cdsapi.ObjectGetPackageObjectsPath, s.awsAuth.Auth, s.Object().GetPackageObjects) + v1.POST(cdsapi.ObjectUpdateInfoPath, s.awsAuth.Auth, s.Object().UpdateInfo) + v1.POST(cdsapi.ObjectUpdateInfoByPathPath, s.awsAuth.Auth, s.Object().UpdateInfoByPath) + v1.POST(cdsapi.ObjectMovePath, s.awsAuth.Auth, s.Object().Move) + v1.POST(cdsapi.ObjectDeletePath, s.awsAuth.Auth, s.Object().Delete) + v1.POST(cdsapi.ObjectDeleteByPathPath, s.awsAuth.Auth, s.Object().DeleteByPath) + v1.POST(cdsapi.ObjectClonePath, s.awsAuth.Auth, s.Object().Clone) + + v1.GET(cdsapi.PackageGetPath, s.awsAuth.Auth, s.Package().Get) + v1.GET(cdsapi.PackageGetByFullNamePath, s.awsAuth.Auth, s.Package().GetByFullName) + v1.POST(cdsapi.PackageCreatePath, s.awsAuth.Auth, s.Package().Create) + v1.POST(cdsapi.PackageCreateLoadPath, s.awsAuth.Auth, s.Package().CreateLoad) + v1.POST(cdsapi.PackageDeletePath, s.awsAuth.Auth, s.Package().Delete) + v1.POST(cdsapi.PackageClonePath, s.awsAuth.Auth, s.Package().Clone) + v1.GET(cdsapi.PackageListBucketPackagesPath, s.awsAuth.Auth, s.Package().ListBucketPackages) + v1.GET(cdsapi.PackageGetCachedStoragesPath, s.awsAuth.Auth, s.Package().GetCachedStorages) + + v1.POST(cdsapi.StorageLoadPackagePath, s.awsAuth.Auth, s.Storage().LoadPackage) + v1.POST(cdsapi.StorageCreatePackagePath, s.awsAuth.Auth, s.Storage().CreatePackage) + v1.GET(cdsapi.StorageGetPath, s.awsAuth.Auth, s.Storage().Get) + + v1.POST(cdsapi.CacheMovePackagePath, s.awsAuth.Auth, s.Cache().MovePackage) + + v1.GET(cdsapi.BucketGetByNamePath, s.awsAuth.Auth, s.Bucket().GetByName) + v1.POST(cdsapi.BucketCreatePath, s.awsAuth.Auth, s.Bucket().Create) + 
v1.POST(cdsapi.BucketDeletePath, s.awsAuth.Auth, s.Bucket().Delete) + v1.GET(cdsapi.BucketListUserBucketsPath, s.awsAuth.Auth, s.Bucket().ListUserBuckets) } diff --git a/client/internal/http/storage.go b/client/internal/http/storage.go index badb27b..96bb9a6 100644 --- a/client/internal/http/storage.go +++ b/client/internal/http/storage.go @@ -1,9 +1,7 @@ package http import ( - "fmt" "net/http" - "path/filepath" "time" "github.com/gin-gonic/gin" @@ -32,37 +30,14 @@ func (s *StorageService) LoadPackage(ctx *gin.Context) { return } - hubID, taskID, err := s.svc.StorageSvc().StartStorageLoadPackage(req.UserID, req.PackageID, req.StorageID) + err := s.svc.StorageSvc().LoadPackage(req.UserID, req.PackageID, req.StorageID, req.RootPath) if err != nil { - log.Warnf("start storage load package: %s", err.Error()) - ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("start loading: %v", err))) + log.Warnf("loading package: %s", err.Error()) + ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "loading package failed")) return } - for { - complete, ret, err := s.svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10) - if complete { - if err != nil { - log.Warnf("loading complete with: %s", err.Error()) - ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("loading complete with: %v", err))) - return - } - - ctx.JSON(http.StatusOK, OK(cdsapi.StorageLoadPackageResp{ - FullPath: filepath.Join(ret.RemoteBase, ret.PackagePath), - PackagePath: ret.PackagePath, - LocalBase: ret.LocalBase, - RemoteBase: ret.RemoteBase, - })) - return - } - - if err != nil { - log.Warnf("wait loadding: %s", err.Error()) - ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("wait loading: %v", err))) - return - } - } + ctx.JSON(http.StatusOK, OK(cdsapi.StorageLoadPackageResp{})) } func (s *StorageService) CreatePackage(ctx *gin.Context) { diff --git a/client/internal/services/cache.go b/client/internal/services/cache.go 
index 964c2a5..04bcb5a 100644 --- a/client/internal/services/cache.go +++ b/client/internal/services/cache.go @@ -9,6 +9,7 @@ import ( stgglb "gitlink.org.cn/cloudream/storage/common/globals" agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory" ) type CacheService struct { @@ -31,7 +32,7 @@ func (svc *CacheService) StartCacheMovePackage(userID cdssdk.UserID, packageID c return 0, "", fmt.Errorf("get storage detail: %w", err) } - if getStg.Storages[0].Storage.ShardStore == nil { + if !factory.GetBuilder(*getStg.Storages[0]).ShardStoreDesc().Enabled() { return 0, "", fmt.Errorf("shard storage is not enabled") } diff --git a/client/internal/services/hub.go b/client/internal/services/hub.go index c924b6b..7e3e94d 100644 --- a/client/internal/services/hub.go +++ b/client/internal/services/hub.go @@ -26,7 +26,7 @@ func (svc *Service) HubSvc() *HubService { // // []cdssdk.Hub - 获取到的节点信息列表 // error - 如果过程中发生错误,则返回错误信息 -func (svc *HubService) GetHubs(hubIDs []cdssdk.HubID) ([]cdssdk.Hub, error) { +func (svc *HubService) GetHubs(hubIDs []cdssdk.HubID) ([]*cdssdk.Hub, error) { // 从协调器MQ池中获取一个客户端实例 coorCli, err := stgglb.CoordinatorMQPool.Acquire() if err != nil { diff --git a/client/internal/services/object.go b/client/internal/services/object.go index d5f2208..b155ece 100644 --- a/client/internal/services/object.go +++ b/client/internal/services/object.go @@ -37,6 +37,21 @@ func (svc *ObjectService) GetByPath(userID cdssdk.UserID, pkgID cdssdk.PackageID return listResp.Objects, nil } +func (svc *ObjectService) GetByIDs(userID cdssdk.UserID, objectIDs []cdssdk.ObjectID) ([]*cdssdk.Object, error) { + coorCli, err := stgglb.CoordinatorMQPool.Acquire() + if err != nil { + return nil, fmt.Errorf("new coordinator client: %w", err) + } + defer stgglb.CoordinatorMQPool.Release(coorCli) + + listResp, err := 
coorCli.GetObjects(coormq.ReqGetObjects(userID, objectIDs)) + if err != nil { + return nil, fmt.Errorf("requesting to coordinator: %w", err) + } + + return listResp.Objects, nil +} + func (svc *ObjectService) UpdateInfo(userID cdssdk.UserID, updatings []cdsapi.UpdatingObject) ([]cdssdk.ObjectID, error) { coorCli, err := stgglb.CoordinatorMQPool.Acquire() if err != nil { @@ -98,6 +113,21 @@ func (svc *ObjectService) Delete(userID cdssdk.UserID, objectIDs []cdssdk.Object return nil } +func (svc *ObjectService) Clone(userID cdssdk.UserID, clonings []cdsapi.CloningObject) ([]*cdssdk.Object, error) { + coorCli, err := stgglb.CoordinatorMQPool.Acquire() + if err != nil { + return nil, fmt.Errorf("new coordinator client: %w", err) + } + defer stgglb.CoordinatorMQPool.Release(coorCli) + + resp, err := coorCli.CloneObjects(coormq.ReqCloneObjects(userID, clonings)) + if err != nil { + return nil, fmt.Errorf("requesting to coordinator: %w", err) + } + + return resp.Objects, nil +} + // GetPackageObjects 获取包中的对象列表。 // userID: 用户ID。 // packageID: 包ID。 diff --git a/client/internal/services/package.go b/client/internal/services/package.go index 4991565..f31938c 100644 --- a/client/internal/services/package.go +++ b/client/internal/services/package.go @@ -36,7 +36,7 @@ func (svc *PackageService) Get(userID cdssdk.UserID, packageID cdssdk.PackageID) return &getResp.Package, nil } -func (svc *PackageService) GetByName(userID cdssdk.UserID, bucketName string, packageName string) (*cdssdk.Package, error) { +func (svc *PackageService) GetByFullName(userID cdssdk.UserID, bucketName string, packageName string) (*cdssdk.Package, error) { coorCli, err := stgglb.CoordinatorMQPool.Acquire() if err != nil { return nil, fmt.Errorf("new coordinator client: %w", err) @@ -106,6 +106,21 @@ func (svc *PackageService) DeletePackage(userID cdssdk.UserID, packageID cdssdk.
return nil } +func (svc *PackageService) Clone(userID cdssdk.UserID, packageID cdssdk.PackageID, bucketID cdssdk.BucketID, name string) (cdssdk.Package, error) { + coorCli, err := stgglb.CoordinatorMQPool.Acquire() + if err != nil { + return cdssdk.Package{}, fmt.Errorf("new coordinator client: %w", err) + } + defer stgglb.CoordinatorMQPool.Release(coorCli) + + resp, err := coorCli.ClonePackage(coormq.ReqClonePackage(userID, packageID, bucketID, name)) + if err != nil { + return cdssdk.Package{}, fmt.Errorf("cloning package: %w", err) + } + + return resp.Package, nil +} + // GetCachedStorages 获取指定包的缓存节点信息 func (svc *PackageService) GetCachedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) (cdssdk.PackageCachingInfo, error) { // 从协调器MQ池中获取客户端 @@ -128,20 +143,3 @@ func (svc *PackageService) GetCachedStorages(userID cdssdk.UserID, packageID cds } return tmp, nil } - -// GetLoadedStorages 获取指定包加载的节点列表 -func (svc *PackageService) GetLoadedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) ([]cdssdk.StorageID, error) { - // 从协调器MQ池中获取客户端 - coorCli, err := stgglb.CoordinatorMQPool.Acquire() - if err != nil { - return nil, fmt.Errorf("new coordinator client: %w", err) - } - defer stgglb.CoordinatorMQPool.Release(coorCli) - - // 向协调器请求获取加载指定包的节点ID列表 - resp, err := coorCli.GetPackageLoadedStorages(coormq.ReqGetPackageLoadedStorages(userID, packageID)) - if err != nil { - return nil, fmt.Errorf("get package loaded storages: %w", err) - } - return resp.StorageIDs, nil -} diff --git a/client/internal/services/service.go b/client/internal/services/service.go index 476e126..260bbb3 100644 --- a/client/internal/services/service.go +++ b/client/internal/services/service.go @@ -7,24 +7,38 @@ import ( "gitlink.org.cn/cloudream/storage/client/internal/task" "gitlink.org.cn/cloudream/storage/common/pkgs/accessstat" "gitlink.org.cn/cloudream/storage/common/pkgs/downloader" + "gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy" + 
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache" "gitlink.org.cn/cloudream/storage/common/pkgs/uploader" ) // Service 结构体封装了分布锁服务和任务管理服务。 type Service struct { - DistLock *distlock.Service - TaskMgr *task.Manager - Downloader *downloader.Downloader - AccessStat *accessstat.AccessStat - Uploader *uploader.Uploader + DistLock *distlock.Service + TaskMgr *task.Manager + Downloader *downloader.Downloader + AccessStat *accessstat.AccessStat + Uploader *uploader.Uploader + StrategySelector *strategy.Selector + StorageMeta *metacache.StorageMeta } -func NewService(distlock *distlock.Service, taskMgr *task.Manager, downloader *downloader.Downloader, accStat *accessstat.AccessStat, uploder *uploader.Uploader) (*Service, error) { +func NewService( + distlock *distlock.Service, + taskMgr *task.Manager, + downloader *downloader.Downloader, + accStat *accessstat.AccessStat, + uploder *uploader.Uploader, + strategySelector *strategy.Selector, + storageMeta *metacache.StorageMeta, +) (*Service, error) { return &Service{ - DistLock: distlock, - TaskMgr: taskMgr, - Downloader: downloader, - AccessStat: accStat, - Uploader: uploder, + DistLock: distlock, + TaskMgr: taskMgr, + Downloader: downloader, + AccessStat: accStat, + Uploader: uploder, + StrategySelector: strategySelector, + StorageMeta: storageMeta, }, nil } diff --git a/client/internal/services/storage.go b/client/internal/services/storage.go index 17a5cf2..046de63 100644 --- a/client/internal/services/storage.go +++ b/client/internal/services/storage.go @@ -1,15 +1,23 @@ package services import ( + "context" "fmt" + "path" "time" + "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" stgglb "gitlink.org.cn/cloudream/storage/common/globals" "gitlink.org.cn/cloudream/storage/common/pkgs/db2/model" + "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" + "gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy" + 
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser" agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory" ) type StorageService struct { @@ -50,74 +58,90 @@ func (svc *StorageService) GetByName(userID cdssdk.UserID, name string) (*model. return &getResp.Storage, nil } -func (svc *StorageService) StartStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) (cdssdk.HubID, string, error) { +func (svc *StorageService) LoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID, rootPath string) error { coorCli, err := stgglb.CoordinatorMQPool.Acquire() if err != nil { - return 0, "", fmt.Errorf("new coordinator client: %w", err) + return fmt.Errorf("new coordinator client: %w", err) } defer stgglb.CoordinatorMQPool.Release(coorCli) - stgResp, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{storageID})) - if err != nil { - return 0, "", fmt.Errorf("getting storage info: %w", err) + destStg := svc.StorageMeta.Get(storageID) + if destStg == nil { + return fmt.Errorf("storage not found: %d", storageID) } - - if stgResp.Storages[0].Storage.ShardStore == nil { - return 0, "", fmt.Errorf("shard storage is not enabled") + if destStg.MasterHub == nil { + return fmt.Errorf("storage %v has no master hub", storageID) } - agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storages[0].MasterHub.HubID) + details, err := coorCli.GetPackageObjectDetails(coormq.ReqGetPackageObjectDetails(packageID)) if err != nil { - return 0, "", fmt.Errorf("new agent client: %w", err) + return err } - defer stgglb.AgentMQPool.Release(agentCli) - startResp, err := agentCli.StartStorageLoadPackage(agtmq.NewStartStorageLoadPackage(userID, packageID, storageID)) - if err != nil { - return 0, "", 
fmt.Errorf("start storage load package: %w", err) - } + var pinned []cdssdk.ObjectID + plans := exec.NewPlanBuilder() + for _, obj := range details.Objects { + strg, err := svc.StrategySelector.Select(strategy.Request{ + Detail: obj, + DestHub: destStg.MasterHub.HubID, + }) + if err != nil { + return fmt.Errorf("select download strategy: %w", err) + } - return stgResp.Storages[0].MasterHub.HubID, startResp.TaskID, nil -} + ft := ioswitch2.NewFromTo() + switch strg := strg.(type) { + case *strategy.DirectStrategy: + ft.AddFrom(ioswitch2.NewFromShardstore(strg.Detail.Object.FileHash, *strg.Storage.MasterHub, strg.Storage, ioswitch2.RawStream())) -type StorageLoadPackageResult struct { - PackagePath string - LocalBase string - RemoteBase string -} + case *strategy.ECReconstructStrategy: + for i, b := range strg.Blocks { + ft.AddFrom(ioswitch2.NewFromShardstore(b.FileHash, *strg.Storages[i].MasterHub, strg.Storages[i], ioswitch2.ECStream(b.Index))) + ft.ECParam = &strg.Redundancy + } + default: + return fmt.Errorf("unsupported download strategy: %T", strg) + } -func (svc *StorageService) WaitStorageLoadPackage(hubID cdssdk.HubID, taskID string, waitTimeout time.Duration) (bool, *StorageLoadPackageResult, error) { - agentCli, err := stgglb.AgentMQPool.Acquire(hubID) - if err != nil { - // TODO 失败是否要当做任务已经结束? 
- return true, nil, fmt.Errorf("new agent client: %w", err) + ft.AddTo(ioswitch2.NewLoadToShared(*destStg.MasterHub, *destStg, path.Join(rootPath, obj.Object.Path))) + // 顺便保存到同存储服务的分片存储中 + if factory.GetBuilder(*destStg).ShardStoreDesc().Enabled() { + ft.AddTo(ioswitch2.NewToShardStore(*destStg.MasterHub, *destStg, ioswitch2.RawStream(), "")) + pinned = append(pinned, obj.Object.ObjectID) + } + + err = parser.Parse(ft, plans) + if err != nil { + return fmt.Errorf("parse plan: %w", err) + } } - defer stgglb.AgentMQPool.Release(agentCli) - waitResp, err := agentCli.WaitStorageLoadPackage(agtmq.NewWaitStorageLoadPackage(taskID, waitTimeout.Milliseconds())) + mutex, err := reqbuilder.NewBuilder(). + // 保护在storage目录中下载的文件 + Storage().Buzy(storageID). + // 保护下载文件时同时保存到IPFS的文件 + Shard().Buzy(storageID). + MutexLock(svc.DistLock) if err != nil { - // TODO 请求失败是否要当做任务已经结束? - return true, nil, fmt.Errorf("wait storage load package: %w", err) + return fmt.Errorf("acquire locks failed, err: %w", err) } - if !waitResp.IsComplete { - return false, nil, nil + // 记录访问统计 + for _, obj := range details.Objects { + svc.AccessStat.AddAccessCounter(obj.Object.ObjectID, packageID, storageID, 1) } - if waitResp.Error != "" { - return true, nil, fmt.Errorf("%s", waitResp.Error) - } + defer mutex.Unlock() - return true, &StorageLoadPackageResult{ - PackagePath: waitResp.PackagePath, - LocalBase: waitResp.LocalBase, - RemoteBase: waitResp.RemoteBase, - }, nil -} + drv := plans.Execute(exec.NewExecContext()) + _, err = drv.Wait(context.Background()) + if err != nil { + return err + } -func (svc *StorageService) DeleteStoragePackage(userID int64, packageID int64, storageID int64) error { - // TODO - panic("not implement yet") + // 失败也没关系 + coorCli.StoragePackageLoaded(coormq.ReqStoragePackageLoaded(userID, storageID, packageID, rootPath, pinned)) + return nil } // 请求节点启动从Storage中上传文件的任务。会返回节点ID和任务ID diff --git a/client/internal/task/task.go b/client/internal/task/task.go index 
4d66cc5..348621b 100644 --- a/client/internal/task/task.go +++ b/client/internal/task/task.go @@ -4,14 +4,14 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/distlock" // 引入分布式锁服务 "gitlink.org.cn/cloudream/common/pkgs/task" // 引入任务处理相关的包 "gitlink.org.cn/cloudream/storage/common/pkgs/connectivity" // 引入网络连接状态收集器 - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" ) // TaskContext 定义了任务执行的上下文环境,包含分布式锁服务和网络连接状态收集器 type TaskContext struct { distlock *distlock.Service connectivity *connectivity.Collector - stgMgr *svcmgr.Manager + stgAgts *agtpool.AgentPool } // CompleteFn 类型定义了任务完成时的回调函数,用于设置任务的执行结果 @@ -31,10 +31,10 @@ type CompleteOption = task.CompleteOption // NewManager 创建一个新的任务管理器实例,接受一个分布式锁服务和一个网络连接状态收集器作为参数 // 返回一个初始化好的任务管理器实例 -func NewManager(distlock *distlock.Service, connectivity *connectivity.Collector, stgMgr *svcmgr.Manager) Manager { +func NewManager(distlock *distlock.Service, connectivity *connectivity.Collector, stgAgts *agtpool.AgentPool) Manager { return task.NewManager(TaskContext{ distlock: distlock, connectivity: connectivity, - stgMgr: stgMgr, + stgAgts: stgAgts, }) } diff --git a/client/main.go b/client/main.go index 158dca7..abd1a45 100644 --- a/client/main.go +++ b/client/main.go @@ -18,8 +18,10 @@ import ( "gitlink.org.cn/cloudream/storage/common/pkgs/connectivity" "gitlink.org.cn/cloudream/storage/common/pkgs/distlock" "gitlink.org.cn/cloudream/storage/common/pkgs/downloader" + "gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy" + "gitlink.org.cn/cloudream/storage/common/pkgs/metacache" coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" "gitlink.org.cn/cloudream/storage/common/pkgs/uploader" ) @@ -37,7 +39,7 @@ func main() { } stgglb.InitLocal(&config.Cfg().Local) - 
stgglb.InitMQPool(&config.Cfg().RabbitMQ) + stgglb.InitMQPool(config.Cfg().RabbitMQ) stgglb.InitAgentRPCPool(&config.Cfg().AgentGRPC) // 连接性信息收集 @@ -57,13 +59,13 @@ func main() { consMap := make(map[cdssdk.HubID]connectivity.Connectivity) for _, con := range getCons.Connectivities { var delay *time.Duration - if con.Delay != nil { - d := time.Duration(*con.Delay * float32(time.Millisecond)) + if con.Latency != nil { + d := time.Duration(*con.Latency * float32(time.Millisecond)) delay = &d } consMap[con.FromHubID] = connectivity.Connectivity{ ToHubID: con.ToHubID, - Delay: delay, + Latency: delay, } } conCol = connectivity.NewCollectorWithInitData(&config.Cfg().Connectivity, nil, consMap) @@ -75,6 +77,12 @@ func main() { conCol.CollectInPlace() } + metaCacheHost := metacache.NewHost() + go metaCacheHost.Serve() + stgMeta := metaCacheHost.AddStorageMeta() + hubMeta := metaCacheHost.AddHubMeta() + conMeta := metaCacheHost.AddConnectivity() + // 分布式锁 distlockSvc, err := distlock.NewService(&config.Cfg().DistLock) if err != nil { @@ -91,18 +99,20 @@ func main() { go serveAccessStat(acStat) // 存储管理器 - stgMgr := svcmgr.NewManager() + stgAgts := agtpool.NewPool() // 任务管理器 - taskMgr := task.NewManager(distlockSvc, &conCol, stgMgr) + taskMgr := task.NewManager(distlockSvc, &conCol, stgAgts) + + strgSel := strategy.NewSelector(config.Cfg().DownloadStrategy, stgMeta, hubMeta, conMeta) // 下载器 - dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgMgr) + dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgAgts, strgSel) // 上传器 - uploader := uploader.NewUploader(distlockSvc, &conCol, stgMgr) + uploader := uploader.NewUploader(distlockSvc, &conCol, stgAgts, stgMeta) - svc, err := services.NewService(distlockSvc, &taskMgr, &dlder, acStat, uploader) + svc, err := services.NewService(distlockSvc, &taskMgr, &dlder, acStat, uploader, strgSel, stgMeta) if err != nil { logger.Warnf("new services failed, err: %s", err.Error()) os.Exit(1) diff --git 
a/common/assets/confs/agent.config.json b/common/assets/confs/agent.config.json index 2deefb4..5c088c7 100644 --- a/common/assets/confs/agent.config.json +++ b/common/assets/confs/agent.config.json @@ -39,7 +39,9 @@ }, "downloader": { "maxStripCacheCount": 100, - "highLatencyHub": 35, "ecStripPrefetchCount": 1 + }, + "downloadStrategy": { + "highLatencyHub": 35 } } \ No newline at end of file diff --git a/common/assets/confs/client.config.json b/common/assets/confs/client.config.json index 2dad96a..418129f 100644 --- a/common/assets/confs/client.config.json +++ b/common/assets/confs/client.config.json @@ -34,8 +34,13 @@ }, "downloader": { "maxStripCacheCount": 100, - "highLatencyHub": 35, "ecStripPrefetchCount": 1 }, - "storageID": 0 + "downloadStrategy": { + "highLatencyHub": 35 + }, + "storageID": 0, + "authAccessKey": "", + "authSecretKey": "", + "maxHttpBodySize": 5242880 } \ No newline at end of file diff --git a/common/assets/confs/scanner.config.json b/common/assets/confs/scanner.config.json index 6dcde99..e8c8531 100644 --- a/common/assets/confs/scanner.config.json +++ b/common/assets/confs/scanner.config.json @@ -9,15 +9,15 @@ "level": "debug" }, "db": { - "address": "106.75.6.194:3306", - "account": "root", - "password": "cloudream123456", + "address": "127.0.0.1:3306", + "account": "", + "password": "", "databaseName": "cloudream" }, "rabbitMQ": { - "address": "106.75.6.194:5672", - "account": "cloudream", - "password": "cloudream123456", + "address": "127.0.0.1:5672", + "account": "", + "password": "", "vhost": "/", "param": { "retryNum": 5, @@ -25,7 +25,7 @@ } }, "distlock": { - "etcdAddress": "106.75.6.194:2379", + "etcdAddress": "127.0.0.1:2379", "etcdUsername": "", "etcdPassword": "", "etcdLockLeaseTimeSec": 5, diff --git a/common/globals/pools.go b/common/globals/pools.go index cb4ab84..53841f9 100644 --- a/common/globals/pools.go +++ b/common/globals/pools.go @@ -1,8 +1,8 @@ package stgglb import ( + "gitlink.org.cn/cloudream/common/pkgs/mq" 
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" - stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq" agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" scmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner" @@ -18,7 +18,7 @@ var ScannerMQPool scmq.Pool // // @Description: 初始化MQ连接池 // @param cfg -func InitMQPool(cfg *stgmq.Config) { +func InitMQPool(cfg mq.Config) { AgentMQPool = agtmq.NewPool(cfg) CoordinatorMQPool = coormq.NewPool(cfg) diff --git a/common/pkgs/connectivity/collector.go b/common/pkgs/connectivity/collector.go index b6cb6b4..31d30e4 100644 --- a/common/pkgs/connectivity/collector.go +++ b/common/pkgs/connectivity/collector.go @@ -13,7 +13,7 @@ import ( type Connectivity struct { ToHubID cdssdk.HubID - Delay *time.Duration + Latency *time.Duration TestTime time.Time } @@ -52,17 +52,6 @@ func NewCollectorWithInitData(cfg *Config, onCollected func(collector *Collector return rpt } -func (r *Collector) Get(hubID cdssdk.HubID) *Connectivity { - r.lock.RLock() - defer r.lock.RUnlock() - - con, ok := r.connectivities[hubID] - if ok { - return &con - } - - return nil -} func (r *Collector) GetAll() map[cdssdk.HubID]Connectivity { r.lock.RLock() defer r.lock.RUnlock() @@ -101,8 +90,8 @@ func (r *Collector) serve() { // 为了防止同时启动的节点会集中进行Ping,所以第一次上报间隔为0-TestInterval秒之间随机 startup := true - firstReportDelay := time.Duration(float64(r.cfg.TestInterval) * float64(time.Second) * rand.Float64()) - ticker := time.NewTicker(firstReportDelay) + firstReportLatency := time.Duration(float64(r.cfg.TestInterval) * float64(time.Second) * rand.Float64()) + ticker := time.NewTicker(firstReportLatency) loop: for { @@ -150,7 +139,7 @@ func (r *Collector) testing() { wg.Add(1) go func() { defer wg.Done() - cons[tmpIdx] = r.ping(tmpHub) + cons[tmpIdx] = r.ping(*tmpHub) }() } @@ -190,7 +179,7 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity { return Connectivity{ 
ToHubID: hub.HubID, - Delay: nil, + Latency: nil, TestTime: time.Now(), } } @@ -200,7 +189,7 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity { log.Warnf("new agent %v:%v rpc client: %w", ip, port, err) return Connectivity{ ToHubID: hub.HubID, - Delay: nil, + Latency: nil, TestTime: time.Now(), } } @@ -212,13 +201,13 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity { log.Warnf("pre ping: %v", err) return Connectivity{ ToHubID: hub.HubID, - Delay: nil, + Latency: nil, TestTime: time.Now(), } } // 后几次ping计算延迟 - var avgDelay time.Duration + var avgLatency time.Duration for i := 0; i < 3; i++ { start := time.Now() err = agtCli.Ping() @@ -226,22 +215,22 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity { log.Warnf("ping: %v", err) return Connectivity{ ToHubID: hub.HubID, - Delay: nil, + Latency: nil, TestTime: time.Now(), } } - delay := time.Since(start) - avgDelay += delay + latency := time.Since(start) + avgLatency += latency // 每次ping之间间隔1秒 <-time.After(time.Second) } - delay := avgDelay / 3 + latency := avgLatency / 3 return Connectivity{ ToHubID: hub.HubID, - Delay: &delay, + Latency: &latency, TestTime: time.Now(), } } diff --git a/common/pkgs/db2/bucket.go b/common/pkgs/db2/bucket.go index d3ea7d5..028dda1 100644 --- a/common/pkgs/db2/bucket.go +++ b/common/pkgs/db2/bucket.go @@ -113,26 +113,5 @@ func (db *BucketDB) Create(ctx SQLContext, userID cdssdk.UserID, bucketName stri } func (db *BucketDB) Delete(ctx SQLContext, bucketID cdssdk.BucketID) error { - if err := ctx.Exec("DELETE FROM UserBucket WHERE BucketID = ?", bucketID).Error; err != nil { - return fmt.Errorf("delete user bucket failed, err: %w", err) - } - - if err := ctx.Exec("DELETE FROM Bucket WHERE BucketID = ?", bucketID).Error; err != nil { - return fmt.Errorf("delete bucket failed, err: %w", err) - } - - var pkgIDs []cdssdk.PackageID - if err := ctx.Table("Package").Select("PackageID").Where("BucketID = ?", bucketID).Find(&pkgIDs).Error; err != nil { - return 
fmt.Errorf("query package failed, err: %w", err) - } - - for _, pkgID := range pkgIDs { - if err := db.Package().SoftDelete(ctx, pkgID); err != nil { - return fmt.Errorf("set package selected failed, err: %w", err) - } - - // 失败也没关系,会有定时任务再次尝试 - db.Package().DeleteUnused(ctx, pkgID) - } - return nil + return ctx.Delete(&cdssdk.Bucket{}, "BucketID = ?", bucketID).Error } diff --git a/common/pkgs/db2/model/model.go b/common/pkgs/db2/model/model.go index 27badf9..ccbcf85 100644 --- a/common/pkgs/db2/model/model.go +++ b/common/pkgs/db2/model/model.go @@ -67,24 +67,6 @@ func (Cache) TableName() string { return "Cache" } -const ( - StoragePackageStateNormal = "Normal" - StoragePackageStateDeleted = "Deleted" - StoragePackageStateOutdated = "Outdated" -) - -// Storage当前加载的Package -type StoragePackage struct { - StorageID cdssdk.StorageID `gorm:"column:StorageID; primaryKey; type:bigint" json:"storageID"` - PackageID cdssdk.PackageID `gorm:"column:PackageID; primaryKey; type:bigint" json:"packageID"` - UserID cdssdk.UserID `gorm:"column:UserID; primaryKey; type:bigint" json:"userID"` - State string `gorm:"column:State; type:varchar(255); not null" json:"state"` -} - -func (StoragePackage) TableName() string { - return "StoragePackage" -} - type Location struct { LocationID cdssdk.LocationID `gorm:"column:LocationID; primaryKey; type:bigint; autoIncrement" json:"locationID"` Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"` diff --git a/common/pkgs/db2/object.go b/common/pkgs/db2/object.go index 7a7f02c..00a7069 100644 --- a/common/pkgs/db2/object.go +++ b/common/pkgs/db2/object.go @@ -20,7 +20,7 @@ func (db *DB) Object() *ObjectDB { return &ObjectDB{DB: db} } -func (db *ObjectDB) GetByID(ctx SQLContext, objectID cdssdk.ObjectID) (model.Object, error) { +func (db *ObjectDB) GetByID(ctx SQLContext, objectID cdssdk.ObjectID) (cdssdk.Object, error) { var ret cdssdk.Object err := ctx.Table("Object").Where("ObjectID = ?", objectID).First(&ret).Error 
return ret, err @@ -57,7 +57,7 @@ func (db *ObjectDB) BatchTestObjectID(ctx SQLContext, objectIDs []cdssdk.ObjectI return avaiIDMap, nil } -func (db *ObjectDB) BatchGet(ctx SQLContext, objectIDs []cdssdk.ObjectID) ([]model.Object, error) { +func (db *ObjectDB) BatchGet(ctx SQLContext, objectIDs []cdssdk.ObjectID) ([]cdssdk.Object, error) { if len(objectIDs) == 0 { return nil, nil } @@ -85,6 +85,41 @@ func (db *ObjectDB) BatchGetByPackagePath(ctx SQLContext, pkgID cdssdk.PackageID return objs, nil } +// 仅返回查询到的对象 +func (db *ObjectDB) BatchGetDetails(ctx SQLContext, objectIDs []cdssdk.ObjectID) ([]stgmod.ObjectDetail, error) { + var objs []cdssdk.Object + + err := ctx.Table("Object").Where("ObjectID IN ?", objectIDs).Order("ObjectID ASC").Find(&objs).Error + if err != nil { + return nil, err + } + + // 获取所有的 ObjectBlock + var allBlocks []stgmod.ObjectBlock + err = ctx.Table("ObjectBlock").Where("ObjectID IN ?", objectIDs).Order("ObjectID, `Index` ASC").Find(&allBlocks).Error + if err != nil { + return nil, err + } + + // 获取所有的 PinnedObject + var allPinnedObjs []cdssdk.PinnedObject + err = ctx.Table("PinnedObject").Where("ObjectID IN ?", objectIDs).Order("ObjectID ASC").Find(&allPinnedObjs).Error + if err != nil { + return nil, err + } + + details := make([]stgmod.ObjectDetail, len(objs)) + for i, obj := range objs { + details[i] = stgmod.ObjectDetail{ + Object: obj, + } + } + + stgmod.DetailsFillObjectBlocks(details, allBlocks) + stgmod.DetailsFillPinnedAt(details, allPinnedObjs) + return details, nil +} + func (db *ObjectDB) Create(ctx SQLContext, obj cdssdk.Object) (cdssdk.ObjectID, error) { err := ctx.Table("Object").Create(&obj).Error if err != nil { @@ -128,7 +163,7 @@ func (db *ObjectDB) BatchUpdateColumns(ctx SQLContext, objs []cdssdk.Object, col }).Create(objs).Error } -func (db *ObjectDB) GetPackageObjects(ctx SQLContext, packageID cdssdk.PackageID) ([]model.Object, error) { +func (db *ObjectDB) GetPackageObjects(ctx SQLContext, packageID cdssdk.PackageID) 
([]cdssdk.Object, error) { var ret []cdssdk.Object err := ctx.Table("Object").Where("PackageID = ?", packageID).Order("ObjectID ASC").Find(&ret).Error return ret, err diff --git a/common/pkgs/db2/object_block.go b/common/pkgs/db2/object_block.go index 6f52112..53550a6 100644 --- a/common/pkgs/db2/object_block.go +++ b/common/pkgs/db2/object_block.go @@ -33,6 +33,16 @@ func (db *ObjectBlockDB) BatchGetByObjectID(ctx SQLContext, objectIDs []cdssdk.O return blocks, err } +func (*ObjectBlockDB) GetInPackageID(ctx SQLContext, packageID cdssdk.PackageID) ([]stgmod.ObjectBlock, error) { + var rets []stgmod.ObjectBlock + err := ctx.Table("ObjectBlock"). + Joins("INNER JOIN Object ON ObjectBlock.ObjectID = Object.ObjectID"). + Where("Object.PackageID = ?", packageID). + Order("ObjectBlock.ObjectID, ObjectBlock.`Index` ASC"). + Find(&rets).Error + return rets, err +} + func (db *ObjectBlockDB) Create(ctx SQLContext, objectID cdssdk.ObjectID, index int, stgID cdssdk.StorageID, fileHash cdssdk.FileHash) error { block := stgmod.ObjectBlock{ObjectID: objectID, Index: index, StorageID: stgID, FileHash: fileHash} return ctx.Table("ObjectBlock").Create(&block).Error diff --git a/common/pkgs/db2/package.go b/common/pkgs/db2/package.go index 37a7012..8c8812b 100644 --- a/common/pkgs/db2/package.go +++ b/common/pkgs/db2/package.go @@ -57,7 +57,7 @@ func (*PackageDB) BatchGetAllPackageIDs(ctx SQLContext, start int, count int) ([ return ret, err } -func (db *PackageDB) GetBucketPackages(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) ([]model.Package, error) { +func (db *PackageDB) GetUserBucketPackages(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) ([]model.Package, error) { var ret []model.Package err := ctx.Table("UserBucket"). Select("Package.*"). 
@@ -67,6 +67,15 @@ func (db *PackageDB) GetBucketPackages(ctx SQLContext, userID cdssdk.UserID, buc return ret, err } +func (db *PackageDB) GetBucketPackages(ctx SQLContext, bucketID cdssdk.BucketID) ([]model.Package, error) { + var ret []model.Package + err := ctx.Table("Package"). + Select("Package.*"). + Where("BucketID = ?", bucketID). + Find(&ret).Error + return ret, err +} + // IsAvailable 判断一个用户是否拥有指定对象 func (db *PackageDB) IsAvailable(ctx SQLContext, userID cdssdk.UserID, packageID cdssdk.PackageID) (bool, error) { var pkgID cdssdk.PackageID @@ -110,7 +119,7 @@ func (*PackageDB) GetUserPackageByName(ctx SQLContext, userID cdssdk.UserID, buc return ret, err } -func (db *PackageDB) Create(ctx SQLContext, bucketID cdssdk.BucketID, name string) (cdssdk.PackageID, error) { +func (db *PackageDB) Create(ctx SQLContext, bucketID cdssdk.BucketID, name string) (cdssdk.Package, error) { var packageID int64 err := ctx.Table("Package"). Select("PackageID"). @@ -118,33 +127,29 @@ func (db *PackageDB) Create(ctx SQLContext, bucketID cdssdk.BucketID, name strin Scan(&packageID).Error if err != nil { - return 0, err + return cdssdk.Package{}, err } if packageID != 0 { - return 0, gorm.ErrDuplicatedKey + return cdssdk.Package{}, gorm.ErrDuplicatedKey } newPackage := cdssdk.Package{Name: name, BucketID: bucketID, State: cdssdk.PackageStateNormal} if err := ctx.Create(&newPackage).Error; err != nil { - return 0, fmt.Errorf("insert package failed, err: %w", err) + return cdssdk.Package{}, fmt.Errorf("insert package failed, err: %w", err) } - return newPackage.PackageID, nil + return newPackage, nil } -// SoftDelete 设置一个对象被删除,并将相关数据删除 -func (db *PackageDB) SoftDelete(ctx SQLContext, packageID cdssdk.PackageID) error { - obj, err := db.GetByID(ctx, packageID) - if err != nil { - return fmt.Errorf("get package failed, err: %w", err) - } - - if obj.State != cdssdk.PackageStateNormal { - return nil - } +func (*PackageDB) Delete(ctx SQLContext, packageID cdssdk.PackageID) error { + 
err := ctx.Delete(&model.Package{}, "PackageID = ?", packageID).Error + return err +} - if err := db.ChangeState(ctx, packageID, cdssdk.PackageStateDeleted); err != nil { - return fmt.Errorf("change package state failed, err: %w", err) +// 删除与Package相关的所有数据 +func (db *PackageDB) DeleteComplete(ctx SQLContext, packageID cdssdk.PackageID) error { + if err := db.Package().Delete(ctx, packageID); err != nil { + return fmt.Errorf("delete package state: %w", err) } if err := db.ObjectAccessStat().DeleteInPackage(ctx, packageID); err != nil { @@ -163,23 +168,13 @@ func (db *PackageDB) SoftDelete(ctx SQLContext, packageID cdssdk.PackageID) erro return fmt.Errorf("deleting objects in package: %w", err) } - if _, err := db.StoragePackage().SetAllPackageDeleted(ctx, packageID); err != nil { - return fmt.Errorf("set storage package deleted failed, err: %w", err) + if err := db.PackageAccessStat().DeleteByPackageID(ctx, packageID); err != nil { + return fmt.Errorf("deleting package access stat: %w", err) } return nil } -// DeleteUnused 删除一个已经是Deleted状态,且不再被使用的对象 -func (PackageDB) DeleteUnused(ctx SQLContext, packageID cdssdk.PackageID) error { - err := ctx.Exec("DELETE FROM Package WHERE PackageID = ? AND State = ? AND NOT EXISTS (SELECT StorageID FROM StoragePackage WHERE PackageID = ?)", - packageID, - cdssdk.PackageStateDeleted, - packageID, - ).Error - return err -} - func (*PackageDB) ChangeState(ctx SQLContext, packageID cdssdk.PackageID, state string) error { err := ctx.Exec("UPDATE Package SET State = ? 
WHERE PackageID = ?", state, packageID).Error return err diff --git a/common/pkgs/db2/pinned_object.go b/common/pkgs/db2/pinned_object.go index 8b31dca..a9de444 100644 --- a/common/pkgs/db2/pinned_object.go +++ b/common/pkgs/db2/pinned_object.go @@ -42,8 +42,10 @@ func (*PinnedObjectDB) BatchGetByObjectID(ctx SQLContext, objectIDs []cdssdk.Obj } func (*PinnedObjectDB) TryCreate(ctx SQLContext, stgID cdssdk.StorageID, objectID cdssdk.ObjectID, createTime time.Time) error { - err := ctx.Clauses(clause.Insert{Modifier: "ignore"}).Table("PinnedObject").Create(&cdssdk.PinnedObject{StorageID: stgID, ObjectID: objectID, CreateTime: createTime}).Error - return err + return ctx.Clauses(clause.OnConflict{ + Columns: []clause.Column{{Name: "ObjectID"}, {Name: "StorageID"}}, + DoUpdates: clause.AssignmentColumns([]string{"CreateTime"}), + }).Create(&cdssdk.PinnedObject{StorageID: stgID, ObjectID: objectID, CreateTime: createTime}).Error } func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []cdssdk.PinnedObject) error { @@ -51,8 +53,10 @@ func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []cdssdk.PinnedObj return nil } - err := ctx.Clauses(clause.Insert{Modifier: "ignore"}).Table("PinnedObject").Create(pinneds).Error - return err + return ctx.Clauses(clause.OnConflict{ + Columns: []clause.Column{{Name: "ObjectID"}, {Name: "StorageID"}}, + DoUpdates: clause.AssignmentColumns([]string{"CreateTime"}), + }).Create(&pinneds).Error } func (*PinnedObjectDB) CreateFromPackage(ctx SQLContext, packageID cdssdk.PackageID, stgID cdssdk.StorageID) error { diff --git a/common/pkgs/db2/storage_package.go b/common/pkgs/db2/storage_package.go deleted file mode 100644 index 2eb9814..0000000 --- a/common/pkgs/db2/storage_package.go +++ /dev/null @@ -1,83 +0,0 @@ -package db2 - -import ( - cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/storage/common/pkgs/db2/model" -) - -type StoragePackageDB struct { - *DB -} - -func (db *DB) 
StoragePackage() *StoragePackageDB { - return &StoragePackageDB{DB: db} -} - -func (*StoragePackageDB) Get(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) (model.StoragePackage, error) { - var ret model.StoragePackage - err := ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ?", storageID, packageID, userID).First(&ret).Error - return ret, err -} - -func (*StoragePackageDB) GetAllByStorageAndPackageID(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID) ([]model.StoragePackage, error) { - var ret []model.StoragePackage - err := ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ?", storageID, packageID).Find(&ret).Error - return ret, err -} - -func (*StoragePackageDB) GetAllByStorageID(ctx SQLContext, storageID cdssdk.StorageID) ([]model.StoragePackage, error) { - var ret []model.StoragePackage - err := ctx.Table("StoragePackage").Where("StorageID = ?", storageID).Find(&ret).Error - return ret, err -} - -func (*StoragePackageDB) CreateOrUpdate(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error { - sql := "INSERT INTO StoragePackage (StorageID, PackageID, UserID, State) VALUES (?, ?, ?, ?) " + - "ON DUPLICATE KEY UPDATE State = VALUES(State)" - return ctx.Exec(sql, storageID, packageID, userID, model.StoragePackageStateNormal).Error -} - -func (*StoragePackageDB) ChangeState(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID, state string) error { - return ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ?", storageID, packageID, userID).Update("State", state).Error -} - -// SetStateNormal 将状态设置为Normal,如果记录状态是Deleted,则不进行操作 -func (*StoragePackageDB) SetStateNormal(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error { - return ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? 
AND UserID = ? AND State <> ?", - storageID, packageID, userID, model.StoragePackageStateDeleted).Update("State", model.StoragePackageStateNormal).Error -} - -func (*StoragePackageDB) SetAllPackageState(ctx SQLContext, packageID cdssdk.PackageID, state string) (int64, error) { - ret := ctx.Table("StoragePackage").Where("PackageID = ?", packageID).Update("State", state) - if err := ret.Error; err != nil { - return 0, err - } - return ret.RowsAffected, nil -} - -// SetAllPackageOutdated 将Storage中指定对象设置为已过期。只会设置Normal状态的对象 -func (*StoragePackageDB) SetAllPackageOutdated(ctx SQLContext, packageID cdssdk.PackageID) (int64, error) { - ret := ctx.Table("StoragePackage").Where("State = ? AND PackageID = ?", model.StoragePackageStateNormal, packageID).Update("State", model.StoragePackageStateOutdated) - if err := ret.Error; err != nil { - return 0, err - } - return ret.RowsAffected, nil -} - -func (db *StoragePackageDB) SetAllPackageDeleted(ctx SQLContext, packageID cdssdk.PackageID) (int64, error) { - return db.SetAllPackageState(ctx, packageID, model.StoragePackageStateDeleted) -} - -func (*StoragePackageDB) Delete(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error { - return ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ?", storageID, packageID, userID).Delete(&model.StoragePackage{}).Error -} - -// FindPackageStorages 查询存储了指定对象的Storage -func (*StoragePackageDB) FindPackageStorages(ctx SQLContext, packageID cdssdk.PackageID) ([]model.Storage, error) { - var ret []model.Storage - err := ctx.Table("StoragePackage").Select("Storage.*"). - Joins("JOIN Storage ON StoragePackage.StorageID = Storage.StorageID"). - Where("PackageID = ?", packageID). 
- Scan(&ret).Error - return ret, err -} diff --git a/common/pkgs/db2/user_bucket.go b/common/pkgs/db2/user_bucket.go index cbec894..4af3183 100644 --- a/common/pkgs/db2/user_bucket.go +++ b/common/pkgs/db2/user_bucket.go @@ -13,10 +13,14 @@ func (db *DB) UserBucket() *UserBucketDB { return &UserBucketDB{DB: db} } -func (*UserBucketDB) Create(ctx SQLContext, userID int64, bucketID int64) error { +func (*UserBucketDB) Create(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) error { userBucket := model.UserBucket{ - UserID: cdssdk.UserID(userID), - BucketID: cdssdk.BucketID(bucketID), + UserID: userID, + BucketID: bucketID, } return ctx.Table("UserBucket").Create(&userBucket).Error } + +func (*UserBucketDB) DeleteByBucketID(ctx SQLContext, bucketID cdssdk.BucketID) error { + return ctx.Table("UserBucket").Where("BucketID = ?", bucketID).Delete(&model.UserBucket{}).Error +} diff --git a/common/pkgs/distlock/reqbuilder/metadata_storage_package.go b/common/pkgs/distlock/reqbuilder/metadata_storage_package.go deleted file mode 100644 index e14c275..0000000 --- a/common/pkgs/distlock/reqbuilder/metadata_storage_package.go +++ /dev/null @@ -1,24 +0,0 @@ -package reqbuilder - -import ( - "gitlink.org.cn/cloudream/common/pkgs/distlock" - cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider" -) - -type MetadataStoragePackageLockReqBuilder struct { - *MetadataLockReqBuilder -} - -func (b *MetadataLockReqBuilder) StoragePackage() *MetadataStoragePackageLockReqBuilder { - return &MetadataStoragePackageLockReqBuilder{MetadataLockReqBuilder: b} -} - -func (b *MetadataStoragePackageLockReqBuilder) CreateOne(userID cdssdk.UserID, storageID cdssdk.StorageID, packageID cdssdk.PackageID) *MetadataStoragePackageLockReqBuilder { - b.locks = append(b.locks, distlock.Lock{ - Path: b.makePath("StoragePackage"), - Name: lockprovider.MetadataCreateLock, - Target: *lockprovider.NewStringLockTarget().Add(userID, 
storageID, packageID), - }) - return b -} diff --git a/common/pkgs/distlock/service.go b/common/pkgs/distlock/service.go index 0c1333c..a30a49e 100644 --- a/common/pkgs/distlock/service.go +++ b/common/pkgs/distlock/service.go @@ -24,7 +24,7 @@ func initProviders() []distlock.PathProvider { provs = append(provs, initMetadataLockProviders()...) - provs = append(provs, initIPFSLockProviders()...) + provs = append(provs, initShardLockProviders()...) provs = append(provs, initStorageLockProviders()...) @@ -45,12 +45,11 @@ func initMetadataLockProviders() []distlock.PathProvider { distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "ObjectRep"), distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "ObjectBlock"), distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Cache"), - distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "StoragePackage"), distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Location"), } } -func initIPFSLockProviders() []distlock.PathProvider { +func initShardLockProviders() []distlock.PathProvider { return []distlock.PathProvider{ distlock.NewPathProvider(lockprovider.NewShardStoreLock(), lockprovider.ShardStoreLockPathPrefix, trie.WORD_ANY), } diff --git a/common/pkgs/downloader/config.go b/common/pkgs/downloader/config.go index 6311f50..9a235a6 100644 --- a/common/pkgs/downloader/config.go +++ b/common/pkgs/downloader/config.go @@ -3,8 +3,6 @@ package downloader type Config struct { // EC模式的Object的条带缓存数量 MaxStripCacheCount int `json:"maxStripCacheCount"` - // 当到下载节点的延迟高于这个值时,该节点在评估时会有更高的分数惩罚,单位:ms - HighLatencyHubMs float64 `json:"highLatencyHubMs"` // EC模式下,每个Object的条带的预取数量,最少为1 ECStripPrefetchCount int `json:"ecStripPrefetchCount"` } diff --git a/common/pkgs/downloader/downloader.go b/common/pkgs/downloader/downloader.go index 
54f09e0..176234f 100644 --- a/common/pkgs/downloader/downloader.go +++ b/common/pkgs/downloader/downloader.go @@ -10,8 +10,9 @@ import ( stgglb "gitlink.org.cn/cloudream/storage/common/globals" stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/connectivity" + "gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy" coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" ) const ( @@ -38,23 +39,25 @@ type Downloading struct { } type Downloader struct { - strips *StripCache - cfg Config - conn *connectivity.Collector - stgMgr *svcmgr.Manager + strips *StripCache + cfg Config + conn *connectivity.Collector + stgAgts *agtpool.AgentPool + selector *strategy.Selector } -func NewDownloader(cfg Config, conn *connectivity.Collector, stgMgr *svcmgr.Manager) Downloader { +func NewDownloader(cfg Config, conn *connectivity.Collector, stgAgts *agtpool.AgentPool, sel *strategy.Selector) Downloader { if cfg.MaxStripCacheCount == 0 { cfg.MaxStripCacheCount = DefaultMaxStripCacheCount } ch, _ := lru.New[ECStripKey, ObjectECStrip](cfg.MaxStripCacheCount) return Downloader{ - strips: ch, - cfg: cfg, - conn: conn, - stgMgr: stgMgr, + strips: ch, + cfg: cfg, + conn: conn, + stgAgts: stgAgts, + selector: sel, } } diff --git a/common/pkgs/downloader/iterator.go b/common/pkgs/downloader/iterator.go index 9f956d3..df66758 100644 --- a/common/pkgs/downloader/iterator.go +++ b/common/pkgs/downloader/iterator.go @@ -4,28 +4,21 @@ import ( "context" "fmt" "io" - "math" "reflect" - "time" - "github.com/samber/lo" - - "gitlink.org.cn/cloudream/common/pkgs/bitmap" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/logger" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" "gitlink.org.cn/cloudream/common/utils/io2" 
"gitlink.org.cn/cloudream/common/utils/math2" - "gitlink.org.cn/cloudream/common/utils/sort2" - "gitlink.org.cn/cloudream/storage/common/consts" stgglb "gitlink.org.cn/cloudream/storage/common/globals" stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/distlock" + "gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" - coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" ) type downloadStorageInfo struct { @@ -39,15 +32,10 @@ type DownloadContext struct { Distlock *distlock.Service } type DownloadObjectIterator struct { - OnClosing func() - + OnClosing func() downloader *Downloader reqs []downloadReqeust2 currentIndex int - inited bool - - coorCli *coormq.Client - allStorages map[cdssdk.StorageID]stgmod.StorageDetail } func NewDownloadObjectIterator(downloader *Downloader, downloadObjs []downloadReqeust2) *DownloadObjectIterator { @@ -58,68 +46,11 @@ func NewDownloadObjectIterator(downloader *Downloader, downloadObjs []downloadRe } func (i *DownloadObjectIterator) MoveNext() (*Downloading, error) { - if !i.inited { - if err := i.init(); err != nil { - return nil, err - } - - i.inited = true - } - if i.currentIndex >= len(i.reqs) { return nil, iterator.ErrNoMoreItem } - item, err := i.doMove() - i.currentIndex++ - return item, err -} - -func (i *DownloadObjectIterator) init() error { - coorCli, err := stgglb.CoordinatorMQPool.Acquire() - if err != nil { - return fmt.Errorf("new coordinator client: %w", err) - } - i.coorCli = coorCli - - allStgIDsMp := make(map[cdssdk.StorageID]bool) - for _, obj := range i.reqs { - if obj.Detail == nil { - continue - } - - for _, p := range obj.Detail.PinnedAt { - allStgIDsMp[p] = true - } - - for _, b := range obj.Detail.Blocks { - allStgIDsMp[b.StorageID] = true - } - } - - 
stgIDs := lo.Keys(allStgIDsMp) - getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(stgIDs)) - if err != nil { - return fmt.Errorf("getting storage details: %w", err) - } - - i.allStorages = make(map[cdssdk.StorageID]stgmod.StorageDetail) - for idx, s := range getStgs.Storages { - if s == nil { - return fmt.Errorf("storage %v not found", stgIDs[idx]) - } - if s.Storage.ShardStore == nil { - return fmt.Errorf("storage %v has no shard store", stgIDs[idx]) - } - - i.allStorages[s.Storage.StorageID] = *s - } - - return nil -} - -func (iter *DownloadObjectIterator) doMove() (*Downloading, error) { - req := iter.reqs[iter.currentIndex] + req := i.reqs[i.currentIndex] if req.Detail == nil { return &Downloading{ Object: nil, @@ -128,57 +59,51 @@ func (iter *DownloadObjectIterator) doMove() (*Downloading, error) { }, nil } - switch red := req.Detail.Object.Redundancy.(type) { - case *cdssdk.NoneRedundancy: - reader, err := iter.downloadNoneOrRepObject(req) - if err != nil { - return nil, fmt.Errorf("downloading object %v: %w", req.Raw.ObjectID, err) - } + destHub := cdssdk.HubID(0) + if stgglb.Local.HubID != nil { + destHub = *stgglb.Local.HubID + } - return &Downloading{ - Object: &req.Detail.Object, - File: reader, - Request: req.Raw, - }, nil + strg, err := i.downloader.selector.Select(strategy.Request{ + Detail: *req.Detail, + Range: math2.NewRange(req.Raw.Offset, req.Raw.Length), + DestHub: destHub, + DestLocation: stgglb.Local.LocationID, + }) + if err != nil { + return nil, fmt.Errorf("selecting download strategy: %w", err) + } - case *cdssdk.RepRedundancy: - reader, err := iter.downloadNoneOrRepObject(req) + var reader io.ReadCloser + switch strg := strg.(type) { + case *strategy.DirectStrategy: + reader, err = i.downloadDirect(req, *strg) if err != nil { - return nil, fmt.Errorf("downloading rep object %v: %w", req.Raw.ObjectID, err) + return nil, fmt.Errorf("downloading object %v: %w", req.Raw.ObjectID, err) } - return &Downloading{ - Object: 
&req.Detail.Object, - File: reader, - Request: req.Raw, - }, nil - - case *cdssdk.ECRedundancy: - reader, err := iter.downloadECObject(req, red) + case *strategy.ECReconstructStrategy: + reader, err = i.downloadECReconstruct(req, *strg) if err != nil { return nil, fmt.Errorf("downloading ec object %v: %w", req.Raw.ObjectID, err) } - return &Downloading{ - Object: &req.Detail.Object, - File: reader, - Request: req.Raw, - }, nil - - case *cdssdk.LRCRedundancy: - reader, err := iter.downloadLRCObject(req, red) + case *strategy.LRCReconstructStrategy: + reader, err = i.downloadLRCReconstruct(req, *strg) if err != nil { return nil, fmt.Errorf("downloading lrc object %v: %w", req.Raw.ObjectID, err) } - return &Downloading{ - Object: &req.Detail.Object, - File: reader, - Request: req.Raw, - }, nil + default: + return nil, fmt.Errorf("unsupported strategy type: %v", reflect.TypeOf(strg)) } - return nil, fmt.Errorf("unsupported redundancy type: %v of object %v", reflect.TypeOf(req.Detail.Object.Redundancy), req.Raw.ObjectID) + i.currentIndex++ + return &Downloading{ + Object: &req.Detail.Object, + File: reader, + Request: req.Raw, + }, nil } func (i *DownloadObjectIterator) Close() { @@ -187,227 +112,93 @@ func (i *DownloadObjectIterator) Close() { } } -func (iter *DownloadObjectIterator) downloadNoneOrRepObject(obj downloadReqeust2) (io.ReadCloser, error) { - allStgs, err := iter.sortDownloadStorages(obj) - if err != nil { - return nil, err - } +func (i *DownloadObjectIterator) downloadDirect(req downloadReqeust2, strg strategy.DirectStrategy) (io.ReadCloser, error) { + logger.Debugf("downloading object %v from storage %v", req.Raw.ObjectID, strg.Storage.Storage.String()) - bsc, blocks := iter.getMinReadingBlockSolution(allStgs, 1) - osc, stg := iter.getMinReadingObjectSolution(allStgs, 1) - if bsc < osc { - logger.Debugf("downloading object %v from storage %v", obj.Raw.ObjectID, blocks[0].Storage.Storage.String()) - return iter.downloadFromStorage(&blocks[0].Storage, obj) 
- } + var strHandle *exec.DriverReadStream + ft := ioswitch2.NewFromTo() - if osc == math.MaxFloat64 { - // bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件 - return nil, fmt.Errorf("no storage has this object") + toExec, handle := ioswitch2.NewToDriver(ioswitch2.RawStream()) + toExec.Range = math2.Range{ + Offset: req.Raw.Offset, } - - logger.Debugf("downloading object %v from storage %v", obj.Raw.ObjectID, stg.Storage.String()) - return iter.downloadFromStorage(stg, obj) -} - -func (iter *DownloadObjectIterator) downloadECObject(req downloadReqeust2, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, error) { - allStorages, err := iter.sortDownloadStorages(req) - if err != nil { - return nil, err + if req.Raw.Length != -1 { + len := req.Raw.Length + toExec.Range.Length = &len } - bsc, blocks := iter.getMinReadingBlockSolution(allStorages, ecRed.K) - osc, stg := iter.getMinReadingObjectSolution(allStorages, ecRed.K) - - if bsc < osc { - var logStrs []any = []any{fmt.Sprintf("downloading ec object %v from blocks: ", req.Raw.ObjectID)} - for i, b := range blocks { - if i > 0 { - logStrs = append(logStrs, ", ") - } - logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Block.Index, b.Storage.Storage.String())) - } - logger.Debug(logStrs...) 
- - pr, pw := io.Pipe() - go func() { - readPos := req.Raw.Offset - totalReadLen := req.Detail.Object.Size - req.Raw.Offset - if req.Raw.Length >= 0 { - totalReadLen = math2.Min(req.Raw.Length, totalReadLen) - } - - firstStripIndex := readPos / ecRed.StripSize() - stripIter := NewStripIterator(iter.downloader, req.Detail.Object, blocks, ecRed, firstStripIndex, iter.downloader.strips, iter.downloader.cfg.ECStripPrefetchCount) - defer stripIter.Close() - - for totalReadLen > 0 { - strip, err := stripIter.MoveNext() - if err == iterator.ErrNoMoreItem { - pw.CloseWithError(io.ErrUnexpectedEOF) - return - } - if err != nil { - pw.CloseWithError(err) - return - } - - readRelativePos := readPos - strip.Position - curReadLen := math2.Min(totalReadLen, ecRed.StripSize()-readRelativePos) - - err = io2.WriteAll(pw, strip.Data[readRelativePos:readRelativePos+curReadLen]) - if err != nil { - pw.CloseWithError(err) - return - } - - totalReadLen -= curReadLen - readPos += curReadLen - } - pw.Close() - }() + ft.AddFrom(ioswitch2.NewFromShardstore(req.Detail.Object.FileHash, *strg.Storage.MasterHub, strg.Storage, ioswitch2.RawStream())).AddTo(toExec) + strHandle = handle - return pr, nil + plans := exec.NewPlanBuilder() + if err := parser.Parse(ft, plans); err != nil { + return nil, fmt.Errorf("parsing plan: %w", err) } - // bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件 - if osc == math.MaxFloat64 { - return nil, fmt.Errorf("no enough blocks to reconstruct the object %v , want %d, get only %d", req.Raw.ObjectID, ecRed.K, len(blocks)) - } + exeCtx := exec.NewExecContext() + exec.SetValueByType(exeCtx, i.downloader.stgAgts) + exec := plans.Execute(exeCtx) + go exec.Wait(context.TODO()) - logger.Debugf("downloading ec object %v from storage %v", req.Raw.ObjectID, stg.Storage.String()) - return iter.downloadFromStorage(stg, req) + return exec.BeginRead(strHandle) } -func (iter *DownloadObjectIterator) sortDownloadStorages(req downloadReqeust2) ([]*downloadStorageInfo, error) { 
- var stgIDs []cdssdk.StorageID - for _, id := range req.Detail.PinnedAt { - if !lo.Contains(stgIDs, id) { - stgIDs = append(stgIDs, id) - } - } - for _, b := range req.Detail.Blocks { - if !lo.Contains(stgIDs, b.StorageID) { - stgIDs = append(stgIDs, b.StorageID) +func (i *DownloadObjectIterator) downloadECReconstruct(req downloadReqeust2, strg strategy.ECReconstructStrategy) (io.ReadCloser, error) { + var logStrs []any = []any{fmt.Sprintf("downloading ec object %v from: ", req.Raw.ObjectID)} + for i, b := range strg.Blocks { + if i > 0 { + logStrs = append(logStrs, ", ") } + + logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Index, strg.Storages[i].Storage.String())) } + logger.Debug(logStrs...) - downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo) - for _, id := range req.Detail.PinnedAt { - storage, ok := downloadStorageMap[id] - if !ok { - mod := iter.allStorages[id] - storage = &downloadStorageInfo{ - Storage: mod, - ObjectPinned: true, - Distance: iter.getStorageDistance(mod), - } - downloadStorageMap[id] = storage + downloadBlks := make([]downloadBlock, len(strg.Blocks)) + for i, b := range strg.Blocks { + downloadBlks[i] = downloadBlock{ + Block: b, + Storage: strg.Storages[i], } - - storage.ObjectPinned = true } - for _, b := range req.Detail.Blocks { - storage, ok := downloadStorageMap[b.StorageID] - if !ok { - mod := iter.allStorages[b.StorageID] - storage = &downloadStorageInfo{ - Storage: mod, - Distance: iter.getStorageDistance(mod), - } - downloadStorageMap[b.StorageID] = storage + pr, pw := io.Pipe() + go func() { + readPos := req.Raw.Offset + totalReadLen := req.Detail.Object.Size - req.Raw.Offset + if req.Raw.Length >= 0 { + totalReadLen = math2.Min(req.Raw.Length, totalReadLen) } - storage.Blocks = append(storage.Blocks, b) - } - - return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int { - return sort2.Cmp(left.Distance, right.Distance) - }), nil -} + firstStripIndex := readPos / 
strg.Redundancy.StripSize() + stripIter := NewStripIterator(i.downloader, req.Detail.Object, downloadBlks, strg.Redundancy, firstStripIndex, i.downloader.strips, i.downloader.cfg.ECStripPrefetchCount) + defer stripIter.Close() -func (iter *DownloadObjectIterator) getMinReadingBlockSolution(sortedStgs []*downloadStorageInfo, k int) (float64, []downloadBlock) { - gotBlocksMap := bitmap.Bitmap64(0) - var gotBlocks []downloadBlock - dist := float64(0.0) - for _, n := range sortedStgs { - for _, b := range n.Blocks { - if !gotBlocksMap.Get(b.Index) { - gotBlocks = append(gotBlocks, downloadBlock{ - Storage: n.Storage, - Block: b, - }) - gotBlocksMap.Set(b.Index, true) - dist += n.Distance + for totalReadLen > 0 { + strip, err := stripIter.MoveNext() + if err == iterator.ErrNoMoreItem { + pw.CloseWithError(io.ErrUnexpectedEOF) + return } - - if len(gotBlocks) >= k { - return dist, gotBlocks + if err != nil { + pw.CloseWithError(err) + return } - } - } - - return math.MaxFloat64, gotBlocks -} -func (iter *DownloadObjectIterator) getMinReadingObjectSolution(sortedStgs []*downloadStorageInfo, k int) (float64, *stgmod.StorageDetail) { - dist := math.MaxFloat64 - var downloadStg *stgmod.StorageDetail - for _, n := range sortedStgs { - if n.ObjectPinned && float64(k)*n.Distance < dist { - dist = float64(k) * n.Distance - stg := n.Storage - downloadStg = &stg - } - } + readRelativePos := readPos - strip.Position + curReadLen := math2.Min(totalReadLen, strg.Redundancy.StripSize()-readRelativePos) - return dist, downloadStg -} + err = io2.WriteAll(pw, strip.Data[readRelativePos:readRelativePos+curReadLen]) + if err != nil { + pw.CloseWithError(err) + return + } -func (iter *DownloadObjectIterator) getStorageDistance(stg stgmod.StorageDetail) float64 { - if stgglb.Local.HubID != nil { - if stg.MasterHub.HubID == *stgglb.Local.HubID { - return consts.StorageDistanceSameStorage + totalReadLen -= curReadLen + readPos += curReadLen } - } - - if stg.MasterHub.LocationID == 
stgglb.Local.LocationID { - return consts.StorageDistanceSameLocation - } - - c := iter.downloader.conn.Get(stg.MasterHub.HubID) - if c == nil || c.Delay == nil || *c.Delay > time.Duration(float64(time.Millisecond)*iter.downloader.cfg.HighLatencyHubMs) { - return consts.HubDistanceHighLatencyHub - } - - return consts.StorageDistanceOther -} - -func (iter *DownloadObjectIterator) downloadFromStorage(stg *stgmod.StorageDetail, req downloadReqeust2) (io.ReadCloser, error) { - var strHandle *exec.DriverReadStream - ft := ioswitch2.NewFromTo() - - toExec, handle := ioswitch2.NewToDriver(ioswitch2.RawStream()) - toExec.Range = exec.Range{ - Offset: req.Raw.Offset, - } - if req.Raw.Length != -1 { - len := req.Raw.Length - toExec.Range.Length = &len - } - - ft.AddFrom(ioswitch2.NewFromShardstore(req.Detail.Object.FileHash, *stg.MasterHub, stg.Storage, ioswitch2.RawStream())).AddTo(toExec) - strHandle = handle - - plans := exec.NewPlanBuilder() - if err := parser.Parse(ft, plans); err != nil { - return nil, fmt.Errorf("parsing plan: %w", err) - } - - exeCtx := exec.NewExecContext() - exec.SetValueByType(exeCtx, iter.downloader.stgMgr) - exec := plans.Execute(exeCtx) - go exec.Wait(context.TODO()) + pw.Close() + }() - return exec.BeginRead(strHandle) + return pr, nil } diff --git a/common/pkgs/downloader/lrc.go b/common/pkgs/downloader/lrc.go index 0d1fc0e..f38168a 100644 --- a/common/pkgs/downloader/lrc.go +++ b/common/pkgs/downloader/lrc.go @@ -6,44 +6,30 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/iterator" "gitlink.org.cn/cloudream/common/pkgs/logger" - cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" "gitlink.org.cn/cloudream/common/utils/io2" "gitlink.org.cn/cloudream/common/utils/math2" + "gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy" ) -func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red *cdssdk.LRCRedundancy) (io.ReadCloser, error) { - allStgs, err := iter.sortDownloadStorages(req) - if err != nil { - return 
nil, err - } - - var blocks []downloadBlock - selectedBlkIdx := make(map[int]bool) - for _, stg := range allStgs { - for _, b := range stg.Blocks { - if b.Index >= red.M() || selectedBlkIdx[b.Index] { - continue - } - blocks = append(blocks, downloadBlock{ - Storage: stg.Storage, - Block: b, - }) - selectedBlkIdx[b.Index] = true - } - } - if len(blocks) < red.K { - return nil, fmt.Errorf("not enough blocks to download lrc object") - } - - var logStrs []any = []any{"downloading lrc object from blocks: "} - for i, b := range blocks { +func (iter *DownloadObjectIterator) downloadLRCReconstruct(req downloadReqeust2, strg strategy.LRCReconstructStrategy) (io.ReadCloser, error) { + var logStrs []any = []any{fmt.Sprintf("downloading lrc object %v from: ", req.Raw.ObjectID)} + for i, b := range strg.Blocks { if i > 0 { logStrs = append(logStrs, ", ") } - logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Block.Index, b.Storage.Storage.String())) + + logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Index, strg.Storages[i].Storage.String())) } logger.Debug(logStrs...) 
+ downloadBlks := make([]downloadBlock, len(strg.Blocks)) + for i, b := range strg.Blocks { + downloadBlks[i] = downloadBlock{ + Block: b, + Storage: strg.Storages[i], + } + } + pr, pw := io.Pipe() go func() { readPos := req.Raw.Offset @@ -52,8 +38,8 @@ func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red totalReadLen = math2.Min(req.Raw.Length, totalReadLen) } - firstStripIndex := readPos / int64(red.K) / int64(red.ChunkSize) - stripIter := NewLRCStripIterator(iter.downloader, req.Detail.Object, blocks, red, firstStripIndex, iter.downloader.strips, iter.downloader.cfg.ECStripPrefetchCount) + firstStripIndex := readPos / int64(strg.Redundancy.K) / int64(strg.Redundancy.ChunkSize) + stripIter := NewLRCStripIterator(iter.downloader, req.Detail.Object, downloadBlks, strg.Redundancy, firstStripIndex, iter.downloader.strips, iter.downloader.cfg.ECStripPrefetchCount) defer stripIter.Close() for totalReadLen > 0 { @@ -68,7 +54,7 @@ func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red } readRelativePos := readPos - strip.Position - nextStripPos := strip.Position + int64(red.K)*int64(red.ChunkSize) + nextStripPos := strip.Position + int64(strg.Redundancy.K)*int64(strg.Redundancy.ChunkSize) curReadLen := math2.Min(totalReadLen, nextStripPos-readPos) err = io2.WriteAll(pw, strip.Data[readRelativePos:readRelativePos+curReadLen]) diff --git a/common/pkgs/downloader/lrc_strip_iterator.go b/common/pkgs/downloader/lrc_strip_iterator.go index e00c614..4270ec5 100644 --- a/common/pkgs/downloader/lrc_strip_iterator.go +++ b/common/pkgs/downloader/lrc_strip_iterator.go @@ -9,6 +9,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/iterator" "gitlink.org.cn/cloudream/common/pkgs/logger" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc/parser" ) @@ -17,7 +18,7 @@ 
type LRCStripIterator struct { downloder *Downloader object cdssdk.Object blocks []downloadBlock - red *cdssdk.LRCRedundancy + red cdssdk.LRCRedundancy curStripIndex int64 cache *StripCache dataChan chan dataChanEntry @@ -26,7 +27,7 @@ type LRCStripIterator struct { inited bool } -func NewLRCStripIterator(downloder *Downloader, object cdssdk.Object, blocks []downloadBlock, red *cdssdk.LRCRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *LRCStripIterator { +func NewLRCStripIterator(downloder *Downloader, object cdssdk.Object, blocks []downloadBlock, red cdssdk.LRCRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *LRCStripIterator { if maxPrefetch <= 0 { maxPrefetch = 1 } @@ -101,7 +102,7 @@ func (s *LRCStripIterator) downloading() { froms = append(froms, ioswitchlrc.NewFromStorage(b.Block.FileHash, *stg.MasterHub, stg.Storage, b.Block.Index)) } - toExec, hd := ioswitchlrc.NewToDriverWithRange(-1, exec.Range{ + toExec, hd := ioswitchlrc.NewToDriverWithRange(-1, math2.Range{ Offset: s.curStripIndex * int64(s.red.ChunkSize*s.red.K), }) @@ -113,7 +114,7 @@ func (s *LRCStripIterator) downloading() { } exeCtx := exec.NewExecContext() - exec.SetValueByType(exeCtx, s.downloder.stgMgr) + exec.SetValueByType(exeCtx, s.downloder.stgAgts) exec := plans.Execute(exeCtx) diff --git a/common/pkgs/downloader/strategy/config.go b/common/pkgs/downloader/strategy/config.go new file mode 100644 index 0000000..29c4bb2 --- /dev/null +++ b/common/pkgs/downloader/strategy/config.go @@ -0,0 +1,6 @@ +package strategy + +type Config struct { + // 当到下载节点的延迟高于这个值时,该节点在评估时会有更高的分数惩罚,单位:ms + HighLatencyHubMs float64 `json:"highLatencyHubMs"` +} diff --git a/common/pkgs/downloader/strategy/selector.go b/common/pkgs/downloader/strategy/selector.go new file mode 100644 index 0000000..a629cb2 --- /dev/null +++ b/common/pkgs/downloader/strategy/selector.go @@ -0,0 +1,337 @@ +package strategy + +import ( + "fmt" + "math" + "reflect" + "time" + + 
"github.com/samber/lo" + "gitlink.org.cn/cloudream/common/pkgs/bitmap" + cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/math2" + "gitlink.org.cn/cloudream/common/utils/sort2" + "gitlink.org.cn/cloudream/storage/common/consts" + stgmod "gitlink.org.cn/cloudream/storage/common/models" + "gitlink.org.cn/cloudream/storage/common/pkgs/metacache" +) + +type Request struct { + Detail stgmod.ObjectDetail + Range math2.Range + DestHub cdssdk.HubID // 可以为0。此字段不为0时,DestLocation字段无意义。 + DestLocation cdssdk.LocationID // 可以为0 +} + +type Strategy interface { + GetDetail() stgmod.ObjectDetail +} + +// 直接下载完整对象 +type DirectStrategy struct { + Detail stgmod.ObjectDetail + Storage stgmod.StorageDetail +} + +func (s *DirectStrategy) GetDetail() stgmod.ObjectDetail { + return s.Detail +} + +// 从指定对象重建对象 +type ECReconstructStrategy struct { + Detail stgmod.ObjectDetail + Redundancy cdssdk.ECRedundancy + Blocks []stgmod.ObjectBlock + Storages []stgmod.StorageDetail +} + +func (s *ECReconstructStrategy) GetDetail() stgmod.ObjectDetail { + return s.Detail +} + +type LRCReconstructStrategy struct { + Detail stgmod.ObjectDetail + Redundancy cdssdk.LRCRedundancy + Blocks []stgmod.ObjectBlock + Storages []stgmod.StorageDetail +} + +func (s *LRCReconstructStrategy) GetDetail() stgmod.ObjectDetail { + return s.Detail +} + +type Selector struct { + cfg Config + storageMeta *metacache.StorageMeta + hubMeta *metacache.HubMeta + connectivity *metacache.Connectivity +} + +func NewSelector(cfg Config, storageMeta *metacache.StorageMeta, hubMeta *metacache.HubMeta, connectivity *metacache.Connectivity) *Selector { + return &Selector{ + cfg: cfg, + storageMeta: storageMeta, + hubMeta: hubMeta, + connectivity: connectivity, + } +} + +func (s *Selector) Select(req Request) (Strategy, error) { + req2 := request2{ + Detail: req.Detail, + Range: req.Range, + DestLocation: req.DestLocation, + } + + if req.DestHub != 0 { + req2.DestHub = 
s.hubMeta.Get(req.DestHub) + } + + switch red := req.Detail.Object.Redundancy.(type) { + case *cdssdk.NoneRedundancy: + return s.selectForNoneOrRep(req2) + + case *cdssdk.RepRedundancy: + return s.selectForNoneOrRep(req2) + + case *cdssdk.ECRedundancy: + return s.selectForEC(req2, *red) + + case *cdssdk.LRCRedundancy: + return s.selectForLRC(req2, *red) + } + + return nil, fmt.Errorf("unsupported redundancy type: %v of object %v", reflect.TypeOf(req.Detail.Object.Redundancy), req.Detail.Object.ObjectID) +} + +type downloadStorageInfo struct { + Storage stgmod.StorageDetail + ObjectPinned bool + Blocks []stgmod.ObjectBlock + Distance float64 +} + +type downloadBlock struct { + Storage stgmod.StorageDetail + Block stgmod.ObjectBlock +} + +type request2 struct { + Detail stgmod.ObjectDetail + Range math2.Range + DestHub *cdssdk.Hub + DestLocation cdssdk.LocationID +} + +func (s *Selector) selectForNoneOrRep(req request2) (Strategy, error) { + sortedStgs := s.sortDownloadStorages(req) + if len(sortedStgs) == 0 { + return nil, fmt.Errorf("no storage available for download") + } + + _, blks := s.getMinReadingBlockSolution(sortedStgs, 1) + if len(blks) == 0 { + return nil, fmt.Errorf("no block available for download") + } + + return &DirectStrategy{ + Detail: req.Detail, + Storage: sortedStgs[0].Storage, + }, nil +} + +func (s *Selector) selectForEC(req request2, red cdssdk.ECRedundancy) (Strategy, error) { + sortedStgs := s.sortDownloadStorages(req) + if len(sortedStgs) == 0 { + return nil, fmt.Errorf("no storage available for download") + } + + bsc, blocks := s.getMinReadingBlockSolution(sortedStgs, red.K) + osc, stg := s.getMinReadingObjectSolution(sortedStgs, red.K) + + if bsc < osc { + bs := make([]stgmod.ObjectBlock, len(blocks)) + ss := make([]stgmod.StorageDetail, len(blocks)) + for i, b := range blocks { + bs[i] = b.Block + ss[i] = b.Storage + } + + return &ECReconstructStrategy{ + Detail: req.Detail, + Redundancy: red, + Blocks: bs, + Storages: ss, + }, nil + } 
+ + // bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件 + if osc == math.MaxFloat64 { + return nil, fmt.Errorf("no enough blocks to reconstruct the object %v , want %d, get only %d", req.Detail.Object.ObjectID, red.K, len(blocks)) + } + + return &DirectStrategy{ + Detail: req.Detail, + Storage: stg, + }, nil +} + +func (s *Selector) selectForLRC(req request2, red cdssdk.LRCRedundancy) (Strategy, error) { + sortedStgs := s.sortDownloadStorages(req) + if len(sortedStgs) == 0 { + return nil, fmt.Errorf("no storage available for download") + } + + var blocks []downloadBlock + selectedBlkIdx := make(map[int]bool) + for _, stg := range sortedStgs { + for _, b := range stg.Blocks { + if b.Index >= red.M() || selectedBlkIdx[b.Index] { + continue + } + blocks = append(blocks, downloadBlock{ + Storage: stg.Storage, + Block: b, + }) + selectedBlkIdx[b.Index] = true + } + } + if len(blocks) < red.K { + return nil, fmt.Errorf("not enough blocks to download lrc object") + } + + bs := make([]stgmod.ObjectBlock, len(blocks)) + ss := make([]stgmod.StorageDetail, len(blocks)) + for i, b := range blocks { + bs[i] = b.Block + ss[i] = b.Storage + } + + return &LRCReconstructStrategy{ + Detail: req.Detail, + Redundancy: red, + Blocks: bs, + Storages: ss, + }, nil +} + +func (s *Selector) sortDownloadStorages(req request2) []*downloadStorageInfo { + var stgIDs []cdssdk.StorageID + for _, id := range req.Detail.PinnedAt { + if !lo.Contains(stgIDs, id) { + stgIDs = append(stgIDs, id) + } + } + for _, b := range req.Detail.Blocks { + if !lo.Contains(stgIDs, b.StorageID) { + stgIDs = append(stgIDs, b.StorageID) + } + } + + downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo) + for _, id := range req.Detail.PinnedAt { + storage, ok := downloadStorageMap[id] + if !ok { + mod := s.storageMeta.Get(id) + if mod == nil || mod.MasterHub == nil { + continue + } + + storage = &downloadStorageInfo{ + Storage: *mod, + ObjectPinned: true, + Distance: s.getStorageDistance(req, 
*mod), + } + downloadStorageMap[id] = storage + } + + storage.ObjectPinned = true + } + + for _, b := range req.Detail.Blocks { + storage, ok := downloadStorageMap[b.StorageID] + if !ok { + mod := s.storageMeta.Get(b.StorageID) + if mod == nil || mod.MasterHub == nil { + continue + } + + storage = &downloadStorageInfo{ + Storage: *mod, + Distance: s.getStorageDistance(req, *mod), + } + downloadStorageMap[b.StorageID] = storage + } + + storage.Blocks = append(storage.Blocks, b) + } + + return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int { + return sort2.Cmp(left.Distance, right.Distance) + }) +} + +func (s *Selector) getStorageDistance(req request2, src stgmod.StorageDetail) float64 { + if req.DestHub != nil { + if src.MasterHub.HubID == req.DestHub.HubID { + return consts.StorageDistanceSameStorage + } + + if src.MasterHub.LocationID == req.DestHub.LocationID { + return consts.StorageDistanceSameLocation + } + + latency := s.connectivity.Get(src.MasterHub.HubID, req.DestHub.HubID) + if latency == nil || *latency > time.Duration(float64(time.Millisecond)*s.cfg.HighLatencyHubMs) { + return consts.HubDistanceHighLatencyHub + } + + return consts.StorageDistanceOther + } + + if req.DestLocation != 0 { + if src.MasterHub.LocationID == req.DestLocation { + return consts.StorageDistanceSameLocation + } + } + + return consts.StorageDistanceOther +} + +func (s *Selector) getMinReadingBlockSolution(sortedStgs []*downloadStorageInfo, k int) (float64, []downloadBlock) { + gotBlocksMap := bitmap.Bitmap64(0) + var gotBlocks []downloadBlock + dist := float64(0.0) + for _, n := range sortedStgs { + for _, b := range n.Blocks { + if !gotBlocksMap.Get(b.Index) { + gotBlocks = append(gotBlocks, downloadBlock{ + Storage: n.Storage, + Block: b, + }) + gotBlocksMap.Set(b.Index, true) + dist += n.Distance + } + + if len(gotBlocks) >= k { + return dist, gotBlocks + } + } + } + + return math.MaxFloat64, gotBlocks +} + +func (s *Selector) 
getMinReadingObjectSolution(sortedStgs []*downloadStorageInfo, k int) (float64, stgmod.StorageDetail) { + dist := math.MaxFloat64 + var downloadStg stgmod.StorageDetail + for _, n := range sortedStgs { + if n.ObjectPinned && float64(k)*n.Distance < dist { + dist = float64(k) * n.Distance + stg := n.Storage + downloadStg = stg + } + } + + return dist, downloadStg +} diff --git a/common/pkgs/downloader/strip_iterator.go b/common/pkgs/downloader/strip_iterator.go index 326fb1d..ad7f94c 100644 --- a/common/pkgs/downloader/strip_iterator.go +++ b/common/pkgs/downloader/strip_iterator.go @@ -9,6 +9,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/iterator" "gitlink.org.cn/cloudream/common/pkgs/logger" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/math2" stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser" @@ -28,7 +29,7 @@ type StripIterator struct { downloader *Downloader object cdssdk.Object blocks []downloadBlock - red *cdssdk.ECRedundancy + red cdssdk.ECRedundancy curStripIndex int64 cache *StripCache dataChan chan dataChanEntry @@ -46,7 +47,7 @@ type dataChanEntry struct { Error error } -func NewStripIterator(downloader *Downloader, object cdssdk.Object, blocks []downloadBlock, red *cdssdk.ECRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *StripIterator { +func NewStripIterator(downloader *Downloader, object cdssdk.Object, blocks []downloadBlock, red cdssdk.ECRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *StripIterator { if maxPrefetch <= 0 { maxPrefetch = 1 } @@ -199,13 +200,13 @@ func (s *StripIterator) readStrip(stripIndex int64, buf []byte) (int, error) { } ft := ioswitch2.NewFromTo() - ft.ECParam = s.red + ft.ECParam = &s.red for _, b := range s.blocks { stg := b.Storage - ft.AddFrom(ioswitch2.NewFromShardstore(b.Block.FileHash, 
*stg.MasterHub, stg.Storage, ioswitch2.ECSrteam(b.Block.Index))) + ft.AddFrom(ioswitch2.NewFromShardstore(b.Block.FileHash, *stg.MasterHub, stg, ioswitch2.ECStream(b.Block.Index))) } - toExec, hd := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), exec.Range{ + toExec, hd := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), math2.Range{ Offset: stripIndex * s.red.StripSize(), }) ft.AddTo(toExec) @@ -217,7 +218,7 @@ func (s *StripIterator) readStrip(stripIndex int64, buf []byte) (int, error) { } exeCtx := exec.NewExecContext() - exec.SetValueByType(exeCtx, s.downloader.stgMgr) + exec.SetValueByType(exeCtx, s.downloader.stgAgts) exec := plans.Execute(exeCtx) ctx, cancel := context.WithCancel(context.Background()) diff --git a/common/pkgs/ioswitch2/fromto.go b/common/pkgs/ioswitch2/fromto.go index 8097827..2c72e9b 100644 --- a/common/pkgs/ioswitch2/fromto.go +++ b/common/pkgs/ioswitch2/fromto.go @@ -3,6 +3,7 @@ package ioswitch2 import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/math2" stgmod "gitlink.org.cn/cloudream/storage/common/models" ) @@ -14,7 +15,7 @@ type To interface { // To所需要的文件流的范围。具体含义与DataIndex有关系: // 如果DataIndex == -1,则表示在整个文件的范围。 // 如果DataIndex >= 0,则表示在文件的某个分片的范围。 - GetRange() exec.Range + GetRange() math2.Range GetStreamIndex() StreamIndex } @@ -38,7 +39,7 @@ func RawStream() StreamIndex { } } -func ECSrteam(index int) StreamIndex { +func ECStream(index int) StreamIndex { return StreamIndex{ Type: StreamIndexEC, Index: index, @@ -96,7 +97,7 @@ type FromDriver struct { func NewFromDriver(strIdx StreamIndex) (*FromDriver, *exec.DriverWriteStream) { handle := &exec.DriverWriteStream{ - RangeHint: &exec.Range{}, + RangeHint: &math2.Range{}, } return &FromDriver{ Handle: handle, @@ -111,11 +112,11 @@ func (f *FromDriver) GetStreamIndex() StreamIndex { type FromShardstore struct { FileHash cdssdk.FileHash Hub cdssdk.Hub - Storage 
cdssdk.Storage + Storage stgmod.StorageDetail StreamIndex StreamIndex } -func NewFromShardstore(fileHash cdssdk.FileHash, hub cdssdk.Hub, storage cdssdk.Storage, strIdx StreamIndex) *FromShardstore { +func NewFromShardstore(fileHash cdssdk.FileHash, hub cdssdk.Hub, storage stgmod.StorageDetail, strIdx StreamIndex) *FromShardstore { return &FromShardstore{ FileHash: fileHash, Hub: hub, @@ -131,7 +132,7 @@ func (f *FromShardstore) GetStreamIndex() StreamIndex { type ToDriver struct { Handle *exec.DriverReadStream StreamIndex StreamIndex - Range exec.Range + Range math2.Range } func NewToDriver(strIdx StreamIndex) (*ToDriver, *exec.DriverReadStream) { @@ -142,7 +143,7 @@ func NewToDriver(strIdx StreamIndex) (*ToDriver, *exec.DriverReadStream) { }, &str } -func NewToDriverWithRange(strIdx StreamIndex, rng exec.Range) (*ToDriver, *exec.DriverReadStream) { +func NewToDriverWithRange(strIdx StreamIndex, rng math2.Range) (*ToDriver, *exec.DriverReadStream) { str := exec.DriverReadStream{} return &ToDriver{ Handle: &str, @@ -155,7 +156,7 @@ func (t *ToDriver) GetStreamIndex() StreamIndex { return t.StreamIndex } -func (t *ToDriver) GetRange() exec.Range { +func (t *ToDriver) GetRange() math2.Range { return t.Range } @@ -163,7 +164,7 @@ type ToShardStore struct { Hub cdssdk.Hub Storage stgmod.StorageDetail StreamIndex StreamIndex - Range exec.Range + Range math2.Range FileHashStoreKey string } @@ -176,7 +177,7 @@ func NewToShardStore(hub cdssdk.Hub, stg stgmod.StorageDetail, strIdx StreamInde } } -func NewToShardStoreWithRange(hub cdssdk.Hub, stg stgmod.StorageDetail, streamIndex StreamIndex, fileHashStoreKey string, rng exec.Range) *ToShardStore { +func NewToShardStoreWithRange(hub cdssdk.Hub, stg stgmod.StorageDetail, streamIndex StreamIndex, fileHashStoreKey string, rng math2.Range) *ToShardStore { return &ToShardStore{ Hub: hub, Storage: stg, @@ -190,25 +191,21 @@ func (t *ToShardStore) GetStreamIndex() StreamIndex { return t.StreamIndex } -func (t *ToShardStore) 
GetRange() exec.Range { +func (t *ToShardStore) GetRange() math2.Range { return t.Range } type LoadToShared struct { - Hub cdssdk.Hub - Storage cdssdk.Storage - UserID cdssdk.UserID - PackageID cdssdk.PackageID - Path string + Hub cdssdk.Hub + Storage stgmod.StorageDetail + ObjectPath string } -func NewLoadToShared(hub cdssdk.Hub, storage cdssdk.Storage, userID cdssdk.UserID, packageID cdssdk.PackageID, path string) *LoadToShared { +func NewLoadToShared(hub cdssdk.Hub, storage stgmod.StorageDetail, objectPath string) *LoadToShared { return &LoadToShared{ - Hub: hub, - Storage: storage, - UserID: userID, - PackageID: packageID, - Path: path, + Hub: hub, + Storage: storage, + ObjectPath: objectPath, } } @@ -218,6 +215,6 @@ func (t *LoadToShared) GetStreamIndex() StreamIndex { } } -func (t *LoadToShared) GetRange() exec.Range { - return exec.Range{} +func (t *LoadToShared) GetRange() math2.Range { + return math2.Range{} } diff --git a/common/pkgs/ioswitch2/ops2/bypass.go b/common/pkgs/ioswitch2/ops2/bypass.go index 9f69762..e2cb10e 100644 --- a/common/pkgs/ioswitch2/ops2/bypass.go +++ b/common/pkgs/ioswitch2/ops2/bypass.go @@ -6,7 +6,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" ) @@ -14,6 +14,9 @@ func init() { exec.UseOp[*BypassToShardStore]() exec.UseVarValue[*BypassFileInfoValue]() exec.UseVarValue[*BypassHandleResultValue]() + + exec.UseOp[*BypassFromShardStore]() + exec.UseVarValue[*BypassFilePathValue]() } type BypassFileInfoValue struct { @@ -44,19 +47,19 @@ type BypassToShardStore struct { } func (o *BypassToShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error { - svcMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx) + stgAgts, err 
:= exec.GetValueByType[*agtpool.AgentPool](ctx) if err != nil { return err } - shardStore, err := svcMgr.GetShardStore(o.StorageID) + shardStore, err := stgAgts.GetShardStore(o.StorageID) if err != nil { return err } - notifier, ok := shardStore.(types.BypassNotifier) + br, ok := shardStore.(types.BypassWrite) if !ok { - return fmt.Errorf("shard store %v not support bypass", o.StorageID) + return fmt.Errorf("shard store %v not support bypass write", o.StorageID) } fileInfo, err := exec.BindVar[*BypassFileInfoValue](e, ctx.Context, o.BypassFileInfo) @@ -64,7 +67,7 @@ func (o *BypassToShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor) er return err } - err = notifier.BypassUploaded(fileInfo.BypassFileInfo) + err = br.BypassUploaded(fileInfo.BypassFileInfo) if err != nil { return err } @@ -78,6 +81,52 @@ func (o *BypassToShardStore) String() string { return fmt.Sprintf("BypassToShardStore[StorageID:%v] Info: %v, Callback: %v", o.StorageID, o.BypassFileInfo, o.BypassCallback) } +type BypassFilePathValue struct { + types.BypassFilePath +} + +func (v *BypassFilePathValue) Clone() exec.VarValue { + return &BypassFilePathValue{ + BypassFilePath: v.BypassFilePath, + } +} + +type BypassFromShardStore struct { + StorageID cdssdk.StorageID + FileHash cdssdk.FileHash + Output exec.VarID +} + +func (o *BypassFromShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error { + stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx) + if err != nil { + return err + } + + shardStore, err := stgAgts.GetShardStore(o.StorageID) + if err != nil { + return err + } + + br, ok := shardStore.(types.BypassRead) + if !ok { + return fmt.Errorf("shard store %v not support bypass read", o.StorageID) + } + + path, err := br.BypassRead(o.FileHash) + if err != nil { + return err + } + + e.PutVar(o.Output, &BypassFilePathValue{BypassFilePath: path}) + return nil +} + +func (o *BypassFromShardStore) String() string { + return fmt.Sprintf("BypassFromShardStore[StorageID:%v] 
FileHash: %v, Output: %v", o.StorageID, o.FileHash, o.Output) +} + +// 旁路写入 type BypassToShardStoreNode struct { dag.NodeBase StorageID cdssdk.StorageID @@ -103,19 +152,58 @@ func (n *BypassToShardStoreNode) BypassFileInfoSlot() dag.ValueInputSlot { } } -func (n *BypassToShardStoreNode) BypassCallbackVar() *dag.ValueVar { - return n.OutputValues().Get(0) +func (n *BypassToShardStoreNode) BypassCallbackVar() dag.ValueOutputSlot { + return dag.ValueOutputSlot{ + Node: n, + Index: 0, + } } -func (n *BypassToShardStoreNode) FileHashVar() *dag.ValueVar { - return n.OutputValues().Get(1) +func (n *BypassToShardStoreNode) FileHashVar() dag.ValueOutputSlot { + return dag.ValueOutputSlot{ + Node: n, + Index: 1, + } } func (t *BypassToShardStoreNode) GenerateOp() (exec.Op, error) { return &BypassToShardStore{ StorageID: t.StorageID, BypassFileInfo: t.BypassFileInfoSlot().Var().VarID, - BypassCallback: t.BypassCallbackVar().VarID, - FileHash: t.FileHashVar().VarID, + BypassCallback: t.BypassCallbackVar().Var().VarID, + FileHash: t.FileHashVar().Var().VarID, + }, nil +} + +// 旁路读取 +type BypassFromShardStoreNode struct { + dag.NodeBase + StorageID cdssdk.StorageID + FileHash cdssdk.FileHash +} + +func (b *GraphNodeBuilder) NewBypassFromShardStore(storageID cdssdk.StorageID, fileHash cdssdk.FileHash) *BypassFromShardStoreNode { + node := &BypassFromShardStoreNode{ + StorageID: storageID, + FileHash: fileHash, + } + b.AddNode(node) + + node.OutputValues().Init(node, 1) + return node +} + +func (n *BypassFromShardStoreNode) FilePathVar() dag.ValueOutputSlot { + return dag.ValueOutputSlot{ + Node: n, + Index: 0, + } +} + +func (n *BypassFromShardStoreNode) GenerateOp() (exec.Op, error) { + return &BypassFromShardStore{ + StorageID: n.StorageID, + FileHash: n.FileHash, + Output: n.FilePathVar().Var().VarID, }, nil } diff --git a/common/pkgs/ioswitch2/ops2/chunked.go b/common/pkgs/ioswitch2/ops2/chunked.go index 74758bf..6f45793 100644 --- a/common/pkgs/ioswitch2/ops2/chunked.go +++ 
b/common/pkgs/ioswitch2/ops2/chunked.go @@ -37,7 +37,10 @@ func (o *ChunkedSplit) Execute(ctx *exec.ExecContext, e *exec.Executor) error { sem := semaphore.NewWeighted(int64(len(outputs))) for i := range outputs { - sem.Acquire(ctx.Context, 1) + err = sem.Acquire(ctx.Context, 1) + if err != nil { + return err + } e.PutVar(o.Outputs[i], &exec.StreamValue{ Stream: io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) { diff --git a/common/pkgs/ioswitch2/ops2/driver.go b/common/pkgs/ioswitch2/ops2/driver.go index 8ca93fe..6e5fd1c 100644 --- a/common/pkgs/ioswitch2/ops2/driver.go +++ b/common/pkgs/ioswitch2/ops2/driver.go @@ -3,6 +3,7 @@ package ops2 import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" + "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" ) @@ -45,7 +46,7 @@ type ToDriverNode struct { dag.NodeBase To ioswitch2.To Handle *exec.DriverReadStream - Range exec.Range + Range math2.Range } func (b *GraphNodeBuilder) NewToDriver(to ioswitch2.To, handle *exec.DriverReadStream) *ToDriverNode { diff --git a/common/pkgs/ioswitch2/ops2/ec.go b/common/pkgs/ioswitch2/ops2/ec.go index 2e81063..5f3a1c4 100644 --- a/common/pkgs/ioswitch2/ops2/ec.go +++ b/common/pkgs/ioswitch2/ops2/ec.go @@ -10,6 +10,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/utils" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" "gitlink.org.cn/cloudream/common/utils/io2" + "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/common/utils/sync2" "gitlink.org.cn/cloudream/storage/common/pkgs/ec" ) @@ -45,20 +46,35 @@ func (o *ECMultiply) Execute(ctx *exec.ExecContext, e *exec.Executor) error { outputWrs[i] = wr } - fut := future.NewSetVoid() - go func() { - mul := ec.GaloisMultiplier().BuildGalois() + inputChunks := make([][]byte, len(o.Inputs)) + for i := range o.Inputs { + inputChunks[i] = make([]byte, math2.Min(o.ChunkSize, 64*1024)) + } - 
inputChunks := make([][]byte, len(o.Inputs)) - for i := range o.Inputs { - inputChunks[i] = make([]byte, o.ChunkSize) - } + // 输出用两个缓冲轮换 + outputBufPool := sync2.NewBucketPool[[][]byte]() + for i := 0; i < 2; i++ { outputChunks := make([][]byte, len(o.Outputs)) for i := range o.Outputs { - outputChunks[i] = make([]byte, o.ChunkSize) + outputChunks[i] = make([]byte, math2.Min(o.ChunkSize, 64*1024)) } + outputBufPool.PutEmpty(outputChunks) + } + + fut := future.NewSetVoid() + go func() { + mul := ec.GaloisMultiplier().BuildGalois() + defer outputBufPool.WakeUpAll() + + readLens := math2.SplitLessThan(o.ChunkSize, 64*1024) + readLenIdx := 0 for { + curReadLen := readLens[readLenIdx] + for i := range inputChunks { + inputChunks[i] = inputChunks[i][:curReadLen] + } + err := sync2.ParallelDo(inputs, func(s *exec.StreamValue, i int) error { _, err := io.ReadFull(s.Stream, inputChunks[i]) return err @@ -72,12 +88,34 @@ func (o *ECMultiply) Execute(ctx *exec.ExecContext, e *exec.Executor) error { return } - err = mul.Multiply(o.Coef, inputChunks, outputChunks) + outputBuf, ok := outputBufPool.GetEmpty() + if !ok { + return + } + for i := range outputBuf { + outputBuf[i] = outputBuf[i][:curReadLen] + } + + err = mul.Multiply(o.Coef, inputChunks, outputBuf) if err != nil { fut.SetError(err) return } + outputBufPool.PutFilled(outputBuf) + readLenIdx = (readLenIdx + 1) % len(readLens) + } + }() + + go func() { + defer outputBufPool.WakeUpAll() + + for { + outputChunks, ok := outputBufPool.GetFilled() + if !ok { + return + } + for i := range o.Outputs { err := io2.WriteAll(outputWrs[i], outputChunks[i]) if err != nil { @@ -85,6 +123,8 @@ func (o *ECMultiply) Execute(ctx *exec.ExecContext, e *exec.Executor) error { return } } + + outputBufPool.PutEmpty(outputChunks) } }() diff --git a/common/pkgs/ioswitch2/ops2/faas.go b/common/pkgs/ioswitch2/ops2/faas.go index eb54233..a6c7c63 100644 --- a/common/pkgs/ioswitch2/ops2/faas.go +++ b/common/pkgs/ioswitch2/ops2/faas.go @@ -1,12 +1,13 
@@ package ops2 +/* import ( "fmt" "github.com/samber/lo" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" ) @@ -19,17 +20,17 @@ type InternalFaaSGalMultiply struct { } func (o *InternalFaaSGalMultiply) Execute(ctx *exec.ExecContext, e *exec.Executor) error { - stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx) + stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx) if err != nil { return err } - fass, err := svcmgr.GetComponent[types.InternalFaaSCall](stgMgr, o.StorageID) + fass, err := agtpool.GetComponent[types.InternalFaaSCall](stgAgts, o.StorageID) if err != nil { return fmt.Errorf("getting faas component: %w", err) } - tmp, err := svcmgr.GetComponent[types.TempStore](stgMgr, o.StorageID) + tmp, err := agtpool.GetComponent[types.TempStore](stgAgts, o.StorageID) if err != nil { return fmt.Errorf("getting temp store component: %w", err) } @@ -58,3 +59,4 @@ func (o *InternalFaaSGalMultiply) Execute(ctx *exec.ExecContext, e *exec.Executo exec.PutArray(e, o.OutputFilePathes, outputVars) return nil } +*/ diff --git a/common/pkgs/ioswitch2/ops2/multipart.go b/common/pkgs/ioswitch2/ops2/multipart.go index a65d2dd..db64d2a 100644 --- a/common/pkgs/ioswitch2/ops2/multipart.go +++ b/common/pkgs/ioswitch2/ops2/multipart.go @@ -48,20 +48,22 @@ type MultipartInitiator struct { } func (o *MultipartInitiator) Execute(ctx *exec.ExecContext, e *exec.Executor) error { - initiator, err := factory.CreateComponent[types.MultipartInitiator](o.Storage) + blder := factory.GetBuilder(o.Storage) + multi, err := blder.CreateMultiparter() if err != nil { return err } - defer initiator.Abort() - // 启动一个新的上传任务 - initState, err := initiator.Initiate(ctx.Context) + // 启动一个新的上传任务W + multiTask, err := multi.Initiate(ctx.Context) if err != 
nil { return err } + defer multiTask.Abort() + // 分发上传参数 e.PutVar(o.UploadArgs, &MultipartUploadArgsValue{ - InitState: initState, + InitState: multiTask.InitState(), }) // 收集分片上传结果 @@ -76,7 +78,7 @@ func (o *MultipartInitiator) Execute(ctx *exec.ExecContext, e *exec.Executor) er } // 合并分片 - fileInfo, err := initiator.JoinParts(ctx.Context, partInfos) + fileInfo, err := multiTask.JoinParts(ctx.Context, partInfos) if err != nil { return fmt.Errorf("completing multipart upload: %v", err) } @@ -93,7 +95,7 @@ func (o *MultipartInitiator) Execute(ctx *exec.ExecContext, e *exec.Executor) er } if cb.Commited { - initiator.Complete() + multiTask.Complete() } return nil @@ -113,6 +115,7 @@ type MultipartUpload struct { } func (o *MultipartUpload) Execute(ctx *exec.ExecContext, e *exec.Executor) error { + blder := factory.GetBuilder(o.Storage) uploadArgs, err := exec.BindVar[*MultipartUploadArgsValue](e, ctx.Context, o.UploadArgs) if err != nil { return err @@ -124,13 +127,13 @@ func (o *MultipartUpload) Execute(ctx *exec.ExecContext, e *exec.Executor) error } defer partStr.Stream.Close() - uploader, err := factory.CreateComponent[types.MultipartUploader](o.Storage) + multi, err := blder.CreateMultiparter() if err != nil { return err } startTime := time.Now() - uploadedInfo, err := uploader.UploadPart(ctx.Context, uploadArgs.InitState, o.PartSize, o.PartNumber, partStr.Stream) + uploadedInfo, err := multi.UploadPart(ctx.Context, uploadArgs.InitState, o.PartSize, o.PartNumber, partStr.Stream) if err != nil { return err } @@ -163,12 +166,18 @@ func (b *GraphNodeBuilder) NewMultipartInitiator(storage stgmod.StorageDetail) * return node } -func (n *MultipartInitiatorNode) UploadArgsVar() *dag.ValueVar { - return n.OutputValues().Get(0) +func (n *MultipartInitiatorNode) UploadArgsVar() dag.ValueOutputSlot { + return dag.ValueOutputSlot{ + Node: n, + Index: 0, + } } -func (n *MultipartInitiatorNode) BypassFileInfoVar() *dag.ValueVar { - return n.OutputValues().Get(1) +func (n 
*MultipartInitiatorNode) BypassFileInfoVar() dag.ValueOutputSlot { + return dag.ValueOutputSlot{ + Node: n, + Index: 1, + } } func (n *MultipartInitiatorNode) BypassCallbackSlot() dag.ValueInputSlot { @@ -188,9 +197,9 @@ func (n *MultipartInitiatorNode) AppendPartInfoSlot() dag.ValueInputSlot { func (n *MultipartInitiatorNode) GenerateOp() (exec.Op, error) { return &MultipartInitiator{ Storage: n.Storage, - UploadArgs: n.UploadArgsVar().VarID, + UploadArgs: n.UploadArgsVar().Var().VarID, UploadedParts: n.InputValues().GetVarIDsStart(1), - BypassFileOutput: n.BypassFileInfoVar().VarID, + BypassFileOutput: n.BypassFileInfoVar().Var().VarID, BypassCallback: n.BypassCallbackSlot().Var().VarID, }, nil } @@ -223,8 +232,11 @@ func (n *MultipartUploadNode) UploadArgsSlot() dag.ValueInputSlot { } } -func (n *MultipartUploadNode) UploadResultVar() *dag.ValueVar { - return n.OutputValues().Get(0) +func (n *MultipartUploadNode) UploadResultVar() dag.ValueOutputSlot { + return dag.ValueOutputSlot{ + Node: n, + Index: 0, + } } func (n *MultipartUploadNode) PartStreamSlot() dag.StreamInputSlot { @@ -238,7 +250,7 @@ func (n *MultipartUploadNode) GenerateOp() (exec.Op, error) { return &MultipartUpload{ Storage: n.Storage, UploadArgs: n.UploadArgsSlot().Var().VarID, - UploadResult: n.UploadResultVar().VarID, + UploadResult: n.UploadResultVar().Var().VarID, PartStream: n.PartStreamSlot().Var().VarID, PartNumber: n.PartNumber, PartSize: n.PartSize, diff --git a/common/pkgs/ioswitch2/ops2/ops.go b/common/pkgs/ioswitch2/ops2/ops.go index 53be260..ab75df5 100644 --- a/common/pkgs/ioswitch2/ops2/ops.go +++ b/common/pkgs/ioswitch2/ops2/ops.go @@ -26,71 +26,3 @@ type ToNode interface { Input() dag.StreamInputSlot SetInput(input *dag.StreamVar) } - -// func formatStreamIO(node *dag.Node) string { -// is := "" -// for i, in := range node.InputStreams { -// if i > 0 { -// is += "," -// } - -// if in == nil { -// is += "." 
-// } else { -// is += fmt.Sprintf("%v", in.ID) -// } -// } - -// os := "" -// for i, out := range node.OutputStreams { -// if i > 0 -// os += "," -// } - -// if out == nil { -// os += "." -// } else { -// os += fmt.Sprintf("%v", out.ID) -// } -// } - -// if is == "" && os == "" { -// return "" -// } - -// return fmt.Sprintf("S{%s>%s}", is, os) -// } - -// func formatValueIO(node *dag.Node) string { -// is := "" -// for i, in := range node.InputValues { -// if i > 0 { -// is += "," -// } - -// if in == nil { -// is += "." -// } else { -// is += fmt.Sprintf("%v", in.ID) -// } -// } - -// os := "" -// for i, out := range node.OutputValues { -// if i > 0 { -// os += "," -// } - -// if out == nil { -// os += "." -// } else { -// os += fmt.Sprintf("%v", out.ID) -// } -// } - -// if is == "" && os == "" { -// return "" -// } - -// return fmt.Sprintf("V{%s>%s}", is, os) -// } diff --git a/common/pkgs/ioswitch2/ops2/range.go b/common/pkgs/ioswitch2/ops2/range.go index 82a454e..4bc70fa 100644 --- a/common/pkgs/ioswitch2/ops2/range.go +++ b/common/pkgs/ioswitch2/ops2/range.go @@ -81,7 +81,7 @@ func (o *Range) String() string { type RangeNode struct { dag.NodeBase - Range exec.Range + Range math2.Range } func (b *GraphNodeBuilder) NewRange() *RangeNode { @@ -93,7 +93,7 @@ func (b *GraphNodeBuilder) NewRange() *RangeNode { return node } -func (t *RangeNode) RangeStream(input *dag.StreamVar, rng exec.Range) *dag.StreamVar { +func (t *RangeNode) RangeStream(input *dag.StreamVar, rng math2.Range) *dag.StreamVar { input.To(t, 0) t.Range = rng return t.OutputStreams().Get(0) diff --git a/common/pkgs/ioswitch2/ops2/s2s.go b/common/pkgs/ioswitch2/ops2/s2s.go new file mode 100644 index 0000000..4e955e6 --- /dev/null +++ b/common/pkgs/ioswitch2/ops2/s2s.go @@ -0,0 +1,115 @@ +package ops2 + +import ( + "fmt" + + "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" + "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" + stgmod "gitlink.org.cn/cloudream/storage/common/models" + 
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" +) + +func init() { + exec.UseOp[*S2STransfer]() +} + +type S2STransfer struct { + Src stgmod.StorageDetail + SrcPath exec.VarID + Dst stgmod.StorageDetail + Output exec.VarID + BypassCallback exec.VarID +} + +func (o *S2STransfer) Execute(ctx *exec.ExecContext, e *exec.Executor) error { + srcPath, err := exec.BindVar[*BypassFilePathValue](e, ctx.Context, o.SrcPath) + if err != nil { + return err + } + + s2s, err := factory.GetBuilder(o.Dst).CreateS2STransfer() + if err != nil { + return err + } + + // 传输文件 + dstPath, err := s2s.Transfer(ctx.Context, o.Src, srcPath.Path) + if err != nil { + return err + } + defer s2s.Abort() + + // 告知后续Op处理临时文件 + e.PutVar(o.Output, &BypassFileInfoValue{BypassFileInfo: types.BypassFileInfo{ + TempFilePath: dstPath, + FileHash: srcPath.Info.Hash, + Size: srcPath.Info.Size, + }}) + + // 等待后续Op处理临时文件 + cb, err := exec.BindVar[*BypassHandleResultValue](e, ctx.Context, o.BypassCallback) + if err != nil { + return fmt.Errorf("getting temp file callback: %v", err) + } + + if cb.Commited { + s2s.Complete() + } + + return nil +} + +func (o *S2STransfer) String() string { + return fmt.Sprintf("S2STransfer %v:%v -> %v:%v", o.Src.Storage.String(), o.SrcPath, o.Dst.Storage.String(), o.Output) +} + +type S2STransferNode struct { + dag.NodeBase + Src stgmod.StorageDetail + Dst stgmod.StorageDetail +} + +func (b *GraphNodeBuilder) NewS2STransfer(src stgmod.StorageDetail, dst stgmod.StorageDetail) *S2STransferNode { + n := &S2STransferNode{ + Src: src, + Dst: dst, + } + b.AddNode(n) + + n.OutputValues().Init(n, 1) + n.InputValues().Init(2) + + return n +} + +func (n *S2STransferNode) SrcPathSlot() dag.ValueInputSlot { + return dag.ValueInputSlot{ + Node: n, + Index: 0, + } +} + +func (n *S2STransferNode) BypassCallbackSlot() dag.ValueInputSlot { + return dag.ValueInputSlot{ + Node: n, + Index: 1, + } +} + +func (n 
*S2STransferNode) BypassFileInfoVar() dag.ValueOutputSlot { + return dag.ValueOutputSlot{ + Node: n, + Index: 0, + } +} + +func (n *S2STransferNode) GenerateOp() (exec.Op, error) { + return &S2STransfer{ + Src: n.Src, + SrcPath: n.SrcPathSlot().Var().VarID, + Dst: n.Dst, + Output: n.BypassFileInfoVar().Var().VarID, + BypassCallback: n.BypassCallbackSlot().Var().VarID, + }, nil +} diff --git a/common/pkgs/ioswitch2/ops2/shard_store.go b/common/pkgs/ioswitch2/ops2/shard_store.go index 8097ced..76dfd86 100644 --- a/common/pkgs/ioswitch2/ops2/shard_store.go +++ b/common/pkgs/ioswitch2/ops2/shard_store.go @@ -12,7 +12,7 @@ import ( "gitlink.org.cn/cloudream/common/utils/io2" stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" ) @@ -42,12 +42,12 @@ func (o *ShardRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error { Debugf("reading from shard store") defer logger.Debugf("reading from shard store finished") - stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx) + stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx) if err != nil { return fmt.Errorf("getting storage manager: %w", err) } - store, err := stgMgr.GetShardStore(o.StorageID) + store, err := stgAgts.GetShardStore(o.StorageID) if err != nil { return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err) } @@ -84,12 +84,12 @@ func (o *ShardWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error { Debugf("writting file to shard store") defer logger.Debugf("write to shard store finished") - stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx) + stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx) if err != nil { return fmt.Errorf("getting storage manager: %w", err) } - store, err := stgMgr.GetShardStore(o.StorageID) + 
store, err := stgAgts.GetShardStore(o.StorageID) if err != nil { return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err) } diff --git a/common/pkgs/ioswitch2/ops2/shared_store.go b/common/pkgs/ioswitch2/ops2/shared_store.go index 22250b4..1d1bb11 100644 --- a/common/pkgs/ioswitch2/ops2/shared_store.go +++ b/common/pkgs/ioswitch2/ops2/shared_store.go @@ -7,8 +7,9 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/logger" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" ) func init() { @@ -16,12 +17,9 @@ func init() { } type SharedLoad struct { - Input exec.VarID `json:"input"` - StorageID cdssdk.StorageID `json:"storageID"` - UserID cdssdk.UserID `json:"userID"` - PackageID cdssdk.PackageID `json:"packageID"` - Path string `json:"path"` - FullPathOutput exec.VarID `json:"fullPathOutput"` + Input exec.VarID + StorageID cdssdk.StorageID + ObjectPath string } func (o *SharedLoad) Execute(ctx *exec.ExecContext, e *exec.Executor) error { @@ -30,12 +28,12 @@ func (o *SharedLoad) Execute(ctx *exec.ExecContext, e *exec.Executor) error { Debugf("load file to shared store") defer logger.Debugf("load file to shared store finished") - stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx) + stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx) if err != nil { return fmt.Errorf("getting storage manager: %w", err) } - store, err := stgMgr.GetSharedStore(o.StorageID) + store, err := stgAgts.GetSharedStore(o.StorageID) if err != nil { return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err) } @@ -46,44 +44,29 @@ func (o *SharedLoad) Execute(ctx *exec.ExecContext, e *exec.Executor) error { } defer input.Stream.Close() - fullPath, err := 
store.WritePackageObject(o.UserID, o.PackageID, o.Path, input.Stream) - if err != nil { - return fmt.Errorf("writing file to shard store: %w", err) - } - - if o.FullPathOutput > 0 { - e.PutVar(o.FullPathOutput, &exec.StringValue{ - Value: fullPath, - }) - } - return nil + return store.Write(o.ObjectPath, input.Stream) } func (o *SharedLoad) String() string { - return fmt.Sprintf("SharedLoad %v -> %v:%v/%v/%v", o.Input, o.StorageID, o.UserID, o.PackageID, o.Path) + return fmt.Sprintf("SharedLoad %v -> %v:%v", o.Input, o.StorageID, o.ObjectPath) } type SharedLoadNode struct { dag.NodeBase - To ioswitch2.To - StorageID cdssdk.StorageID - UserID cdssdk.UserID - PackageID cdssdk.PackageID - Path string + To ioswitch2.To + Storage stgmod.StorageDetail + ObjectPath string } -func (b *GraphNodeBuilder) NewSharedLoad(to ioswitch2.To, stgID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID, path string) *SharedLoadNode { +func (b *GraphNodeBuilder) NewSharedLoad(to ioswitch2.To, stg stgmod.StorageDetail, objPath string) *SharedLoadNode { node := &SharedLoadNode{ - To: to, - StorageID: stgID, - UserID: userID, - PackageID: packageID, - Path: path, + To: to, + Storage: stg, + ObjectPath: objPath, } b.AddNode(node) node.InputStreams().Init(1) - node.OutputValues().Init(node, 1) return node } @@ -102,17 +85,10 @@ func (t *SharedLoadNode) Input() dag.StreamInputSlot { } } -func (t *SharedLoadNode) FullPathVar() *dag.ValueVar { - return t.OutputValues().Get(0) -} - func (t *SharedLoadNode) GenerateOp() (exec.Op, error) { return &SharedLoad{ - Input: t.InputStreams().Get(0).VarID, - StorageID: t.StorageID, - UserID: t.UserID, - PackageID: t.PackageID, - Path: t.Path, - FullPathOutput: t.OutputValues().Get(0).VarID, + Input: t.InputStreams().Get(0).VarID, + StorageID: t.Storage.Storage.StorageID, + ObjectPath: t.ObjectPath, }, nil } diff --git a/common/pkgs/ioswitch2/parser/gen/generator.go b/common/pkgs/ioswitch2/parser/gen/generator.go new file mode 100644 index 
0000000..de35c62 --- /dev/null +++ b/common/pkgs/ioswitch2/parser/gen/generator.go @@ -0,0 +1,488 @@ +package gen + +import ( + "fmt" + "math" + + "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" + cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/lo2" + "gitlink.org.cn/cloudream/common/utils/math2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" +) + +// 检查使用不同编码时参数是否设置到位 +func CheckEncodingParams(ctx *state.GenerateState) error { + for _, f := range ctx.Ft.Froms { + if f.GetStreamIndex().IsEC() { + ctx.UseEC = true + if ctx.Ft.ECParam == nil { + return fmt.Errorf("EC encoding parameters not set") + } + } + + if f.GetStreamIndex().IsSegment() { + ctx.UseSegment = true + if ctx.Ft.SegmentParam == nil { + return fmt.Errorf("segment parameters not set") + } + } + } + + for _, t := range ctx.Ft.Toes { + if t.GetStreamIndex().IsEC() { + ctx.UseEC = true + if ctx.Ft.ECParam == nil { + return fmt.Errorf("EC encoding parameters not set") + } + } + + if t.GetStreamIndex().IsSegment() { + ctx.UseSegment = true + if ctx.Ft.SegmentParam == nil { + return fmt.Errorf("segment parameters not set") + } + } + } + + return nil +} + +// 计算输入流的打开范围。如果From或者To中包含EC的流,则会将打开范围扩大到条带大小的整数倍。 +func CalcStreamRange(ctx *state.GenerateState) { + rng := math2.NewRange(math.MaxInt64, 0) + + for _, to := range ctx.Ft.Toes { + strIdx := to.GetStreamIndex() + if strIdx.IsRaw() { + toRng := to.GetRange() + rng.ExtendStart(toRng.Offset) + if toRng.Length != nil { + rng.ExtendEnd(toRng.Offset + *toRng.Length) + } else { + rng.Length = nil + } + } else if strIdx.IsEC() { + toRng := to.GetRange() + stripSize := ctx.Ft.ECParam.StripSize() + blkStartIndex := math2.FloorDiv(toRng.Offset, int64(ctx.Ft.ECParam.ChunkSize)) + rng.ExtendStart(blkStartIndex 
* stripSize) + if toRng.Length != nil { + blkEndIndex := math2.CeilDiv(toRng.Offset+*toRng.Length, int64(ctx.Ft.ECParam.ChunkSize)) + rng.ExtendEnd(blkEndIndex * stripSize) + } else { + rng.Length = nil + } + + } else if strIdx.IsSegment() { + // Segment节点的Range是相对于本段的,需要加上本段的起始位置 + toRng := to.GetRange() + + segStart := ctx.Ft.SegmentParam.CalcSegmentStart(strIdx.Index) + + offset := toRng.Offset + segStart + + rng.ExtendStart(offset) + if toRng.Length != nil { + rng.ExtendEnd(offset + *toRng.Length) + } else { + rng.Length = nil + } + } + } + + if ctx.UseEC { + stripSize := ctx.Ft.ECParam.StripSize() + rng.ExtendStart(math2.Floor(rng.Offset, stripSize)) + if rng.Length != nil { + rng.ExtendEnd(math2.Ceil(rng.Offset+*rng.Length, stripSize)) + } + } + + ctx.StreamRange = rng +} + +func Extend(ctx *state.GenerateState) error { + for _, fr := range ctx.Ft.Froms { + frNode, err := buildFromNode(ctx, fr) + if err != nil { + return err + } + ctx.FromNodes[fr] = frNode + + ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{ + Stream: frNode.Output().Var(), + StreamIndex: fr.GetStreamIndex(), + }) + + // 对于完整文件的From,生成Split指令 + if fr.GetStreamIndex().IsRaw() { + // 只有输入输出需要EC编码的块时,才生成相关指令 + if ctx.UseEC { + splitNode := ctx.DAG.NewChunkedSplit(ctx.Ft.ECParam.ChunkSize, ctx.Ft.ECParam.K) + splitNode.Split(frNode.Output().Var()) + for i := 0; i < ctx.Ft.ECParam.K; i++ { + ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{ + Stream: splitNode.SubStream(i), + StreamIndex: ioswitch2.ECStream(i), + }) + } + } + + // 同上 + if ctx.UseSegment { + splitNode := ctx.DAG.NewSegmentSplit(ctx.Ft.SegmentParam.Segments) + frNode.Output().Var().ToSlot(splitNode.InputSlot()) + for i := 0; i < len(ctx.Ft.SegmentParam.Segments); i++ { + ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{ + Stream: splitNode.Segment(i), + StreamIndex: ioswitch2.SegmentStream(i), + }) + } + } + } + } + + if ctx.UseEC { + // 
如果有K个不同的文件块流,则生成Multiply指令,同时针对其生成的流,生成Join指令 + ecInputStrs := make(map[int]*dag.StreamVar) + for _, s := range ctx.IndexedStreams { + if s.StreamIndex.IsEC() && ecInputStrs[s.StreamIndex.Index] == nil { + ecInputStrs[s.StreamIndex.Index] = s.Stream + if len(ecInputStrs) == ctx.Ft.ECParam.K { + break + } + } + } + + if len(ecInputStrs) == ctx.Ft.ECParam.K { + mulNode := ctx.DAG.NewECMultiply(*ctx.Ft.ECParam) + + for i, s := range ecInputStrs { + mulNode.AddInput(s, i) + } + for i := 0; i < ctx.Ft.ECParam.N; i++ { + ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{ + Stream: mulNode.NewOutput(i), + StreamIndex: ioswitch2.ECStream(i), + }) + } + + joinNode := ctx.DAG.NewChunkedJoin(ctx.Ft.ECParam.ChunkSize) + for i := 0; i < ctx.Ft.ECParam.K; i++ { + // 不可能找不到流 + joinNode.AddInput(findOutputStream(ctx, ioswitch2.ECStream(i))) + } + ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{ + Stream: joinNode.Joined(), + StreamIndex: ioswitch2.RawStream(), + }) + } + } + + if ctx.UseSegment { + // 先假设有所有的顺序分段,生成Join指令,后续根据Range再实际计算是否缺少流 + joinNode := ctx.DAG.NewSegmentJoin(ctx.Ft.SegmentParam.Segments) + for i := 0; i < ctx.Ft.SegmentParam.SegmentCount(); i++ { + str := findOutputStream(ctx, ioswitch2.SegmentStream(i)) + if str != nil { + str.ToSlot(joinNode.InputSlot(i)) + } + } + ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{ + Stream: joinNode.Joined(), + StreamIndex: ioswitch2.RawStream(), + }) + + // SegmentJoin生成的Join指令可以用来生成EC块 + if ctx.UseEC { + splitNode := ctx.DAG.NewChunkedSplit(ctx.Ft.ECParam.ChunkSize, ctx.Ft.ECParam.K) + splitNode.Split(joinNode.Joined()) + + mulNode := ctx.DAG.NewECMultiply(*ctx.Ft.ECParam) + + for i := 0; i < ctx.Ft.ECParam.K; i++ { + mulNode.AddInput(splitNode.SubStream(i), i) + ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{ + Stream: splitNode.SubStream(i), + StreamIndex: ioswitch2.ECStream(i), + }) + } + + for i := 0; i < ctx.Ft.ECParam.N; i++ { + 
ctx.IndexedStreams = append(ctx.IndexedStreams, state.IndexedStream{ + Stream: mulNode.NewOutput(i), + StreamIndex: ioswitch2.ECStream(i), + }) + } + } + } + + // 为每一个To找到一个输入流 + for _, to := range ctx.Ft.Toes { + toNode, err := buildToNode(ctx, to) + if err != nil { + return err + } + ctx.ToNodes[to] = toNode + + str := findOutputStream(ctx, to.GetStreamIndex()) + if str == nil { + return fmt.Errorf("no output stream found for data index %d", to.GetStreamIndex()) + } + + toNode.SetInput(str) + } + + return nil +} + +func buildFromNode(ctx *state.GenerateState, f ioswitch2.From) (ops2.FromNode, error) { + var repRange math2.Range + repRange.Offset = ctx.StreamRange.Offset + if ctx.StreamRange.Length != nil { + repRngLen := *ctx.StreamRange.Length + repRange.Length = &repRngLen + } + + var blkRange math2.Range + if ctx.UseEC { + blkRange.Offset = ctx.StreamRange.Offset / int64(ctx.Ft.ECParam.ChunkSize*ctx.Ft.ECParam.K) * int64(ctx.Ft.ECParam.ChunkSize) + if ctx.StreamRange.Length != nil { + blkRngLen := *ctx.StreamRange.Length / int64(ctx.Ft.ECParam.ChunkSize*ctx.Ft.ECParam.K) * int64(ctx.Ft.ECParam.ChunkSize) + blkRange.Length = &blkRngLen + } + } + + switch f := f.(type) { + case *ioswitch2.FromShardstore: + t := ctx.DAG.NewShardRead(f, f.Storage.Storage.StorageID, types.NewOpen(f.FileHash)) + + if f.StreamIndex.IsRaw() { + t.Open.WithNullableLength(repRange.Offset, repRange.Length) + } else if f.StreamIndex.IsEC() { + t.Open.WithNullableLength(blkRange.Offset, blkRange.Length) + } else if f.StreamIndex.IsSegment() { + segStart := ctx.Ft.SegmentParam.CalcSegmentStart(f.StreamIndex.Index) + segLen := ctx.Ft.SegmentParam.Segments[f.StreamIndex.Index] + segEnd := segStart + segLen + + // 打开的范围不超过本段的范围 + + openOff := ctx.StreamRange.Offset - segStart + openOff = math2.Clamp(openOff, 0, segLen) + + openLen := segLen + + if ctx.StreamRange.Length != nil { + strEnd := ctx.StreamRange.Offset + *ctx.StreamRange.Length + openEnd := math2.Min(strEnd, segEnd) + openLen = 
openEnd - segStart - openOff + } + + t.Open.WithNullableLength(openOff, &openLen) + } + + switch addr := f.Hub.Address.(type) { + case *cdssdk.HttpAddressInfo: + t.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: f.Hub}) + t.Env().Pinned = true + + case *cdssdk.GRPCAddressInfo: + t.Env().ToEnvWorker(&ioswitch2.AgentWorker{Hub: f.Hub, Address: *addr}) + t.Env().Pinned = true + + default: + return nil, fmt.Errorf("unsupported node address type %T", addr) + } + + return t, nil + + case *ioswitch2.FromDriver: + n := ctx.DAG.NewFromDriver(f, f.Handle) + n.Env().ToEnvDriver() + n.Env().Pinned = true + + if f.StreamIndex.IsRaw() { + f.Handle.RangeHint.Offset = repRange.Offset + f.Handle.RangeHint.Length = repRange.Length + } else if f.StreamIndex.IsEC() { + f.Handle.RangeHint.Offset = blkRange.Offset + f.Handle.RangeHint.Length = blkRange.Length + } else if f.StreamIndex.IsSegment() { + segStart := ctx.Ft.SegmentParam.CalcSegmentStart(f.StreamIndex.Index) + segLen := ctx.Ft.SegmentParam.Segments[f.StreamIndex.Index] + segEnd := segStart + segLen + + // 打开的范围不超过本段的范围 + + openOff := repRange.Offset - segStart + openOff = math2.Clamp(openOff, 0, segLen) + + openLen := segLen + + if repRange.Length != nil { + repEnd := repRange.Offset + *repRange.Length + openEnd := math2.Min(repEnd, segEnd) + openLen = openEnd - openOff + } + + f.Handle.RangeHint.Offset = openOff + f.Handle.RangeHint.Length = &openLen + } + + return n, nil + + default: + return nil, fmt.Errorf("unsupported from type %T", f) + } +} + +func buildToNode(ctx *state.GenerateState, t ioswitch2.To) (ops2.ToNode, error) { + switch t := t.(type) { + case *ioswitch2.ToShardStore: + n := ctx.DAG.NewShardWrite(t, t.Storage, t.FileHashStoreKey) + + if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil { + return nil, err + } + + n.Env().Pinned = true + + return n, nil + + case *ioswitch2.ToDriver: + n := ctx.DAG.NewToDriver(t, t.Handle) + n.Env().ToEnvDriver() + n.Env().Pinned = true + + return n, nil + + case 
*ioswitch2.LoadToShared: + n := ctx.DAG.NewSharedLoad(t, t.Storage, t.ObjectPath) + + if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil { + return nil, err + } + + n.Env().Pinned = true + + return n, nil + + default: + return nil, fmt.Errorf("unsupported to type %T", t) + } +} + +func setEnvByAddress(n dag.Node, hub cdssdk.Hub, addr cdssdk.HubAddressInfo) error { + switch addr := addr.(type) { + case *cdssdk.HttpAddressInfo: + n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: hub}) + + case *cdssdk.GRPCAddressInfo: + n.Env().ToEnvWorker(&ioswitch2.AgentWorker{Hub: hub, Address: *addr}) + + default: + return fmt.Errorf("unsupported node address type %T", addr) + } + + return nil +} + +func findOutputStream(ctx *state.GenerateState, streamIndex ioswitch2.StreamIndex) *dag.StreamVar { + var ret *dag.StreamVar + for _, s := range ctx.IndexedStreams { + if s.StreamIndex == streamIndex { + ret = s.Stream + break + } + } + return ret +} + +// 根据StreamRange,调整SegmentSplit中分段的个数和每段的大小 +func FixSegmentSplit(ctx *state.GenerateState) error { + var err error + dag.WalkOnlyType[*ops2.SegmentSplitNode](ctx.DAG.Graph, func(node *ops2.SegmentSplitNode) bool { + var strEnd *int64 + if ctx.StreamRange.Length != nil { + e := ctx.StreamRange.Offset + *ctx.StreamRange.Length + strEnd = &e + } + + startSeg, endSeg := ctx.Ft.SegmentParam.CalcSegmentRange(ctx.StreamRange.Offset, strEnd) + + // 关闭超出范围的分段 + for i := endSeg; i < len(node.Segments); i++ { + node.OutputStreams().Get(i).ClearAllDst() + } + node.OutputStreams().Slots.RemoveRange(endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg) + node.Segments = lo2.RemoveRange(node.Segments, endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg) + + for i := 0; i < startSeg; i++ { + node.OutputStreams().Get(i).ClearAllDst() + } + node.OutputStreams().Slots.RemoveRange(0, startSeg) + node.Segments = lo2.RemoveRange(node.Segments, 0, startSeg) + + // StreamRange开始的位置可能在某个分段的中间,此时这个分段的大小等于流开始位置到分段结束位置的距离 + startSegStart := 
ctx.Ft.SegmentParam.CalcSegmentStart(startSeg) + node.Segments[0] -= ctx.StreamRange.Offset - startSegStart + + // StreamRange结束的位置可能在某个分段的中间,此时这个分段的大小就等于流结束位置到分段起始位置的距离 + if strEnd != nil { + endSegStart := ctx.Ft.SegmentParam.CalcSegmentStart(endSeg - 1) + node.Segments[len(node.Segments)-1] = *strEnd - endSegStart + } + return true + }) + + return err +} + +// 从SegmentJoin中删除未使用的分段 +func FixSegmentJoin(ctx *state.GenerateState) error { + var err error + dag.WalkOnlyType[*ops2.SegmentJoinNode](ctx.DAG.Graph, func(node *ops2.SegmentJoinNode) bool { + start := ctx.StreamRange.Offset + var end *int64 + if ctx.StreamRange.Length != nil { + e := ctx.StreamRange.Offset + *ctx.StreamRange.Length + end = &e + } + + startSeg, endSeg := ctx.Ft.SegmentParam.CalcSegmentRange(start, end) + + // 关闭超出范围的分段 + for i := endSeg; i < len(node.Segments); i++ { + node.InputStreams().Get(i).NotTo(node) + } + node.InputStreams().Slots.RemoveRange(endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg) + node.Segments = lo2.RemoveRange(node.Segments, endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg) + + for i := 0; i < startSeg; i++ { + node.InputStreams().Get(i).NotTo(node) + } + node.InputStreams().Slots.RemoveRange(0, startSeg) + node.Segments = lo2.RemoveRange(node.Segments, 0, startSeg) + + // StreamRange开始的位置可能在某个分段的中间,此时这个分段的大小等于流开始位置到分段结束位置的距离 + startSegStart := ctx.Ft.SegmentParam.CalcSegmentStart(startSeg) + node.Segments[0] -= ctx.StreamRange.Offset - startSegStart + + // 检查一下必须的分段是否都被加入到Join中 + for i := 0; i < node.InputStreams().Len(); i++ { + if node.InputStreams().Get(i) == nil { + err = fmt.Errorf("segment %v missed to join an raw stream", i+startSeg) + return false + } + } + + return true + }) + + return err +} diff --git a/common/pkgs/ioswitch2/parser/opt/chunked.go b/common/pkgs/ioswitch2/parser/opt/chunked.go new file mode 100644 index 0000000..05efc7a --- /dev/null +++ b/common/pkgs/ioswitch2/parser/opt/chunked.go @@ -0,0 +1,98 @@ +package opt + +import ( + 
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state" +) + +// 删除输出流未被使用的Join指令 +func RemoveUnusedJoin(ctx *state.GenerateState) bool { + changed := false + + dag.WalkOnlyType[*ops2.ChunkedJoinNode](ctx.DAG.Graph, func(node *ops2.ChunkedJoinNode) bool { + if node.Joined().Dst.Len() > 0 { + return true + } + + node.RemoveAllInputs() + ctx.DAG.RemoveNode(node) + return true + }) + + return changed +} + +// 删除未使用的Split指令 +func RemoveUnusedSplit(ctx *state.GenerateState) bool { + changed := false + dag.WalkOnlyType[*ops2.ChunkedSplitNode](ctx.DAG.Graph, func(typ *ops2.ChunkedSplitNode) bool { + // Split出来的每一个流都没有被使用,才能删除这个指令 + for _, out := range typ.OutputStreams().Slots.RawArray() { + if out.Dst.Len() > 0 { + return true + } + } + + typ.RemoveAllStream() + ctx.DAG.RemoveNode(typ) + changed = true + return true + }) + + return changed +} + +// 如果Split的结果被完全用于Join,则省略Split和Join指令 +func OmitSplitJoin(ctx *state.GenerateState) bool { + changed := false + + dag.WalkOnlyType[*ops2.ChunkedSplitNode](ctx.DAG.Graph, func(splitNode *ops2.ChunkedSplitNode) bool { + // Split指令的每一个输出都有且只有一个目的地 + var dstNode dag.Node + for _, out := range splitNode.OutputStreams().Slots.RawArray() { + if out.Dst.Len() != 1 { + return true + } + + if dstNode == nil { + dstNode = out.Dst.Get(0) + } else if dstNode != out.Dst.Get(0) { + return true + } + } + + if dstNode == nil { + return true + } + + // 且这个目的地要是一个Join指令 + joinNode, ok := dstNode.(*ops2.ChunkedJoinNode) + if !ok { + return true + } + + // 同时这个Join指令的输入也必须全部来自Split指令的输出。 + // 由于上面判断了Split指令的输出目的地都相同,所以这里只要判断Join指令的输入数量是否与Split指令的输出数量相同即可 + if joinNode.InputStreams().Len() != splitNode.OutputStreams().Len() { + return true + } + + // 所有条件都满足,可以开始省略操作,将Join操作的目的地的输入流替换为Split操作的输入流: + // F->Split->Join->T 变换为:F->T + splitInput := splitNode.InputStreams().Get(0) + for _, to := range 
joinNode.Joined().Dst.RawArray() { + splitInput.To(to, to.InputStreams().IndexOf(joinNode.Joined())) + } + splitInput.NotTo(splitNode) + + // 并删除这两个指令 + ctx.DAG.RemoveNode(joinNode) + ctx.DAG.RemoveNode(splitNode) + + changed = true + return true + }) + + return changed +} diff --git a/common/pkgs/ioswitch2/parser/opt/ec.go b/common/pkgs/ioswitch2/parser/opt/ec.go new file mode 100644 index 0000000..6e2c5b1 --- /dev/null +++ b/common/pkgs/ioswitch2/parser/opt/ec.go @@ -0,0 +1,38 @@ +package opt + +import ( + "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" + "gitlink.org.cn/cloudream/common/utils/lo2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state" +) + +// 减少未使用的Multiply指令的输出流。如果减少到0,则删除该指令 +func RemoveUnusedMultiplyOutput(ctx *state.GenerateState) bool { + changed := false + dag.WalkOnlyType[*ops2.ECMultiplyNode](ctx.DAG.Graph, func(node *ops2.ECMultiplyNode) bool { + outArr := node.OutputStreams().Slots.RawArray() + for i2, out := range outArr { + if out.Dst.Len() > 0 { + continue + } + + outArr[i2] = nil + node.OutputIndexes[i2] = -2 + changed = true + } + + node.OutputStreams().Slots.SetRawArray(lo2.RemoveAllDefault(outArr)) + node.OutputIndexes = lo2.RemoveAll(node.OutputIndexes, -2) + + // 如果所有输出流都被删除,则删除该指令 + if node.OutputStreams().Len() == 0 { + node.RemoveAllInputs() + ctx.DAG.RemoveNode(node) + changed = true + } + + return true + }) + return changed +} diff --git a/common/pkgs/ioswitch2/parser/opt/misc.go b/common/pkgs/ioswitch2/parser/opt/misc.go new file mode 100644 index 0000000..c9a8cb6 --- /dev/null +++ b/common/pkgs/ioswitch2/parser/opt/misc.go @@ -0,0 +1,154 @@ +package opt + +import ( + "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" + "gitlink.org.cn/cloudream/common/utils/math2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state" +) + +// 删除未使用的From流,不会删除FromDriver 
+func RemoveUnusedFromNode(ctx *state.GenerateState) { + dag.WalkOnlyType[ops2.FromNode](ctx.DAG.Graph, func(node ops2.FromNode) bool { + if _, ok := node.(*ops2.FromDriverNode); ok { + return true + } + + if node.Output().Var().Dst.Len() == 0 { + ctx.DAG.RemoveNode(node) + } + return true + }) +} + +// 对于所有未使用的流,增加Drop指令 +func DropUnused(ctx *state.GenerateState) { + ctx.DAG.Walk(func(node dag.Node) bool { + for _, out := range node.OutputStreams().Slots.RawArray() { + if out.Dst.Len() == 0 { + n := ctx.DAG.NewDropStream() + *n.Env() = *node.Env() + n.SetInput(out) + } + } + return true + }) +} + +// 为IPFS写入指令存储结果 +func StoreShardWriteResult(ctx *state.GenerateState) { + dag.WalkOnlyType[*ops2.ShardWriteNode](ctx.DAG.Graph, func(n *ops2.ShardWriteNode) bool { + if n.FileHashStoreKey == "" { + return true + } + + storeNode := ctx.DAG.NewStore() + storeNode.Env().ToEnvDriver() + + storeNode.Store(n.FileHashStoreKey, n.FileHashVar()) + return true + }) + + dag.WalkOnlyType[*ops2.BypassToShardStoreNode](ctx.DAG.Graph, func(n *ops2.BypassToShardStoreNode) bool { + if n.FileHashStoreKey == "" { + return true + } + + storeNode := ctx.DAG.NewStore() + storeNode.Env().ToEnvDriver() + + storeNode.Store(n.FileHashStoreKey, n.FileHashVar().Var()) + return true + }) +} + +// 生成Range指令。StreamRange可能超过文件总大小,但Range指令会在数据量不够时不报错而是正常返回 +func GenerateRange(ctx *state.GenerateState) { + for to, toNode := range ctx.ToNodes { + toStrIdx := to.GetStreamIndex() + toRng := to.GetRange() + + if toStrIdx.IsRaw() { + n := ctx.DAG.NewRange() + toInput := toNode.Input() + *n.Env() = *toInput.Var().Src.Env() + rnged := n.RangeStream(toInput.Var(), math2.Range{ + Offset: toRng.Offset - ctx.StreamRange.Offset, + Length: toRng.Length, + }) + toInput.Var().NotTo(toNode) + toNode.SetInput(rnged) + + } else if toStrIdx.IsEC() { + stripSize := int64(ctx.Ft.ECParam.ChunkSize * ctx.Ft.ECParam.K) + blkStartIdx := ctx.StreamRange.Offset / stripSize + + blkStart := blkStartIdx * 
int64(ctx.Ft.ECParam.ChunkSize) + + n := ctx.DAG.NewRange() + toInput := toNode.Input() + *n.Env() = *toInput.Var().Src.Env() + rnged := n.RangeStream(toInput.Var(), math2.Range{ + Offset: toRng.Offset - blkStart, + Length: toRng.Length, + }) + toInput.Var().NotTo(toNode) + toNode.SetInput(rnged) + } else if toStrIdx.IsSegment() { + // if frNode, ok := toNode.Input().Var().From().Node.(ops2.FromNode); ok { + // // 目前只有To也是分段时,才可能对接一个提供分段的From,此时不需要再生成Range指令 + // if frNode.GetFrom().GetStreamIndex().IsSegment() { + // continue + // } + // } + + // segStart := ctx.Ft.SegmentParam.CalcSegmentStart(toStrIdx.Index) + // strStart := segStart + toRng.Offset + + // n := ctx.DAG.NewRange() + // toInput := toNode.Input() + // *n.Env() = *toInput.Var().From().Node.Env() + // rnged := n.RangeStream(toInput.Var(), exec.Range{ + // Offset: strStart - ctx.StreamRange.Offset, + // Length: toRng.Length, + // }) + // toInput.Var().NotTo(toNode, toInput.Index) + // toNode.SetInput(rnged) + } + } +} + +// 生成Clone指令 +func GenerateClone(ctx *state.GenerateState) { + ctx.DAG.Walk(func(node dag.Node) bool { + for _, outVar := range node.OutputStreams().Slots.RawArray() { + if outVar.Dst.Len() <= 1 { + continue + } + + c := ctx.DAG.NewCloneStream() + *c.Env() = *node.Env() + for _, dst := range outVar.Dst.RawArray() { + c.NewOutput().To(dst, dst.InputStreams().IndexOf(outVar)) + } + outVar.Dst.Resize(0) + c.SetInput(outVar) + } + + for _, outVar := range node.OutputValues().Slots.RawArray() { + if outVar.Dst.Len() <= 1 { + continue + } + + t := ctx.DAG.NewCloneValue() + *t.Env() = *node.Env() + for _, dst := range outVar.Dst.RawArray() { + t.NewOutput().To(dst, dst.InputValues().IndexOf(outVar)) + } + outVar.Dst.Resize(0) + t.SetInput(outVar) + } + + return true + }) +} diff --git a/common/pkgs/ioswitch2/parser/opt/multipart.go b/common/pkgs/ioswitch2/parser/opt/multipart.go new file mode 100644 index 0000000..7169b2f --- /dev/null +++ b/common/pkgs/ioswitch2/parser/opt/multipart.go @@ 
-0,0 +1,105 @@ +package opt + +import ( + "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" + "gitlink.org.cn/cloudream/common/utils/math2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory" +) + +// 将SegmentJoin指令替换成分片上传指令 +func UseMultipartUploadToShardStore(ctx *state.GenerateState) { + dag.WalkOnlyType[*ops2.SegmentJoinNode](ctx.DAG.Graph, func(joinNode *ops2.SegmentJoinNode) bool { + if joinNode.Joined().Dst.Len() != 1 { + return true + } + + joinDst := joinNode.Joined().Dst.Get(0) + shardNode, ok := joinDst.(*ops2.ShardWriteNode) + if !ok { + return true + } + + // SegmentJoin的输出流的范围必须与ToShardStore的输入流的范围相同, + // 虽然可以通过调整SegmentJoin的输入流来调整范围,但太复杂,暂不支持 + toStrIdx := shardNode.GetTo().GetStreamIndex() + toStrRng := shardNode.GetTo().GetRange() + if toStrIdx.IsRaw() { + if !toStrRng.Equals(ctx.StreamRange) { + return true + } + } else { + return true + } + + // Join的目的地必须支持MultipartUpload功能才能替换成分片上传 + multiUpload, err := factory.GetBuilder(shardNode.Storage).CreateMultiparter() + if err != nil { + return true + } + + // Join的每一个段的大小必须超过最小分片大小。 + // 目前只支持拆分超过最大分片的流,不支持合并多个小段流以达到最小分片大小。 + for _, size := range joinNode.Segments { + if size < multiUpload.MinPartSize() { + return true + } + } + + initNode := ctx.DAG.NewMultipartInitiator(shardNode.Storage) + initNode.Env().CopyFrom(shardNode.Env()) + + partNumber := 1 + for i, size := range joinNode.Segments { + joinInput := joinNode.InputSlot(i) + + if size > multiUpload.MaxPartSize() { + // 如果一个分段的大小大于最大分片大小,则需要拆分为多个小段上传 + // 拆分以及上传指令直接在流的产生节点执行 + splits := math2.SplitLessThan(size, multiUpload.MaxPartSize()) + splitNode := ctx.DAG.NewSegmentSplit(splits) + splitNode.Env().CopyFrom(joinInput.Var().Src.Env()) + + joinInput.Var().ToSlot(splitNode.InputSlot()) + + for i2 := 0; i2 < len(splits); i2++ { + uploadNode := ctx.DAG.NewMultipartUpload(shardNode.Storage, 
partNumber, splits[i2]) + uploadNode.Env().CopyFrom(joinInput.Var().Src.Env()) + + initNode.UploadArgsVar().ToSlot(uploadNode.UploadArgsSlot()) + splitNode.SegmentVar(i2).ToSlot(uploadNode.PartStreamSlot()) + uploadNode.UploadResultVar().ToSlot(initNode.AppendPartInfoSlot()) + + partNumber++ + } + } else { + // 否则直接上传整个分段 + uploadNode := ctx.DAG.NewMultipartUpload(shardNode.Storage, partNumber, size) + // 上传指令直接在流的产生节点执行 + uploadNode.Env().CopyFrom(joinInput.Var().Src.Env()) + + initNode.UploadArgsVar().ToSlot(uploadNode.UploadArgsSlot()) + joinInput.Var().ToSlot(uploadNode.PartStreamSlot()) + uploadNode.UploadResultVar().ToSlot(initNode.AppendPartInfoSlot()) + + partNumber++ + } + + joinInput.Var().NotTo(joinNode) + } + + bypassNode := ctx.DAG.NewBypassToShardStore(shardNode.Storage.Storage.StorageID, shardNode.FileHashStoreKey) + bypassNode.Env().CopyFrom(shardNode.Env()) + + // 分片上传Node产生的结果送到bypassNode,bypassNode将处理结果再送回分片上传Node + initNode.BypassFileInfoVar().ToSlot(bypassNode.BypassFileInfoSlot()) + bypassNode.BypassCallbackVar().ToSlot(initNode.BypassCallbackSlot()) + + // 最后删除Join指令和ToShardStore指令 + ctx.DAG.RemoveNode(joinNode) + ctx.DAG.RemoveNode(shardNode) + delete(ctx.ToNodes, shardNode.GetTo()) + return true + }) +} diff --git a/common/pkgs/ioswitch2/parser/opt/pin.go b/common/pkgs/ioswitch2/parser/opt/pin.go new file mode 100644 index 0000000..066aba9 --- /dev/null +++ b/common/pkgs/ioswitch2/parser/opt/pin.go @@ -0,0 +1,69 @@ +package opt + +import ( + "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state" +) + +// 通过流的输入输出位置来确定指令的执行位置。 +// To系列的指令都会有固定的执行位置,这些位置会随着pin操作逐步扩散到整个DAG, +// 所以理论上不会出现有指令的位置始终无法确定的情况。 +func Pin(ctx *state.GenerateState) bool { + changed := false + ctx.DAG.Walk(func(node dag.Node) bool { + if node.Env().Pinned { + return true + } + + var toEnv *dag.NodeEnv + for _, out := range node.OutputStreams().Slots.RawArray() { + for _, to := range out.Dst.RawArray() { 
+ if to.Env().Type == dag.EnvUnknown { + continue + } + + if toEnv == nil { + toEnv = to.Env() + } else if !toEnv.Equals(to.Env()) { + toEnv = nil + break + } + } + } + + if toEnv != nil { + if !node.Env().Equals(toEnv) { + changed = true + } + + *node.Env() = *toEnv + return true + } + + // 否则根据输入流的始发地来固定 + var fromEnv *dag.NodeEnv + for _, in := range node.InputStreams().Slots.RawArray() { + if in.Src.Env().Type == dag.EnvUnknown { + continue + } + + if fromEnv == nil { + fromEnv = in.Src.Env() + } else if !fromEnv.Equals(in.Src.Env()) { + fromEnv = nil + break + } + } + + if fromEnv != nil { + if !node.Env().Equals(fromEnv) { + changed = true + } + + *node.Env() = *fromEnv + } + return true + }) + + return changed +} diff --git a/common/pkgs/ioswitch2/parser/opt/s2s.go b/common/pkgs/ioswitch2/parser/opt/s2s.go new file mode 100644 index 0000000..4eb7fc4 --- /dev/null +++ b/common/pkgs/ioswitch2/parser/opt/s2s.go @@ -0,0 +1,131 @@ +package opt + +import ( + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory" +) + +// 将直接从一个存储服务传到另一个存储服务的过程换成S2S传输 +func UseS2STransfer(ctx *state.GenerateState) { + // S2S传输暂不支持只传文件的一部分 + if ctx.StreamRange.Offset != 0 || ctx.StreamRange.Length != nil { + return + } + + for fr, frNode := range ctx.FromNodes { + fromShard, ok := fr.(*ioswitch2.FromShardstore) + if !ok { + continue + } + + fromStgBld := factory.GetBuilder(fromShard.Storage) + if !fromStgBld.ShardStoreDesc().HasBypassRead() { + continue + } + + s2s, err := fromStgBld.CreateS2STransfer() + if err != nil { + continue + } + + // 此输出流的所有目的地都要能支持S2S传输 + outVar := frNode.Output().Var() + if outVar.Dst.Len() == 0 { + continue + } + + failed := false + var toShards []*ops2.ShardWriteNode + // var toShareds []*ops2.SharedLoadNode + + loop: + for i := 0; i < outVar.Dst.Len(); 
i++ { + dstNode := outVar.Dst.Get(i) + + switch dstNode := dstNode.(type) { + case *ops2.ShardWriteNode: + dstStgBld := factory.GetBuilder(dstNode.Storage) + if !dstStgBld.ShardStoreDesc().HasBypassWrite() { + failed = true + break + } + + if !s2s.CanTransfer(dstNode.Storage) { + failed = true + break + } + + toShards = append(toShards, dstNode) + + /* TODO 暂不支持共享存储服务 + case *ops2.SharedLoadNode: + if !s2s.CanTransfer(to.Storage) { + failed = true + break + } + toShareds = append(toShareds, to) + */ + default: + failed = true + break loop + } + } + if failed { + continue + } + + for _, toShard := range toShards { + s2sNode := ctx.DAG.NewS2STransfer(fromShard.Storage, toShard.Storage) + // 直传指令在目的地Hub上执行 + s2sNode.Env().CopyFrom(toShard.Env()) + + // 先获取文件路径,送到S2S节点 + brNode := ctx.DAG.NewBypassFromShardStore(fromShard.Storage.Storage.StorageID, fromShard.FileHash) + brNode.Env().CopyFrom(frNode.Env()) + brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot()) + + // 传输结果通知目的节点 + to := toShard.To.(*ioswitch2.ToShardStore) + bwNode := ctx.DAG.NewBypassToShardStore(toShard.Storage.Storage.StorageID, to.FileHashStoreKey) + bwNode.Env().CopyFrom(toShard.Env()) + + s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot()) + bwNode.BypassCallbackVar().ToSlot(s2sNode.BypassCallbackSlot()) + + // 从计划中删除目标节点 + ctx.DAG.RemoveNode(toShard) + delete(ctx.ToNodes, toShard.To) + } + + /* + for _, toShared := range toShareds { + s2sNode := ctx.DAG.NewS2STransfer(fromShard.Storage, toShared.Storage) + // 直传指令在目的地Hub上执行 + s2sNode.Env().CopyFrom(toShared.Env()) + + // 先获取文件路径,送到S2S节点 + brNode := ctx.DAG.NewBypassFromShardStore(fromShard.Storage.Storage.StorageID, fromShard.FileHash) + brNode.Env().CopyFrom(toShared.Env()) + brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot()) + + // 传输结果通知目的节点 + to := toShared.To.(*ioswitch2.LoadToShared) + bwNode := ctx.DAG.NewBypassToShardStore(toShard.Storage.Storage.StorageID, to.FileHashStoreKey) + bwNode.Env().CopyFrom(toShard.Env()) + + 
s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot()) + bwNode.BypassCallbackVar().ToSlot(s2sNode.BypassCallbackSlot()) + + // 从计划中删除目标节点 + ctx.DAG.RemoveNode(toShared) + delete(ctx.ToNodes, toShared.To) + } + */ + + // 从计划中删除源节点 + ctx.DAG.RemoveNode(frNode) + delete(ctx.FromNodes, fr) + } +} diff --git a/common/pkgs/ioswitch2/parser/opt/segment.go b/common/pkgs/ioswitch2/parser/opt/segment.go new file mode 100644 index 0000000..79f5d6a --- /dev/null +++ b/common/pkgs/ioswitch2/parser/opt/segment.go @@ -0,0 +1,98 @@ +package opt + +import ( + "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state" +) + +// 删除未使用的SegmentJoin +func RemoveUnusedSegmentJoin(ctx *state.GenerateState) bool { + changed := false + + dag.WalkOnlyType[*ops2.SegmentJoinNode](ctx.DAG.Graph, func(node *ops2.SegmentJoinNode) bool { + if node.Joined().Dst.Len() > 0 { + return true + } + + node.RemoveAllInputs() + ctx.DAG.RemoveNode(node) + return true + }) + + return changed +} + +// 删除未使用的SegmentSplit +func RemoveUnusedSegmentSplit(ctx *state.GenerateState) bool { + changed := false + dag.WalkOnlyType[*ops2.SegmentSplitNode](ctx.DAG.Graph, func(typ *ops2.SegmentSplitNode) bool { + // Split出来的每一个流都没有被使用,才能删除这个指令 + for _, out := range typ.OutputStreams().Slots.RawArray() { + if out.Dst.Len() > 0 { + return true + } + } + + typ.RemoveAllStream() + ctx.DAG.RemoveNode(typ) + changed = true + return true + }) + + return changed +} + +// 如果Split的结果被完全用于Join,则省略Split和Join指令 +func OmitSegmentSplitJoin(ctx *state.GenerateState) bool { + changed := false + + dag.WalkOnlyType[*ops2.SegmentSplitNode](ctx.DAG.Graph, func(splitNode *ops2.SegmentSplitNode) bool { + // 随便找一个输出流的目的地 + splitOut := splitNode.OutputStreams().Get(0) + if splitOut.Dst.Len() != 1 { + return true + } + dstNode := splitOut.Dst.Get(0) + + // 这个目的地要是一个Join指令 + joinNode, ok := 
dstNode.(*ops2.SegmentJoinNode) + if !ok { + return true + } + + if splitNode.OutputStreams().Len() != joinNode.Joined().Dst.Len() { + return true + } + + // Join指令的输入必须全部来自Split指令的输出,且位置要相同 + for i := 0; i < splitNode.OutputStreams().Len(); i++ { + splitOut := splitNode.OutputStreams().Get(i) + joinIn := joinNode.InputStreams().Get(i) + if splitOut != joinIn { + return true + } + + if splitOut != nil && splitOut.Dst.Len() != 1 { + return true + } + } + + // 所有条件都满足,可以开始省略操作,将Join操作的目的地的输入流替换为Split操作的输入流: + // F->Split->Join->T 变换为:F->T + splitInput := splitNode.InputStreams().Get(0) + for _, to := range joinNode.Joined().Dst.RawArray() { + splitInput.To(to, to.InputStreams().IndexOf(joinNode.Joined())) + } + splitInput.NotTo(splitNode) + + // 并删除这两个指令 + ctx.DAG.RemoveNode(joinNode) + ctx.DAG.RemoveNode(splitNode) + + changed = true + return true + }) + + return changed +} diff --git a/common/pkgs/ioswitch2/parser/parser.go b/common/pkgs/ioswitch2/parser/parser.go index c5c9092..b7db314 100644 --- a/common/pkgs/ioswitch2/parser/parser.go +++ b/common/pkgs/ioswitch2/parser/parser.go @@ -1,69 +1,41 @@ package parser import ( - "fmt" - "math" - - "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/plan" - cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/common/utils/lo2" - "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" - "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/utils" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/gen" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/opt" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser/state" ) -type IndexedStream struct { - Stream *dag.StreamVar - StreamIndex 
ioswitch2.StreamIndex -} - -type ParseContext struct { - Ft ioswitch2.FromTo - DAG *ops2.GraphNodeBuilder - // 为了产生所有To所需的数据范围,而需要From打开的范围。 - // 这个范围是基于整个文件的,且上下界都取整到条带大小的整数倍,因此上界是有可能超过文件大小的。 - ToNodes map[ioswitch2.To]ops2.ToNode - IndexedStreams []IndexedStream - StreamRange exec.Range - UseEC bool // 是否使用纠删码 - UseSegment bool // 是否使用分段 -} - func Parse(ft ioswitch2.FromTo, blder *exec.PlanBuilder) error { - ctx := ParseContext{ - Ft: ft, - DAG: ops2.NewGraphNodeBuilder(), - ToNodes: make(map[ioswitch2.To]ops2.ToNode), - } + state := state.InitGenerateState(ft) // 分成两个阶段: // 1. 基于From和To生成更多指令,初步匹配to的需求 - err := checkEncodingParams(&ctx) + err := gen.CheckEncodingParams(state) if err != nil { return err } // 计算一下打开流的范围 - calcStreamRange(&ctx) + gen.CalcStreamRange(state) - err = extend(&ctx) + err = gen.Extend(state) if err != nil { return err } // 2. 优化上一步生成的指令 - err = fixSegmentJoin(&ctx) + err = gen.FixSegmentJoin(state) if err != nil { return err } - err = fixSegmentSplit(&ctx) + err = gen.FixSegmentSplit(state) if err != nil { return err } @@ -72,25 +44,25 @@ func Parse(ft ioswitch2.FromTo, blder *exec.PlanBuilder) error { // 从目前实现上来说不会死循环 for { opted := false - if removeUnusedJoin(&ctx) { + if opt.RemoveUnusedJoin(state) { opted = true } - if removeUnusedMultiplyOutput(&ctx) { + if opt.RemoveUnusedMultiplyOutput(state) { opted = true } - if removeUnusedSplit(&ctx) { + if opt.RemoveUnusedSplit(state) { opted = true } - if omitSplitJoin(&ctx) { + if opt.OmitSplitJoin(state) { opted = true } - if removeUnusedSegmentJoin(&ctx) { + if opt.RemoveUnusedSegmentJoin(state) { opted = true } - if removeUnusedSegmentSplit(&ctx) { + if opt.RemoveUnusedSegmentSplit(state) { opted = true } - if omitSegmentSplitJoin(&ctx) { + if opt.OmitSegmentSplitJoin(state) { opted = true } @@ -100,1009 +72,17 @@ func Parse(ft ioswitch2.FromTo, blder *exec.PlanBuilder) error { } // 确定指令执行位置的过程,也需要反复进行,直到没有变化为止。 - for pin(&ctx) { + for opt.Pin(state) { } // 下面这些只需要执行一次,但需要按顺序 - 
removeUnusedFromNode(&ctx) - useMultipartUploadToShardStore(&ctx) - dropUnused(&ctx) - storeShardWriteResult(&ctx) - generateRange(&ctx) - generateClone(&ctx) - - return plan.Generate(ctx.DAG.Graph, blder) -} -func findOutputStream(ctx *ParseContext, streamIndex ioswitch2.StreamIndex) *dag.StreamVar { - var ret *dag.StreamVar - for _, s := range ctx.IndexedStreams { - if s.StreamIndex == streamIndex { - ret = s.Stream - break - } - } - return ret -} - -// 检查使用不同编码时参数是否设置到位 -func checkEncodingParams(ctx *ParseContext) error { - for _, f := range ctx.Ft.Froms { - if f.GetStreamIndex().IsEC() { - ctx.UseEC = true - if ctx.Ft.ECParam == nil { - return fmt.Errorf("EC encoding parameters not set") - } - } - - if f.GetStreamIndex().IsSegment() { - ctx.UseSegment = true - if ctx.Ft.SegmentParam == nil { - return fmt.Errorf("segment parameters not set") - } - } - } - - for _, t := range ctx.Ft.Toes { - if t.GetStreamIndex().IsEC() { - ctx.UseEC = true - if ctx.Ft.ECParam == nil { - return fmt.Errorf("EC encoding parameters not set") - } - } - - if t.GetStreamIndex().IsSegment() { - ctx.UseSegment = true - if ctx.Ft.SegmentParam == nil { - return fmt.Errorf("segment parameters not set") - } - } - } - - return nil -} - -// 计算输入流的打开范围。如果From或者To中包含EC的流,则会将打开范围扩大到条带大小的整数倍。 -func calcStreamRange(ctx *ParseContext) { - rng := exec.NewRange(math.MaxInt64, 0) - - for _, to := range ctx.Ft.Toes { - strIdx := to.GetStreamIndex() - if strIdx.IsRaw() { - toRng := to.GetRange() - rng.ExtendStart(toRng.Offset) - if toRng.Length != nil { - rng.ExtendEnd(toRng.Offset + *toRng.Length) - } else { - rng.Length = nil - } - } else if strIdx.IsEC() { - toRng := to.GetRange() - stripSize := ctx.Ft.ECParam.StripSize() - blkStartIndex := math2.FloorDiv(toRng.Offset, int64(ctx.Ft.ECParam.ChunkSize)) - rng.ExtendStart(blkStartIndex * stripSize) - if toRng.Length != nil { - blkEndIndex := math2.CeilDiv(toRng.Offset+*toRng.Length, int64(ctx.Ft.ECParam.ChunkSize)) - rng.ExtendEnd(blkEndIndex * 
stripSize) - } else { - rng.Length = nil - } - - } else if strIdx.IsSegment() { - // Segment节点的Range是相对于本段的,需要加上本段的起始位置 - toRng := to.GetRange() - - segStart := ctx.Ft.SegmentParam.CalcSegmentStart(strIdx.Index) - - offset := toRng.Offset + segStart - - rng.ExtendStart(offset) - if toRng.Length != nil { - rng.ExtendEnd(offset + *toRng.Length) - } else { - rng.Length = nil - } - } - } - - if ctx.UseEC { - stripSize := ctx.Ft.ECParam.StripSize() - rng.ExtendStart(math2.Floor(rng.Offset, stripSize)) - if rng.Length != nil { - rng.ExtendEnd(math2.Ceil(rng.Offset+*rng.Length, stripSize)) - } - } - - ctx.StreamRange = rng -} - -func extend(ctx *ParseContext) error { - for _, fr := range ctx.Ft.Froms { - frNode, err := buildFromNode(ctx, fr) - if err != nil { - return err - } - - ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{ - Stream: frNode.Output().Var(), - StreamIndex: fr.GetStreamIndex(), - }) - - // 对于完整文件的From,生成Split指令 - if fr.GetStreamIndex().IsRaw() { - // 只有输入输出需要EC编码的块时,才生成相关指令 - if ctx.UseEC { - splitNode := ctx.DAG.NewChunkedSplit(ctx.Ft.ECParam.ChunkSize, ctx.Ft.ECParam.K) - splitNode.Split(frNode.Output().Var()) - for i := 0; i < ctx.Ft.ECParam.K; i++ { - ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{ - Stream: splitNode.SubStream(i), - StreamIndex: ioswitch2.ECSrteam(i), - }) - } - } - - // 同上 - if ctx.UseSegment { - splitNode := ctx.DAG.NewSegmentSplit(ctx.Ft.SegmentParam.Segments) - frNode.Output().Var().ToSlot(splitNode.InputSlot()) - for i := 0; i < len(ctx.Ft.SegmentParam.Segments); i++ { - ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{ - Stream: splitNode.Segment(i), - StreamIndex: ioswitch2.SegmentStream(i), - }) - } - } - } - } - - if ctx.UseEC { - // 如果有K个不同的文件块流,则生成Multiply指令,同时针对其生成的流,生成Join指令 - ecInputStrs := make(map[int]*dag.StreamVar) - for _, s := range ctx.IndexedStreams { - if s.StreamIndex.IsEC() && ecInputStrs[s.StreamIndex.Index] == nil { - ecInputStrs[s.StreamIndex.Index] = s.Stream - 
if len(ecInputStrs) == ctx.Ft.ECParam.K { - break - } - } - } - - if len(ecInputStrs) == ctx.Ft.ECParam.K { - mulNode := ctx.DAG.NewECMultiply(*ctx.Ft.ECParam) - - for i, s := range ecInputStrs { - mulNode.AddInput(s, i) - } - for i := 0; i < ctx.Ft.ECParam.N; i++ { - ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{ - Stream: mulNode.NewOutput(i), - StreamIndex: ioswitch2.ECSrteam(i), - }) - } - - joinNode := ctx.DAG.NewChunkedJoin(ctx.Ft.ECParam.ChunkSize) - for i := 0; i < ctx.Ft.ECParam.K; i++ { - // 不可能找不到流 - joinNode.AddInput(findOutputStream(ctx, ioswitch2.ECSrteam(i))) - } - ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{ - Stream: joinNode.Joined(), - StreamIndex: ioswitch2.RawStream(), - }) - } - } - - if ctx.UseSegment { - // 先假设有所有的顺序分段,生成Join指令,后续根据Range再实际计算是否缺少流 - joinNode := ctx.DAG.NewSegmentJoin(ctx.Ft.SegmentParam.Segments) - for i := 0; i < ctx.Ft.SegmentParam.SegmentCount(); i++ { - str := findOutputStream(ctx, ioswitch2.SegmentStream(i)) - if str != nil { - str.ToSlot(joinNode.InputSlot(i)) - } - } - ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{ - Stream: joinNode.Joined(), - StreamIndex: ioswitch2.RawStream(), - }) - - // SegmentJoin生成的Join指令可以用来生成EC块 - if ctx.UseEC { - splitNode := ctx.DAG.NewChunkedSplit(ctx.Ft.ECParam.ChunkSize, ctx.Ft.ECParam.K) - splitNode.Split(joinNode.Joined()) - - mulNode := ctx.DAG.NewECMultiply(*ctx.Ft.ECParam) - - for i := 0; i < ctx.Ft.ECParam.K; i++ { - mulNode.AddInput(splitNode.SubStream(i), i) - ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{ - Stream: splitNode.SubStream(i), - StreamIndex: ioswitch2.ECSrteam(i), - }) - } - - for i := 0; i < ctx.Ft.ECParam.N; i++ { - ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{ - Stream: mulNode.NewOutput(i), - StreamIndex: ioswitch2.ECSrteam(i), - }) - } - } - } - - // 为每一个To找到一个输入流 - for _, to := range ctx.Ft.Toes { - toNode, err := buildToNode(ctx, to) - if err != nil { - return err - } - 
ctx.ToNodes[to] = toNode - - str := findOutputStream(ctx, to.GetStreamIndex()) - if str == nil { - return fmt.Errorf("no output stream found for data index %d", to.GetStreamIndex()) - } - - toNode.SetInput(str) - } - - return nil -} - -func buildFromNode(ctx *ParseContext, f ioswitch2.From) (ops2.FromNode, error) { - var repRange exec.Range - repRange.Offset = ctx.StreamRange.Offset - if ctx.StreamRange.Length != nil { - repRngLen := *ctx.StreamRange.Length - repRange.Length = &repRngLen - } - - var blkRange exec.Range - if ctx.UseEC { - blkRange.Offset = ctx.StreamRange.Offset / int64(ctx.Ft.ECParam.ChunkSize*ctx.Ft.ECParam.K) * int64(ctx.Ft.ECParam.ChunkSize) - if ctx.StreamRange.Length != nil { - blkRngLen := *ctx.StreamRange.Length / int64(ctx.Ft.ECParam.ChunkSize*ctx.Ft.ECParam.K) * int64(ctx.Ft.ECParam.ChunkSize) - blkRange.Length = &blkRngLen - } - } - - switch f := f.(type) { - case *ioswitch2.FromShardstore: - t := ctx.DAG.NewShardRead(f, f.Storage.StorageID, types.NewOpen(f.FileHash)) - - if f.StreamIndex.IsRaw() { - t.Open.WithNullableLength(repRange.Offset, repRange.Length) - } else if f.StreamIndex.IsEC() { - t.Open.WithNullableLength(blkRange.Offset, blkRange.Length) - } else if f.StreamIndex.IsSegment() { - segStart := ctx.Ft.SegmentParam.CalcSegmentStart(f.StreamIndex.Index) - segLen := ctx.Ft.SegmentParam.Segments[f.StreamIndex.Index] - segEnd := segStart + segLen - - // 打开的范围不超过本段的范围 - - openOff := ctx.StreamRange.Offset - segStart - openOff = math2.Clamp(openOff, 0, segLen) - - openLen := segLen - - if ctx.StreamRange.Length != nil { - strEnd := ctx.StreamRange.Offset + *ctx.StreamRange.Length - openEnd := math2.Min(strEnd, segEnd) - openLen = openEnd - segStart - openOff - } - - t.Open.WithNullableLength(openOff, &openLen) - } - - switch addr := f.Hub.Address.(type) { - case *cdssdk.HttpAddressInfo: - t.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: f.Hub}) - t.Env().Pinned = true - - case *cdssdk.GRPCAddressInfo: - 
t.Env().ToEnvWorker(&ioswitch2.AgentWorker{Hub: f.Hub, Address: *addr}) - t.Env().Pinned = true - - default: - return nil, fmt.Errorf("unsupported node address type %T", addr) - } - - return t, nil - - case *ioswitch2.FromDriver: - n := ctx.DAG.NewFromDriver(f, f.Handle) - n.Env().ToEnvDriver() - n.Env().Pinned = true - - if f.StreamIndex.IsRaw() { - f.Handle.RangeHint.Offset = repRange.Offset - f.Handle.RangeHint.Length = repRange.Length - } else if f.StreamIndex.IsEC() { - f.Handle.RangeHint.Offset = blkRange.Offset - f.Handle.RangeHint.Length = blkRange.Length - } else if f.StreamIndex.IsSegment() { - segStart := ctx.Ft.SegmentParam.CalcSegmentStart(f.StreamIndex.Index) - segLen := ctx.Ft.SegmentParam.Segments[f.StreamIndex.Index] - segEnd := segStart + segLen - - // 打开的范围不超过本段的范围 - - openOff := repRange.Offset - segStart - openOff = math2.Clamp(openOff, 0, segLen) - - openLen := segLen - - if repRange.Length != nil { - repEnd := repRange.Offset + *repRange.Length - openEnd := math2.Min(repEnd, segEnd) - openLen = openEnd - openOff - } - - f.Handle.RangeHint.Offset = openOff - f.Handle.RangeHint.Length = &openLen - } - - return n, nil - - default: - return nil, fmt.Errorf("unsupported from type %T", f) - } -} - -func buildToNode(ctx *ParseContext, t ioswitch2.To) (ops2.ToNode, error) { - switch t := t.(type) { - case *ioswitch2.ToShardStore: - n := ctx.DAG.NewShardWrite(t, t.Storage, t.FileHashStoreKey) - - if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil { - return nil, err - } - - n.Env().Pinned = true - - return n, nil - - case *ioswitch2.ToDriver: - n := ctx.DAG.NewToDriver(t, t.Handle) - n.Env().ToEnvDriver() - n.Env().Pinned = true - - return n, nil - - case *ioswitch2.LoadToShared: - n := ctx.DAG.NewSharedLoad(t, t.Storage.StorageID, t.UserID, t.PackageID, t.Path) - - if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil { - return nil, err - } - - n.Env().Pinned = true - - return n, nil - - default: - return nil, 
fmt.Errorf("unsupported to type %T", t) - } -} - -func setEnvByAddress(n dag.Node, hub cdssdk.Hub, addr cdssdk.HubAddressInfo) error { - switch addr := addr.(type) { - case *cdssdk.HttpAddressInfo: - n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: hub}) - - case *cdssdk.GRPCAddressInfo: - n.Env().ToEnvWorker(&ioswitch2.AgentWorker{Hub: hub, Address: *addr}) - - default: - return fmt.Errorf("unsupported node address type %T", addr) - } - - return nil -} - -// 根据StreamRange,调整SegmentSplit中分段的个数和每段的大小 -func fixSegmentSplit(ctx *ParseContext) error { - var err error - dag.WalkOnlyType[*ops2.SegmentSplitNode](ctx.DAG.Graph, func(node *ops2.SegmentSplitNode) bool { - var strEnd *int64 - if ctx.StreamRange.Length != nil { - e := ctx.StreamRange.Offset + *ctx.StreamRange.Length - strEnd = &e - } - - startSeg, endSeg := ctx.Ft.SegmentParam.CalcSegmentRange(ctx.StreamRange.Offset, strEnd) - - // 关闭超出范围的分段 - for i := endSeg; i < len(node.Segments); i++ { - node.OutputStreams().Get(i).ClearAllDst() - } - node.OutputStreams().Slots.RemoveRange(endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg) - node.Segments = lo2.RemoveRange(node.Segments, endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg) - - for i := 0; i < startSeg; i++ { - node.OutputStreams().Get(i).ClearAllDst() - } - node.OutputStreams().Slots.RemoveRange(0, startSeg) - node.Segments = lo2.RemoveRange(node.Segments, 0, startSeg) - - // StreamRange开始的位置可能在某个分段的中间,此时这个分段的大小等于流开始位置到分段结束位置的距离 - startSegStart := ctx.Ft.SegmentParam.CalcSegmentStart(startSeg) - node.Segments[0] -= ctx.StreamRange.Offset - startSegStart - - // StreamRange结束的位置可能在某个分段的中间,此时这个分段的大小就等于流结束位置到分段起始位置的距离 - if strEnd != nil { - endSegStart := ctx.Ft.SegmentParam.CalcSegmentStart(endSeg - 1) - node.Segments[len(node.Segments)-1] = *strEnd - endSegStart - } - return true - }) - - return err -} - -// 从SegmentJoin中删除未使用的分段 -func fixSegmentJoin(ctx *ParseContext) error { - var err error - dag.WalkOnlyType[*ops2.SegmentJoinNode](ctx.DAG.Graph, func(node 
*ops2.SegmentJoinNode) bool { - start := ctx.StreamRange.Offset - var end *int64 - if ctx.StreamRange.Length != nil { - e := ctx.StreamRange.Offset + *ctx.StreamRange.Length - end = &e - } - - startSeg, endSeg := ctx.Ft.SegmentParam.CalcSegmentRange(start, end) - - // 关闭超出范围的分段 - for i := endSeg; i < len(node.Segments); i++ { - node.InputStreams().Get(i).NotTo(node) - } - node.InputStreams().Slots.RemoveRange(endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg) - node.Segments = lo2.RemoveRange(node.Segments, endSeg, ctx.Ft.SegmentParam.SegmentCount()-endSeg) - - for i := 0; i < startSeg; i++ { - node.InputStreams().Get(i).NotTo(node) - } - node.InputStreams().Slots.RemoveRange(0, startSeg) - node.Segments = lo2.RemoveRange(node.Segments, 0, startSeg) - - // StreamRange开始的位置可能在某个分段的中间,此时这个分段的大小等于流开始位置到分段结束位置的距离 - startSegStart := ctx.Ft.SegmentParam.CalcSegmentStart(startSeg) - node.Segments[0] -= ctx.StreamRange.Offset - startSegStart - - // 检查一下必须的分段是否都被加入到Join中 - for i := 0; i < node.InputStreams().Len(); i++ { - if node.InputStreams().Get(i) == nil { - err = fmt.Errorf("segment %v missed to join an raw stream", i+startSeg) - return false - } - } - - return true - }) - - return err -} - -// 删除未使用的SegmentJoin -func removeUnusedSegmentJoin(ctx *ParseContext) bool { - changed := false - - dag.WalkOnlyType[*ops2.SegmentJoinNode](ctx.DAG.Graph, func(node *ops2.SegmentJoinNode) bool { - if node.Joined().Dst.Len() > 0 { - return true - } - - node.RemoveAllInputs() - ctx.DAG.RemoveNode(node) - return true - }) - - return changed -} - -// 删除未使用的SegmentSplit -func removeUnusedSegmentSplit(ctx *ParseContext) bool { - changed := false - dag.WalkOnlyType[*ops2.SegmentSplitNode](ctx.DAG.Graph, func(typ *ops2.SegmentSplitNode) bool { - // Split出来的每一个流都没有被使用,才能删除这个指令 - for _, out := range typ.OutputStreams().Slots.RawArray() { - if out.Dst.Len() > 0 { - return true - } - } - - typ.RemoveAllStream() - ctx.DAG.RemoveNode(typ) - changed = true - return true - }) - - return changed 
-} - -// 如果Split的结果被完全用于Join,则省略Split和Join指令 -func omitSegmentSplitJoin(ctx *ParseContext) bool { - changed := false - - dag.WalkOnlyType[*ops2.SegmentSplitNode](ctx.DAG.Graph, func(splitNode *ops2.SegmentSplitNode) bool { - // 随便找一个输出流的目的地 - splitOut := splitNode.OutputStreams().Get(0) - if splitOut.Dst.Len() != 1 { - return true - } - dstNode := splitOut.Dst.Get(0) - - // 这个目的地要是一个Join指令 - joinNode, ok := dstNode.(*ops2.SegmentJoinNode) - if !ok { - return true - } - - if splitNode.OutputStreams().Len() != joinNode.Joined().Dst.Len() { - return true - } - - // Join指令的输入必须全部来自Split指令的输出,且位置要相同 - for i := 0; i < splitNode.OutputStreams().Len(); i++ { - splitOut := splitNode.OutputStreams().Get(i) - joinIn := joinNode.InputStreams().Get(i) - if splitOut != joinIn { - return true - } - - if splitOut != nil && splitOut.Dst.Len() != 1 { - return true - } - } - - // 所有条件都满足,可以开始省略操作,将Join操作的目的地的输入流替换为Split操作的输入流: - // F->Split->Join->T 变换为:F->T - splitInput := splitNode.InputStreams().Get(0) - for _, to := range joinNode.Joined().Dst.RawArray() { - splitInput.To(to, to.InputStreams().IndexOf(joinNode.Joined())) - } - splitInput.NotTo(splitNode) - - // 并删除这两个指令 - ctx.DAG.RemoveNode(joinNode) - ctx.DAG.RemoveNode(splitNode) - - changed = true - return true - }) - - return changed -} - -// 删除输出流未被使用的Join指令 -func removeUnusedJoin(ctx *ParseContext) bool { - changed := false - - dag.WalkOnlyType[*ops2.ChunkedJoinNode](ctx.DAG.Graph, func(node *ops2.ChunkedJoinNode) bool { - if node.Joined().Dst.Len() > 0 { - return true - } - - node.RemoveAllInputs() - ctx.DAG.RemoveNode(node) - return true - }) - - return changed -} - -// 减少未使用的Multiply指令的输出流。如果减少到0,则删除该指令 -func removeUnusedMultiplyOutput(ctx *ParseContext) bool { - changed := false - dag.WalkOnlyType[*ops2.ECMultiplyNode](ctx.DAG.Graph, func(node *ops2.ECMultiplyNode) bool { - outArr := node.OutputStreams().Slots.RawArray() - for i2, out := range outArr { - if out.Dst.Len() > 0 { - continue - } - - outArr[i2] = nil - 
node.OutputIndexes[i2] = -2 - changed = true - } - - node.OutputStreams().Slots.SetRawArray(lo2.RemoveAllDefault(outArr)) - node.OutputIndexes = lo2.RemoveAll(node.OutputIndexes, -2) - - // 如果所有输出流都被删除,则删除该指令 - if node.OutputStreams().Len() == 0 { - node.RemoveAllInputs() - ctx.DAG.RemoveNode(node) - changed = true - } - - return true - }) - return changed -} - -// 删除未使用的Split指令 -func removeUnusedSplit(ctx *ParseContext) bool { - changed := false - dag.WalkOnlyType[*ops2.ChunkedSplitNode](ctx.DAG.Graph, func(typ *ops2.ChunkedSplitNode) bool { - // Split出来的每一个流都没有被使用,才能删除这个指令 - for _, out := range typ.OutputStreams().Slots.RawArray() { - if out.Dst.Len() > 0 { - return true - } - } - - typ.RemoveAllStream() - ctx.DAG.RemoveNode(typ) - changed = true - return true - }) - - return changed -} - -// 如果Split的结果被完全用于Join,则省略Split和Join指令 -func omitSplitJoin(ctx *ParseContext) bool { - changed := false - - dag.WalkOnlyType[*ops2.ChunkedSplitNode](ctx.DAG.Graph, func(splitNode *ops2.ChunkedSplitNode) bool { - // Split指令的每一个输出都有且只有一个目的地 - var dstNode dag.Node - for _, out := range splitNode.OutputStreams().Slots.RawArray() { - if out.Dst.Len() != 1 { - return true - } - - if dstNode == nil { - dstNode = out.Dst.Get(0) - } else if dstNode != out.Dst.Get(0) { - return true - } - } - - if dstNode == nil { - return true - } - - // 且这个目的地要是一个Join指令 - joinNode, ok := dstNode.(*ops2.ChunkedJoinNode) - if !ok { - return true - } - - // 同时这个Join指令的输入也必须全部来自Split指令的输出。 - // 由于上面判断了Split指令的输出目的地都相同,所以这里只要判断Join指令的输入数量是否与Split指令的输出数量相同即可 - if joinNode.InputStreams().Len() != splitNode.OutputStreams().Len() { - return true - } - - // 所有条件都满足,可以开始省略操作,将Join操作的目的地的输入流替换为Split操作的输入流: - // F->Split->Join->T 变换为:F->T - splitInput := splitNode.InputStreams().Get(0) - for _, to := range joinNode.Joined().Dst.RawArray() { - splitInput.To(to, to.InputStreams().IndexOf(joinNode.Joined())) - } - splitInput.NotTo(splitNode) - - // 并删除这两个指令 - ctx.DAG.RemoveNode(joinNode) - 
ctx.DAG.RemoveNode(splitNode) - - changed = true - return true - }) - - return changed -} - -// 通过流的输入输出位置来确定指令的执行位置。 -// To系列的指令都会有固定的执行位置,这些位置会随着pin操作逐步扩散到整个DAG, -// 所以理论上不会出现有指令的位置始终无法确定的情况。 -func pin(ctx *ParseContext) bool { - changed := false - ctx.DAG.Walk(func(node dag.Node) bool { - if node.Env().Pinned { - return true - } - - var toEnv *dag.NodeEnv - for _, out := range node.OutputStreams().Slots.RawArray() { - for _, to := range out.Dst.RawArray() { - if to.Env().Type == dag.EnvUnknown { - continue - } - - if toEnv == nil { - toEnv = to.Env() - } else if !toEnv.Equals(to.Env()) { - toEnv = nil - break - } - } - } - - if toEnv != nil { - if !node.Env().Equals(toEnv) { - changed = true - } - - *node.Env() = *toEnv - return true - } - - // 否则根据输入流的始发地来固定 - var fromEnv *dag.NodeEnv - for _, in := range node.InputStreams().Slots.RawArray() { - if in.Src.Env().Type == dag.EnvUnknown { - continue - } - - if fromEnv == nil { - fromEnv = in.Src.Env() - } else if !fromEnv.Equals(in.Src.Env()) { - fromEnv = nil - break - } - } - - if fromEnv != nil { - if !node.Env().Equals(fromEnv) { - changed = true - } - - *node.Env() = *fromEnv - } - return true - }) - - return changed -} - -// 删除未使用的From流,不会删除FromDriver -func removeUnusedFromNode(ctx *ParseContext) { - dag.WalkOnlyType[ops2.FromNode](ctx.DAG.Graph, func(node ops2.FromNode) bool { - if _, ok := node.(*ops2.FromDriverNode); ok { - return true - } - - if node.Output().Var().Dst.Len() == 0 { - ctx.DAG.RemoveNode(node) - } - return true - }) -} - -// 对于所有未使用的流,增加Drop指令 -func dropUnused(ctx *ParseContext) { - ctx.DAG.Walk(func(node dag.Node) bool { - for _, out := range node.OutputStreams().Slots.RawArray() { - if out.Dst.Len() == 0 { - n := ctx.DAG.NewDropStream() - *n.Env() = *node.Env() - n.SetInput(out) - } - } - return true - }) -} - -// 将SegmentJoin指令替换成分片上传指令 -func useMultipartUploadToShardStore(ctx *ParseContext) { - dag.WalkOnlyType[*ops2.SegmentJoinNode](ctx.DAG.Graph, func(joinNode *ops2.SegmentJoinNode) 
bool { - if joinNode.Joined().Dst.Len() != 1 { - return true - } - - joinDst := joinNode.Joined().Dst.Get(0) - shardNode, ok := joinDst.(*ops2.ShardWriteNode) - if !ok { - return true - } - - // SegmentJoin的输出流的范围必须与ToShardStore的输入流的范围相同, - // 虽然可以通过调整SegmentJoin的输入流来调整范围,但太复杂,暂不支持 - toStrIdx := shardNode.GetTo().GetStreamIndex() - toStrRng := shardNode.GetTo().GetRange() - if toStrIdx.IsRaw() { - if !toStrRng.Equals(ctx.StreamRange) { - return true - } - } else { - return true - } - - // Join的目的地必须支持MultipartUpload功能才能替换成分片上传 - multiUpload := utils.FindFeature[*cdssdk.MultipartUploadFeature](shardNode.Storage) - if multiUpload == nil { - return true - } - - // Join的每一个段的大小必须超过最小分片大小。 - // 目前只支持拆分超过最大分片的流,不支持合并多个小段流以达到最小分片大小。 - for _, size := range joinNode.Segments { - if size < multiUpload.MinPartSize { - return true - } - } - - initNode := ctx.DAG.NewMultipartInitiator(shardNode.Storage) - initNode.Env().CopyFrom(shardNode.Env()) - - partNumber := 1 - for i, size := range joinNode.Segments { - joinInput := joinNode.InputSlot(i) - - if size > multiUpload.MaxPartSize { - // 如果一个分段的大小大于最大分片大小,则需要拆分为多个小段上传 - // 拆分以及上传指令直接在流的产生节点执行 - splits := math2.SplitLessThan(size, multiUpload.MaxPartSize) - splitNode := ctx.DAG.NewSegmentSplit(splits) - splitNode.Env().CopyFrom(joinInput.Var().Src.Env()) - - joinInput.Var().ToSlot(splitNode.InputSlot()) - - for i2 := 0; i2 < len(splits); i2++ { - uploadNode := ctx.DAG.NewMultipartUpload(shardNode.Storage, partNumber, splits[i2]) - uploadNode.Env().CopyFrom(joinInput.Var().Src.Env()) - - initNode.UploadArgsVar().ToSlot(uploadNode.UploadArgsSlot()) - splitNode.SegmentVar(i2).ToSlot(uploadNode.PartStreamSlot()) - uploadNode.UploadResultVar().ToSlot(initNode.AppendPartInfoSlot()) - - partNumber++ - } - } else { - // 否则直接上传整个分段 - uploadNode := ctx.DAG.NewMultipartUpload(shardNode.Storage, partNumber, size) - // 上传指令直接在流的产生节点执行 - uploadNode.Env().CopyFrom(joinInput.Var().Src.Env()) - - 
initNode.UploadArgsVar().ToSlot(uploadNode.UploadArgsSlot()) - joinInput.Var().ToSlot(uploadNode.PartStreamSlot()) - uploadNode.UploadResultVar().ToSlot(initNode.AppendPartInfoSlot()) - - partNumber++ - } - - joinInput.Var().NotTo(joinNode) - } - - bypassNode := ctx.DAG.NewBypassToShardStore(shardNode.Storage.Storage.StorageID, shardNode.FileHashStoreKey) - bypassNode.Env().CopyFrom(shardNode.Env()) - - // 分片上传Node产生的结果送到bypassNode,bypassNode将处理结果再送回分片上传Node - initNode.BypassFileInfoVar().ToSlot(bypassNode.BypassFileInfoSlot()) - bypassNode.BypassCallbackVar().ToSlot(initNode.BypassCallbackSlot()) - - // 最后删除Join指令和ToShardStore指令 - ctx.DAG.RemoveNode(joinNode) - ctx.DAG.RemoveNode(shardNode) - // 因为ToShardStore已经被替换,所以对应的To也要删除。 - // 虽然会跳过后续的Range过程,但由于之前做的流范围判断,不加Range也可以 - ctx.Ft.Toes = lo2.Remove(ctx.Ft.Toes, shardNode.GetTo()) - return true - }) -} - -// 为IPFS写入指令存储结果 -func storeShardWriteResult(ctx *ParseContext) { - dag.WalkOnlyType[*ops2.ShardWriteNode](ctx.DAG.Graph, func(n *ops2.ShardWriteNode) bool { - if n.FileHashStoreKey == "" { - return true - } - - storeNode := ctx.DAG.NewStore() - storeNode.Env().ToEnvDriver() - - storeNode.Store(n.FileHashStoreKey, n.FileHashVar()) - return true - }) - - dag.WalkOnlyType[*ops2.BypassToShardStoreNode](ctx.DAG.Graph, func(n *ops2.BypassToShardStoreNode) bool { - if n.FileHashStoreKey == "" { - return true - } - - storeNode := ctx.DAG.NewStore() - storeNode.Env().ToEnvDriver() - - storeNode.Store(n.FileHashStoreKey, n.FileHashVar()) - return true - }) -} - -// 生成Range指令。StreamRange可能超过文件总大小,但Range指令会在数据量不够时不报错而是正常返回 -func generateRange(ctx *ParseContext) { - for i := 0; i < len(ctx.Ft.Toes); i++ { - to := ctx.Ft.Toes[i] - toNode := ctx.ToNodes[to] - - toStrIdx := to.GetStreamIndex() - toRng := to.GetRange() - - if toStrIdx.IsRaw() { - n := ctx.DAG.NewRange() - toInput := toNode.Input() - *n.Env() = *toInput.Var().Src.Env() - rnged := n.RangeStream(toInput.Var(), exec.Range{ - Offset: toRng.Offset - 
ctx.StreamRange.Offset, - Length: toRng.Length, - }) - toInput.Var().NotTo(toNode) - toNode.SetInput(rnged) - - } else if toStrIdx.IsEC() { - stripSize := int64(ctx.Ft.ECParam.ChunkSize * ctx.Ft.ECParam.K) - blkStartIdx := ctx.StreamRange.Offset / stripSize - - blkStart := blkStartIdx * int64(ctx.Ft.ECParam.ChunkSize) - - n := ctx.DAG.NewRange() - toInput := toNode.Input() - *n.Env() = *toInput.Var().Src.Env() - rnged := n.RangeStream(toInput.Var(), exec.Range{ - Offset: toRng.Offset - blkStart, - Length: toRng.Length, - }) - toInput.Var().NotTo(toNode) - toNode.SetInput(rnged) - } else if toStrIdx.IsSegment() { - // if frNode, ok := toNode.Input().Var().From().Node.(ops2.FromNode); ok { - // // 目前只有To也是分段时,才可能对接一个提供分段的From,此时不需要再生成Range指令 - // if frNode.GetFrom().GetStreamIndex().IsSegment() { - // continue - // } - // } - - // segStart := ctx.Ft.SegmentParam.CalcSegmentStart(toStrIdx.Index) - // strStart := segStart + toRng.Offset - - // n := ctx.DAG.NewRange() - // toInput := toNode.Input() - // *n.Env() = *toInput.Var().From().Node.Env() - // rnged := n.RangeStream(toInput.Var(), exec.Range{ - // Offset: strStart - ctx.StreamRange.Offset, - // Length: toRng.Length, - // }) - // toInput.Var().NotTo(toNode, toInput.Index) - // toNode.SetInput(rnged) - } - } -} - -// 生成Clone指令 -func generateClone(ctx *ParseContext) { - ctx.DAG.Walk(func(node dag.Node) bool { - for _, outVar := range node.OutputStreams().Slots.RawArray() { - if outVar.Dst.Len() <= 1 { - continue - } - - c := ctx.DAG.NewCloneStream() - *c.Env() = *node.Env() - for _, dst := range outVar.Dst.RawArray() { - c.NewOutput().To(dst, dst.InputStreams().IndexOf(outVar)) - } - outVar.Dst.Resize(0) - c.SetInput(outVar) - } - - for _, outVar := range node.OutputValues().Slots.RawArray() { - if outVar.Dst.Len() <= 1 { - continue - } - - t := ctx.DAG.NewCloneValue() - *t.Env() = *node.Env() - for _, dst := range outVar.Dst.RawArray() { - t.NewOutput().To(dst, dst.InputValues().IndexOf(outVar)) - } - 
outVar.Dst.Resize(0) - t.SetInput(outVar) - } - - return true - }) + opt.RemoveUnusedFromNode(state) + opt.UseS2STransfer(state) + opt.UseMultipartUploadToShardStore(state) + opt.DropUnused(state) + opt.StoreShardWriteResult(state) + opt.GenerateRange(state) + opt.GenerateClone(state) + + return plan.Compile(state.DAG.Graph, blder) } diff --git a/common/pkgs/ioswitch2/parser/state/state.go b/common/pkgs/ioswitch2/parser/state/state.go new file mode 100644 index 0000000..9b8f483 --- /dev/null +++ b/common/pkgs/ioswitch2/parser/state/state.go @@ -0,0 +1,35 @@ +package state + +import ( + "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" + "gitlink.org.cn/cloudream/common/utils/math2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2" + "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2" +) + +type IndexedStream struct { + Stream *dag.StreamVar + StreamIndex ioswitch2.StreamIndex +} + +type GenerateState struct { + Ft ioswitch2.FromTo + DAG *ops2.GraphNodeBuilder + // 为了产生所有To所需的数据范围,而需要From打开的范围。 + // 这个范围是基于整个文件的,且上下界都取整到条带大小的整数倍,因此上界是有可能超过文件大小的。 + ToNodes map[ioswitch2.To]ops2.ToNode + FromNodes map[ioswitch2.From]ops2.FromNode + IndexedStreams []IndexedStream + StreamRange math2.Range + UseEC bool // 是否使用纠删码 + UseSegment bool // 是否使用分段 +} + +func InitGenerateState(ft ioswitch2.FromTo) *GenerateState { + return &GenerateState{ + Ft: ft, + DAG: ops2.NewGraphNodeBuilder(), + ToNodes: make(map[ioswitch2.To]ops2.ToNode), + FromNodes: make(map[ioswitch2.From]ops2.FromNode), + } +} diff --git a/common/pkgs/ioswitchlrc/fromto.go b/common/pkgs/ioswitchlrc/fromto.go index 7de6deb..8e65cab 100644 --- a/common/pkgs/ioswitchlrc/fromto.go +++ b/common/pkgs/ioswitchlrc/fromto.go @@ -3,6 +3,7 @@ package ioswitchlrc import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/math2" ) type From interface { @@ -13,7 +14,7 @@ type To interface { // 
To所需要的文件流的范围。具体含义与DataIndex有关系: // 如果DataIndex == -1,则表示在整个文件的范围。 // 如果DataIndex >= 0,则表示在文件的某个分片的范围。 - GetRange() exec.Range + GetRange() math2.Range GetDataIndex() int } @@ -24,7 +25,7 @@ type FromDriver struct { func NewFromDriver(dataIndex int) (*FromDriver, *exec.DriverWriteStream) { handle := &exec.DriverWriteStream{ - RangeHint: &exec.Range{}, + RangeHint: &math2.Range{}, } return &FromDriver{ Handle: handle, @@ -58,7 +59,7 @@ func (f *FromNode) GetDataIndex() int { type ToDriver struct { Handle *exec.DriverReadStream DataIndex int - Range exec.Range + Range math2.Range } func NewToDriver(dataIndex int) (*ToDriver, *exec.DriverReadStream) { @@ -69,7 +70,7 @@ func NewToDriver(dataIndex int) (*ToDriver, *exec.DriverReadStream) { }, &str } -func NewToDriverWithRange(dataIndex int, rng exec.Range) (*ToDriver, *exec.DriverReadStream) { +func NewToDriverWithRange(dataIndex int, rng math2.Range) (*ToDriver, *exec.DriverReadStream) { str := exec.DriverReadStream{} return &ToDriver{ Handle: &str, @@ -82,7 +83,7 @@ func (t *ToDriver) GetDataIndex() int { return t.DataIndex } -func (t *ToDriver) GetRange() exec.Range { +func (t *ToDriver) GetRange() math2.Range { return t.Range } @@ -90,7 +91,7 @@ type ToNode struct { Hub cdssdk.Hub Storage cdssdk.Storage DataIndex int - Range exec.Range + Range math2.Range FileHashStoreKey string } @@ -103,7 +104,7 @@ func NewToStorage(hub cdssdk.Hub, stg cdssdk.Storage, dataIndex int, fileHashSto } } -func NewToStorageWithRange(hub cdssdk.Hub, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string, rng exec.Range) *ToNode { +func NewToStorageWithRange(hub cdssdk.Hub, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string, rng math2.Range) *ToNode { return &ToNode{ Hub: hub, Storage: stg, @@ -117,7 +118,7 @@ func (t *ToNode) GetDataIndex() int { return t.DataIndex } -func (t *ToNode) GetRange() exec.Range { +func (t *ToNode) GetRange() math2.Range { return t.Range } diff --git a/common/pkgs/ioswitchlrc/ops2/chunked.go 
b/common/pkgs/ioswitchlrc/ops2/chunked.go index 6a23c26..a9da6c8 100644 --- a/common/pkgs/ioswitchlrc/ops2/chunked.go +++ b/common/pkgs/ioswitchlrc/ops2/chunked.go @@ -37,7 +37,10 @@ func (o *ChunkedSplit) Execute(ctx *exec.ExecContext, e *exec.Executor) error { sem := semaphore.NewWeighted(int64(len(outputs))) for i := range outputs { - sem.Acquire(ctx.Context, 1) + err = sem.Acquire(ctx.Context, 1) + if err != nil { + return err + } e.PutVar(o.Outputs[i], &exec.StreamValue{ Stream: io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) { diff --git a/common/pkgs/ioswitchlrc/ops2/range.go b/common/pkgs/ioswitchlrc/ops2/range.go index 82a454e..4bc70fa 100644 --- a/common/pkgs/ioswitchlrc/ops2/range.go +++ b/common/pkgs/ioswitchlrc/ops2/range.go @@ -81,7 +81,7 @@ func (o *Range) String() string { type RangeNode struct { dag.NodeBase - Range exec.Range + Range math2.Range } func (b *GraphNodeBuilder) NewRange() *RangeNode { @@ -93,7 +93,7 @@ func (b *GraphNodeBuilder) NewRange() *RangeNode { return node } -func (t *RangeNode) RangeStream(input *dag.StreamVar, rng exec.Range) *dag.StreamVar { +func (t *RangeNode) RangeStream(input *dag.StreamVar, rng math2.Range) *dag.StreamVar { input.To(t, 0) t.Range = rng return t.OutputStreams().Get(0) diff --git a/common/pkgs/ioswitchlrc/ops2/shard_store.go b/common/pkgs/ioswitchlrc/ops2/shard_store.go index d6348f5..34c0e33 100644 --- a/common/pkgs/ioswitchlrc/ops2/shard_store.go +++ b/common/pkgs/ioswitchlrc/ops2/shard_store.go @@ -11,7 +11,7 @@ import ( cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" "gitlink.org.cn/cloudream/common/utils/io2" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" ) @@ -41,12 +41,12 @@ func (o *ShardRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error { Debugf("reading from shard 
store") defer logger.Debugf("reading from shard store finished") - stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx) + stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx) if err != nil { return fmt.Errorf("getting storage manager: %w", err) } - store, err := stgMgr.GetShardStore(o.StorageID) + store, err := stgAgts.GetShardStore(o.StorageID) if err != nil { return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err) } @@ -83,12 +83,12 @@ func (o *ShardWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error { Debugf("writting file to shard store") defer logger.Debugf("write to shard store finished") - stgMgr, err := exec.GetValueByType[*svcmgr.Manager](ctx) + stgAgts, err := exec.GetValueByType[*agtpool.AgentPool](ctx) if err != nil { return fmt.Errorf("getting storage manager: %w", err) } - store, err := stgMgr.GetShardStore(o.StorageID) + store, err := stgAgts.GetShardStore(o.StorageID) if err != nil { return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err) } diff --git a/common/pkgs/ioswitchlrc/parser/generator.go b/common/pkgs/ioswitchlrc/parser/generator.go index 8cd720e..c32867a 100644 --- a/common/pkgs/ioswitchlrc/parser/generator.go +++ b/common/pkgs/ioswitchlrc/parser/generator.go @@ -7,6 +7,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/plan" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc/ops2" ) @@ -16,7 +17,7 @@ type GenerateContext struct { DAG *ops2.GraphNodeBuilder To []ioswitchlrc.To ToNodes map[ioswitchlrc.To]ops2.ToNode - StreamRange exec.Range + StreamRange math2.Range } // 输入一个完整文件,从这个完整文件产生任意文件块(也可再产生完整文件)。 @@ -48,7 +49,7 @@ func Encode(fr ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec.PlanBuilder) generateClone(&ctx) generateRange(&ctx) - return 
plan.Generate(ctx.DAG.Graph, blder) + return plan.Compile(ctx.DAG.Graph, blder) } func buildDAGEncode(ctx *GenerateContext, fr ioswitchlrc.From, toes []ioswitchlrc.To) error { @@ -145,7 +146,7 @@ func ReconstructAny(frs []ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec.P generateClone(&ctx) generateRange(&ctx) - return plan.Generate(ctx.DAG.Graph, blder) + return plan.Compile(ctx.DAG.Graph, blder) } func buildDAGReconstructAny(ctx *GenerateContext, frs []ioswitchlrc.From, toes []ioswitchlrc.To) error { @@ -266,7 +267,7 @@ func ReconstructGroup(frs []ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec generateClone(&ctx) generateRange(&ctx) - return plan.Generate(ctx.DAG.Graph, blder) + return plan.Compile(ctx.DAG.Graph, blder) } func buildDAGReconstructGroup(ctx *GenerateContext, frs []ioswitchlrc.From, toes []ioswitchlrc.To) error { diff --git a/common/pkgs/ioswitchlrc/parser/passes.go b/common/pkgs/ioswitchlrc/parser/passes.go index d028b22..d7f3606 100644 --- a/common/pkgs/ioswitchlrc/parser/passes.go +++ b/common/pkgs/ioswitchlrc/parser/passes.go @@ -5,7 +5,6 @@ import ( "math" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" - "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc" @@ -17,7 +16,7 @@ import ( func calcStreamRange(ctx *GenerateContext) { stripSize := int64(ctx.LRC.ChunkSize * ctx.LRC.K) - rng := exec.Range{ + rng := math2.Range{ Offset: math.MaxInt64, } @@ -49,8 +48,8 @@ func calcStreamRange(ctx *GenerateContext) { } func buildFromNode(ctx *GenerateContext, f ioswitchlrc.From) (ops2.FromNode, error) { - var repRange exec.Range - var blkRange exec.Range + var repRange math2.Range + var blkRange math2.Range repRange.Offset = ctx.StreamRange.Offset blkRange.Offset = ctx.StreamRange.Offset / int64(ctx.LRC.ChunkSize*ctx.LRC.K) * int64(ctx.LRC.ChunkSize) @@ -234,7 +233,7 @@ func 
generateRange(ctx *GenerateContext) { n := ctx.DAG.NewRange() toInput := toNode.Input() *n.Env() = *toInput.Var().Src.Env() - rnged := n.RangeStream(toInput.Var(), exec.Range{ + rnged := n.RangeStream(toInput.Var(), math2.Range{ Offset: toRng.Offset - ctx.StreamRange.Offset, Length: toRng.Length, }) @@ -250,7 +249,7 @@ func generateRange(ctx *GenerateContext) { n := ctx.DAG.NewRange() toInput := toNode.Input() *n.Env() = *toInput.Var().Src.Env() - rnged := n.RangeStream(toInput.Var(), exec.Range{ + rnged := n.RangeStream(toInput.Var(), math2.Range{ Offset: toRng.Offset - blkStart, Length: toRng.Length, }) diff --git a/common/pkgs/metacache/connectivity.go b/common/pkgs/metacache/connectivity.go new file mode 100644 index 0000000..7924661 --- /dev/null +++ b/common/pkgs/metacache/connectivity.go @@ -0,0 +1,96 @@ +package metacache + +import ( + "sync" + "time" + + "gitlink.org.cn/cloudream/common/pkgs/logger" + cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + stgglb "gitlink.org.cn/cloudream/storage/common/globals" + coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" +) + +func (m *MetaCacheHost) AddConnectivity() *Connectivity { + cache := &Connectivity{ + entries: make(map[cdssdk.HubID]*ConnectivityEntry), + } + + m.caches = append(m.caches, cache) + return cache +} + +type Connectivity struct { + lock sync.RWMutex + entries map[cdssdk.HubID]*ConnectivityEntry +} + +func (c *Connectivity) Get(from cdssdk.HubID, to cdssdk.HubID) *time.Duration { + for i := 0; i < 2; i++ { + c.lock.RLock() + entry, ok := c.entries[from] + if ok { + con, ok := entry.To[to] + if ok { + c.lock.RUnlock() + + if con.Latency == nil { + return nil + } + l := time.Millisecond * time.Duration(*con.Latency) + return &l + } + } + c.lock.RUnlock() + + c.load(from) + } + + return nil +} + +func (c *Connectivity) ClearOutdated() { + c.lock.Lock() + defer c.lock.Unlock() + + for hubID, entry := range c.entries { + if time.Since(entry.UpdateTime) > time.Minute*5 { + 
delete(c.entries, hubID) + } + } +} + +func (c *Connectivity) load(hubID cdssdk.HubID) { + coorCli, err := stgglb.CoordinatorMQPool.Acquire() + if err != nil { + logger.Warnf("new coordinator client: %v", err) + return + } + defer stgglb.CoordinatorMQPool.Release(coorCli) + + get, err := coorCli.GetHubConnectivities(coormq.ReqGetHubConnectivities([]cdssdk.HubID{hubID})) + if err != nil { + logger.Warnf("get hub connectivities: %v", err) + return + } + + c.lock.Lock() + defer c.lock.Unlock() + + ce := &ConnectivityEntry{ + From: hubID, + To: make(map[cdssdk.HubID]cdssdk.HubConnectivity), + UpdateTime: time.Now(), + } + + for _, conn := range get.Connectivities { + ce.To[conn.ToHubID] = conn + } + + c.entries[hubID] = ce +} + +type ConnectivityEntry struct { + From cdssdk.HubID + To map[cdssdk.HubID]cdssdk.HubConnectivity + UpdateTime time.Time +} diff --git a/common/pkgs/metacache/host.go b/common/pkgs/metacache/host.go new file mode 100644 index 0000000..542a81a --- /dev/null +++ b/common/pkgs/metacache/host.go @@ -0,0 +1,27 @@ +package metacache + +import "time" + +type MetaCache interface { + ClearOutdated() +} + +type MetaCacheHost struct { + caches []MetaCache +} + +func NewHost() *MetaCacheHost { + return &MetaCacheHost{} +} + +func (m *MetaCacheHost) Serve() { + ticker := time.NewTicker(time.Minute) + for { + select { + case <-ticker.C: + for _, cache := range m.caches { + cache.ClearOutdated() + } + } + } +} diff --git a/common/pkgs/metacache/hubmeta.go b/common/pkgs/metacache/hubmeta.go new file mode 100644 index 0000000..e81455c --- /dev/null +++ b/common/pkgs/metacache/hubmeta.go @@ -0,0 +1,75 @@ +package metacache + +import ( + "time" + + "gitlink.org.cn/cloudream/common/pkgs/logger" + cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + stgglb "gitlink.org.cn/cloudream/storage/common/globals" + coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" +) + +func (m *MetaCacheHost) AddHubMeta() *HubMeta { + meta := &HubMeta{} + meta.cache = 
NewSimpleMetaCache(SimpleMetaCacheConfig[cdssdk.HubID, cdssdk.Hub]{ + Getter: meta.load, + Expire: time.Minute * 5, + }) + + m.caches = append(m.caches, meta) + return meta +} + +type HubMeta struct { + cache *SimpleMetaCache[cdssdk.HubID, cdssdk.Hub] +} + +func (h *HubMeta) Get(hubID cdssdk.HubID) *cdssdk.Hub { + v, ok := h.cache.Get(hubID) + if ok { + return &v + } + return nil +} + +func (h *HubMeta) GetMany(hubIDs []cdssdk.HubID) []*cdssdk.Hub { + vs, oks := h.cache.GetMany(hubIDs) + ret := make([]*cdssdk.Hub, len(vs)) + for i := range vs { + if oks[i] { + ret[i] = &vs[i] + } + } + return ret +} + +func (h *HubMeta) ClearOutdated() { + h.cache.ClearOutdated() +} + +func (h *HubMeta) load(keys []cdssdk.HubID) ([]cdssdk.Hub, []bool) { + vs := make([]cdssdk.Hub, len(keys)) + oks := make([]bool, len(keys)) + + coorCli, err := stgglb.CoordinatorMQPool.Acquire() + if err != nil { + logger.Warnf("new coordinator client: %v", err) + return vs, oks + } + defer stgglb.CoordinatorMQPool.Release(coorCli) + + get, err := coorCli.GetHubs(coormq.NewGetHubs(keys)) + if err != nil { + logger.Warnf("get hubs: %v", err) + return vs, oks + } + + for i := range keys { + if get.Hubs[i] != nil { + vs[i] = *get.Hubs[i] + oks[i] = true + } + } + + return vs, oks +} diff --git a/common/pkgs/metacache/simple.go b/common/pkgs/metacache/simple.go new file mode 100644 index 0000000..ff2f780 --- /dev/null +++ b/common/pkgs/metacache/simple.go @@ -0,0 +1,121 @@ +package metacache + +import ( + "sync" + "time" +) + +type SimpleMetaCacheConfig[K comparable, V any] struct { + Getter Getter[K, V] + Expire time.Duration +} + +type Getter[K comparable, V any] func(keys []K) ([]V, []bool) + +type SimpleMetaCache[K comparable, V any] struct { + lock sync.RWMutex + cache map[K]*CacheEntry[K, V] + cfg SimpleMetaCacheConfig[K, V] +} + +func NewSimpleMetaCache[K comparable, V any](cfg SimpleMetaCacheConfig[K, V]) *SimpleMetaCache[K, V] { + return &SimpleMetaCache[K, V]{ + cache: make(map[K]*CacheEntry[K, 
V]), + cfg: cfg, + } +} + +func (mc *SimpleMetaCache[K, V]) Get(key K) (V, bool) { + var ret V + var ok bool + + for i := 0; i < 2; i++ { + mc.lock.RLock() + entry, o := mc.cache[key] + if o { + ret = entry.Data + ok = true + } + mc.lock.RUnlock() + + if o { + break + } + + mc.load([]K{key}) + } + + return ret, ok +} + +func (mc *SimpleMetaCache[K, V]) GetMany(keys []K) ([]V, []bool) { + result := make([]V, len(keys)) + oks := make([]bool, len(keys)) + + for i := 0; i < 2; i++ { + allGet := true + mc.lock.RLock() + for i, key := range keys { + entry, ok := mc.cache[key] + if ok { + result[i] = entry.Data + oks[i] = true + } else { + allGet = false + } + } + mc.lock.RUnlock() + + if allGet { + break + } + + mc.load(keys) + } + + return result, oks +} + +func (mc *SimpleMetaCache[K, V]) load(keys []K) { + vs, getOks := mc.cfg.Getter(keys) + + mc.lock.Lock() + defer mc.lock.Unlock() + + for i, key := range keys { + if !getOks[i] { + continue + } + + _, ok := mc.cache[key] + // 缓存中已有key则认为缓存中是最新的,不再更新 + if ok { + continue + } + + entry := &CacheEntry[K, V]{ + Key: key, + Data: vs[i], + UpdateTime: time.Now(), + } + mc.cache[key] = entry + } +} + +func (mc *SimpleMetaCache[K, V]) ClearOutdated() { + mc.lock.Lock() + defer mc.lock.Unlock() + + for key, entry := range mc.cache { + dt := time.Since(entry.UpdateTime) + if dt > mc.cfg.Expire || dt < 0 { + delete(mc.cache, key) + } + } +} + +type CacheEntry[K comparable, V any] struct { + Key K + Data V + UpdateTime time.Time +} diff --git a/common/pkgs/metacache/storagemeta.go b/common/pkgs/metacache/storagemeta.go new file mode 100644 index 0000000..9d1a041 --- /dev/null +++ b/common/pkgs/metacache/storagemeta.go @@ -0,0 +1,76 @@ +package metacache + +import ( + "time" + + "gitlink.org.cn/cloudream/common/pkgs/logger" + cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + stgglb "gitlink.org.cn/cloudream/storage/common/globals" + stgmod "gitlink.org.cn/cloudream/storage/common/models" + coormq 
"gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" +) + +func (m *MetaCacheHost) AddStorageMeta() *StorageMeta { + meta := &StorageMeta{} + meta.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[cdssdk.StorageID, stgmod.StorageDetail]{ + Getter: meta.load, + Expire: time.Minute * 5, + }) + + m.caches = append(m.caches, meta) + return meta +} + +type StorageMeta struct { + cache *SimpleMetaCache[cdssdk.StorageID, stgmod.StorageDetail] +} + +func (s *StorageMeta) Get(stgID cdssdk.StorageID) *stgmod.StorageDetail { + v, ok := s.cache.Get(stgID) + if ok { + return &v + } + return nil +} + +func (s *StorageMeta) GetMany(stgIDs []cdssdk.StorageID) []*stgmod.StorageDetail { + vs, oks := s.cache.GetMany(stgIDs) + ret := make([]*stgmod.StorageDetail, len(vs)) + for i := range vs { + if oks[i] { + ret[i] = &vs[i] + } + } + return ret +} + +func (s *StorageMeta) ClearOutdated() { + s.cache.ClearOutdated() +} + +func (s *StorageMeta) load(keys []cdssdk.StorageID) ([]stgmod.StorageDetail, []bool) { + vs := make([]stgmod.StorageDetail, len(keys)) + oks := make([]bool, len(keys)) + + coorCli, err := stgglb.CoordinatorMQPool.Acquire() + if err != nil { + logger.Warnf("new coordinator client: %v", err) + return vs, oks + } + defer stgglb.CoordinatorMQPool.Release(coorCli) + + get, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(keys)) + if err != nil { + logger.Warnf("get storage details: %v", err) + return vs, oks + } + + for i := range keys { + if get.Storages[i] != nil { + vs[i] = *get.Storages[i] + oks[i] = true + } + } + + return vs, oks +} diff --git a/common/pkgs/mq/agent/client.go b/common/pkgs/mq/agent/client.go index 5debb8a..4a84ed7 100644 --- a/common/pkgs/mq/agent/client.go +++ b/common/pkgs/mq/agent/client.go @@ -13,8 +13,8 @@ type Client struct { id cdssdk.HubID } -func NewClient(id cdssdk.HubID, cfg *stgmq.Config) (*Client, error) { - rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), stgmq.MakeAgentQueueName(int64(id)), "") 
+func NewClient(id cdssdk.HubID, cfg mq.Config) (*Client, error) { + rabbitCli, err := mq.NewRabbitMQTransport(cfg, stgmq.MakeAgentQueueName(int64(id)), "") if err != nil { return nil, err } @@ -35,12 +35,12 @@ type Pool interface { } type pool struct { - mqcfg *stgmq.Config + mqcfg mq.Config shareds map[cdssdk.HubID]*Client lock sync.Mutex } -func NewPool(mqcfg *stgmq.Config) Pool { +func NewPool(mqcfg mq.Config) Pool { return &pool{ mqcfg: mqcfg, shareds: make(map[cdssdk.HubID]*Client), diff --git a/common/pkgs/mq/agent/server.go b/common/pkgs/mq/agent/server.go index eeeccfc..ec55be5 100644 --- a/common/pkgs/mq/agent/server.go +++ b/common/pkgs/mq/agent/server.go @@ -20,18 +20,17 @@ type Server struct { rabbitSvr mq.RabbitMQServer } -func NewServer(svc Service, id cdssdk.HubID, cfg *mymq.Config) (*Server, error) { +func NewServer(svc Service, id cdssdk.HubID, cfg mq.Config) (*Server, error) { srv := &Server{ service: svc, } rabbitSvr, err := mq.NewRabbitMQServer( - cfg.MakeConnectingURL(), + cfg, mymq.MakeAgentQueueName(int64(id)), func(msg *mq.Message) (*mq.Message, error) { return msgDispatcher.Handle(srv.service, msg) }, - cfg.Param, ) if err != nil { return nil, err diff --git a/common/pkgs/mq/agent/storage.go b/common/pkgs/mq/agent/storage.go index 8fac030..1182752 100644 --- a/common/pkgs/mq/agent/storage.go +++ b/common/pkgs/mq/agent/storage.go @@ -3,142 +3,14 @@ package agent import ( "gitlink.org.cn/cloudream/common/pkgs/mq" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - - stgmod "gitlink.org.cn/cloudream/storage/common/models" - "gitlink.org.cn/cloudream/storage/common/pkgs/db2/model" ) type StorageService interface { - StartStorageLoadPackage(msg *StartStorageLoadPackage) (*StartStorageLoadPackageResp, *mq.CodeMessage) - - WaitStorageLoadPackage(msg *WaitStorageLoadPackage) (*WaitStorageLoadPackageResp, *mq.CodeMessage) - - StorageCheck(msg *StorageCheck) (*StorageCheckResp, *mq.CodeMessage) - - StorageGC(msg *StorageGC) (*StorageGCResp, 
*mq.CodeMessage) - StartStorageCreatePackage(msg *StartStorageCreatePackage) (*StartStorageCreatePackageResp, *mq.CodeMessage) WaitStorageCreatePackage(msg *WaitStorageCreatePackage) (*WaitStorageCreatePackageResp, *mq.CodeMessage) } -// 启动调度Package的任务 -var _ = Register(Service.StartStorageLoadPackage) - -type StartStorageLoadPackage struct { - mq.MessageBodyBase - UserID cdssdk.UserID `json:"userID"` - PackageID cdssdk.PackageID `json:"packageID"` - StorageID cdssdk.StorageID `json:"storageID"` -} -type StartStorageLoadPackageResp struct { - mq.MessageBodyBase - TaskID string `json:"taskID"` -} - -func NewStartStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) *StartStorageLoadPackage { - return &StartStorageLoadPackage{ - UserID: userID, - PackageID: packageID, - StorageID: storageID, - } -} -func NewStartStorageLoadPackageResp(taskID string) *StartStorageLoadPackageResp { - return &StartStorageLoadPackageResp{ - TaskID: taskID, - } -} -func (client *Client) StartStorageLoadPackage(msg *StartStorageLoadPackage, opts ...mq.RequestOption) (*StartStorageLoadPackageResp, error) { - return mq.Request(Service.StartStorageLoadPackage, client.rabbitCli, msg, opts...) 
-} - -// 等待调度Package的任务 -var _ = Register(Service.WaitStorageLoadPackage) - -type WaitStorageLoadPackage struct { - mq.MessageBodyBase - TaskID string `json:"taskID"` - WaitTimeoutMs int64 `json:"waitTimeout"` -} -type WaitStorageLoadPackageResp struct { - mq.MessageBodyBase - IsComplete bool `json:"isComplete"` - Error string `json:"error"` - PackagePath string `json:"packagePath"` // 加载后的Package的路径,相对于数据库中配置的Directory - LocalBase string `json:"localBase"` // 存储服务本地的目录,LocalBase + PackagePath = Package在代理节点上的完整路径 - RemoteBase string `json:"remoteBase"` // 存储服务远程的目录,RemoteBase + PackagePath = Package在存储服务中的完整路径 -} - -func NewWaitStorageLoadPackage(taskID string, waitTimeoutMs int64) *WaitStorageLoadPackage { - return &WaitStorageLoadPackage{ - TaskID: taskID, - WaitTimeoutMs: waitTimeoutMs, - } -} -func NewWaitStorageLoadPackageResp(isComplete bool, err string, packagePath string, localBase string, remoteBase string) *WaitStorageLoadPackageResp { - return &WaitStorageLoadPackageResp{ - IsComplete: isComplete, - Error: err, - PackagePath: packagePath, - LocalBase: localBase, - RemoteBase: remoteBase, - } -} -func (client *Client) WaitStorageLoadPackage(msg *WaitStorageLoadPackage, opts ...mq.RequestOption) (*WaitStorageLoadPackageResp, error) { - return mq.Request(Service.WaitStorageLoadPackage, client.rabbitCli, msg, opts...) 
-} - -// 检查Storage -var _ = Register(Service.StorageCheck) - -type StorageCheck struct { - mq.MessageBodyBase - StorageID cdssdk.StorageID `json:"storageID"` -} -type StorageCheckResp struct { - mq.MessageBodyBase - Packages []stgmod.LoadedPackageID `json:"packages"` -} - -func NewStorageCheck(storageID cdssdk.StorageID) *StorageCheck { - return &StorageCheck{ - StorageID: storageID, - } -} -func NewStorageCheckResp(packages []stgmod.LoadedPackageID) *StorageCheckResp { - return &StorageCheckResp{ - Packages: packages, - } -} -func (client *Client) StorageCheck(msg *StorageCheck, opts ...mq.RequestOption) (*StorageCheckResp, error) { - return mq.Request(Service.StorageCheck, client.rabbitCli, msg, opts...) -} - -// 清理Cache中不用的文件 -var _ = Register(Service.StorageGC) - -type StorageGC struct { - mq.MessageBodyBase - StorageID cdssdk.StorageID `json:"storageID"` - Packages []model.StoragePackage `json:"packages"` -} -type StorageGCResp struct { - mq.MessageBodyBase -} - -func ReqStorageGC(storageID cdssdk.StorageID, packages []model.StoragePackage) *StorageGC { - return &StorageGC{ - StorageID: storageID, - Packages: packages, - } -} -func RespStorageGC() *StorageGCResp { - return &StorageGCResp{} -} -func (client *Client) StorageGC(msg *StorageGC, opts ...mq.RequestOption) (*StorageGCResp, error) { - return mq.Request(Service.StorageGC, client.rabbitCli, msg, opts...) 
-} - // 启动从Storage上传Package的任务 var _ = Register(Service.StartStorageCreatePackage) diff --git a/common/pkgs/mq/config.go b/common/pkgs/mq/config.go deleted file mode 100644 index f572d53..0000000 --- a/common/pkgs/mq/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package mq - -import ( - "fmt" - - "gitlink.org.cn/cloudream/common/pkgs/mq" -) - -type Config struct { - Address string `json:"address"` - Account string `json:"account"` - Password string `json:"password"` - VHost string `json:"vhost"` - Param mq.RabbitMQParam `json:"param"` -} - -func (cfg *Config) MakeConnectingURL() string { - return fmt.Sprintf("amqp://%s:%s@%s%s", cfg.Account, cfg.Password, cfg.Address, cfg.VHost) -} diff --git a/common/pkgs/mq/coordinator/client.go b/common/pkgs/mq/coordinator/client.go index 84a4f28..ad7c1cb 100644 --- a/common/pkgs/mq/coordinator/client.go +++ b/common/pkgs/mq/coordinator/client.go @@ -11,8 +11,8 @@ type Client struct { rabbitCli *mq.RabbitMQTransport } -func NewClient(cfg *stgmq.Config) (*Client, error) { - rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), stgmq.COORDINATOR_QUEUE_NAME, "") +func NewClient(cfg mq.Config) (*Client, error) { + rabbitCli, err := mq.NewRabbitMQTransport(cfg, stgmq.COORDINATOR_QUEUE_NAME, "") if err != nil { return nil, err } @@ -32,12 +32,12 @@ type Pool interface { } type pool struct { - mqcfg *stgmq.Config + mqcfg mq.Config shared *Client lock sync.Mutex } -func NewPool(mqcfg *stgmq.Config) Pool { +func NewPool(mqcfg mq.Config) Pool { return &pool{ mqcfg: mqcfg, } diff --git a/common/pkgs/mq/coordinator/hub.go b/common/pkgs/mq/coordinator/hub.go index de7921d..bc2cf7b 100644 --- a/common/pkgs/mq/coordinator/hub.go +++ b/common/pkgs/mq/coordinator/hub.go @@ -80,7 +80,7 @@ type GetHubs struct { } type GetHubsResp struct { mq.MessageBodyBase - Hubs []cdssdk.Hub `json:"hubs"` + Hubs []*cdssdk.Hub `json:"hubs"` } func NewGetHubs(hubIDs []cdssdk.HubID) *GetHubs { @@ -88,7 +88,7 @@ func NewGetHubs(hubIDs []cdssdk.HubID) 
*GetHubs { HubIDs: hubIDs, } } -func NewGetHubsResp(hubs []cdssdk.Hub) *GetHubsResp { +func NewGetHubsResp(hubs []*cdssdk.Hub) *GetHubsResp { return &GetHubsResp{ Hubs: hubs, } @@ -96,7 +96,7 @@ func NewGetHubsResp(hubs []cdssdk.Hub) *GetHubsResp { func (r *GetHubsResp) GetHub(id cdssdk.HubID) *cdssdk.Hub { for _, n := range r.Hubs { if n.HubID == id { - return &n + return n } } diff --git a/common/pkgs/mq/coordinator/object.go b/common/pkgs/mq/coordinator/object.go index a792d13..c90712d 100644 --- a/common/pkgs/mq/coordinator/object.go +++ b/common/pkgs/mq/coordinator/object.go @@ -10,6 +10,8 @@ import ( ) type ObjectService interface { + GetObjects(msg *GetObjects) (*GetObjectsResp, *mq.CodeMessage) + GetObjectsByPath(msg *GetObjectsByPath) (*GetObjectsByPathResp, *mq.CodeMessage) GetPackageObjects(msg *GetPackageObjects) (*GetPackageObjectsResp, *mq.CodeMessage) @@ -26,11 +28,40 @@ type ObjectService interface { DeleteObjects(msg *DeleteObjects) (*DeleteObjectsResp, *mq.CodeMessage) + CloneObjects(msg *CloneObjects) (*CloneObjectsResp, *mq.CodeMessage) + GetDatabaseAll(msg *GetDatabaseAll) (*GetDatabaseAllResp, *mq.CodeMessage) AddAccessStat(msg *AddAccessStat) } +var _ = Register(Service.GetObjects) + +type GetObjects struct { + mq.MessageBodyBase + UserID cdssdk.UserID `json:"userID"` + ObjectIDs []cdssdk.ObjectID `json:"objectIDs"` +} +type GetObjectsResp struct { + mq.MessageBodyBase + Objects []*cdssdk.Object `json:"objects"` +} + +func ReqGetObjects(userID cdssdk.UserID, objectIDs []cdssdk.ObjectID) *GetObjects { + return &GetObjects{ + UserID: userID, + ObjectIDs: objectIDs, + } +} +func RespGetObjects(objects []*cdssdk.Object) *GetObjectsResp { + return &GetObjectsResp{ + Objects: objects, + } +} +func (client *Client) GetObjects(msg *GetObjects) (*GetObjectsResp, error) { + return mq.Request(Service.GetObjects, client.rabbitCli, msg) +} + // 查询指定前缀的Object,返回的Objects会按照ObjectID升序 var _ = Register(Service.GetObjectsByPath) @@ -256,6 +287,34 @@ func 
(client *Client) DeleteObjects(msg *DeleteObjects) (*DeleteObjectsResp, err return mq.Request(Service.DeleteObjects, client.rabbitCli, msg) } +// 克隆Object +var _ = Register(Service.CloneObjects) + +type CloneObjects struct { + mq.MessageBodyBase + UserID cdssdk.UserID `json:"userID"` + Clonings []cdsapi.CloningObject `json:"clonings"` +} +type CloneObjectsResp struct { + mq.MessageBodyBase + Objects []*cdssdk.Object `json:"objects"` +} + +func ReqCloneObjects(userID cdssdk.UserID, clonings []cdsapi.CloningObject) *CloneObjects { + return &CloneObjects{ + UserID: userID, + Clonings: clonings, + } +} +func RespCloneObjects(objects []*cdssdk.Object) *CloneObjectsResp { + return &CloneObjectsResp{ + Objects: objects, + } +} +func (client *Client) CloneObjects(msg *CloneObjects) (*CloneObjectsResp, error) { + return mq.Request(Service.CloneObjects, client.rabbitCli, msg) +} + // 增加访问计数 var _ = RegisterNoReply(Service.AddAccessStat) diff --git a/common/pkgs/mq/coordinator/package.go b/common/pkgs/mq/coordinator/package.go index 1ae60b7..e077a7b 100644 --- a/common/pkgs/mq/coordinator/package.go +++ b/common/pkgs/mq/coordinator/package.go @@ -20,9 +20,9 @@ type PackageService interface { DeletePackage(msg *DeletePackage) (*DeletePackageResp, *mq.CodeMessage) - GetPackageCachedStorages(msg *GetPackageCachedStorages) (*GetPackageCachedStoragesResp, *mq.CodeMessage) + ClonePackage(msg *ClonePackage) (*ClonePackageResp, *mq.CodeMessage) - GetPackageLoadedStorages(msg *GetPackageLoadedStorages) (*GetPackageLoadedStoragesResp, *mq.CodeMessage) + GetPackageCachedStorages(msg *GetPackageCachedStorages) (*GetPackageCachedStoragesResp, *mq.CodeMessage) } // 获取Package基本信息 @@ -186,6 +186,39 @@ func (client *Client) DeletePackage(msg *DeletePackage) (*DeletePackageResp, err return mq.Request(Service.DeletePackage, client.rabbitCli, msg) } +// 克隆Package +var _ = Register(Service.ClonePackage) + +type ClonePackage struct { + mq.MessageBodyBase + UserID cdssdk.UserID `json:"userID"` + 
PackageID cdssdk.PackageID `json:"packageID"` + BucketID cdssdk.BucketID `json:"bucketID"` + Name string `json:"name"` +} +type ClonePackageResp struct { + mq.MessageBodyBase + Package cdssdk.Package `json:"package"` +} + +func ReqClonePackage(userID cdssdk.UserID, packageID cdssdk.PackageID, bucketID cdssdk.BucketID, name string) *ClonePackage { + return &ClonePackage{ + UserID: userID, + PackageID: packageID, + BucketID: bucketID, + Name: name, + } +} +func RespClonePackage(pkg cdssdk.Package) *ClonePackageResp { + return &ClonePackageResp{ + Package: pkg, + } +} + +func (client *Client) ClonePackage(msg *ClonePackage) (*ClonePackageResp, error) { + return mq.Request(Service.ClonePackage, client.rabbitCli, msg) +} + // 根据PackageID获取object分布情况 var _ = Register(Service.GetPackageCachedStorages) @@ -225,34 +258,3 @@ func ReqGetPackageCachedStoragesResp(stgInfos []cdssdk.StoragePackageCachingInfo func (client *Client) GetPackageCachedStorages(msg *GetPackageCachedStorages) (*GetPackageCachedStoragesResp, error) { return mq.Request(Service.GetPackageCachedStorages, client.rabbitCli, msg) } - -// 根据PackageID获取storage分布情况 -var _ = Register(Service.GetPackageLoadedStorages) - -type GetPackageLoadedStorages struct { - mq.MessageBodyBase - UserID cdssdk.UserID `json:"userID"` - PackageID cdssdk.PackageID `json:"packageID"` -} - -type GetPackageLoadedStoragesResp struct { - mq.MessageBodyBase - StorageIDs []cdssdk.StorageID `json:"storageIDs"` -} - -func ReqGetPackageLoadedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) *GetPackageLoadedStorages { - return &GetPackageLoadedStorages{ - UserID: userID, - PackageID: packageID, - } -} - -func NewGetPackageLoadedStoragesResp(stgIDs []cdssdk.StorageID) *GetPackageLoadedStoragesResp { - return &GetPackageLoadedStoragesResp{ - StorageIDs: stgIDs, - } -} - -func (client *Client) GetPackageLoadedStorages(msg *GetPackageLoadedStorages) (*GetPackageLoadedStoragesResp, error) { - return 
mq.Request(Service.GetPackageLoadedStorages, client.rabbitCli, msg) -} diff --git a/common/pkgs/mq/coordinator/server.go b/common/pkgs/mq/coordinator/server.go index 49d0319..d286b2f 100644 --- a/common/pkgs/mq/coordinator/server.go +++ b/common/pkgs/mq/coordinator/server.go @@ -28,18 +28,17 @@ type Server struct { rabbitSvr mq.RabbitMQServer } -func NewServer(svc Service, cfg *mymq.Config) (*Server, error) { +func NewServer(svc Service, cfg mq.Config) (*Server, error) { srv := &Server{ service: svc, } rabbitSvr, err := mq.NewRabbitMQServer( - cfg.MakeConnectingURL(), + cfg, mymq.COORDINATOR_QUEUE_NAME, func(msg *mq.Message) (*mq.Message, error) { return msgDispatcher.Handle(srv.service, msg) }, - cfg.Param, ) if err != nil { return nil, err @@ -53,7 +52,7 @@ func (s *Server) Stop() { s.rabbitSvr.Close() } -func (s *Server) Start(cfg mymq.Config) *sync2.UnboundChannel[mq.RabbitMQServerEvent] { +func (s *Server) Start(cfg mq.Config) *sync2.UnboundChannel[mq.RabbitMQServerEvent] { return s.rabbitSvr.Start() } diff --git a/common/pkgs/mq/coordinator/storage.go b/common/pkgs/mq/coordinator/storage.go index e1a3a97..9eff9fc 100644 --- a/common/pkgs/mq/coordinator/storage.go +++ b/common/pkgs/mq/coordinator/storage.go @@ -144,24 +144,26 @@ var _ = Register(Service.StoragePackageLoaded) type StoragePackageLoaded struct { mq.MessageBodyBase - UserID cdssdk.UserID `json:"userID"` - StorageID cdssdk.StorageID `json:"storageID"` - PackageID cdssdk.PackageID `json:"packageID"` - PinnedBlocks []stgmod.ObjectBlock `json:"pinnedBlocks"` + UserID cdssdk.UserID `json:"userID"` + PackageID cdssdk.PackageID `json:"packageID"` + StorageID cdssdk.StorageID `json:"storageID"` + RootPath string `json:"rootPath"` + PinnedObjects []cdssdk.ObjectID `json:"pinnedObjects"` } type StoragePackageLoadedResp struct { mq.MessageBodyBase } -func NewStoragePackageLoaded(userID cdssdk.UserID, stgID cdssdk.StorageID, packageID cdssdk.PackageID, pinnedBlocks []stgmod.ObjectBlock) *StoragePackageLoaded 
{ +func ReqStoragePackageLoaded(userID cdssdk.UserID, stgID cdssdk.StorageID, packageID cdssdk.PackageID, rootPath string, pinnedObjects []cdssdk.ObjectID) *StoragePackageLoaded { return &StoragePackageLoaded{ - UserID: userID, - PackageID: packageID, - StorageID: stgID, - PinnedBlocks: pinnedBlocks, + UserID: userID, + PackageID: packageID, + StorageID: stgID, + RootPath: rootPath, + PinnedObjects: pinnedObjects, } } -func NewStoragePackageLoadedResp() *StoragePackageLoadedResp { +func RespStoragePackageLoaded() *StoragePackageLoadedResp { return &StoragePackageLoadedResp{} } func (client *Client) StoragePackageLoaded(msg *StoragePackageLoaded) (*StoragePackageLoadedResp, error) { diff --git a/common/pkgs/mq/scanner/client.go b/common/pkgs/mq/scanner/client.go index 156a16d..0970d53 100644 --- a/common/pkgs/mq/scanner/client.go +++ b/common/pkgs/mq/scanner/client.go @@ -11,8 +11,8 @@ type Client struct { rabbitCli *mq.RabbitMQTransport } -func NewClient(cfg *stgmq.Config) (*Client, error) { - rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), stgmq.SCANNER_QUEUE_NAME, "") +func NewClient(cfg mq.Config) (*Client, error) { + rabbitCli, err := mq.NewRabbitMQTransport(cfg, stgmq.SCANNER_QUEUE_NAME, "") if err != nil { return nil, err } @@ -32,12 +32,12 @@ type Pool interface { } type pool struct { - mqcfg *stgmq.Config + mqcfg mq.Config shared *Client lock sync.Mutex } -func NewPool(mqcfg *stgmq.Config) Pool { +func NewPool(mqcfg mq.Config) Pool { return &pool{ mqcfg: mqcfg, } diff --git a/common/pkgs/mq/scanner/server.go b/common/pkgs/mq/scanner/server.go index 5b11565..2ca0c64 100644 --- a/common/pkgs/mq/scanner/server.go +++ b/common/pkgs/mq/scanner/server.go @@ -15,18 +15,17 @@ type Server struct { rabbitSvr mq.RabbitMQServer } -func NewServer(svc Service, cfg *mymq.Config) (*Server, error) { +func NewServer(svc Service, cfg mq.Config) (*Server, error) { srv := &Server{ service: svc, } rabbitSvr, err := mq.NewRabbitMQServer( - 
cfg.MakeConnectingURL(), + cfg, mymq.SCANNER_QUEUE_NAME, func(msg *mq.Message) (*mq.Message, error) { return msgDispatcher.Handle(srv.service, msg) }, - cfg.Param, ) if err != nil { return nil, err diff --git a/common/pkgs/storage/svcmgr/mgr.go b/common/pkgs/storage/agtpool/pool.go similarity index 51% rename from common/pkgs/storage/svcmgr/mgr.go rename to common/pkgs/storage/agtpool/pool.go index 83f627e..7b5bb1a 100644 --- a/common/pkgs/storage/svcmgr/mgr.go +++ b/common/pkgs/storage/agtpool/pool.go @@ -1,35 +1,33 @@ -package svcmgr +package agtpool import ( - "reflect" "sync" "gitlink.org.cn/cloudream/common/pkgs/async" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/common/utils/reflect2" stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" ) type storage struct { - Service types.StorageService + Agent types.StorageAgent } -type Manager struct { +type AgentPool struct { storages map[cdssdk.StorageID]*storage lock sync.Mutex eventChan *types.StorageEventChan } -func NewManager() *Manager { - return &Manager{ +func NewPool() *AgentPool { + return &AgentPool{ storages: make(map[cdssdk.StorageID]*storage), eventChan: async.NewUnboundChannel[types.StorageEvent](), } } -func (m *Manager) CreateService(detail stgmod.StorageDetail) error { +func (m *AgentPool) SetupAgent(detail stgmod.StorageDetail) error { m.lock.Lock() defer m.lock.Unlock() @@ -39,19 +37,20 @@ func (m *Manager) CreateService(detail stgmod.StorageDetail) error { stg := &storage{} - svc, err := factory.CreateService(detail) + bld := factory.GetBuilder(detail) + svc, err := bld.CreateAgent() if err != nil { return err } - stg.Service = svc + stg.Agent = svc m.storages[detail.Storage.StorageID] = stg svc.Start(m.eventChan) return nil } -func (m *Manager) GetInfo(stgID cdssdk.StorageID) (stgmod.StorageDetail, error) { +func (m *AgentPool) 
GetInfo(stgID cdssdk.StorageID) (stgmod.StorageDetail, error) { m.lock.Lock() defer m.lock.Unlock() @@ -60,21 +59,23 @@ func (m *Manager) GetInfo(stgID cdssdk.StorageID) (stgmod.StorageDetail, error) return stgmod.StorageDetail{}, types.ErrStorageNotFound } - return stg.Service.Info(), nil + return stg.Agent.Info(), nil } -// 查找指定Storage的ShardStore组件 -func (m *Manager) GetShardStore(stgID cdssdk.StorageID) (types.ShardStore, error) { - return GetComponent[types.ShardStore](m, stgID) -} +func (m *AgentPool) GetAgent(stgID cdssdk.StorageID) (types.StorageAgent, error) { + m.lock.Lock() + defer m.lock.Unlock() -// 查找指定Storage的SharedStore组件 -func (m *Manager) GetSharedStore(stgID cdssdk.StorageID) (types.SharedStore, error) { - return GetComponent[types.SharedStore](m, stgID) + stg := m.storages[stgID] + if stg == nil { + return nil, types.ErrStorageNotFound + } + + return stg.Agent, nil } -// 查找指定Storage的指定类型的组件,可以是ShardStore、SharedStore、或者其他自定义的组件 -func (m *Manager) GetComponent(stgID cdssdk.StorageID, typ reflect.Type) (any, error) { +// 查找指定Storage的ShardStore组件 +func (m *AgentPool) GetShardStore(stgID cdssdk.StorageID) (types.ShardStore, error) { m.lock.Lock() defer m.lock.Unlock() @@ -83,15 +84,18 @@ func (m *Manager) GetComponent(stgID cdssdk.StorageID, typ reflect.Type) (any, e return nil, types.ErrStorageNotFound } - return stg.Service.GetComponent(typ) + return stg.Agent.GetShardStore() } -func GetComponent[T any](mgr *Manager, stgID cdssdk.StorageID) (T, error) { - ret, err := mgr.GetComponent(stgID, reflect2.TypeOf[T]()) - if err != nil { - var def T - return def, err +// 查找指定Storage的SharedStore组件 +func (m *AgentPool) GetSharedStore(stgID cdssdk.StorageID) (types.SharedStore, error) { + m.lock.Lock() + defer m.lock.Unlock() + + stg := m.storages[stgID] + if stg == nil { + return nil, types.ErrStorageNotFound } - return ret.(T), nil + return stg.Agent.GetSharedStore() } diff --git a/common/pkgs/storage/cos/multiPartUploader.go 
b/common/pkgs/storage/cos/multiPartUploader.go deleted file mode 100644 index 4d161dc..0000000 --- a/common/pkgs/storage/cos/multiPartUploader.go +++ /dev/null @@ -1,80 +0,0 @@ -package cos - -import ( - "context" - "fmt" - "io" - "net/http" - "net/url" - - "github.com/tencentyun/cos-go-sdk-v5" - cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" -) - -type MultiPartUploader struct { - client *cos.Client -} - -func NewMultiPartUpload(address *cdssdk.COSType) *MultiPartUploader { - // cos的endpoint已包含bucket名,会自动将桶解析出来 - u, _ := url.Parse(address.Endpoint) - b := &cos.BaseURL{BucketURL: u} - client := cos.NewClient(b, &http.Client{ - Transport: &cos.AuthorizationTransport{ - SecretID: address.AK, - SecretKey: address.SK, - }, - }) - - return &MultiPartUploader{ - client: client, - } -} - -func (c *MultiPartUploader) Initiate(objectName string) (string, error) { - v, _, err := c.client.Object.InitiateMultipartUpload(context.Background(), objectName, nil) - if err != nil { - return "", fmt.Errorf("failed to initiate multipart upload: %w", err) - } - return v.UploadID, nil -} - -func (c *MultiPartUploader) UploadPart(uploadID string, key string, partSize int64, partNumber int, stream io.Reader) (*types.UploadedPartInfo, error) { - resp, err := c.client.Object.UploadPart( - context.Background(), key, uploadID, partNumber, stream, nil, - ) - if err != nil { - return nil, fmt.Errorf("failed to upload part: %w", err) - } - - result := &types.UploadedPartInfo{ - ETag: resp.Header.Get("ETag"), - PartNumber: partNumber, - } - return result, nil -} - -func (c *MultiPartUploader) Complete(uploadID string, key string, parts []*types.UploadedPartInfo) error { - opt := &cos.CompleteMultipartUploadOptions{} - for i := 0; i < len(parts); i++ { - opt.Parts = append(opt.Parts, cos.Object{ - PartNumber: parts[i].PartNumber, ETag: parts[i].ETag}, - ) - } - _, _, err := c.client.Object.CompleteMultipartUpload( - 
context.Background(), key, uploadID, opt, - ) - if err != nil { - return err - } - - return nil -} -func (c *MultiPartUploader) Abort() { - -} - -func (c *MultiPartUploader) Close() { - -} diff --git a/common/pkgs/storage/factory/factory.go b/common/pkgs/storage/factory/factory.go index 7b078de..ae46ceb 100644 --- a/common/pkgs/storage/factory/factory.go +++ b/common/pkgs/storage/factory/factory.go @@ -1,10 +1,8 @@ package factory import ( - "fmt" "reflect" - "gitlink.org.cn/cloudream/common/utils/reflect2" stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory/reg" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" @@ -14,35 +12,15 @@ import ( _ "gitlink.org.cn/cloudream/storage/common/pkgs/storage/s3" ) -func CreateService(detail stgmod.StorageDetail) (types.StorageService, error) { +// 此函数永远不会返回nil。如果找不到对应的Builder,则会返回EmptyBuilder, +// 此Builder的所有函数都会返回否定值或者封装后的ErrUnsupported错误(需要使用errors.Is检查) +func GetBuilder(detail stgmod.StorageDetail) types.StorageBuilder { typ := reflect.TypeOf(detail.Storage.Type) - bld, ok := reg.StorageBuilders[typ] - if !ok { - return nil, fmt.Errorf("unsupported storage type: %T", detail.Storage.Type) - } - - return bld.CreateService(detail) -} - -func CreateComponent[T any](detail stgmod.StorageDetail) (T, error) { - typ := reflect.TypeOf(detail.Storage.Type) - bld, ok := reg.StorageBuilders[typ] - if !ok { - var def T - return def, fmt.Errorf("unsupported storage type: %T", detail.Storage.Type) - } - - comp, err := bld.CreateComponent(detail, reflect2.TypeOf[T]()) - if err != nil { - var def T - return def, err - } - c, ok := comp.(T) + ctor, ok := reg.StorageBuilders[typ] if !ok { - var def T - return def, fmt.Errorf("invalid component type: %T", comp) + return &types.EmptyBuilder{} } - return c, nil + return ctor(detail) } diff --git a/common/pkgs/storage/factory/reg/reg.go b/common/pkgs/storage/factory/reg/reg.go index d0c3cca..4161080 100644 --- 
a/common/pkgs/storage/factory/reg/reg.go +++ b/common/pkgs/storage/factory/reg/reg.go @@ -5,15 +5,15 @@ import ( cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" "gitlink.org.cn/cloudream/common/utils/reflect2" + stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" ) -var StorageBuilders = make(map[reflect.Type]types.StorageBuilder) +type BuilderCtor func(detail stgmod.StorageDetail) types.StorageBuilder + +var StorageBuilders = make(map[reflect.Type]BuilderCtor) // 注册针对指定存储服务类型的Builder -func RegisterBuilder[T cdssdk.StorageType](createSvc types.StorageServiceBuilder, createComp types.StorageComponentBuilder) { - StorageBuilders[reflect2.TypeOf[T]()] = types.StorageBuilder{ - CreateService: createSvc, - CreateComponent: createComp, - } +func RegisterBuilder[T cdssdk.StorageType](ctor BuilderCtor) { + StorageBuilders[reflect2.TypeOf[T]()] = ctor } diff --git a/common/pkgs/storage/local/agent.go b/common/pkgs/storage/local/agent.go new file mode 100644 index 0000000..c2e5e52 --- /dev/null +++ b/common/pkgs/storage/local/agent.go @@ -0,0 +1,52 @@ +package local + +import ( + stgmod "gitlink.org.cn/cloudream/storage/common/models" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" +) + +type agent struct { + Detail stgmod.StorageDetail + ShardStore *ShardStore + SharedStore *SharedStore +} + +func (s *agent) Start(ch *types.StorageEventChan) { + if s.ShardStore != nil { + s.ShardStore.Start(ch) + } + + if s.SharedStore != nil { + s.SharedStore.Start(ch) + } +} + +func (s *agent) Stop() { + if s.ShardStore != nil { + s.ShardStore.Stop() + } + + if s.SharedStore != nil { + s.SharedStore.Stop() + } +} + +func (s *agent) Info() stgmod.StorageDetail { + return s.Detail +} + +func (a *agent) GetShardStore() (types.ShardStore, error) { + if a.ShardStore == nil { + return nil, types.ErrUnsupported + } + + return a.ShardStore, nil +} + +func (a *agent) GetSharedStore() (types.SharedStore, error) { 
+ if a.SharedStore == nil { + return nil, types.ErrUnsupported + } + + return a.SharedStore, nil +} diff --git a/common/pkgs/storage/local/local.go b/common/pkgs/storage/local/local.go index 061f288..ca49db8 100644 --- a/common/pkgs/storage/local/local.go +++ b/common/pkgs/storage/local/local.go @@ -2,11 +2,8 @@ package local import ( "fmt" - "path/filepath" - "reflect" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/common/utils/reflect2" stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory/reg" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" @@ -14,70 +11,79 @@ import ( ) func init() { - reg.RegisterBuilder[*cdssdk.LocalStorageType](createService, createComponent) + reg.RegisterBuilder[*cdssdk.LocalStorageType](func(detail stgmod.StorageDetail) types.StorageBuilder { + return &builder{ + detail: detail, + } + }) +} + +type builder struct { + detail stgmod.StorageDetail } -func createService(detail stgmod.StorageDetail) (types.StorageService, error) { - svc := &Service{ - Detail: detail, +func (b *builder) CreateAgent() (types.StorageAgent, error) { + agt := &agent{ + Detail: b.detail, } - if detail.Storage.ShardStore != nil { - local, ok := detail.Storage.ShardStore.(*cdssdk.LocalShardStorage) + if b.detail.Storage.ShardStore != nil { + local, ok := b.detail.Storage.ShardStore.(*cdssdk.LocalShardStorage) if !ok { - return nil, fmt.Errorf("invalid shard store type %T for local storage", detail.Storage.ShardStore) + return nil, fmt.Errorf("invalid shard store type %T for local storage", b.detail.Storage.ShardStore) } - store, err := NewShardStore(svc, *local) + store, err := NewShardStore(agt, *local) if err != nil { return nil, err } - svc.ShardStore = store + agt.ShardStore = store } - if detail.Storage.SharedStore != nil { - local, ok := detail.Storage.SharedStore.(*cdssdk.LocalSharedStorage) + if b.detail.Storage.SharedStore != nil { + local, ok := 
b.detail.Storage.SharedStore.(*cdssdk.LocalSharedStorage) if !ok { - return nil, fmt.Errorf("invalid shared store type %T for local storage", detail.Storage.SharedStore) + return nil, fmt.Errorf("invalid shared store type %T for local storage", b.detail.Storage.SharedStore) } - store, err := NewSharedStore(svc, *local) + store, err := NewSharedStore(agt, *local) if err != nil { return nil, err } - svc.SharedStore = store + agt.SharedStore = store } - return svc, nil + return agt, nil } -func createComponent(detail stgmod.StorageDetail, typ reflect.Type) (any, error) { - switch typ { - case reflect2.TypeOf[types.MultipartInitiator](): - feat := utils.FindFeature[*cdssdk.MultipartUploadFeature](detail) - if feat == nil { - return nil, fmt.Errorf("feature %T not found", cdssdk.MultipartUploadFeature{}) - } +func (b *builder) ShardStoreDesc() types.ShardStoreDesc { + return &ShardStoreDesc{builder: b} +} - absTempDir, err := filepath.Abs(feat.TempDir) - if err != nil { - return nil, fmt.Errorf("get abs temp dir %v: %v", feat.TempDir, err) - } +func (b *builder) SharedStoreDesc() types.SharedStoreDesc { + return &SharedStoreDesc{builder: b} +} - return &MultipartInitiator{ - absTempDir: absTempDir, - }, nil +func (b *builder) CreateMultiparter() (types.Multiparter, error) { + feat := utils.FindFeature[*cdssdk.MultipartUploadFeature](b.detail) + if feat == nil { + return nil, fmt.Errorf("feature %T not found", cdssdk.MultipartUploadFeature{}) + } - case reflect2.TypeOf[types.MultipartUploader](): - feat := utils.FindFeature[*cdssdk.MultipartUploadFeature](detail) - if feat == nil { - return nil, fmt.Errorf("feature %T not found", cdssdk.MultipartUploadFeature{}) - } + return &Multiparter{ + feat: feat, + }, nil +} - return &MultipartUploader{}, nil +func (b *builder) CreateS2STransfer() (types.S2STransfer, error) { + feat := utils.FindFeature[*cdssdk.S2STransferFeature](b.detail) + if feat == nil { + return nil, fmt.Errorf("feature %T not found", 
cdssdk.S2STransferFeature{}) } - return nil, fmt.Errorf("unsupported component type %v", typ) + return &S2STransfer{ + detail: b.detail, + }, nil } diff --git a/common/pkgs/storage/local/multipart_upload.go b/common/pkgs/storage/local/multipart_upload.go index 50eeae7..b5c4374 100644 --- a/common/pkgs/storage/local/multipart_upload.go +++ b/common/pkgs/storage/local/multipart_upload.go @@ -16,30 +16,76 @@ import ( "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" ) -type MultipartInitiator struct { - absTempDir string // 应该要是绝对路径 - tempFileName string - tempPartsDir string - joinedFilePath string +type Multiparter struct { + feat *cdssdk.MultipartUploadFeature } -func (i *MultipartInitiator) Initiate(ctx context.Context) (types.MultipartInitState, error) { - i.tempFileName = os2.GenerateRandomFileName(10) - i.tempPartsDir = filepath.Join(i.absTempDir, i.tempFileName) - i.joinedFilePath = filepath.Join(i.absTempDir, i.tempFileName+".joined") +func (m *Multiparter) MinPartSize() int64 { + return m.feat.MinPartSize +} - err := os.MkdirAll(i.tempPartsDir, 0777) +func (m *Multiparter) MaxPartSize() int64 { + return m.feat.MaxPartSize +} +func (m *Multiparter) Initiate(ctx context.Context) (types.MultipartTask, error) { + absTempDir, err := filepath.Abs(m.feat.TempDir) if err != nil { - return types.MultipartInitState{}, err + return nil, fmt.Errorf("get abs temp dir %v: %v", m.feat.TempDir, err) } - return types.MultipartInitState{ - UploadID: i.tempPartsDir, + tempFileName := os2.GenerateRandomFileName(10) + tempPartsDir := filepath.Join(absTempDir, tempFileName) + joinedFilePath := filepath.Join(absTempDir, tempFileName+".joined") + + err = os.MkdirAll(tempPartsDir, 0777) + + if err != nil { + return nil, err + } + + return &MultipartTask{ + absTempDir: absTempDir, + tempFileName: tempFileName, + tempPartsDir: tempPartsDir, + joinedFilePath: joinedFilePath, + uploadID: tempPartsDir, + }, nil +} + +func (m *Multiparter) UploadPart(ctx context.Context, init 
types.MultipartInitState, partSize int64, partNumber int, stream io.Reader) (types.UploadedPartInfo, error) { + partFilePath := filepath.Join(init.UploadID, fmt.Sprintf("%v", partNumber)) + partFile, err := os.Create(partFilePath) + if err != nil { + return types.UploadedPartInfo{}, err + } + defer partFile.Close() + + _, err = io.Copy(partFile, stream) + if err != nil { + return types.UploadedPartInfo{}, err + } + return types.UploadedPartInfo{ + ETag: partFilePath, + PartNumber: partNumber, }, nil } -func (i *MultipartInitiator) JoinParts(ctx context.Context, parts []types.UploadedPartInfo) (types.BypassFileInfo, error) { +type MultipartTask struct { + absTempDir string // 应该要是绝对路径 + tempFileName string + tempPartsDir string + joinedFilePath string + uploadID string +} + +func (i *MultipartTask) InitState() types.MultipartInitState { + return types.MultipartInitState{ + UploadID: i.uploadID, + } +} + +func (i *MultipartTask) JoinParts(ctx context.Context, parts []types.UploadedPartInfo) (types.BypassFileInfo, error) { parts = sort2.Sort(parts, func(l, r types.UploadedPartInfo) int { return l.PartNumber - r.PartNumber }) @@ -70,7 +116,7 @@ func (i *MultipartInitiator) JoinParts(ctx context.Context, parts []types.Upload }, nil } -func (i *MultipartInitiator) writePart(partInfo types.UploadedPartInfo, joined *os.File, hasher hash.Hash) (int64, error) { +func (i *MultipartTask) writePart(partInfo types.UploadedPartInfo, joined *os.File, hasher hash.Hash) (int64, error) { part, err := os.Open(partInfo.ETag) if err != nil { return 0, err @@ -100,35 +146,11 @@ func (i *MultipartInitiator) writePart(partInfo types.UploadedPartInfo, joined * return size, nil } -func (i *MultipartInitiator) Complete() { +func (i *MultipartTask) Complete() { i.Abort() } -func (i *MultipartInitiator) Abort() { +func (i *MultipartTask) Abort() { os.Remove(i.joinedFilePath) os.RemoveAll(i.tempPartsDir) } - -type MultipartUploader struct{} - -func (u *MultipartUploader) UploadPart(ctx 
context.Context, init types.MultipartInitState, partSize int64, partNumber int, stream io.Reader) (types.UploadedPartInfo, error) { - partFilePath := filepath.Join(init.UploadID, fmt.Sprintf("%v", partNumber)) - partFile, err := os.Create(partFilePath) - if err != nil { - return types.UploadedPartInfo{}, err - } - defer partFile.Close() - - _, err = io.Copy(partFile, stream) - if err != nil { - return types.UploadedPartInfo{}, err - } - return types.UploadedPartInfo{ - ETag: partFilePath, - PartNumber: partNumber, - }, nil -} - -func (u *MultipartUploader) Close() { - -} diff --git a/common/pkgs/storage/local/s2s.go b/common/pkgs/storage/local/s2s.go new file mode 100644 index 0000000..c7b8126 --- /dev/null +++ b/common/pkgs/storage/local/s2s.go @@ -0,0 +1,73 @@ +package local + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + + cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/os2" + stgmod "gitlink.org.cn/cloudream/storage/common/models" +) + +type S2STransfer struct { + feat cdssdk.S2STransferFeature + detail stgmod.StorageDetail + dstPath string +} + +// 只有同一个机器的存储之间才可以进行数据直传 +func (s *S2STransfer) CanTransfer(src stgmod.StorageDetail) bool { + _, ok := src.Storage.Type.(*cdssdk.LocalStorageType) + if !ok { + return false + } + + if src.Storage.MasterHub != s.detail.Storage.MasterHub { + return false + } + + return true +} + +// 执行数据直传 +func (s *S2STransfer) Transfer(ctx context.Context, src stgmod.StorageDetail, srcPath string) (string, error) { + absTempDir, err := filepath.Abs(s.feat.TempDir) + if err != nil { + return "", fmt.Errorf("get abs temp dir %v: %v", s.feat.TempDir, err) + } + + tempFileName := os2.GenerateRandomFileName(10) + s.dstPath = filepath.Join(absTempDir, tempFileName) + + copy, err := os.OpenFile(s.dstPath, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return "", err + } + defer copy.Close() + + srcFile, err := os.Open(srcPath) + if err != nil { + return "", err + } + defer 
srcFile.Close() + + _, err = io.Copy(copy, srcFile) + if err != nil { + return "", err + } + + return s.dstPath, nil +} + +func (s *S2STransfer) Complete() { + +} + +func (s *S2STransfer) Abort() { + if s.dstPath != "" { + os.Remove(s.dstPath) + } +} diff --git a/common/pkgs/storage/local/service.go b/common/pkgs/storage/local/service.go deleted file mode 100644 index 55f44c6..0000000 --- a/common/pkgs/storage/local/service.go +++ /dev/null @@ -1,58 +0,0 @@ -package local - -import ( - "reflect" - - "gitlink.org.cn/cloudream/common/utils/reflect2" - stgmod "gitlink.org.cn/cloudream/storage/common/models" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" -) - -type Service struct { - Detail stgmod.StorageDetail - ShardStore *ShardStore - SharedStore *SharedStore -} - -func (s *Service) Info() stgmod.StorageDetail { - return s.Detail -} - -func (s *Service) GetComponent(typ reflect.Type) (any, error) { - switch typ { - case reflect2.TypeOf[types.ShardStore](): - if s.ShardStore == nil { - return nil, types.ErrComponentNotFound - } - return s.ShardStore, nil - - case reflect2.TypeOf[types.SharedStore](): - if s.SharedStore == nil { - return nil, types.ErrComponentNotFound - } - return s.SharedStore, nil - - default: - return nil, types.ErrComponentNotFound - } -} - -func (s *Service) Start(ch *types.StorageEventChan) { - if s.ShardStore != nil { - s.ShardStore.Start(ch) - } - - if s.SharedStore != nil { - s.SharedStore.Start(ch) - } -} - -func (s *Service) Stop() { - if s.ShardStore != nil { - s.ShardStore.Stop() - } - - if s.SharedStore != nil { - s.SharedStore.Stop() - } -} diff --git a/common/pkgs/storage/local/shard_store.go b/common/pkgs/storage/local/shard_store.go index 6afa855..278cd2b 100644 --- a/common/pkgs/storage/local/shard_store.go +++ b/common/pkgs/storage/local/shard_store.go @@ -22,8 +22,24 @@ const ( BlocksDir = "blocks" ) +type ShardStoreDesc struct { + builder *builder +} + +func (s *ShardStoreDesc) Enabled() bool { + return 
s.builder.detail.Storage.ShardStore != nil +} + +func (s *ShardStoreDesc) HasBypassWrite() bool { + return true +} + +func (s *ShardStoreDesc) HasBypassRead() bool { + return true +} + type ShardStore struct { - svc *Service + agt *agent cfg cdssdk.LocalShardStorage absRoot string lock sync.Mutex @@ -31,14 +47,14 @@ type ShardStore struct { done chan any } -func NewShardStore(svc *Service, cfg cdssdk.LocalShardStorage) (*ShardStore, error) { +func NewShardStore(svc *agent, cfg cdssdk.LocalShardStorage) (*ShardStore, error) { absRoot, err := filepath.Abs(cfg.Root) if err != nil { return nil, fmt.Errorf("get abs root: %w", err) } return &ShardStore{ - svc: svc, + agt: svc, cfg: cfg, absRoot: absRoot, workingTempFiles: make(map[string]bool), @@ -378,6 +394,20 @@ func (s *ShardStore) Stats() types.Stats { } } +func (s *ShardStore) getLogger() logger.Logger { + return logger.WithField("ShardStore", "Local").WithField("Storage", s.agt.Detail.Storage.String()) +} + +func (s *ShardStore) getFileDirFromHash(hash cdssdk.FileHash) string { + return filepath.Join(s.absRoot, BlocksDir, hash.GetHashPrefix(2)) +} + +func (s *ShardStore) getFilePathFromHash(hash cdssdk.FileHash) string { + return filepath.Join(s.absRoot, BlocksDir, hash.GetHashPrefix(2), string(hash)) +} + +var _ types.BypassWrite = (*ShardStore)(nil) + func (s *ShardStore) BypassUploaded(info types.BypassFileInfo) error { s.lock.Lock() defer s.lock.Unlock() @@ -410,14 +440,24 @@ func (s *ShardStore) BypassUploaded(info types.BypassFileInfo) error { return nil } -func (s *ShardStore) getLogger() logger.Logger { - return logger.WithField("ShardStore", "Local").WithField("Storage", s.svc.Detail.Storage.String()) -} +var _ types.BypassRead = (*ShardStore)(nil) -func (s *ShardStore) getFileDirFromHash(hash cdssdk.FileHash) string { - return filepath.Join(s.absRoot, BlocksDir, hash.GetHashPrefix(2)) -} +func (s *ShardStore) BypassRead(fileHash cdssdk.FileHash) (types.BypassFilePath, error) { + s.lock.Lock() + defer 
s.lock.Unlock() -func (s *ShardStore) getFilePathFromHash(hash cdssdk.FileHash) string { - return filepath.Join(s.absRoot, BlocksDir, hash.GetHashPrefix(2), string(hash)) + filePath := s.getFilePathFromHash(fileHash) + stat, err := os.Stat(filePath) + if err != nil { + return types.BypassFilePath{}, err + } + + return types.BypassFilePath{ + Path: filePath, + Info: types.FileInfo{ + Hash: fileHash, + Size: stat.Size(), + Description: filePath, + }, + }, nil } diff --git a/common/pkgs/storage/local/shared_store.go b/common/pkgs/storage/local/shared_store.go index 4bd4215..bfd9730 100644 --- a/common/pkgs/storage/local/shared_store.go +++ b/common/pkgs/storage/local/shared_store.go @@ -1,30 +1,35 @@ package local import ( - "fmt" "io" - "io/fs" "os" "path/filepath" - "strconv" - "github.com/samber/lo" "gitlink.org.cn/cloudream/common/pkgs/logger" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/utils" ) +type SharedStoreDesc struct { + builder *builder +} + +func (d *SharedStoreDesc) Enabled() bool { + return d.builder.detail.Storage.SharedStore != nil +} + +func (d *SharedStoreDesc) HasBypassWrite() bool { + return false +} + type SharedStore struct { - svc *Service + agt *agent cfg cdssdk.LocalSharedStorage - // lock sync.Mutex } -func NewSharedStore(svc *Service, cfg cdssdk.LocalSharedStorage) (*SharedStore, error) { +func NewSharedStore(agt *agent, cfg cdssdk.LocalSharedStorage) (*SharedStore, error) { return &SharedStore{ - svc: svc, + agt: agt, cfg: cfg, }, nil } @@ -37,185 +42,27 @@ func (s *SharedStore) Stop() { s.getLogger().Infof("component stop") } -func (s *SharedStore) WritePackageObject(userID cdssdk.UserID, pkgID cdssdk.PackageID, path string, stream io.Reader) (string, error) { - relaPath := filepath.Join(utils.MakeLoadedPackagePath(userID, pkgID), path) - fullPath := 
filepath.Join(s.cfg.LoadBase, relaPath) +func (s *SharedStore) Write(objPath string, stream io.Reader) error { + fullPath := filepath.Join(s.cfg.LoadBase, objPath) err := os.MkdirAll(filepath.Dir(fullPath), 0755) if err != nil { - return "", err + return err } f, err := os.Create(fullPath) if err != nil { - return "", err + return err } defer f.Close() _, err = io.Copy(f, stream) if err != nil { - return "", err - } - - return filepath.ToSlash(relaPath), nil -} - -func (s *SharedStore) ListLoadedPackages() ([]stgmod.LoadedPackageID, error) { - entries, err := os.ReadDir(s.cfg.LoadBase) - if os.IsNotExist(err) { - return nil, nil - } - if err != nil { - s.getLogger().Warnf("list package directory: %v", err) - return nil, err - } - - var loadeds []stgmod.LoadedPackageID - for _, e := range entries { - if !e.IsDir() { - continue - } - - uid, err := strconv.ParseInt(e.Name(), 10, 64) - if err != nil { - continue - } - - userID := cdssdk.UserID(uid) - pkgs, err := s.listUserPackages(userID, fmt.Sprintf("%v", userID)) - if err != nil { - continue - } - - loadeds = append(loadeds, pkgs...) 
- } - - return loadeds, nil -} - -func (s *SharedStore) listUserPackages(userID cdssdk.UserID, userIDStr string) ([]stgmod.LoadedPackageID, error) { - userDir := filepath.Join(s.cfg.LoadBase, userIDStr) - entries, err := os.ReadDir(userDir) - if os.IsNotExist(err) { - return nil, nil - } - if err != nil { - s.getLogger().Warnf("list package directory: %v", err) - return nil, err - } - - var pkgs []stgmod.LoadedPackageID - for _, e := range entries { - if !e.IsDir() { - continue - } - - pkgID, err := strconv.ParseInt(e.Name(), 10, 64) - if err != nil { - continue - } - - pkgs = append(pkgs, stgmod.LoadedPackageID{ - UserID: userID, - PackageID: cdssdk.PackageID(pkgID), - }) - } - - return pkgs, nil -} - -func (s *SharedStore) PackageGC(avaiables []stgmod.LoadedPackageID) error { - log := s.getLogger() - - entries, err := os.ReadDir(s.cfg.LoadBase) - if err != nil { - log.Warnf("list storage directory: %s", err.Error()) return err } - // userID->pkgID->pkg - userPkgs := make(map[string]map[string]bool) - for _, pkg := range avaiables { - userIDStr := fmt.Sprintf("%v", pkg.UserID) - - pkgs, ok := userPkgs[userIDStr] - if !ok { - pkgs = make(map[string]bool) - userPkgs[userIDStr] = pkgs - } - - pkgIDStr := fmt.Sprintf("%v", pkg.PackageID) - pkgs[pkgIDStr] = true - } - - userDirs := lo.Filter(entries, func(info fs.DirEntry, index int) bool { return info.IsDir() }) - for _, dir := range userDirs { - pkgMap, ok := userPkgs[dir.Name()] - // 第一级目录名是UserID,先删除UserID在StoragePackage表里没出现过的文件夹 - if !ok { - rmPath := filepath.Join(s.cfg.LoadBase, dir.Name()) - err := os.RemoveAll(rmPath) - if err != nil { - log.Warnf("removing user dir %s: %s", rmPath, err.Error()) - } else { - log.Debugf("user dir %s removed by gc", rmPath) - } - continue - } - - pkgDir := filepath.Join(s.cfg.LoadBase, dir.Name()) - // 遍历每个UserID目录的packages目录里的内容 - pkgs, err := os.ReadDir(pkgDir) - if err != nil { - log.Warnf("reading package dir %s: %s", pkgDir, err.Error()) - continue - } - - for _, pkg := 
range pkgs { - if !pkgMap[pkg.Name()] { - rmPath := filepath.Join(pkgDir, pkg.Name()) - err := os.RemoveAll(rmPath) - if err != nil { - log.Warnf("removing package dir %s: %s", rmPath, err.Error()) - } else { - log.Debugf("package dir %s removed by gc", rmPath) - } - } - } - } - return nil } func (s *SharedStore) getLogger() logger.Logger { - return logger.WithField("SharedStore", "Local").WithField("Storage", s.svc.Detail.Storage.String()) -} - -type PackageWriter struct { - pkgRoot string - fullDirPath string -} - -func (w *PackageWriter) Root() string { - return w.pkgRoot -} - -func (w *PackageWriter) Write(path string, stream io.Reader) (string, error) { - fullFilePath := filepath.Join(w.fullDirPath, path) - err := os.MkdirAll(filepath.Dir(fullFilePath), 0755) - if err != nil { - return "", err - } - - f, err := os.Create(fullFilePath) - if err != nil { - return "", err - } - defer f.Close() - - _, err = io.Copy(f, stream) - if err != nil { - return "", err - } - - return filepath.ToSlash(filepath.Join(w.pkgRoot, path)), nil + return logger.WithField("SharedStore", "Local").WithField("Storage", s.agt.Detail.Storage.String()) } diff --git a/common/pkgs/storage/obs/faas.go b/common/pkgs/storage/obs/faas.go deleted file mode 100644 index 6e7889f..0000000 --- a/common/pkgs/storage/obs/faas.go +++ /dev/null @@ -1 +0,0 @@ -package obs diff --git a/common/pkgs/storage/obs/multiPartUploader.go b/common/pkgs/storage/obs/multiPartUploader.go deleted file mode 100644 index 9ce8a2e..0000000 --- a/common/pkgs/storage/obs/multiPartUploader.go +++ /dev/null @@ -1,90 +0,0 @@ -package obs - -import ( - "fmt" - "io" - - "github.com/huaweicloud/huaweicloud-sdk-go-obs/obs" - log "gitlink.org.cn/cloudream/common/pkgs/logger" - cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" -) - -type MultiPartUploader struct { - client *obs.ObsClient - bucket string -} - -func NewMultiPartUpload(address *cdssdk.OBSType) 
*MultiPartUploader { - client, err := obs.New(address.AK, address.SK, address.Endpoint) - if err != nil { - log.Fatalf("Error: %v", err) - } - - return &MultiPartUploader{ - client: client, - bucket: address.Bucket, - } -} - -func (c *MultiPartUploader) Initiate(objectName string) (string, error) { - input := &obs.InitiateMultipartUploadInput{} - input.Bucket = c.bucket - input.Key = objectName - imur, err := c.client.InitiateMultipartUpload(input) - if err != nil { - return "", fmt.Errorf("failed to initiate multipart upload: %w", err) - } - return imur.UploadId, nil -} - -func (c *MultiPartUploader) UploadPart(uploadID string, key string, partSize int64, partNumber int, stream io.Reader) (*types.UploadedPartInfo, error) { - uploadParam := &obs.UploadPartInput{ - Bucket: c.bucket, - Key: key, - UploadId: uploadID, - PartSize: partSize, - PartNumber: partNumber, - Body: stream, - } - - part, err := c.client.UploadPart(uploadParam) - if err != nil { - return nil, fmt.Errorf("failed to upload part: %w", err) - } - result := &types.UploadedPartInfo{ - ETag: part.ETag, - PartNumber: partNumber, - } - return result, nil -} - -func (c *MultiPartUploader) Complete(uploadID string, Key string, parts []*types.UploadedPartInfo) error { - var uploadPart []obs.Part - for i := 0; i < len(parts); i++ { - uploadPart = append(uploadPart, obs.Part{ - PartNumber: parts[i].PartNumber, - ETag: parts[i].ETag, - }) - } - - notifyParam := &obs.CompleteMultipartUploadInput{ - Bucket: c.bucket, - Key: Key, - UploadId: uploadID, - Parts: uploadPart, - } - - _, err := c.client.CompleteMultipartUpload(notifyParam) - if err != nil { - return err - } - return nil -} -func (c *MultiPartUploader) Abort() { - -} - -func (c *MultiPartUploader) Close() { - -} diff --git a/common/pkgs/storage/obs/obs.go b/common/pkgs/storage/obs/obs.go deleted file mode 100644 index 6e7889f..0000000 --- a/common/pkgs/storage/obs/obs.go +++ /dev/null @@ -1 +0,0 @@ -package obs diff --git 
a/common/pkgs/storage/oss/multiPartUploader.go b/common/pkgs/storage/oss/multiPartUploader.go deleted file mode 100644 index 7ce5771..0000000 --- a/common/pkgs/storage/oss/multiPartUploader.go +++ /dev/null @@ -1,88 +0,0 @@ -package oss - -import ( - "fmt" - "io" - "log" - - "github.com/aliyun/aliyun-oss-go-sdk/oss" - cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" -) - -type MultiPartUploader struct { - client *oss.Client - bucket *oss.Bucket -} - -func NewMultiPartUpload(address *cdssdk.OSSType) *MultiPartUploader { - // 创建OSSClient实例。 - client, err := oss.New(address.Endpoint, address.AK, address.SK) - if err != nil { - log.Fatalf("Error: %v", err) - } - - bucket, err := client.Bucket(address.Bucket) - if err != nil { - log.Fatalf("Error: %v", err) - } - - return &MultiPartUploader{ - client: client, - bucket: bucket, - } -} - -func (c *MultiPartUploader) Initiate(objectName string) (string, error) { - imur, err := c.bucket.InitiateMultipartUpload(objectName) - if err != nil { - return "", fmt.Errorf("failed to initiate multipart upload: %w", err) - } - return imur.UploadID, nil -} - -func (c *MultiPartUploader) UploadPart(uploadID string, key string, partSize int64, partNumber int, stream io.Reader) (*types.UploadedPartInfo, error) { - uploadParam := oss.InitiateMultipartUploadResult{ - UploadID: uploadID, - Key: key, - Bucket: c.bucket.BucketName, - } - part, err := c.bucket.UploadPart(uploadParam, stream, partSize, partNumber) - if err != nil { - return nil, fmt.Errorf("failed to upload part: %w", err) - } - result := &types.UploadedPartInfo{ - ETag: part.ETag, - PartNumber: partNumber, - } - return result, nil -} - -func (c *MultiPartUploader) Complete(uploadID string, Key string, parts []*types.UploadedPartInfo) error { - notifyParam := oss.InitiateMultipartUploadResult{ - UploadID: uploadID, - Key: Key, - Bucket: c.bucket.BucketName, - } - var uploadPart []oss.UploadPart - for i := 0; i < 
len(parts); i++ { - uploadPart = append(uploadPart, oss.UploadPart{ - PartNumber: parts[i].PartNumber, - ETag: parts[i].ETag, - }) - } - _, err := c.bucket.CompleteMultipartUpload(notifyParam, uploadPart) - if err != nil { - return err - } - return nil -} - -func (c *MultiPartUploader) Abort() { - -} - -func (c *MultiPartUploader) Close() { - // 关闭client - -} diff --git a/common/pkgs/storage/s3/agent.go b/common/pkgs/storage/s3/agent.go new file mode 100644 index 0000000..f33d6c0 --- /dev/null +++ b/common/pkgs/storage/s3/agent.go @@ -0,0 +1,39 @@ +package s3 + +import ( + stgmod "gitlink.org.cn/cloudream/storage/common/models" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" +) + +type Agent struct { + Detail stgmod.StorageDetail + ShardStore *ShardStore +} + +func (s *Agent) Start(ch *types.StorageEventChan) { + if s.ShardStore != nil { + s.ShardStore.Start(ch) + } +} + +func (a *Agent) Stop() { + if a.ShardStore != nil { + a.ShardStore.Stop() + } +} + +func (a *Agent) Info() stgmod.StorageDetail { + return a.Detail +} + +func (a *Agent) GetShardStore() (types.ShardStore, error) { + if a.ShardStore == nil { + return nil, types.ErrUnsupported + } + + return a.ShardStore, nil +} + +func (a *Agent) GetSharedStore() (types.SharedStore, error) { + return nil, types.ErrUnsupported +} diff --git a/common/pkgs/storage/s3/multipart_upload.go b/common/pkgs/storage/s3/multipart_upload.go index e6e371d..b80ac1e 100644 --- a/common/pkgs/storage/s3/multipart_upload.go +++ b/common/pkgs/storage/s3/multipart_upload.go @@ -13,41 +13,94 @@ import ( "gitlink.org.cn/cloudream/common/utils/io2" "gitlink.org.cn/cloudream/common/utils/os2" "gitlink.org.cn/cloudream/common/utils/sort2" + stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" ) -type MultipartInitiator struct { - cli *s3.Client - bucket string - tempDir string - tempFileName string - tempFilePath string - uploadID string +type Multiparter struct 
{ + detail stgmod.StorageDetail + feat *cdssdk.MultipartUploadFeature +} + +func (m *Multiparter) MinPartSize() int64 { + return m.feat.MinPartSize +} + +func (m *Multiparter) MaxPartSize() int64 { + return m.feat.MaxPartSize } -func (i *MultipartInitiator) Initiate(ctx context.Context) (types.MultipartInitState, error) { - i.tempFileName = os2.GenerateRandomFileName(10) - i.tempFilePath = filepath.Join(i.tempDir, i.tempFileName) +func (m *Multiparter) Initiate(ctx context.Context) (types.MultipartTask, error) { + tempFileName := os2.GenerateRandomFileName(10) + tempFilePath := filepath.Join(m.feat.TempDir, tempFileName) - resp, err := i.cli.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{ - Bucket: aws.String(i.bucket), - Key: aws.String(i.tempFilePath), + cli, bkt, err := createS3Client(m.detail.Storage.Type) + if err != nil { + return nil, err + } + + resp, err := cli.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{ + Bucket: aws.String(bkt), + Key: aws.String(tempFilePath), ChecksumAlgorithm: s3types.ChecksumAlgorithmSha256, }) if err != nil { - return types.MultipartInitState{}, err + return nil, err } - i.uploadID = *resp.UploadId + return &MultipartTask{ + cli: cli, + bucket: bkt, + tempDir: m.feat.TempDir, + tempFileName: tempFileName, + tempFilePath: tempFilePath, + uploadID: *resp.UploadId, + }, nil +} + +func (m *Multiparter) UploadPart(ctx context.Context, init types.MultipartInitState, partSize int64, partNumber int, stream io.Reader) (types.UploadedPartInfo, error) { + cli, _, err := createS3Client(m.detail.Storage.Type) + if err != nil { + return types.UploadedPartInfo{}, err + } + + hashStr := io2.NewReadHasher(sha256.New(), stream) + resp, err := cli.UploadPart(ctx, &s3.UploadPartInput{ + Bucket: aws.String(init.Bucket), + Key: aws.String(init.Key), + UploadId: aws.String(init.UploadID), + PartNumber: aws.Int32(int32(partNumber)), + Body: hashStr, + }) + if err != nil { + return types.UploadedPartInfo{}, err + } + + return 
types.UploadedPartInfo{ + ETag: *resp.ETag, + PartNumber: partNumber, + PartHash: hashStr.Sum(), + }, nil +} + +type MultipartTask struct { + cli *s3.Client + bucket string + tempDir string + tempFileName string + tempFilePath string + uploadID string +} +func (i *MultipartTask) InitState() types.MultipartInitState { return types.MultipartInitState{ - UploadID: *resp.UploadId, + UploadID: i.uploadID, Bucket: i.bucket, Key: i.tempFilePath, - }, nil + } } -func (i *MultipartInitiator) JoinParts(ctx context.Context, parts []types.UploadedPartInfo) (types.BypassFileInfo, error) { +func (i *MultipartTask) JoinParts(ctx context.Context, parts []types.UploadedPartInfo) (types.BypassFileInfo, error) { parts = sort2.Sort(parts, func(l, r types.UploadedPartInfo) int { return l.PartNumber - r.PartNumber }) @@ -94,11 +147,11 @@ func (i *MultipartInitiator) JoinParts(ctx context.Context, parts []types.Upload } -func (i *MultipartInitiator) Complete() { +func (i *MultipartTask) Complete() { } -func (i *MultipartInitiator) Abort() { +func (i *MultipartTask) Abort() { // TODO2 根据注释描述,Abort不能停止正在上传的分片,需要等待其上传完成才能彻底删除, // 考虑增加定时任务去定时清理 i.cli.AbortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{ @@ -111,32 +164,3 @@ func (i *MultipartInitiator) Abort() { Key: aws.String(i.tempFilePath), }) } - -type MultipartUploader struct { - cli *s3.Client - bucket string -} - -func (u *MultipartUploader) UploadPart(ctx context.Context, init types.MultipartInitState, partSize int64, partNumber int, stream io.Reader) (types.UploadedPartInfo, error) { - hashStr := io2.NewReadHasher(sha256.New(), stream) - resp, err := u.cli.UploadPart(ctx, &s3.UploadPartInput{ - Bucket: aws.String(init.Bucket), - Key: aws.String(init.Key), - UploadId: aws.String(init.UploadID), - PartNumber: aws.Int32(int32(partNumber)), - Body: hashStr, - }) - if err != nil { - return types.UploadedPartInfo{}, err - } - - return types.UploadedPartInfo{ - ETag: *resp.ETag, - PartNumber: partNumber, - PartHash: 
hashStr.Sum(), - }, nil -} - -func (u *MultipartUploader) Close() { - -} diff --git a/common/pkgs/storage/s3/obs/client.go b/common/pkgs/storage/s3/obs/client.go new file mode 100644 index 0000000..e2f73c0 --- /dev/null +++ b/common/pkgs/storage/s3/obs/client.go @@ -0,0 +1,27 @@ +package obs + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" + cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" +) + +func CreateS2Client(addr *cdssdk.OBSType) (*s3.Client, string, error) { + awsConfig := aws.Config{} + + cre := aws.Credentials{ + AccessKeyID: addr.AK, + SecretAccessKey: addr.SK, + } + awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: cre} + awsConfig.Region = addr.Region + + options := []func(*s3.Options){} + options = append(options, func(s3Opt *s3.Options) { + s3Opt.BaseEndpoint = &addr.Endpoint + }) + + cli := s3.NewFromConfig(awsConfig, options...) + return cli, addr.Bucket, nil +} diff --git a/common/pkgs/storage/s3/obs/obs_test.go b/common/pkgs/storage/s3/obs/obs_test.go new file mode 100644 index 0000000..d874473 --- /dev/null +++ b/common/pkgs/storage/s3/obs/obs_test.go @@ -0,0 +1,45 @@ +package obs + +import ( + "context" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" + cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + stgmod "gitlink.org.cn/cloudream/storage/common/models" +) + +func Test_S2S(t *testing.T) { + Convey("OBS", t, func() { + s2s := S2STransfer{ + dstStg: &cdssdk.OBSType{ + Region: "cn-north-4", + Endpoint: "obs.cn-north-4.myhuaweicloud.com", + AK: "", + SK: "", + Bucket: "pcm3-bucket3", + ProjectID: "", + }, + feat: &cdssdk.S2STransferFeature{ + TempDir: "s2s", + }, + } + + newPath, err := s2s.Transfer(context.TODO(), stgmod.StorageDetail{ + Storage: cdssdk.Storage{ + Type: &cdssdk.OBSType{ + Region: "cn-north-4", + Endpoint: "obs.cn-north-4.myhuaweicloud.com", + AK: "", + SK: "", + Bucket: "pcm2-bucket2", + ProjectID: "", + }, + }, + }, "test_data/test03.txt") + defer s2s.Abort() + + So(err, ShouldEqual, nil) + t.Logf("newPath: %s", newPath) + }) +} diff --git a/common/pkgs/storage/s3/obs/s2s.go b/common/pkgs/storage/s3/obs/s2s.go new file mode 100644 index 0000000..5c28b84 --- /dev/null +++ b/common/pkgs/storage/s3/obs/s2s.go @@ -0,0 +1,174 @@ +package obs + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/huaweicloud/huaweicloud-sdk-go-v3/core/auth/basic" + oms "github.com/huaweicloud/huaweicloud-sdk-go-v3/services/oms/v2" + "github.com/huaweicloud/huaweicloud-sdk-go-v3/services/oms/v2/model" + omsregion "github.com/huaweicloud/huaweicloud-sdk-go-v3/services/oms/v2/region" + cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/os2" + stgmod "gitlink.org.cn/cloudream/storage/common/models" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/s3/utils" +) + +type S2STransfer struct { + dstStg *cdssdk.OBSType + feat *cdssdk.S2STransferFeature + taskID *int64 + omsCli *oms.OmsClient +} + +func NewS2STransfer(dstStg *cdssdk.OBSType, feat *cdssdk.S2STransferFeature) *S2STransfer { + return &S2STransfer{ + dstStg: dstStg, + feat: feat, + } +} + +// 判断是否能从指定的源存储中直传到当前存储的目的路径 +func (s *S2STransfer) 
CanTransfer(src stgmod.StorageDetail) bool { + req := s.makeRequest(src.Storage.Type, "") + return req != nil +} + +// 执行数据直传。返回传输后的文件路径 +func (s *S2STransfer) Transfer(ctx context.Context, src stgmod.StorageDetail, srcPath string) (string, error) { + req := s.makeRequest(src.Storage.Type, srcPath) + if req == nil { + return "", fmt.Errorf("unsupported source storage type: %T", src.Storage.Type) + } + + auth, err := basic.NewCredentialsBuilder(). + WithAk(s.dstStg.AK). + WithSk(s.dstStg.SK). + WithProjectId(s.dstStg.ProjectID). + SafeBuild() + if err != nil { + return "", err + } + + region, err := omsregion.SafeValueOf(s.dstStg.Region) + if err != nil { + return "", err + } + + cli, err := oms.OmsClientBuilder(). + WithRegion(region). + WithCredential(auth). + SafeBuild() + if err != nil { + return "", err + } + + tempPrefix := utils.JoinKey(s.feat.TempDir, os2.GenerateRandomFileName(10)) + "/" + + taskType := model.GetCreateTaskReqTaskTypeEnum().OBJECT + s.omsCli = oms.NewOmsClient(cli) + resp, err := s.omsCli.CreateTask(&model.CreateTaskRequest{ + Body: &model.CreateTaskReq{ + TaskType: &taskType, + SrcNode: req, + DstNode: &model.DstNodeReq{ + Region: s.dstStg.Region, + Ak: s.dstStg.AK, + Sk: s.dstStg.SK, + Bucket: s.dstStg.Bucket, + SavePrefix: &tempPrefix, + }, + }, + }) + if err != nil { + return "", fmt.Errorf("create task: %w", err) + } + + s.taskID = resp.Id + + err = s.waitTask(ctx, *resp.Id) + if err != nil { + return "", fmt.Errorf("wait task: %w", err) + } + + return utils.JoinKey(tempPrefix, srcPath), nil +} + +func (s *S2STransfer) makeRequest(srcStg cdssdk.StorageType, srcPath string) *model.SrcNodeReq { + switch srcStg := srcStg.(type) { + case *cdssdk.OBSType: + cloudType := "HuaweiCloud" + return &model.SrcNodeReq{ + CloudType: &cloudType, + Region: &srcStg.Region, + Ak: &srcStg.AK, + Sk: &srcStg.SK, + Bucket: &srcStg.Bucket, + ObjectKey: &[]string{srcPath}, + } + + default: + return nil + } +} + +func (s *S2STransfer) waitTask(ctx 
context.Context, taskId int64) error { + ticker := time.NewTicker(time.Second * 5) + defer ticker.Stop() + + failures := 0 + + for { + resp, err := s.omsCli.ShowTask(&model.ShowTaskRequest{ + TaskId: fmt.Sprintf("%v", taskId), + }) + if err != nil { + if failures < 3 { + failures++ + continue + } + + return fmt.Errorf("show task failed too many times: %w", err) + } + failures = 0 + + if *resp.Status == 3 { + return fmt.Errorf("task stopped") + } + + if *resp.Status == 4 { + return errors.New(resp.ErrorReason.String()) + } + + if *resp.Status == 5 { + return nil + } + + select { + case <-ticker.C: + continue + case <-ctx.Done(): + return ctx.Err() + } + } +} + +// 完成传输 +func (s *S2STransfer) Complete() { + +} + +// 取消传输。如果已经调用了Complete,则这个方法应该无效果 +func (s *S2STransfer) Abort() { + if s.taskID != nil { + s.omsCli.StopTask(&model.StopTaskRequest{ + TaskId: fmt.Sprintf("%v", *s.taskID), + }) + + s.omsCli.DeleteTask(&model.DeleteTaskRequest{ + TaskId: fmt.Sprintf("%v", *s.taskID), + }) + } +} diff --git a/common/pkgs/storage/s3/s3.go b/common/pkgs/storage/s3/s3.go index aed8506..c65a19f 100644 --- a/common/pkgs/storage/s3/s3.go +++ b/common/pkgs/storage/s3/s3.go @@ -2,42 +2,50 @@ package s3 import ( "fmt" - "reflect" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/s3" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/common/utils/reflect2" stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory/reg" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/s3/obs" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/utils" ) func init() { - reg.RegisterBuilder[*cdssdk.COSType](createService, createComponent) - reg.RegisterBuilder[*cdssdk.OSSType](createService, createComponent) - reg.RegisterBuilder[*cdssdk.OBSType](createService, createComponent) 
+ reg.RegisterBuilder[*cdssdk.COSType](newBuilder) + reg.RegisterBuilder[*cdssdk.OSSType](newBuilder) + reg.RegisterBuilder[*cdssdk.OBSType](newBuilder) } -func createService(detail stgmod.StorageDetail) (types.StorageService, error) { - svc := &Service{ - Detail: detail, +type builder struct { + types.EmptyBuilder + detail stgmod.StorageDetail +} + +func newBuilder(detail stgmod.StorageDetail) types.StorageBuilder { + return &builder{ + detail: detail, } +} - if detail.Storage.ShardStore != nil { - cfg, ok := detail.Storage.ShardStore.(*cdssdk.S3ShardStorage) +func (b *builder) CreateAgent() (types.StorageAgent, error) { + agt := &Agent{ + Detail: b.detail, + } + + if b.detail.Storage.ShardStore != nil { + cfg, ok := b.detail.Storage.ShardStore.(*cdssdk.S3ShardStorage) if !ok { - return nil, fmt.Errorf("invalid shard store type %T for local storage", detail.Storage.ShardStore) + return nil, fmt.Errorf("invalid shard store type %T for local storage", b.detail.Storage.ShardStore) } - cli, bkt, err := createS3Client(detail.Storage.Type) + cli, bkt, err := createS3Client(b.detail.Storage.Type) if err != nil { return nil, err } - store, err := NewShardStore(svc, cli, bkt, *cfg, ShardStoreOption{ + store, err := NewShardStore(agt, cli, bkt, *cfg, ShardStoreOption{ // 目前对接的存储服务都不支持从上传接口直接获取到Sha256 UseAWSSha256: false, }) @@ -45,49 +53,44 @@ func createService(detail stgmod.StorageDetail) (types.StorageService, error) { return nil, err } - svc.ShardStore = store + agt.ShardStore = store } - return svc, nil + return agt, nil } -func createComponent(detail stgmod.StorageDetail, typ reflect.Type) (any, error) { - switch typ { - case reflect2.TypeOf[types.MultipartInitiator](): - feat := utils.FindFeature[*cdssdk.MultipartUploadFeature](detail) - if feat == nil { - return nil, fmt.Errorf("feature %T not found", cdssdk.MultipartUploadFeature{}) - } - - cli, bkt, err := createS3Client(detail.Storage.Type) - if err != nil { - return nil, err - } +func (b *builder) 
ShardStoreDesc() types.ShardStoreDesc { + return &ShardStoreDesc{builder: b} +} - return &MultipartInitiator{ - cli: cli, - bucket: bkt, - tempDir: feat.TempDir, - }, nil +func (b *builder) SharedStoreDesc() types.SharedStoreDesc { + return &SharedStoreDesc{} +} - case reflect2.TypeOf[types.MultipartUploader](): - feat := utils.FindFeature[*cdssdk.MultipartUploadFeature](detail) - if feat == nil { - return nil, fmt.Errorf("feature %T not found", cdssdk.MultipartUploadFeature{}) - } +func (b *builder) CreateMultiparter() (types.Multiparter, error) { + feat := utils.FindFeature[*cdssdk.MultipartUploadFeature](b.detail) + if feat == nil { + return nil, fmt.Errorf("feature %T not found", cdssdk.MultipartUploadFeature{}) + } - cli, bkt, err := createS3Client(detail.Storage.Type) - if err != nil { - return nil, err - } + return &Multiparter{ + detail: b.detail, + feat: feat, + }, nil +} - return &MultipartUploader{ - cli: cli, - bucket: bkt, - }, nil +func (b *builder) CreateS2STransfer() (types.S2STransfer, error) { + feat := utils.FindFeature[*cdssdk.S2STransferFeature](b.detail) + if feat == nil { + return nil, fmt.Errorf("feature %T not found", cdssdk.S2STransferFeature{}) } - return nil, fmt.Errorf("unsupported component type %v", typ) + switch addr := b.detail.Storage.Type.(type) { + case *cdssdk.OBSType: + return obs.NewS2STransfer(addr, feat), nil + default: + return nil, fmt.Errorf("unsupported storage type %T", addr) + } } func createS3Client(addr cdssdk.StorageType) (*s3.Client, string, error) { @@ -97,22 +100,7 @@ func createS3Client(addr cdssdk.StorageType) (*s3.Client, string, error) { // case *cdssdk.OSSType: case *cdssdk.OBSType: - awsConfig := aws.Config{} - - cre := aws.Credentials{ - AccessKeyID: addr.AK, - SecretAccessKey: addr.SK, - } - awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: cre} - awsConfig.Region = addr.Region - - options := []func(*s3.Options){} - options = append(options, func(s3Opt *s3.Options) { - 
s3Opt.BaseEndpoint = &addr.Endpoint - }) - - cli := s3.NewFromConfig(awsConfig, options...) - return cli, addr.Bucket, nil + return obs.CreateS2Client(addr) default: return nil, "", fmt.Errorf("unsupported storage type %T", addr) diff --git a/common/pkgs/storage/s3/s3_test.go b/common/pkgs/storage/s3/s3_test.go index a0765a1..7d81900 100644 --- a/common/pkgs/storage/s3/s3_test.go +++ b/common/pkgs/storage/s3/s3_test.go @@ -1,8 +1,12 @@ package s3 import ( + "bytes" "context" "fmt" + "os" + "path/filepath" + "strings" "testing" "github.com/aws/aws-sdk-go-v2/aws" @@ -14,34 +18,79 @@ import ( func Test_S3(t *testing.T) { Convey("OBS", t, func() { cli, bkt, err := createS3Client(&cdssdk.OBSType{ - Region: "0", - AK: "0", - SK: "0", - Endpoint: "0", - Bucket: "0", + Region: "cn-north-4", + AK: "*", + SK: "*", + Endpoint: "https://obs.cn-north-4.myhuaweicloud.com", + Bucket: "pcm3-bucket3", }) So(err, ShouldEqual, nil) - var marker *string - for { - resp, err := cli.ListObjects(context.Background(), &s3.ListObjectsInput{ - Bucket: aws.String(bkt), - Prefix: aws.String("cds"), - MaxKeys: aws.Int32(5), - Marker: marker, - }) - So(err, ShouldEqual, nil) - - fmt.Printf("\n") - for _, obj := range resp.Contents { - fmt.Printf("%v, %v\n", *obj.Key, *obj.LastModified) + // file, err := os.Open("./sky") + So(err, ShouldEqual, nil) + // defer file.Close() + + _, err = cli.PutObject(context.Background(), &s3.PutObjectInput{ + Bucket: aws.String(bkt), + Key: aws.String("sky2"), + Body: bytes.NewReader([]byte("hello world")), + // ChecksumAlgorithm: s3types.ChecksumAlgorithmSha256, + // ContentType: aws.String("application/octet-stream"), + ContentLength: aws.Int64(11), + // ContentEncoding: aws.String("identity"), + }) + + So(err, ShouldEqual, nil) + + // var marker *string + // for { + // resp, err := cli.ListObjects(context.Background(), &s3.ListObjectsInput{ + // Bucket: aws.String(bkt), + // Prefix: aws.String("cds"), + // MaxKeys: aws.Int32(5), + // Marker: marker, + // }) + 
// So(err, ShouldEqual, nil) + + // fmt.Printf("\n") + // for _, obj := range resp.Contents { + // fmt.Printf("%v, %v\n", *obj.Key, *obj.LastModified) + // } + + // if *resp.IsTruncated { + // marker = resp.NextMarker + // } else { + // break + // } + // } + + }) +} + +func Test_2(t *testing.T) { + Convey("OBS", t, func() { + dir := "d:\\Projects\\cloudream\\workspace\\storage\\common\\pkgs\\storage\\s3" + filepath.WalkDir(dir, func(fname string, d os.DirEntry, err error) error { + if err != nil { + return nil } - if *resp.IsTruncated { - marker = resp.NextMarker - } else { - break + info, err := d.Info() + if err != nil { + return nil } - } + + if info.IsDir() { + return nil + } + + path := strings.TrimPrefix(fname, dir+string(os.PathSeparator)) + // path := fname + comps := strings.Split(filepath.ToSlash(path), "/") + fmt.Println(path) + fmt.Println(comps) + // s.fs.syncer.SyncObject(append([]string{userName}, comps...), info.Size()) + return nil + }) }) } diff --git a/common/pkgs/storage/s3/service.go b/common/pkgs/storage/s3/service.go deleted file mode 100644 index 506ba37..0000000 --- a/common/pkgs/storage/s3/service.go +++ /dev/null @@ -1,43 +0,0 @@ -package s3 - -import ( - "reflect" - - "gitlink.org.cn/cloudream/common/utils/reflect2" - stgmod "gitlink.org.cn/cloudream/storage/common/models" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" -) - -type Service struct { - Detail stgmod.StorageDetail - ShardStore *ShardStore -} - -func (s *Service) Info() stgmod.StorageDetail { - return s.Detail -} - -func (s *Service) GetComponent(typ reflect.Type) (any, error) { - switch typ { - case reflect2.TypeOf[types.ShardStore](): - if s.ShardStore == nil { - return nil, types.ErrComponentNotFound - } - return s.ShardStore, nil - - default: - return nil, types.ErrComponentNotFound - } -} - -func (s *Service) Start(ch *types.StorageEventChan) { - if s.ShardStore != nil { - s.ShardStore.Start(ch) - } -} - -func (s *Service) Stop() { - if s.ShardStore != nil 
{ - s.ShardStore.Stop() - } -} diff --git a/common/pkgs/storage/s3/shard_store.go b/common/pkgs/storage/s3/shard_store.go index c802c65..7185476 100644 --- a/common/pkgs/storage/s3/shard_store.go +++ b/common/pkgs/storage/s3/shard_store.go @@ -17,6 +17,7 @@ import ( cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" "gitlink.org.cn/cloudream/common/utils/io2" "gitlink.org.cn/cloudream/common/utils/os2" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/s3/utils" "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types" ) @@ -25,12 +26,28 @@ const ( BlocksDir = "blocks" ) +type ShardStoreDesc struct { + builder *builder +} + +func (s *ShardStoreDesc) Enabled() bool { + return s.builder.detail.Storage.ShardStore != nil +} + +func (s *ShardStoreDesc) HasBypassWrite() bool { + return true +} + +func (s *ShardStoreDesc) HasBypassRead() bool { + return true +} + type ShardStoreOption struct { UseAWSSha256 bool // 能否直接使用AWS提供的SHA256校验,如果不行,则使用本地计算。默认使用本地计算。 } type ShardStore struct { - svc *Service + svc *Agent cli *s3.Client bucket string cfg cdssdk.S3ShardStorage @@ -40,7 +57,7 @@ type ShardStore struct { done chan any } -func NewShardStore(svc *Service, cli *s3.Client, bkt string, cfg cdssdk.S3ShardStorage, opt ShardStoreOption) (*ShardStore, error) { +func NewShardStore(svc *Agent, cli *s3.Client, bkt string, cfg cdssdk.S3ShardStorage, opt ShardStoreOption) (*ShardStore, error) { return &ShardStore{ svc: svc, cli: cli, @@ -82,7 +99,7 @@ func (s *ShardStore) removeUnusedTempFiles() { for { resp, err := s.cli.ListObjects(context.Background(), &s3.ListObjectsInput{ Bucket: aws.String(s.bucket), - Prefix: aws.String(JoinKey(s.cfg.Root, TempDir, "/")), + Prefix: aws.String(utils.JoinKey(s.cfg.Root, TempDir, "/")), Marker: marker, }) @@ -92,7 +109,7 @@ func (s *ShardStore) removeUnusedTempFiles() { } for _, obj := range resp.Contents { - objName := BaseKey(*obj.Key) + objName := utils.BaseKey(*obj.Key) if s.workingTempFiles[objName] { continue @@ -178,7 +195,7 @@ 
func (s *ShardStore) createWithAwsSha256(stream io.Reader) (types.FileInfo, erro return types.FileInfo{}, errors.New("SHA256 checksum not found in response") } - hash, err := DecodeBase64Hash(*resp.ChecksumSHA256) + hash, err := utils.DecodeBase64Hash(*resp.ChecksumSHA256) if err != nil { log.Warnf("decode SHA256 checksum %v: %v", *resp.ChecksumSHA256, err) s.onCreateFailed(key, fileName) @@ -197,10 +214,9 @@ func (s *ShardStore) createWithCalcSha256(stream io.Reader) (types.FileInfo, err counter := io2.NewCounter(hashStr) _, err := s.cli.PutObject(context.TODO(), &s3.PutObjectInput{ - Bucket: aws.String(s.bucket), - Key: aws.String(key), - Body: counter, - ChecksumAlgorithm: s3types.ChecksumAlgorithmSha256, + Bucket: aws.String(s.bucket), + Key: aws.String(key), + Body: counter, }) if err != nil { log.Warnf("uploading file %v: %v", key, err) @@ -219,11 +235,11 @@ func (s *ShardStore) createTempFile() (string, string) { s.lock.Lock() defer s.lock.Unlock() - tmpDir := JoinKey(s.cfg.Root, TempDir) + tmpDir := utils.JoinKey(s.cfg.Root, TempDir) tmpName := os2.GenerateRandomFileName(20) s.workingTempFiles[tmpName] = true - return JoinKey(tmpDir, tmpName), tmpName + return utils.JoinKey(tmpDir, tmpName), tmpName } func (s *ShardStore) onCreateFinished(tempFilePath string, size int64, hash cdssdk.FileHash) (types.FileInfo, error) { @@ -243,11 +259,11 @@ func (s *ShardStore) onCreateFinished(tempFilePath string, size int64, hash cdss log.Debugf("write file %v finished, size: %v, hash: %v", tempFilePath, size, hash) blockDir := s.getFileDirFromHash(hash) - newPath := JoinKey(blockDir, string(hash)) + newPath := utils.JoinKey(blockDir, string(hash)) _, err := s.cli.CopyObject(context.Background(), &s3.CopyObjectInput{ Bucket: aws.String(s.bucket), - CopySource: aws.String(tempFilePath), + CopySource: aws.String(utils.JoinKey(s.bucket, tempFilePath)), Key: aws.String(newPath), }) if err != nil { @@ -327,7 +343,7 @@ func (s *ShardStore) ListAll() ([]types.FileInfo, error) { 
var infos []types.FileInfo - blockDir := JoinKey(s.cfg.Root, BlocksDir) + blockDir := utils.JoinKey(s.cfg.Root, BlocksDir) var marker *string for { @@ -343,7 +359,7 @@ func (s *ShardStore) ListAll() ([]types.FileInfo, error) { } for _, obj := range resp.Contents { - key := BaseKey(*obj.Key) + key := utils.BaseKey(*obj.Key) fileHash, err := cdssdk.ParseHash(key) if err != nil { @@ -376,7 +392,7 @@ func (s *ShardStore) GC(avaiables []cdssdk.FileHash) error { avais[hash] = true } - blockDir := JoinKey(s.cfg.Root, BlocksDir) + blockDir := utils.JoinKey(s.cfg.Root, BlocksDir) var deletes []s3types.ObjectIdentifier var marker *string @@ -393,7 +409,7 @@ func (s *ShardStore) GC(avaiables []cdssdk.FileHash) error { } for _, obj := range resp.Contents { - key := BaseKey(*obj.Key) + key := utils.BaseKey(*obj.Key) fileHash, err := cdssdk.ParseHash(key) if err != nil { continue @@ -441,6 +457,20 @@ func (s *ShardStore) Stats() types.Stats { } } +func (s *ShardStore) getLogger() logger.Logger { + return logger.WithField("ShardStore", "S3").WithField("Storage", s.svc.Detail.Storage.String()) +} + +func (s *ShardStore) getFileDirFromHash(hash cdssdk.FileHash) string { + return utils.JoinKey(s.cfg.Root, BlocksDir, hash.GetHashPrefix(2)) +} + +func (s *ShardStore) getFilePathFromHash(hash cdssdk.FileHash) string { + return utils.JoinKey(s.cfg.Root, BlocksDir, hash.GetHashPrefix(2), string(hash)) +} + +var _ types.BypassWrite = (*ShardStore)(nil) + func (s *ShardStore) BypassUploaded(info types.BypassFileInfo) error { if info.FileHash == "" { return fmt.Errorf("empty file hash is not allowed by this shard store") @@ -461,10 +491,10 @@ func (s *ShardStore) BypassUploaded(info types.BypassFileInfo) error { log.Debugf("%v bypass uploaded, size: %v, hash: %v", info.TempFilePath, info.Size, info.FileHash) blockDir := s.getFileDirFromHash(info.FileHash) - newPath := JoinKey(blockDir, string(info.FileHash)) + newPath := utils.JoinKey(blockDir, string(info.FileHash)) _, err := 
s.cli.CopyObject(context.Background(), &s3.CopyObjectInput{ - CopySource: aws.String(JoinKey(s.bucket, info.TempFilePath)), + CopySource: aws.String(utils.JoinKey(s.bucket, info.TempFilePath)), Bucket: aws.String(s.bucket), Key: aws.String(newPath), }) @@ -476,14 +506,28 @@ func (s *ShardStore) BypassUploaded(info types.BypassFileInfo) error { return nil } -func (s *ShardStore) getLogger() logger.Logger { - return logger.WithField("ShardStore", "S3").WithField("Storage", s.svc.Detail.Storage.String()) -} +var _ types.BypassRead = (*ShardStore)(nil) -func (s *ShardStore) getFileDirFromHash(hash cdssdk.FileHash) string { - return JoinKey(s.cfg.Root, BlocksDir, hash.GetHashPrefix(2)) -} +func (s *ShardStore) BypassRead(fileHash cdssdk.FileHash) (types.BypassFilePath, error) { + s.lock.Lock() + defer s.lock.Unlock() -func (s *ShardStore) getFilePathFromHash(hash cdssdk.FileHash) string { - return JoinKey(s.cfg.Root, BlocksDir, hash.GetHashPrefix(2), string(hash)) + filePath := s.getFilePathFromHash(fileHash) + info, err := s.cli.HeadObject(context.TODO(), &s3.HeadObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(filePath), + }) + if err != nil { + s.getLogger().Warnf("get file %v: %v", filePath, err) + return types.BypassFilePath{}, err + } + + return types.BypassFilePath{ + Path: filePath, + Info: types.FileInfo{ + Hash: fileHash, + Size: *info.ContentLength, + Description: filePath, + }, + }, nil } diff --git a/common/pkgs/storage/s3/shared_store.go b/common/pkgs/storage/s3/shared_store.go new file mode 100644 index 0000000..bb822e8 --- /dev/null +++ b/common/pkgs/storage/s3/shared_store.go @@ -0,0 +1,12 @@ +package s3 + +type SharedStoreDesc struct { +} + +func (d *SharedStoreDesc) Enabled() bool { + return false +} + +func (d *SharedStoreDesc) HasBypassWrite() bool { + return false +} diff --git a/common/pkgs/storage/s3/utils.go b/common/pkgs/storage/s3/utils/utils.go similarity index 93% rename from common/pkgs/storage/s3/utils.go rename to 
common/pkgs/storage/s3/utils/utils.go index f17b2ac..a1d2454 100644 --- a/common/pkgs/storage/s3/utils.go +++ b/common/pkgs/storage/s3/utils/utils.go @@ -1,4 +1,4 @@ -package s3 +package utils import ( "encoding/base64" @@ -11,6 +11,9 @@ func JoinKey(comps ...string) string { hasTrailingSlash := true for _, comp := range comps { + if comp == "" { + continue + } if !hasTrailingSlash { sb.WriteString("/") } diff --git a/common/pkgs/storage/types/bypass.go b/common/pkgs/storage/types/bypass.go index 7c33e68..5e8d23f 100644 --- a/common/pkgs/storage/types/bypass.go +++ b/common/pkgs/storage/types/bypass.go @@ -10,6 +10,20 @@ type BypassFileInfo struct { Size int64 } -type BypassNotifier interface { +// 不通过ShardStore上传文件,但上传完成后需要通知ShardStore。 +// 也可以用于共享存储。 +type BypassWrite interface { BypassUploaded(info BypassFileInfo) error } + +// 描述指定文件在分片存储中的路径。可以考虑设计成interface。 +type BypassFilePath struct { + Path string + Info FileInfo +} + +// 不通过ShardStore读取文件,但需要它返回文件的路径。 +// 仅用于分片存储。 +type BypassRead interface { + BypassRead(fileHash cdssdk.FileHash) (BypassFilePath, error) +} diff --git a/common/pkgs/storage/types/empty_builder.go b/common/pkgs/storage/types/empty_builder.go new file mode 100644 index 0000000..be0178c --- /dev/null +++ b/common/pkgs/storage/types/empty_builder.go @@ -0,0 +1,59 @@ +package types + +import ( + "fmt" + + stgmod "gitlink.org.cn/cloudream/storage/common/models" +) + +type EmptyBuilder struct { + Detail stgmod.StorageDetail +} + +// 创建一个在MasterHub上长期运行的存储服务 +func (b *EmptyBuilder) CreateAgent() (StorageAgent, error) { + return nil, fmt.Errorf("create agent for %T: %w", b.Detail.Storage.Type, ErrUnsupported) +} + +func (b *EmptyBuilder) ShardStoreDesc() ShardStoreDesc { + return &EmptyShardStoreDesc{} +} + +func (b *EmptyBuilder) SharedStoreDesc() SharedStoreDesc { + return &EmptySharedStoreDesc{} +} + +// 创建一个分片上传组件 +func (b *EmptyBuilder) CreateMultiparter() (Multiparter, error) { + return nil, fmt.Errorf("create multipart initiator for %T: 
%w", b.Detail.Storage.Type, ErrUnsupported) +} + +func (b *EmptyBuilder) CreateS2STransfer() (S2STransfer, error) { + return nil, fmt.Errorf("create s2s transfer for %T: %w", b.Detail.Storage.Type, ErrUnsupported) +} + +type EmptyShardStoreDesc struct { +} + +func (d *EmptyShardStoreDesc) Enabled() bool { + return false +} + +func (d *EmptyShardStoreDesc) HasBypassWrite() bool { + return false +} + +func (d *EmptyShardStoreDesc) HasBypassRead() bool { + return false +} + +type EmptySharedStoreDesc struct { +} + +func (d *EmptySharedStoreDesc) Enabled() bool { + return false +} + +func (d *EmptySharedStoreDesc) HasBypassWrite() bool { + return false +} diff --git a/common/pkgs/storage/types/s2s.go b/common/pkgs/storage/types/s2s.go new file mode 100644 index 0000000..fccb1f2 --- /dev/null +++ b/common/pkgs/storage/types/s2s.go @@ -0,0 +1,18 @@ +package types + +import ( + "context" + + stgmod "gitlink.org.cn/cloudream/storage/common/models" +) + +type S2STransfer interface { + // 判断是否能从指定的源存储中直传到当前存储的目的路径 + CanTransfer(src stgmod.StorageDetail) bool + // 执行数据直传。返回传输后的文件路径 + Transfer(ctx context.Context, src stgmod.StorageDetail, srcPath string) (string, error) + // 完成传输 + Complete() + // 取消传输。如果已经调用了Complete,则这个方法应该无效果 + Abort() +} diff --git a/common/pkgs/storage/types/s3_client.go b/common/pkgs/storage/types/s3_client.go index 9514d24..4ec3f9c 100644 --- a/common/pkgs/storage/types/s3_client.go +++ b/common/pkgs/storage/types/s3_client.go @@ -5,9 +5,17 @@ import ( "io" ) -type MultipartInitiator interface { +type Multiparter interface { + MaxPartSize() int64 + MinPartSize() int64 // 启动一个分片上传 - Initiate(ctx context.Context) (MultipartInitState, error) + Initiate(ctx context.Context) (MultipartTask, error) + // 上传一个分片 + UploadPart(ctx context.Context, init MultipartInitState, partSize int64, partNumber int, stream io.Reader) (UploadedPartInfo, error) +} + +type MultipartTask interface { + InitState() MultipartInitState // 所有分片上传完成后,合并分片 JoinParts(ctx 
context.Context, parts []UploadedPartInfo) (BypassFileInfo, error) // 合成之后的文件已被使用 @@ -16,11 +24,6 @@ type MultipartInitiator interface { Abort() } -type MultipartUploader interface { - UploadPart(ctx context.Context, init MultipartInitState, partSize int64, partNumber int, stream io.Reader) (UploadedPartInfo, error) - Close() -} - // TODO 可以考虑重构成一个接口,支持不同的类型的分片有不同内容的实现 type MultipartInitState struct { UploadID string diff --git a/common/pkgs/storage/types/shared_store.go b/common/pkgs/storage/types/shared_store.go index 4ada1e5..98456e8 100644 --- a/common/pkgs/storage/types/shared_store.go +++ b/common/pkgs/storage/types/shared_store.go @@ -2,18 +2,11 @@ package types import ( "io" - - cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - stgmod "gitlink.org.cn/cloudream/storage/common/models" ) type SharedStore interface { Start(ch *StorageEventChan) Stop() - // 写入一个文件到Package的调度目录下,返回值为文件路径:userID/pkgID/path - WritePackageObject(userID cdssdk.UserID, pkgID cdssdk.PackageID, path string, stream io.Reader) (string, error) - // 获取所有已加载的Package信息 - ListLoadedPackages() ([]stgmod.LoadedPackageID, error) - // 垃圾回收,删除过期的Package - PackageGC(avaiables []stgmod.LoadedPackageID) error + + Write(objectPath string, stream io.Reader) error } diff --git a/common/pkgs/storage/types/types.go b/common/pkgs/storage/types/types.go index 3c60afc..11b2516 100644 --- a/common/pkgs/storage/types/types.go +++ b/common/pkgs/storage/types/types.go @@ -2,7 +2,6 @@ package types import ( "errors" - "reflect" "gitlink.org.cn/cloudream/common/pkgs/async" stgmod "gitlink.org.cn/cloudream/storage/common/models" @@ -10,7 +9,8 @@ import ( var ErrStorageNotFound = errors.New("storage not found") -var ErrComponentNotFound = errors.New("component not found") +// 不支持的操作。可以作为StorageBuilder中任意函数的错误返回值,代表该操作不被支持。 +var ErrUnsupported = errors.New("unsupported operation") var ErrStorageExists = errors.New("storage already exists") @@ -18,21 +18,52 @@ type StorageEvent interface{} type StorageEventChan = 
async.UnboundChannel[StorageEvent] -// 代表一个长期运行在MasterHub上的存储服务 -type StorageService interface { - Info() stgmod.StorageDetail - GetComponent(typ reflect.Type) (any, error) +// 在MasterHub上运行,代理一个存储服务。 +// +// 存放Storage的运行时数据。如果一个组件需要与Agent交互(比如实际是ShardStore功能的一部分),或者是需要长期运行, +// 那么就将该组件的Get函数放到StorageAgent接口中。可以同时在StorageBuilder中同时提供HasXXX函数, +// 用于判断该Storage是否支持某个功能,用于生成ioswitch计划时判断是否能利用此功能。 +type StorageAgent interface { Start(ch *StorageEventChan) Stop() + + Info() stgmod.StorageDetail + // 获取分片存储服务 + GetShardStore() (ShardStore, error) + // 获取共享存储服务 + GetSharedStore() (SharedStore, error) } -// 创建一个在MasterHub上长期运行的存储服务 -type StorageServiceBuilder func(detail stgmod.StorageDetail) (StorageService, error) +// 创建存储服务的指定组件。 +// +// 如果指定组件比较独立,不需要依赖运行时数据,或者不需要与Agent交互,那么就可以将Create函数放到这个接口中。 +// 增加Has函数用于判断该Storage是否有某个组件。 +// 如果Create函数仅仅只是创建一个结构体,没有其他副作用,那么也可以用Create函数来判断是否支持某个功能。 +type StorageBuilder interface { + // 创建一个在MasterHub上长期运行的存储服务 + CreateAgent() (StorageAgent, error) + // 是否支持分片存储服务 + ShardStoreDesc() ShardStoreDesc + // 是否支持共享存储服务 + SharedStoreDesc() SharedStoreDesc + // 创建一个分片上传组件 + CreateMultiparter() (Multiparter, error) + // 创建一个存储服务直传组件 + CreateS2STransfer() (S2STransfer, error) +} -// 根据存储服务信息创建一个指定类型的组件 -type StorageComponentBuilder func(detail stgmod.StorageDetail, typ reflect.Type) (any, error) +type ShardStoreDesc interface { + // 是否已启动 + Enabled() bool + // 是否能旁路上传 + HasBypassWrite() bool + // 是否能旁路读取 + HasBypassRead() bool +} -type StorageBuilder struct { - CreateService StorageServiceBuilder - CreateComponent StorageComponentBuilder +type SharedStoreDesc interface { + // 是否已启动 + Enabled() bool + // 是否能旁路上传 + HasBypassWrite() bool } diff --git a/common/pkgs/uploader/create_load.go b/common/pkgs/uploader/create_load.go index 9a087bd..16590b4 100644 --- a/common/pkgs/uploader/create_load.go +++ b/common/pkgs/uploader/create_load.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "path" "sync" "time" @@ -16,13 +17,13 @@ import ( 
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser" coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/utils" ) type CreateLoadUploader struct { pkg cdssdk.Package userID cdssdk.UserID targetStgs []stgmod.StorageDetail + loadRoots []string uploader *Uploader distlock *distlock.Mutex successes []coormq.AddObjectEntry @@ -31,21 +32,20 @@ type CreateLoadUploader struct { } type CreateLoadResult struct { - Package cdssdk.Package - Objects map[string]cdssdk.Object - LoadedDirs []string + Package cdssdk.Package + Objects map[string]cdssdk.Object } -func (u *CreateLoadUploader) Upload(path string, size int64, stream io.Reader) error { +func (u *CreateLoadUploader) Upload(pa string, size int64, stream io.Reader) error { uploadTime := time.Now() stgIDs := make([]cdssdk.StorageID, 0, len(u.targetStgs)) ft := ioswitch2.FromTo{} fromExec, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream()) ft.AddFrom(fromExec) - for _, stg := range u.targetStgs { + for i, stg := range u.targetStgs { ft.AddTo(ioswitch2.NewToShardStore(*stg.MasterHub, stg, ioswitch2.RawStream(), "fileHash")) - ft.AddTo(ioswitch2.NewLoadToShared(*stg.MasterHub, stg.Storage, u.userID, u.pkg.PackageID, path)) + ft.AddTo(ioswitch2.NewLoadToShared(*stg.MasterHub, stg, path.Join(u.loadRoots[i], pa))) stgIDs = append(stgIDs, stg.Storage.StorageID) } @@ -56,7 +56,7 @@ func (u *CreateLoadUploader) Upload(path string, size int64, stream io.Reader) e } exeCtx := exec.NewExecContext() - exec.SetValueByType(exeCtx, u.uploader.stgMgr) + exec.SetValueByType(exeCtx, u.uploader.stgAgts) exec := plans.Execute(exeCtx) exec.BeginWrite(io.NopCloser(stream), hd) ret, err := exec.Wait(context.TODO()) @@ -70,7 +70,7 @@ func (u *CreateLoadUploader) Upload(path string, size int64, stream io.Reader) e // 记录上传结果 fileHash := ret["fileHash"].(*ops2.FileHashValue).Hash u.successes = 
append(u.successes, coormq.AddObjectEntry{ - Path: path, + Path: pa, Size: size, FileHash: fileHash, UploadTime: uploadTime, @@ -110,14 +110,9 @@ func (u *CreateLoadUploader) Commit() (CreateLoadResult, error) { ret.Objects[entry.Path] = entry } - for _, stg := range u.targetStgs { - _, err := coorCli.StoragePackageLoaded(coormq.NewStoragePackageLoaded(u.userID, stg.Storage.StorageID, u.pkg.PackageID, nil)) - if err != nil { - return CreateLoadResult{}, fmt.Errorf("notifying storage package loaded: %w", err) - } - - // TODO 考虑让SharedStore来生成Load目录路径 - ret.LoadedDirs = append(ret.LoadedDirs, utils.MakeLoadedPackagePath(u.userID, u.pkg.PackageID)) + for i, stg := range u.targetStgs { + // 不关注是否成功 + coorCli.StoragePackageLoaded(coormq.ReqStoragePackageLoaded(u.userID, stg.Storage.StorageID, u.pkg.PackageID, u.loadRoots[i], nil)) } return ret, nil diff --git a/common/pkgs/uploader/update.go b/common/pkgs/uploader/update.go index 9a31c47..862b9f6 100644 --- a/common/pkgs/uploader/update.go +++ b/common/pkgs/uploader/update.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "path" "sync" "time" @@ -19,13 +20,15 @@ import ( ) type UpdateUploader struct { - uploader *Uploader - pkgID cdssdk.PackageID - targetStg stgmod.StorageDetail - distMutex *distlock.Mutex - successes []coormq.AddObjectEntry - lock sync.Mutex - commited bool + uploader *Uploader + pkgID cdssdk.PackageID + targetStg stgmod.StorageDetail + distMutex *distlock.Mutex + loadToStgs []stgmod.StorageDetail + loadToPath []string + successes []coormq.AddObjectEntry + lock sync.Mutex + commited bool } type UploadStorageInfo struct { @@ -39,12 +42,17 @@ type UpdateResult struct { Objects map[string]cdssdk.Object } -func (w *UpdateUploader) Upload(path string, size int64, stream io.Reader) error { +func (w *UpdateUploader) Upload(pat string, size int64, stream io.Reader) error { uploadTime := time.Now() ft := ioswitch2.NewFromTo() fromExec, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream()) - 
ft.AddFrom(fromExec).AddTo(ioswitch2.NewToShardStore(*w.targetStg.MasterHub, w.targetStg, ioswitch2.RawStream(), "fileHash")) + ft.AddFrom(fromExec). + AddTo(ioswitch2.NewToShardStore(*w.targetStg.MasterHub, w.targetStg, ioswitch2.RawStream(), "fileHash")) + + for i, stg := range w.loadToStgs { + ft.AddTo(ioswitch2.NewLoadToShared(*stg.MasterHub, stg, path.Join(w.loadToPath[i], pat))) + } plans := exec.NewPlanBuilder() err := parser.Parse(ft, plans) @@ -53,7 +61,7 @@ func (w *UpdateUploader) Upload(path string, size int64, stream io.Reader) error } exeCtx := exec.NewExecContext() - exec.SetValueByType(exeCtx, w.uploader.stgMgr) + exec.SetValueByType(exeCtx, w.uploader.stgAgts) exec := plans.Execute(exeCtx) exec.BeginWrite(io.NopCloser(stream), hd) ret, err := exec.Wait(context.TODO()) @@ -66,7 +74,7 @@ func (w *UpdateUploader) Upload(path string, size int64, stream io.Reader) error // 记录上传结果 w.successes = append(w.successes, coormq.AddObjectEntry{ - Path: path, + Path: pat, Size: size, FileHash: ret["fileHash"].(*ops2.FileHashValue).Hash, UploadTime: uploadTime, diff --git a/common/pkgs/uploader/uploader.go b/common/pkgs/uploader/uploader.go index 9b525e7..30ea971 100644 --- a/common/pkgs/uploader/uploader.go +++ b/common/pkgs/uploader/uploader.go @@ -14,25 +14,31 @@ import ( "gitlink.org.cn/cloudream/storage/common/pkgs/connectivity" "gitlink.org.cn/cloudream/storage/common/pkgs/distlock" "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" + "gitlink.org.cn/cloudream/storage/common/pkgs/metacache" coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/factory" ) type Uploader struct { distlock *distlock.Service connectivity *connectivity.Collector - stgMgr *svcmgr.Manager + stgAgts *agtpool.AgentPool + stgMeta *metacache.StorageMeta + loadTo 
[]cdssdk.StorageID + loadToPath []string } -func NewUploader(distlock *distlock.Service, connectivity *connectivity.Collector, stgMgr *svcmgr.Manager) *Uploader { +func NewUploader(distlock *distlock.Service, connectivity *connectivity.Collector, stgAgts *agtpool.AgentPool, stgMeta *metacache.StorageMeta) *Uploader { return &Uploader{ distlock: distlock, connectivity: connectivity, - stgMgr: stgMgr, + stgAgts: stgAgts, + stgMeta: stgMeta, } } -func (u *Uploader) BeginUpdate(userID cdssdk.UserID, pkgID cdssdk.PackageID, affinity cdssdk.StorageID) (*UpdateUploader, error) { +func (u *Uploader) BeginUpdate(userID cdssdk.UserID, pkgID cdssdk.PackageID, affinity cdssdk.StorageID, loadTo []cdssdk.StorageID, loadToPath []string) (*UpdateUploader, error) { coorCli, err := stgglb.CoordinatorMQPool.Acquire() if err != nil { return nil, fmt.Errorf("new coordinator client: %w", err) @@ -54,8 +60,8 @@ func (u *Uploader) BeginUpdate(userID cdssdk.UserID, pkgID cdssdk.PackageID, aff delay := time.Duration(math.MaxInt64) con, ok := cons[stg.MasterHub.HubID] - if ok && con.Delay != nil { - delay = *con.Delay + if ok && con.Latency != nil { + delay = *con.Latency } userStgs = append(userStgs, UploadStorageInfo{ @@ -69,6 +75,24 @@ func (u *Uploader) BeginUpdate(userID cdssdk.UserID, pkgID cdssdk.PackageID, aff return nil, fmt.Errorf("user no available storages") } + loadToStgs := make([]stgmod.StorageDetail, len(loadTo)) + for i, stgID := range loadTo { + stg, ok := lo.Find(getUserStgsResp.Storages, func(stg stgmod.StorageDetail) bool { + return stg.Storage.StorageID == stgID + }) + if !ok { + return nil, fmt.Errorf("load to storage %v not found", stgID) + } + if stg.MasterHub == nil { + return nil, fmt.Errorf("load to storage %v has no master hub", stgID) + } + if factory.GetBuilder(stg).ShardStoreDesc().Enabled() { + return nil, fmt.Errorf("load to storage %v has no shared store", stgID) + } + + loadToStgs[i] = stg + } + target := u.chooseUploadStorage(userStgs, affinity) // 
给上传节点的IPFS加锁 @@ -80,10 +104,12 @@ func (u *Uploader) BeginUpdate(userID cdssdk.UserID, pkgID cdssdk.PackageID, aff } return &UpdateUploader{ - uploader: u, - pkgID: pkgID, - targetStg: target.Storage, - distMutex: distMutex, + uploader: u, + pkgID: pkgID, + targetStg: target.Storage, + distMutex: distMutex, + loadToStgs: loadToStgs, + loadToPath: loadToPath, }, nil } @@ -110,20 +136,17 @@ func (w *Uploader) chooseUploadStorage(storages []UploadStorageInfo, stgAffinity return storages[0] } -func (u *Uploader) BeginCreateLoad(userID cdssdk.UserID, bktID cdssdk.BucketID, pkgName string, loadTo []cdssdk.StorageID) (*CreateLoadUploader, error) { +func (u *Uploader) BeginCreateLoad(userID cdssdk.UserID, bktID cdssdk.BucketID, pkgName string, loadTo []cdssdk.StorageID, loadToPath []string) (*CreateLoadUploader, error) { coorCli, err := stgglb.CoordinatorMQPool.Acquire() if err != nil { return nil, fmt.Errorf("new coordinator client: %w", err) } defer stgglb.CoordinatorMQPool.Release(coorCli) - getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(loadTo)) - if err != nil { - return nil, fmt.Errorf("getting storages: %w", err) - } + getStgs := u.stgMeta.GetMany(loadTo) targetStgs := make([]stgmod.StorageDetail, len(loadTo)) - for i, stg := range getStgs.Storages { + for i, stg := range getStgs { if stg == nil { return nil, fmt.Errorf("storage %v not found", loadTo[i]) } @@ -139,7 +162,6 @@ func (u *Uploader) BeginCreateLoad(userID cdssdk.UserID, bktID cdssdk.BucketID, for _, stg := range targetStgs { reqBld.Shard().Buzy(stg.Storage.StorageID) reqBld.Storage().Buzy(stg.Storage.StorageID) - reqBld.Metadata().StoragePackage().CreateOne(userID, stg.Storage.StorageID, createPkg.Package.PackageID) } lock, err := reqBld.MutexLock(u.distlock) if err != nil { @@ -150,6 +172,7 @@ func (u *Uploader) BeginCreateLoad(userID cdssdk.UserID, bktID cdssdk.BucketID, pkg: createPkg.Package, userID: userID, targetStgs: targetStgs, + loadRoots: loadToPath, uploader: u, 
distlock: lock, }, nil diff --git a/coordinator/internal/cmd/migrate.go b/coordinator/internal/cmd/migrate.go index eb55f0c..3367862 100644 --- a/coordinator/internal/cmd/migrate.go +++ b/coordinator/internal/cmd/migrate.go @@ -52,7 +52,6 @@ func migrate(configPath string) { migrateOne(db, stgmod.PackageAccessStat{}) migrateOne(db, cdssdk.Package{}) migrateOne(db, cdssdk.PinnedObject{}) - migrateOne(db, model.StoragePackage{}) migrateOne(db, cdssdk.Storage{}) migrateOne(db, model.UserStorage{}) migrateOne(db, model.UserBucket{}) diff --git a/coordinator/internal/cmd/serve.go b/coordinator/internal/cmd/serve.go index 30c07e3..59057d5 100644 --- a/coordinator/internal/cmd/serve.go +++ b/coordinator/internal/cmd/serve.go @@ -4,8 +4,6 @@ import ( "fmt" "os" - stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq" - "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/mq" "gitlink.org.cn/cloudream/storage/common/pkgs/db2" @@ -32,7 +30,7 @@ func serve(configPath string) { logger.Fatalf("new db2 failed, err: %s", err.Error()) } - coorSvr, err := coormq.NewServer(mymq.NewService(db2), &config.Cfg().RabbitMQ) + coorSvr, err := coormq.NewServer(mymq.NewService(db2), config.Cfg().RabbitMQ) if err != nil { logger.Fatalf("new coordinator server failed, err: %s", err.Error()) } @@ -48,7 +46,7 @@ func serve(configPath string) { <-forever } -func serveCoorServer(server *coormq.Server, cfg stgmq.Config) { +func serveCoorServer(server *coormq.Server, cfg mq.Config) { logger.Info("start serving command server") ch := server.Start(cfg) diff --git a/coordinator/internal/config/config.go b/coordinator/internal/config/config.go index 49ae494..76789e7 100644 --- a/coordinator/internal/config/config.go +++ b/coordinator/internal/config/config.go @@ -2,15 +2,15 @@ package config import ( log "gitlink.org.cn/cloudream/common/pkgs/logger" + "gitlink.org.cn/cloudream/common/pkgs/mq" c "gitlink.org.cn/cloudream/common/utils/config" db 
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/config" - stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq" ) type Config struct { - Logger log.Config `json:"logger"` - DB db.Config `json:"db"` - RabbitMQ stgmq.Config `json:"rabbitMQ"` + Logger log.Config `json:"logger"` + DB db.Config `json:"db"` + RabbitMQ mq.Config `json:"rabbitMQ"` } var cfg Config diff --git a/coordinator/internal/mq/bucket.go b/coordinator/internal/mq/bucket.go index 965640d..ccaf031 100644 --- a/coordinator/internal/mq/bucket.go +++ b/coordinator/internal/mq/bucket.go @@ -50,7 +50,7 @@ func (svc *Service) GetUserBuckets(msg *coormq.GetUserBuckets) (*coormq.GetUserB } func (svc *Service) GetBucketPackages(msg *coormq.GetBucketPackages) (*coormq.GetBucketPackagesResp, *mq.CodeMessage) { - packages, err := svc.db2.Package().GetBucketPackages(svc.db2.DefCtx(), msg.UserID, msg.BucketID) + packages, err := svc.db2.Package().GetUserBucketPackages(svc.db2.DefCtx(), msg.UserID, msg.BucketID) if err != nil { logger.WithField("UserID", msg.UserID). 
@@ -103,7 +103,23 @@ func (svc *Service) DeleteBucket(msg *coormq.DeleteBucket) (*coormq.DeleteBucket return fmt.Errorf("bucket is not avaiable to the user") } - err := svc.db2.Bucket().Delete(tx, msg.BucketID) + if err := svc.db2.UserBucket().DeleteByBucketID(tx, msg.BucketID); err != nil { + return fmt.Errorf("deleting user bucket: %w", err) + } + + pkgs, err := svc.db2.Package().GetBucketPackages(tx, msg.BucketID) + if err != nil { + return fmt.Errorf("getting bucket packages: %w", err) + } + + for _, pkg := range pkgs { + err := svc.db2.Package().DeleteComplete(tx, pkg.PackageID) + if err != nil { + return fmt.Errorf("deleting package %v: %w", pkg.PackageID, err) + } + } + + err = svc.db2.Bucket().Delete(tx, msg.BucketID) if err != nil { return fmt.Errorf("deleting bucket: %w", err) } diff --git a/coordinator/internal/mq/hub.go b/coordinator/internal/mq/hub.go index fc75cdf..59f49a4 100644 --- a/coordinator/internal/mq/hub.go +++ b/coordinator/internal/mq/hub.go @@ -57,27 +57,39 @@ func (svc *Service) GetUserHubs(msg *coormq.GetUserHubs) (*coormq.GetUserHubsRes } func (svc *Service) GetHubs(msg *coormq.GetHubs) (*coormq.GetHubsResp, *mq.CodeMessage) { - var hubs []cdssdk.Hub + var hubs []*cdssdk.Hub if msg.HubIDs == nil { - var err error - hubs, err = svc.db2.Hub().GetAllHubs(svc.db2.DefCtx()) + get, err := svc.db2.Hub().GetAllHubs(svc.db2.DefCtx()) if err != nil { logger.Warnf("getting all hubs: %s", err.Error()) return nil, mq.Failed(errorcode.OperationFailed, "get all hub failed") } + for _, hub := range get { + h := hub + hubs = append(hubs, &h) + } } else { // 可以不用事务 + get, err := svc.db2.Hub().BatchGetByID(svc.db2.DefCtx(), msg.HubIDs) + if err != nil { + logger.Warnf("batch get hubs by id: %s", err.Error()) + return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("batch get hubs by id: %v", err)) + } + + getMp := make(map[cdssdk.HubID]cdssdk.Hub) + for _, hub := range get { + getMp[hub.HubID] = hub + } + for _, id := range msg.HubIDs { - hub, err 
:= svc.db2.Hub().GetByID(svc.db2.DefCtx(), id) - if err != nil { - logger.WithField("HubID", id). - Warnf("query hub failed, err: %s", err.Error()) - return nil, mq.Failed(errorcode.OperationFailed, "query hub failed") + if hub, ok := getMp[id]; ok { + h := hub + hubs = append(hubs, &h) + } else { + hubs = append(hubs, nil) } - - hubs = append(hubs, hub) } } diff --git a/coordinator/internal/mq/object.go b/coordinator/internal/mq/object.go index ad02731..4431e16 100644 --- a/coordinator/internal/mq/object.go +++ b/coordinator/internal/mq/object.go @@ -18,6 +18,41 @@ import ( coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" ) +func (svc *Service) GetObjects(msg *coormq.GetObjects) (*coormq.GetObjectsResp, *mq.CodeMessage) { + var ret []*cdssdk.Object + err := svc.db2.DoTx(func(tx db2.SQLContext) error { + // TODO 应该检查用户是否有每一个Object所在Package的权限 + objs, err := svc.db2.Object().BatchGet(tx, msg.ObjectIDs) + if err != nil { + return err + } + + objMp := make(map[cdssdk.ObjectID]cdssdk.Object) + for _, obj := range objs { + objMp[obj.ObjectID] = obj + } + + for _, objID := range msg.ObjectIDs { + o, ok := objMp[objID] + if ok { + ret = append(ret, &o) + } else { + ret = append(ret, nil) + } + } + + return err + }) + if err != nil { + logger.WithField("UserID", msg.UserID). 
+ Warn(err.Error()) + + return nil, mq.Failed(errorcode.OperationFailed, "get objects failed") + } + + return mq.ReplyOK(coormq.RespGetObjects(ret)) +} + func (svc *Service) GetObjectsByPath(msg *coormq.GetObjectsByPath) (*coormq.GetObjectsByPathResp, *mq.CodeMessage) { var objs []cdssdk.Object err := svc.db2.DoTx(func(tx db2.SQLContext) error { @@ -448,3 +483,130 @@ func (svc *Service) DeleteObjects(msg *coormq.DeleteObjects) (*coormq.DeleteObje return mq.ReplyOK(coormq.RespDeleteObjects()) } + +func (svc *Service) CloneObjects(msg *coormq.CloneObjects) (*coormq.CloneObjectsResp, *mq.CodeMessage) { + type CloningObject struct { + Cloning cdsapi.CloningObject + OrgIndex int + } + type PackageClonings struct { + PackageID cdssdk.PackageID + Clonings map[string]CloningObject + } + + // TODO 要检查用户是否有Object、Package的权限 + clonings := make(map[cdssdk.PackageID]*PackageClonings) + for i, cloning := range msg.Clonings { + pkg, ok := clonings[cloning.NewPackageID] + if !ok { + pkg = &PackageClonings{ + PackageID: cloning.NewPackageID, + Clonings: make(map[string]CloningObject), + } + clonings[cloning.NewPackageID] = pkg + } + pkg.Clonings[cloning.NewPath] = CloningObject{ + Cloning: cloning, + OrgIndex: i, + } + } + + ret := make([]*cdssdk.Object, len(msg.Clonings)) + err := svc.db2.DoTx(func(tx db2.SQLContext) error { + // 剔除掉新路径已经存在的对象 + for _, pkg := range clonings { + exists, err := svc.db2.Object().BatchGetByPackagePath(tx, pkg.PackageID, lo.Keys(pkg.Clonings)) + if err != nil { + return fmt.Errorf("batch getting objects by package path: %w", err) + } + + for _, obj := range exists { + delete(pkg.Clonings, obj.Path) + } + } + + // 删除目的Package不存在的对象 + newPkg, err := svc.db2.Package().BatchTestPackageID(tx, lo.Keys(clonings)) + if err != nil { + return fmt.Errorf("batch testing package id: %w", err) + } + for _, pkg := range clonings { + if !newPkg[pkg.PackageID] { + delete(clonings, pkg.PackageID) + } + } + + var avaiClonings []CloningObject + var avaiObjIDs 
[]cdssdk.ObjectID + for _, pkg := range clonings { + for _, cloning := range pkg.Clonings { + avaiClonings = append(avaiClonings, cloning) + avaiObjIDs = append(avaiObjIDs, cloning.Cloning.ObjectID) + } + } + + avaiDetails, err := svc.db2.Object().BatchGetDetails(tx, avaiObjIDs) + if err != nil { + return fmt.Errorf("batch getting object details: %w", err) + } + + avaiDetailsMap := make(map[cdssdk.ObjectID]stgmod.ObjectDetail) + for _, detail := range avaiDetails { + avaiDetailsMap[detail.Object.ObjectID] = detail + } + + oldAvaiClonings := avaiClonings + avaiClonings = nil + + var newObjs []cdssdk.Object + for _, cloning := range oldAvaiClonings { + // 进一步剔除原始对象不存在的情况 + detail, ok := avaiDetailsMap[cloning.Cloning.ObjectID] + if !ok { + continue + } + + avaiClonings = append(avaiClonings, cloning) + + newObj := detail.Object + newObj.ObjectID = 0 + newObj.Path = cloning.Cloning.NewPath + newObj.PackageID = cloning.Cloning.NewPackageID + newObjs = append(newObjs, newObj) + } + + // 先创建出新对象 + err = svc.db2.Object().BatchCreate(tx, &newObjs) + if err != nil { + return fmt.Errorf("batch creating objects: %w", err) + } + + // 创建了新对象就能拿到新对象ID,再创建新对象块 + var newBlks []stgmod.ObjectBlock + for i, cloning := range avaiClonings { + oldBlks := avaiDetailsMap[cloning.Cloning.ObjectID].Blocks + for _, blk := range oldBlks { + newBlk := blk + newBlk.ObjectID = newObjs[i].ObjectID + newBlks = append(newBlks, newBlk) + } + } + + err = svc.db2.ObjectBlock().BatchCreate(tx, newBlks) + if err != nil { + return fmt.Errorf("batch creating object blocks: %w", err) + } + + for i, cloning := range avaiClonings { + ret[cloning.OrgIndex] = &newObjs[i] + } + return nil + }) + + if err != nil { + logger.Warnf("cloning objects: %s", err.Error()) + return nil, mq.Failed(errorcode.OperationFailed, err.Error()) + } + + return mq.ReplyOK(coormq.RespCloneObjects(ret)) +} diff --git a/coordinator/internal/mq/package.go b/coordinator/internal/mq/package.go index 6ad4ba6..307a0cb 100644 --- 
a/coordinator/internal/mq/package.go +++ b/coordinator/internal/mq/package.go @@ -5,6 +5,7 @@ import ( "fmt" "sort" + stgmod "gitlink.org.cn/cloudream/storage/common/models" "gitlink.org.cn/cloudream/storage/common/pkgs/db2" "gorm.io/gorm" @@ -59,16 +60,11 @@ func (svc *Service) CreatePackage(msg *coormq.CreatePackage) (*coormq.CreatePack return fmt.Errorf("bucket is not avaiable to the user") } - pkgID, err := svc.db2.Package().Create(tx, msg.BucketID, msg.Name) + pkg, err = svc.db2.Package().Create(tx, msg.BucketID, msg.Name) if err != nil { return fmt.Errorf("creating package: %w", err) } - pkg, err = svc.db2.Package().GetByID(tx, pkgID) - if err != nil { - return fmt.Errorf("getting package by id: %w", err) - } - return nil }) if err != nil { @@ -127,35 +123,87 @@ func (svc *Service) DeletePackage(msg *coormq.DeletePackage) (*coormq.DeletePack return fmt.Errorf("package is not available to the user") } - err := svc.db2.Package().SoftDelete(tx, msg.PackageID) + err := svc.db2.Package().DeleteComplete(tx, msg.PackageID) + if err != nil { + return fmt.Errorf("deleting package: %w", err) + } + + return nil + }) + if err != nil { + logger.WithField("UserID", msg.UserID). + WithField("PackageID", msg.PackageID). 
+ Warnf(err.Error()) + return nil, mq.Failed(errorcode.OperationFailed, "delete package failed") + } + + return mq.ReplyOK(coormq.NewDeletePackageResp()) +} + +func (svc *Service) ClonePackage(msg *coormq.ClonePackage) (*coormq.ClonePackageResp, *mq.CodeMessage) { + var pkg cdssdk.Package + err := svc.db2.DoTx(func(tx db2.SQLContext) error { + var err error + + isAvai, _ := svc.db2.Bucket().IsAvailable(tx, msg.BucketID, msg.UserID) + if !isAvai { + return fmt.Errorf("bucket is not avaiable to the user") + } + + pkg, err = svc.db2.Package().Create(tx, msg.BucketID, msg.Name) + if err != nil { + return fmt.Errorf("creating package: %w", err) + } + + objs, err := svc.db2.Object().GetPackageObjects(tx, msg.PackageID) if err != nil { - return fmt.Errorf("soft delete package: %w", err) + return fmt.Errorf("getting package objects: %w", err) } - err = svc.db2.Package().DeleteUnused(tx, msg.PackageID) + objBlks, err := svc.db2.ObjectBlock().GetInPackageID(tx, msg.PackageID) if err != nil { - logger.WithField("UserID", msg.UserID). - WithField("PackageID", msg.PackageID). - Warnf("deleting unused package: %w", err.Error()) + return fmt.Errorf("getting object blocks: %w", err) + } + + clonedObjs := make([]cdssdk.Object, len(objs)) + for i, obj := range objs { + clonedObjs[i] = obj + clonedObjs[i].ObjectID = 0 + clonedObjs[i].PackageID = pkg.PackageID } - err = svc.db2.PackageAccessStat().DeleteByPackageID(tx, msg.PackageID) + err = svc.db2.Object().BatchCreate(tx, &clonedObjs) if err != nil { - logger.WithField("UserID", msg.UserID). - WithField("PackageID", msg.PackageID). 
- Warnf("deleting package access stat: %w", err.Error()) + return fmt.Errorf("batch creating objects: %w", err) + } + + oldToNew := make(map[cdssdk.ObjectID]cdssdk.ObjectID) + for i, obj := range clonedObjs { + oldToNew[objs[i].ObjectID] = obj.ObjectID + } + + clonedBlks := make([]stgmod.ObjectBlock, len(objBlks)) + for i, blk := range objBlks { + clonedBlks[i] = blk + clonedBlks[i].ObjectID = oldToNew[blk.ObjectID] + } + + err = svc.db2.ObjectBlock().BatchCreate(tx, clonedBlks) + if err != nil { + return fmt.Errorf("batch creating object blocks: %w", err) } return nil }) if err != nil { - logger.WithField("UserID", msg.UserID). - WithField("PackageID", msg.PackageID). - Warnf(err.Error()) - return nil, mq.Failed(errorcode.OperationFailed, "delete package failed") + if errors.Is(err, gorm.ErrDuplicatedKey) { + return nil, mq.Failed(errorcode.DataExists, "package already exists") + } + + return nil, mq.Failed(errorcode.OperationFailed, err.Error()) } - return mq.ReplyOK(coormq.NewDeletePackageResp()) + return mq.ReplyOK(coormq.RespClonePackage(pkg)) } func (svc *Service) GetPackageCachedStorages(msg *coormq.GetPackageCachedStorages) (*coormq.GetPackageCachedStoragesResp, *mq.CodeMessage) { @@ -212,26 +260,6 @@ func (svc *Service) GetPackageCachedStorages(msg *coormq.GetPackageCachedStorage return mq.ReplyOK(coormq.ReqGetPackageCachedStoragesResp(stgInfos, packageSize)) } -func (svc *Service) GetPackageLoadedStorages(msg *coormq.GetPackageLoadedStorages) (*coormq.GetPackageLoadedStoragesResp, *mq.CodeMessage) { - storages, err := svc.db2.StoragePackage().FindPackageStorages(svc.db2.DefCtx(), msg.PackageID) - if err != nil { - logger.WithField("PackageID", msg.PackageID). 
- Warnf("get storages by packageID failed, err: %s", err.Error()) - return nil, mq.Failed(errorcode.OperationFailed, "get storages by packageID failed") - } - - uniqueStgIDs := make(map[cdssdk.StorageID]bool) - var stgIDs []cdssdk.StorageID - for _, stg := range storages { - if !uniqueStgIDs[stg.StorageID] { - uniqueStgIDs[stg.StorageID] = true - stgIDs = append(stgIDs, stg.StorageID) - } - } - - return mq.ReplyOK(coormq.NewGetPackageLoadedStoragesResp(stgIDs)) -} - func (svc *Service) AddAccessStat(msg *coormq.AddAccessStat) { pkgIDs := make([]cdssdk.PackageID, len(msg.Entries)) objIDs := make([]cdssdk.ObjectID, len(msg.Entries)) diff --git a/coordinator/internal/mq/storage.go b/coordinator/internal/mq/storage.go index 61ad71c..fb61606 100644 --- a/coordinator/internal/mq/storage.go +++ b/coordinator/internal/mq/storage.go @@ -3,6 +3,7 @@ package mq import ( "errors" "fmt" + "time" "gitlink.org.cn/cloudream/common/consts/errorcode" "gitlink.org.cn/cloudream/common/pkgs/logger" @@ -104,35 +105,26 @@ func (svc *Service) GetStorageByName(msg *coormq.GetStorageByName) (*coormq.GetS func (svc *Service) StoragePackageLoaded(msg *coormq.StoragePackageLoaded) (*coormq.StoragePackageLoadedResp, *mq.CodeMessage) { err := svc.db2.DoTx(func(tx db2.SQLContext) error { - // 可以不用检查用户是否存在 - if ok, _ := svc.db2.Package().IsAvailable(tx, msg.UserID, msg.PackageID); !ok { - return fmt.Errorf("package is not available to user") - } - - if ok, _ := svc.db2.Storage().IsAvailable(tx, msg.UserID, msg.StorageID); !ok { - return fmt.Errorf("storage is not available to user") - } - - err := svc.db2.StoragePackage().CreateOrUpdate(tx, msg.StorageID, msg.PackageID, msg.UserID) + // TODO 权限检查 + exists, err := svc.db2.Object().BatchTestObjectID(tx, msg.PinnedObjects) if err != nil { - return fmt.Errorf("creating storage package: %w", err) + return fmt.Errorf("testing object id: %w", err) } - stg, err := svc.db2.Storage().GetByID(tx, msg.StorageID) - if err != nil { - return fmt.Errorf("getting 
storage: %w", err) + pinned := make([]cdssdk.PinnedObject, 0, len(msg.PinnedObjects)) + for _, obj := range msg.PinnedObjects { + if exists[obj] { + pinned = append(pinned, cdssdk.PinnedObject{ + StorageID: msg.StorageID, + ObjectID: obj, + CreateTime: time.Now(), + }) + } } - err = svc.db2.PinnedObject().CreateFromPackage(tx, msg.PackageID, stg.StorageID) + err = svc.db2.PinnedObject().BatchTryCreate(tx, pinned) if err != nil { - return fmt.Errorf("creating pinned object from package: %w", err) - } - - if len(msg.PinnedBlocks) > 0 { - err = svc.db2.ObjectBlock().BatchCreate(tx, msg.PinnedBlocks) - if err != nil { - return fmt.Errorf("batch creating object block: %w", err) - } + return fmt.Errorf("batch creating pinned object: %w", err) } return nil @@ -145,5 +137,5 @@ func (svc *Service) StoragePackageLoaded(msg *coormq.StoragePackageLoaded) (*coo return nil, mq.Failed(errorcode.OperationFailed, "user load package to storage failed") } - return mq.ReplyOK(coormq.NewStoragePackageLoadedResp()) + return mq.ReplyOK(coormq.RespStoragePackageLoaded()) } diff --git a/coordinator/internal/mq/temp.go b/coordinator/internal/mq/temp.go index eee531e..513a536 100644 --- a/coordinator/internal/mq/temp.go +++ b/coordinator/internal/mq/temp.go @@ -26,7 +26,7 @@ func (svc *Service) GetDatabaseAll(msg *coormq.GetDatabaseAll) (*coormq.GetDatab } for _, bkt := range bkts { - ps, err := svc.db2.Package().GetBucketPackages(tx, msg.UserID, bkt.BucketID) + ps, err := svc.db2.Package().GetUserBucketPackages(tx, msg.UserID, bkt.BucketID) if err != nil { return fmt.Errorf("get bucket packages: %w", err) } diff --git a/go.mod b/go.mod index 4e4e0fd..d2dafca 100644 --- a/go.mod +++ b/go.mod @@ -7,14 +7,13 @@ toolchain go1.23.2 replace gitlink.org.cn/cloudream/common v0.0.0 => ../common require ( - github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible github.com/aws/aws-sdk-go-v2 v1.32.6 github.com/aws/aws-sdk-go-v2/credentials v1.17.47 github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0 
github.com/gin-gonic/gin v1.7.7 github.com/go-sql-driver/mysql v1.8.1 github.com/hashicorp/golang-lru/v2 v2.0.5 - github.com/huaweicloud/huaweicloud-sdk-go-obs v3.24.9+incompatible + github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.131 github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf github.com/jedib0t/go-pretty/v6 v6.4.7 github.com/klauspost/reedsolomon v1.11.8 @@ -23,8 +22,8 @@ require ( github.com/smartystreets/goconvey v1.8.1 github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.19.0 - github.com/tencentyun/cos-go-sdk-v5 v0.7.56 gitlink.org.cn/cloudream/common v0.0.0 + golang.org/x/sync v0.6.0 google.golang.org/grpc v1.62.1 google.golang.org/protobuf v1.33.0 gorm.io/datatypes v1.2.5 @@ -42,15 +41,12 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 // indirect github.com/aws/smithy-go v1.22.1 // indirect - github.com/clbanning/mxj v1.8.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/google/go-querystring v1.0.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/magiconair/properties v1.8.7 // indirect - github.com/mozillazg/go-httpheader v0.2.1 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect @@ -58,7 +54,15 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - golang.org/x/time v0.7.0 // indirect + github.com/tjfoc/gmsm v1.4.1 // indirect + go.mongodb.org/mongo-driver v1.12.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.20.0 
// indirect + golang.org/x/text v0.15.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect @@ -79,7 +83,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect + github.com/json-iterator/go v1.1.12 github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/leodido/go-urn v1.2.4 // indirect @@ -101,13 +105,5 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.9.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.22.0 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sync v0.6.0 - golang.org/x/sys v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect gorm.io/driver/mysql v1.5.7 ) diff --git a/go.sum b/go.sum index ac97be9..0e8704c 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,7 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= -github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g= -github.com/aliyun/aliyun-oss-go-sdk 
v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/antonfisher/nested-logrus-formatter v1.3.1 h1:NFJIr+pzwv5QLHTPyKz9UMEoHck02Q9L0FP13b/xSbQ= github.com/antonfisher/nested-logrus-formatter v1.3.1/go.mod h1:6WTfyWFkBc9+zyBaKIqRrg/KwMqBbodBjgbHjDz7zjA= github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= @@ -31,8 +30,9 @@ github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= -github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -42,6 +42,9 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -65,22 +68,34 @@ github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqw github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= @@ -94,8 +109,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvH github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/huaweicloud/huaweicloud-sdk-go-obs v3.24.9+incompatible h1:XQVXdk+WAJ4fSNB6mMRuYNvFWou7BZs6SZB925hPrnk= -github.com/huaweicloud/huaweicloud-sdk-go-obs v3.24.9+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s= +github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.131 h1:34E2+lzM/yi0GlYAEQEUuf4/3mAoAadA+7oaq9q3Mys= +github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.131/go.mod h1:JWz2ujO9X3oU5wb6kXp+DpR2UuDj2SldDbX8T0FSuhI= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -123,6 +138,7 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/reedsolomon v1.11.8 
h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY= @@ -148,7 +164,6 @@ github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOj github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA= github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -157,8 +172,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= -github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -167,6 +181,7 @@ github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdL github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= @@ -215,16 +230,19 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= -github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0= -github.com/tencentyun/cos-go-sdk-v5 v0.7.56 h1:fOA3l3XbVN2kTjQKYPvhDms0Fq8zDcinO3boXodFaLw= -github.com/tencentyun/cos-go-sdk-v5 v0.7.56/go.mod h1:8+hG+mQMuRP/OIS9d83syAvXvrMj9HhkND6Q1fLghw0= +github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= +github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod 
h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XDc= github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis= go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c= @@ -233,6 +251,8 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarin go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg= go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw= +go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= +go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= @@ -244,61 +264,121 @@ go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ= google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI= google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf 
v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= @@ -328,3 +408,5 @@ gorm.io/driver/sqlserver v1.5.4/go.mod h1:+frZ/qYmuna11zHPlh5oc2O6ZA/lS88Keb0XSH gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg= gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/scanner/internal/config/config.go b/scanner/internal/config/config.go index 281db97..46cd5f2 100644 --- a/scanner/internal/config/config.go +++ b/scanner/internal/config/config.go @@ -3,9 +3,9 @@ package config import ( "gitlink.org.cn/cloudream/common/pkgs/distlock" log "gitlink.org.cn/cloudream/common/pkgs/logger" + "gitlink.org.cn/cloudream/common/pkgs/mq" c "gitlink.org.cn/cloudream/common/utils/config" db "gitlink.org.cn/cloudream/storage/common/pkgs/db2/config" - stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq" ) type Config struct { @@ -14,7 +14,7 @@ type Config struct { HubUnavailableSeconds int `json:"hubUnavailableSeconds"` // 如果节点上次上报时间超过这个值,则认为节点已经不可用 Logger log.Config `json:"logger"` DB db.Config `json:"db"` - RabbitMQ stgmq.Config `json:"rabbitMQ"` + RabbitMQ mq.Config `json:"rabbitMQ"` DistLock distlock.Config `json:"distlock"` } diff --git a/scanner/internal/event/agent_check_storage.go b/scanner/internal/event/agent_check_storage.go deleted file mode 100644 index 1f75ea0..0000000 --- a/scanner/internal/event/agent_check_storage.go +++ /dev/null @@ -1,136 +0,0 @@ -package 
event - -import ( - "database/sql" - "time" - - "gitlink.org.cn/cloudream/common/pkgs/logger" - "gitlink.org.cn/cloudream/common/pkgs/mq" - cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" - "gitlink.org.cn/cloudream/storage/common/consts" - stgglb "gitlink.org.cn/cloudream/storage/common/globals" - "gitlink.org.cn/cloudream/storage/common/pkgs/db2" - "gitlink.org.cn/cloudream/storage/common/pkgs/db2/model" - agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" - scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event" -) - -type AgentCheckStorage struct { - *scevt.AgentCheckStorage -} - -func NewAgentCheckStorage(evt *scevt.AgentCheckStorage) *AgentCheckStorage { - return &AgentCheckStorage{ - AgentCheckStorage: evt, - } -} - -func (t *AgentCheckStorage) TryMerge(other Event) bool { - event, ok := other.(*AgentCheckStorage) - if !ok { - return false - } - - if t.StorageID != event.StorageID { - return false - } - - return true -} - -func (t *AgentCheckStorage) Execute(execCtx ExecuteContext) { - log := logger.WithType[AgentCheckStorage]("Event") - log.Debugf("begin with %v", logger.FormatStruct(t.AgentCheckStorage)) - defer log.Debugf("end") - - // 读取数据的地方就不加锁了,因为check任务会反复执行,单次失败问题不大 - - stg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.DefCtx(), t.StorageID) - if err != nil { - if err != sql.ErrNoRows { - log.WithField("StorageID", t.StorageID).Warnf("get storage failed, err: %s", err.Error()) - } - return - } - - hub, err := execCtx.Args.DB.Hub().GetByID(execCtx.Args.DB.DefCtx(), stg.MasterHub) - if err != nil { - if err != sql.ErrNoRows { - log.WithField("StorageID", t.StorageID).Warnf("get storage hub failed, err: %s", err.Error()) - } - return - } - - if hub.State != consts.HubStateNormal { - return - } - - agtCli, err := stgglb.AgentMQPool.Acquire(stg.MasterHub) - if err != nil { - log.WithField("MasterHub", stg.MasterHub).Warnf("create agent client failed, err: %s", err.Error()) - return - } - defer 
stgglb.AgentMQPool.Release(agtCli) - - checkResp, err := agtCli.StorageCheck(agtmq.NewStorageCheck(stg.StorageID), mq.RequestOption{Timeout: time.Minute}) - if err != nil { - log.WithField("MasterHub", stg.MasterHub).Warnf("checking storage: %s", err.Error()) - return - } - realPkgs := make(map[cdssdk.UserID]map[cdssdk.PackageID]bool) - for _, pkg := range checkResp.Packages { - pkgs, ok := realPkgs[pkg.UserID] - if !ok { - pkgs = make(map[cdssdk.PackageID]bool) - realPkgs[pkg.UserID] = pkgs - } - - pkgs[pkg.PackageID] = true - } - - execCtx.Args.DB.DoTx(func(tx db2.SQLContext) error { - packages, err := execCtx.Args.DB.StoragePackage().GetAllByStorageID(tx, t.StorageID) - if err != nil { - log.Warnf("getting storage package: %s", err.Error()) - return nil - } - - var rms []model.StoragePackage - for _, pkg := range packages { - pkgMap, ok := realPkgs[pkg.UserID] - if !ok { - rms = append(rms, pkg) - continue - } - - if !pkgMap[pkg.PackageID] { - rms = append(rms, pkg) - } - } - - rmdPkgIDs := make(map[cdssdk.PackageID]bool) - for _, rm := range rms { - err := execCtx.Args.DB.StoragePackage().Delete(tx, rm.StorageID, rm.PackageID, rm.UserID) - if err != nil { - log.Warnf("deleting storage package: %s", err.Error()) - continue - } - rmdPkgIDs[rm.PackageID] = true - } - - // 彻底删除已经是Deleted状态,且不被再引用的Package - for pkgID := range rmdPkgIDs { - err := execCtx.Args.DB.Package().DeleteUnused(tx, pkgID) - if err != nil { - log.Warnf("deleting unused package: %s", err.Error()) - continue - } - } - - return nil - }) -} - -func init() { - RegisterMessageConvertor(NewAgentCheckStorage) -} diff --git a/scanner/internal/event/agent_storage_gc.go b/scanner/internal/event/agent_storage_gc.go deleted file mode 100644 index e080264..0000000 --- a/scanner/internal/event/agent_storage_gc.go +++ /dev/null @@ -1,86 +0,0 @@ -package event - -import ( - "time" - - "gitlink.org.cn/cloudream/common/pkgs/logger" - "gitlink.org.cn/cloudream/common/pkgs/mq" - stgglb 
"gitlink.org.cn/cloudream/storage/common/globals" - "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" - - agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" - scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event" -) - -type AgentStorageGC struct { - *scevt.AgentStorageGC -} - -func NewAgentStorageGC(evt *scevt.AgentStorageGC) *AgentStorageGC { - return &AgentStorageGC{ - AgentStorageGC: evt, - } -} - -func (t *AgentStorageGC) TryMerge(other Event) bool { - event, ok := other.(*AgentStorageGC) - if !ok { - return false - } - - if event.StorageID != t.StorageID { - return false - } - - return true -} - -func (t *AgentStorageGC) Execute(execCtx ExecuteContext) { - log := logger.WithType[AgentStorageGC]("Event") - startTime := time.Now() - log.Debugf("begin with %v", logger.FormatStruct(t.AgentStorageGC)) - defer func() { - log.Debugf("end, time: %v", time.Since(startTime)) - }() - - // TODO unavailable的节点需不需要发送任务? - - mutex, err := reqbuilder.NewBuilder(). - // 进行GC - Storage().GC(t.StorageID). 
- MutexLock(execCtx.Args.DistLock) - if err != nil { - log.Warnf("acquire locks failed, err: %s", err.Error()) - return - } - defer mutex.Unlock() - - getStg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.DefCtx(), t.StorageID) - if err != nil { - log.WithField("StorageID", t.StorageID).Warnf("getting storage: %s", err.Error()) - return - } - - stgPkgs, err := execCtx.Args.DB.StoragePackage().GetAllByStorageID(execCtx.Args.DB.DefCtx(), t.StorageID) - if err != nil { - log.WithField("StorageID", t.StorageID).Warnf("getting storage packages: %s", err.Error()) - return - } - - agtCli, err := stgglb.AgentMQPool.Acquire(getStg.MasterHub) - if err != nil { - log.WithField("MasterHub", getStg.MasterHub).Warnf("create agent client failed, err: %s", err.Error()) - return - } - defer stgglb.AgentMQPool.Release(agtCli) - - _, err = agtCli.StorageGC(agtmq.ReqStorageGC(t.StorageID, stgPkgs), mq.RequestOption{Timeout: time.Minute}) - if err != nil { - log.WithField("StorageID", t.StorageID).Warnf("storage gc: %s", err.Error()) - return - } -} - -func init() { - RegisterMessageConvertor(NewAgentStorageGC) -} diff --git a/scanner/internal/event/check_package.go b/scanner/internal/event/check_package.go deleted file mode 100644 index e9004f6..0000000 --- a/scanner/internal/event/check_package.go +++ /dev/null @@ -1,44 +0,0 @@ -package event - -import ( - "github.com/samber/lo" - "gitlink.org.cn/cloudream/common/pkgs/logger" - scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event" -) - -type CheckPackage struct { - *scevt.CheckPackage -} - -func NewCheckPackage(evt *scevt.CheckPackage) *CheckPackage { - return &CheckPackage{ - CheckPackage: evt, - } -} - -func (t *CheckPackage) TryMerge(other Event) bool { - event, ok := other.(*CheckPackage) - if !ok { - return false - } - - t.PackageIDs = lo.Union(t.PackageIDs, event.PackageIDs) - return true -} - -func (t *CheckPackage) Execute(execCtx ExecuteContext) { - log := logger.WithType[CheckPackage]("Event") - 
log.Debugf("begin with %v", logger.FormatStruct(t.CheckPackage)) - defer log.Debugf("end") - - for _, objID := range t.PackageIDs { - err := execCtx.Args.DB.Package().DeleteUnused(execCtx.Args.DB.DefCtx(), objID) - if err != nil { - log.WithField("PackageID", objID).Warnf("delete unused package failed, err: %s", err.Error()) - } - } -} - -func init() { - RegisterMessageConvertor(NewCheckPackage) -} diff --git a/scanner/internal/event/check_package_redundancy.go b/scanner/internal/event/check_package_redundancy.go index a5e994a..47e149a 100644 --- a/scanner/internal/event/check_package_redundancy.go +++ b/scanner/internal/event/check_package_redundancy.go @@ -10,6 +10,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/logger" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" + "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/common/utils/sort2" stgglb "gitlink.org.cn/cloudream/storage/common/globals" stgmod "gitlink.org.cn/cloudream/storage/common/models" @@ -462,7 +463,7 @@ func (t *CheckPackageRedundancy) noneToRep(ctx ExecuteContext, obj stgmod.Object uploadStgs = lo.UniqBy(uploadStgs, func(item *StorageLoadInfo) cdssdk.StorageID { return item.Storage.Storage.StorageID }) ft := ioswitch2.NewFromTo() - ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.Storage.MasterHub, srcStg.Storage.Storage, ioswitch2.RawStream())) + ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.Storage.MasterHub, srcStg.Storage, ioswitch2.RawStream())) for i, stg := range uploadStgs { ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage, ioswitch2.RawStream(), fmt.Sprintf("%d", i))) } @@ -513,9 +514,9 @@ func (t *CheckPackageRedundancy) noneToEC(ctx ExecuteContext, obj stgmod.ObjectD ft := ioswitch2.NewFromTo() ft.ECParam = red - ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.Storage.MasterHub, srcStg.Storage.Storage, 
ioswitch2.RawStream())) + ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.Storage.MasterHub, srcStg.Storage, ioswitch2.RawStream())) for i := 0; i < red.N; i++ { - ft.AddTo(ioswitch2.NewToShardStore(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage, ioswitch2.ECSrteam(i), fmt.Sprintf("%d", i))) + ft.AddTo(ioswitch2.NewToShardStore(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage, ioswitch2.ECStream(i), fmt.Sprintf("%d", i))) } plans := exec.NewPlanBuilder() err := parser.Parse(ft, plans) @@ -613,7 +614,7 @@ func (t *CheckPackageRedundancy) noneToSeg(ctx ExecuteContext, obj stgmod.Object ft := ioswitch2.NewFromTo() ft.SegmentParam = red - ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.Storage.MasterHub, srcStg.Storage.Storage, ioswitch2.RawStream())) + ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.Storage.MasterHub, srcStg.Storage, ioswitch2.RawStream())) for i, stg := range uploadStgs { ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage, ioswitch2.SegmentStream(i), fmt.Sprintf("%d", i))) } @@ -666,7 +667,7 @@ func (t *CheckPackageRedundancy) repToRep(ctx ExecuteContext, obj stgmod.ObjectD uploadStgs = lo.UniqBy(uploadStgs, func(item *StorageLoadInfo) cdssdk.StorageID { return item.Storage.Storage.StorageID }) ft := ioswitch2.NewFromTo() - ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.Storage.MasterHub, srcStg.Storage.Storage, ioswitch2.RawStream())) + ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.Storage.MasterHub, srcStg.Storage, ioswitch2.RawStream())) for i, stg := range uploadStgs { ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage, ioswitch2.RawStream(), fmt.Sprintf("%d", i))) } @@ -745,11 +746,11 @@ func (t *CheckPackageRedundancy) ecToRep(ctx ExecuteContext, obj stgmod.ObjectDe ft.ECParam = srcRed for i2, block := range chosenBlocks { - ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, 
*chosenBlockStg[i2].MasterHub, chosenBlockStg[i2].Storage, ioswitch2.ECSrteam(block.Index))) + ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i2].MasterHub, chosenBlockStg[i2], ioswitch2.ECStream(block.Index))) } len := obj.Object.Size - ft.AddTo(ioswitch2.NewToShardStoreWithRange(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage, ioswitch2.RawStream(), fmt.Sprintf("%d", i), exec.Range{ + ft.AddTo(ioswitch2.NewToShardStoreWithRange(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage, ioswitch2.RawStream(), fmt.Sprintf("%d", i), math2.Range{ Offset: 0, Length: &len, })) @@ -841,11 +842,11 @@ func (t *CheckPackageRedundancy) ecToEC(ctx ExecuteContext, obj stgmod.ObjectDet ft := ioswitch2.NewFromTo() ft.ECParam = srcRed for i2, block := range chosenBlocks { - ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i2].MasterHub, chosenBlockStg[i2].Storage, ioswitch2.ECSrteam(block.Index))) + ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i2].MasterHub, chosenBlockStg[i2], ioswitch2.ECStream(block.Index))) } // 输出只需要自己要保存的那一块 - ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage, ioswitch2.ECSrteam(i), fmt.Sprintf("%d", i))) + ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage, ioswitch2.ECStream(i), fmt.Sprintf("%d", i))) err := parser.Parse(ft, planBlder) if err != nil { diff --git a/scanner/internal/event/clean_pinned.go b/scanner/internal/event/clean_pinned.go index 5d5bfd2..4af4c90 100644 --- a/scanner/internal/event/clean_pinned.go +++ b/scanner/internal/event/clean_pinned.go @@ -741,7 +741,7 @@ func (t *CleanPinned) makePlansForRepObject(allStgInfos map[cdssdk.StorageID]*st ft := ioswitch2.NewFromTo() fromStg := allStgInfos[obj.Blocks[0].StorageID] - ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *fromStg.MasterHub, fromStg.Storage, ioswitch2.RawStream())) + ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *fromStg.MasterHub, 
*fromStg, ioswitch2.RawStream())) toStg := allStgInfos[solu.blockList[i].StorageID] ft.AddTo(ioswitch2.NewToShardStore(*toStg.MasterHub, *toStg, ioswitch2.RawStream(), fmt.Sprintf("%d.0", obj.Object.ObjectID))) @@ -799,10 +799,10 @@ func (t *CleanPinned) makePlansForECObject(allStgInfos map[cdssdk.StorageID]*stg for id, idxs := range reconstrct { ft := ioswitch2.NewFromTo() ft.ECParam = ecRed - ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *allStgInfos[id].MasterHub, allStgInfos[id].Storage, ioswitch2.RawStream())) + ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *allStgInfos[id].MasterHub, *allStgInfos[id], ioswitch2.RawStream())) for _, i := range *idxs { - ft.AddTo(ioswitch2.NewToShardStore(*allStgInfos[id].MasterHub, *allStgInfos[id], ioswitch2.ECSrteam(i), fmt.Sprintf("%d.%d", obj.Object.ObjectID, i))) + ft.AddTo(ioswitch2.NewToShardStore(*allStgInfos[id].MasterHub, *allStgInfos[id], ioswitch2.ECStream(i), fmt.Sprintf("%d.%d", obj.Object.ObjectID, i))) } err := parser.Parse(ft, planBld) diff --git a/scanner/internal/event/event.go b/scanner/internal/event/event.go index a5a3567..900d404 100644 --- a/scanner/internal/event/event.go +++ b/scanner/internal/event/event.go @@ -9,13 +9,13 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/typedispatcher" "gitlink.org.cn/cloudream/storage/common/pkgs/db2" scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" ) type ExecuteArgs struct { DB *db2.DB DistLock *distlock.Service - StgMgr *svcmgr.Manager + StgMgr *agtpool.AgentPool } type Executor = event.Executor[ExecuteArgs] @@ -26,11 +26,11 @@ type Event = event.Event[ExecuteArgs] type ExecuteOption = event.ExecuteOption -func NewExecutor(db *db2.DB, distLock *distlock.Service, stgMgr *svcmgr.Manager) Executor { +func NewExecutor(db *db2.DB, distLock *distlock.Service, stgAgts *agtpool.AgentPool) 
Executor { return event.NewExecutor(ExecuteArgs{ DB: db, DistLock: distLock, - StgMgr: stgMgr, + StgMgr: stgAgts, }) } diff --git a/scanner/internal/tickevent/batch_check_all_package.go b/scanner/internal/tickevent/batch_check_all_package.go deleted file mode 100644 index 5f79339..0000000 --- a/scanner/internal/tickevent/batch_check_all_package.go +++ /dev/null @@ -1,38 +0,0 @@ -package tickevent - -import ( - "gitlink.org.cn/cloudream/common/pkgs/logger" - scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event" - "gitlink.org.cn/cloudream/storage/scanner/internal/event" -) - -type BatchCheckAllPackage struct { - lastCheckStart int -} - -func NewBatchCheckAllPackage() *BatchCheckAllPackage { - return &BatchCheckAllPackage{} -} - -func (e *BatchCheckAllPackage) Execute(ctx ExecuteContext) { - log := logger.WithType[BatchCheckAllPackage]("TickEvent") - log.Debugf("begin") - defer log.Debugf("end") - - packageIDs, err := ctx.Args.DB.Package().BatchGetAllPackageIDs(ctx.Args.DB.DefCtx(), e.lastCheckStart, CheckPackageBatchSize) - if err != nil { - log.Warnf("batch get package ids failed, err: %s", err.Error()) - return - } - - ctx.Args.EventExecutor.Post(event.NewCheckPackage(scevt.NewCheckPackage(packageIDs))) - - // 如果结果的长度小于预期的长度,则认为已经查询了所有,下次从头再来 - if len(packageIDs) < CheckPackageBatchSize { - e.lastCheckStart = 0 - log.Debugf("all package checked, next time will start check at 0") - - } else { - e.lastCheckStart += CheckPackageBatchSize - } -} diff --git a/scanner/internal/tickevent/batch_check_all_storage.go b/scanner/internal/tickevent/batch_check_all_storage.go deleted file mode 100644 index 0745bf2..0000000 --- a/scanner/internal/tickevent/batch_check_all_storage.go +++ /dev/null @@ -1,43 +0,0 @@ -package tickevent - -import ( - "gitlink.org.cn/cloudream/common/pkgs/logger" - scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event" - "gitlink.org.cn/cloudream/storage/scanner/internal/event" -) - -const CHECK_STORAGE_BATCH_SIZE = 5 - 
-type BatchCheckAllStorage struct { - lastCheckStart int -} - -func NewBatchCheckAllStorage() *BatchCheckAllStorage { - return &BatchCheckAllStorage{} -} - -func (e *BatchCheckAllStorage) Execute(ctx ExecuteContext) { - log := logger.WithType[BatchCheckAllStorage]("TickEvent") - log.Debugf("begin") - defer log.Debugf("end") - - storageIDs, err := ctx.Args.DB.Storage().BatchGetAllStorageIDs(ctx.Args.DB.DefCtx(), e.lastCheckStart, CHECK_STORAGE_BATCH_SIZE) - if err != nil { - log.Warnf("batch get storage ids failed, err: %s", err.Error()) - return - } - - for _, stgID := range storageIDs { - // 设置nil代表进行全量检查 - ctx.Args.EventExecutor.Post(event.NewAgentCheckStorage(scevt.NewAgentCheckStorage(stgID))) - } - - // 如果结果的长度小于预期的长度,则认为已经查询了所有,下次从头再来 - if len(storageIDs) < CHECK_STORAGE_BATCH_SIZE { - e.lastCheckStart = 0 - log.Debugf("all storage checked, next time will start check at 0") - - } else { - e.lastCheckStart += CHECK_STORAGE_BATCH_SIZE - } -} diff --git a/scanner/internal/tickevent/storage_gc.go b/scanner/internal/tickevent/storage_gc.go index 4b4b9e7..35e7b1b 100644 --- a/scanner/internal/tickevent/storage_gc.go +++ b/scanner/internal/tickevent/storage_gc.go @@ -41,6 +41,5 @@ func (e *StorageGC) Execute(ctx ExecuteContext) { } ctx.Args.EventExecutor.Post(event.NewAgentShardStoreGC(scevt.NewAgentShardStoreGC(e.storageIDs[0]))) - ctx.Args.EventExecutor.Post(event.NewAgentStorageGC(scevt.NewAgentStorageGC(e.storageIDs[0]))) e.storageIDs = e.storageIDs[1:] } diff --git a/scanner/main.go b/scanner/main.go index b227ee7..c9de497 100644 --- a/scanner/main.go +++ b/scanner/main.go @@ -10,7 +10,7 @@ import ( "gitlink.org.cn/cloudream/storage/common/pkgs/distlock" agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" scmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner" - "gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr" + "gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool" 
"gitlink.org.cn/cloudream/storage/scanner/internal/config" "gitlink.org.cn/cloudream/storage/scanner/internal/event" "gitlink.org.cn/cloudream/storage/scanner/internal/mq" @@ -35,7 +35,7 @@ func main() { logger.Fatalf("new db failed, err: %s", err.Error()) } - stgglb.InitMQPool(&config.Cfg().RabbitMQ) + stgglb.InitMQPool(config.Cfg().RabbitMQ) stgglb.InitAgentRPCPool(&agtrpc.PoolConfig{}) @@ -48,13 +48,13 @@ func main() { go serveDistLock(distlockSvc) // 启动存储服务管理器 - stgMgr := svcmgr.NewManager() + stgAgts := agtpool.NewPool() // 启动事件执行器 - eventExecutor := event.NewExecutor(db, distlockSvc, stgMgr) + eventExecutor := event.NewExecutor(db, distlockSvc, stgAgts) go serveEventExecutor(&eventExecutor) - agtSvr, err := scmq.NewServer(mq.NewService(&eventExecutor), &config.Cfg().RabbitMQ) + agtSvr, err := scmq.NewServer(mq.NewService(&eventExecutor), config.Cfg().RabbitMQ) if err != nil { logger.Fatalf("new agent server failed, err: %s", err.Error()) } @@ -141,14 +141,8 @@ func startTickEvent(tickExecutor *tickevent.Executor) { tickExecutor.Start(tickevent.NewBatchAllAgentCheckShardStore(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000}) - tickExecutor.Start(tickevent.NewBatchCheckAllPackage(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000}) - tickExecutor.Start(tickevent.NewStorageGC(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000}) - // tickExecutor.Start(tickevent.NewBatchCheckAllRepCount(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000}) - - tickExecutor.Start(tickevent.NewBatchCheckAllStorage(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000}) - tickExecutor.Start(tickevent.NewCheckAgentState(), 5*60*1000, tickevent.StartOption{RandomStartDelayMs: 60 * 1000}) tickExecutor.Start(tickevent.NewBatchCheckPackageRedundancy(), interval, tickevent.StartOption{RandomStartDelayMs: 20 * 60 * 1000})