// Package uploader selects target user spaces and uploads package data to them.
package uploader

import (
	"context"
	"fmt"
	"io"
	"math"
	"math/rand"
	"time"

	"github.com/samber/lo"
	"gitlink.org.cn/cloudream/common/utils/lo2"
	"gitlink.org.cn/cloudream/common/utils/sort2"
	"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
	"gitlink.org.cn/cloudream/jcs-pub/client/internal/metacache"
	"gitlink.org.cn/cloudream/jcs-pub/client/internal/publock"
	stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/connectivity"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

// Uploader coordinates uploading package data into user spaces: it picks a
// target space, holds the distributed lock while writing, and records the
// resulting blocks in the database.
type Uploader struct {
	pubLock      *publock.PubLock
	connectivity *connectivity.Collector
	stgPool      *pool.Pool
	spaceMeta    *metacache.UserSpaceMeta
	db           *db.DB
}

// NewUploader wires an Uploader from its collaborators.
func NewUploader(pubLock *publock.PubLock, connectivity *connectivity.Collector, stgPool *pool.Pool, spaceMeta *metacache.UserSpaceMeta, db *db.DB) *Uploader {
	return &Uploader{
		pubLock:      pubLock,
		connectivity: connectivity,
		stgPool:      stgPool,
		spaceMeta:    spaceMeta,
		db:           db,
	}
}

// collectUploadSpaces loads all known user spaces and builds the candidate
// list of upload targets. Outside standalone mode, the measured latency to
// each space's recommended hub is attached (unmeasured spaces get MaxInt64 so
// they sort last). It returns both the raw details (for lookups by ID) and
// the candidate list.
func (u *Uploader) collectUploadSpaces() ([]*jcstypes.UserSpaceDetail, []UploadSpaceInfo, error) {
	spaceIDs, err := u.db.UserSpace().GetAllIDs(u.db.DefCtx())
	if err != nil {
		return nil, nil, fmt.Errorf("getting user space ids: %w", err)
	}

	spaceDetails := u.spaceMeta.GetMany(spaceIDs)
	spaceDetails = lo2.RemoveAllDefault(spaceDetails)

	var candidates []UploadSpaceInfo
	if !stgglb.StandaloneMode {
		cons := u.connectivity.GetAll()
		for _, space := range spaceDetails {
			latency := time.Duration(math.MaxInt64)
			if space.RecommendHub != nil {
				if con, ok := cons[space.RecommendHub.HubID]; ok && con.Latency != nil {
					latency = *con.Latency
				}
			}
			candidates = append(candidates, UploadSpaceInfo{
				Space:          *space,
				Delay:          latency,
				IsSameLocation: space.UserSpace.Storage.GetLocation() == stgglb.Local.Location,
			})
		}
	} else {
		// Standalone mode has no connectivity data; Delay stays zero.
		for _, space := range spaceDetails {
			candidates = append(candidates, UploadSpaceInfo{
				Space:          *space,
				IsSameLocation: space.UserSpace.Storage.GetLocation() == stgglb.Local.Location,
			})
		}
	}
	return spaceDetails, candidates, nil
}

// BeginUpdate starts an update session on an existing package. It chooses an
// upload target (honoring affinity), resolves the copy-to spaces, and locks
// the target space so the uploaded replica is not cleaned up mid-upload.
// The caller owns the returned UpdateUploader and its lock.
func (u *Uploader) BeginUpdate(pkgID jcstypes.PackageID, affinity jcstypes.UserSpaceID, copyTo []jcstypes.UserSpaceID, copyToPath []jcstypes.JPath) (*UpdateUploader, error) {
	spaceDetails, uploadSpaces, err := u.collectUploadSpaces()
	if err != nil {
		return nil, err
	}
	if len(uploadSpaces) == 0 {
		return nil, fmt.Errorf("user no available userspaces")
	}

	copyToSpaces := make([]jcstypes.UserSpaceDetail, len(copyTo))
	for i, spaceID := range copyTo {
		space, ok := lo.Find(spaceDetails, func(space *jcstypes.UserSpaceDetail) bool {
			return space.UserSpace.UserSpaceID == spaceID
		})
		if !ok {
			return nil, fmt.Errorf("user space %v not found", spaceID)
		}
		copyToSpaces[i] = *space
	}

	target := u.chooseUploadStorage(uploadSpaces, affinity)

	// Keep the target space busy so the uploaded replica is not cleaned up
	// while the update session is in progress.
	pubLock, err := u.pubLock.BeginMutex().UserSpace().Buzy(target.Space.UserSpace.UserSpaceID).End().Lock()
	if err != nil {
		return nil, fmt.Errorf("acquire lock: %w", err)
	}

	return &UpdateUploader{
		uploader:     u,
		pkgID:        pkgID,
		targetSpace:  target.Space,
		pubLock:      pubLock,
		copyToSpaces: copyToSpaces,
		copyToPath:   copyToPath,
	}, nil
}

// chooseUploadStorage picks the space to upload a file to:
//  1. the space the caller expressed affinity for, if present;
//  2. otherwise a random space in the same location as this client;
//  3. otherwise the space with the lowest measured latency.
func (u *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity jcstypes.UserSpaceID) UploadSpaceInfo {
	if spaceAffinity > 0 {
		aff, ok := lo.Find(spaces, func(space UploadSpaceInfo) bool {
			return space.Space.UserSpace.UserSpaceID == spaceAffinity
		})
		if ok {
			return aff
		}
	}

	sameLocationStorages := lo.Filter(spaces, func(e UploadSpaceInfo, i int) bool { return e.IsSameLocation })
	if len(sameLocationStorages) > 0 {
		return sameLocationStorages[rand.Intn(len(sameLocationStorages))]
	}

	// Fall back to the lowest-latency space.
	spaces = sort2.Sort(spaces, func(e1, e2 UploadSpaceInfo) int { return sort2.Cmp(e1.Delay, e2.Delay) })
	return spaces[0]
}

// BeginCreateUpload creates a new package in the given bucket and starts an
// upload session that writes to every space in copyTo (all of which are
// locked busy for the duration). copyToPath gives the destination root per
// copy-to space.
func (u *Uploader) BeginCreateUpload(bktID jcstypes.BucketID, pkgName string, copyTo []jcstypes.UserSpaceID, copyToPath []jcstypes.JPath) (*CreateUploader, error) {
	getSpaces := u.spaceMeta.GetMany(copyTo)
	spacesStgs := make([]jcstypes.UserSpaceDetail, len(copyTo))
	for i, stg := range getSpaces {
		if stg == nil {
			return nil, fmt.Errorf("storage %v not found", copyTo[i])
		}
		spacesStgs[i] = *stg
	}

	pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (jcstypes.Package, error) {
		_, err := u.db.Bucket().GetByID(tx, bktID)
		if err != nil {
			return jcstypes.Package{}, err
		}
		// Fix: run the insert on tx (not DefCtx) so the bucket-existence
		// check and the package creation commit or roll back atomically.
		return u.db.Package().Create(tx, bktID, pkgName, time.Now())
	})
	if err != nil {
		return nil, fmt.Errorf("create package: %w", err)
	}

	reqBld := u.pubLock.BeginMutex()
	for _, stg := range spacesStgs {
		reqBld.UserSpace().Buzy(stg.UserSpace.UserSpaceID)
	}
	lock, err := reqBld.Lock()
	if err != nil {
		return nil, fmt.Errorf("acquire lock: %w", err)
	}

	return &CreateUploader{
		pkg:          pkg,
		targetSpaces: spacesStgs,
		copyRoots:    copyToPath,
		uploader:     u,
		pubLock:      lock,
	}, nil
}

// UploadPart streams one part of a multipart-upload object into a shard
// store and records the resulting block. Parts after the first are forced
// into the same space as the object's existing blocks; the first part picks
// a space the same way BeginUpdate does (no affinity).
func (u *Uploader) UploadPart(objID jcstypes.ObjectID, index int, stream io.Reader) error {
	objDe, err := u.db.Object().GetDetail(u.db.DefCtx(), objID)
	if err != nil {
		return fmt.Errorf("getting object detail: %w", err)
	}

	if _, ok := objDe.Object.Redundancy.(*jcstypes.MultipartUploadRedundancy); !ok {
		return fmt.Errorf("object %v is not a multipart upload", objID)
	}

	var space jcstypes.UserSpaceDetail
	if len(objDe.Blocks) > 0 {
		// All parts of one object must live in the space of its first block.
		cstg := u.spaceMeta.Get(objDe.Blocks[0].UserSpaceID)
		if cstg == nil {
			return fmt.Errorf("space %v not found", objDe.Blocks[0].UserSpaceID)
		}
		space = *cstg
	} else {
		_, userStgs, err := u.collectUploadSpaces()
		if err != nil {
			return err
		}
		if len(userStgs) == 0 {
			return fmt.Errorf("user no available storages")
		}
		space = u.chooseUploadStorage(userStgs, 0).Space
	}

	// Keep the chosen space busy while the part is being written.
	lock, err := u.pubLock.BeginMutex().UserSpace().Buzy(space.UserSpace.UserSpaceID).End().Lock()
	if err != nil {
		return fmt.Errorf("acquire lock: %w", err)
	}
	defer lock.Unlock()

	ft := ioswitch2.NewFromTo()
	fromDrv, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream())
	ft.AddFrom(fromDrv).AddTo(ioswitch2.NewToShardStore(space, ioswitch2.RawStream(), "shard"))

	plans := exec.NewPlanBuilder()
	if err := parser.Parse(ft, plans); err != nil {
		return fmt.Errorf("parse fromto: %w", err)
	}

	exeCtx := exec.NewExecContext()
	exec.SetValueByType(exeCtx, u.stgPool)

	// Renamed from "exec" to avoid shadowing the exec package above.
	drv := plans.Execute(exeCtx)
	drv.BeginWrite(io.NopCloser(stream), hd)
	ret, err := drv.Wait(context.TODO())
	if err != nil {
		return fmt.Errorf("executing plan: %w", err)
	}

	shardInfo := ret.Get("shard").(*ops2.FileInfoValue)
	return u.db.DoTx(func(tx db.SQLContext) error {
		return u.db.Object().AppendPart(tx, jcstypes.ObjectBlock{
			ObjectID:    objID,
			Index:       index,
			UserSpaceID: space.UserSpace.UserSpaceID,
			FileHash:    shardInfo.Hash,
			Size:        shardInfo.Size,
		})
	})
}