package uploader

import (
	"context"
	"fmt"
	"io"
	"path"
	"sync"
	"time"

	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
	"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
	"gitlink.org.cn/cloudream/jcs-pub/client/types"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
)

// CreateLoadUploader uploads objects into a package while simultaneously
// loading each uploaded file to the public area of every target storage
// space. Results are accumulated per Upload call and persisted to the
// database in one transaction by Commit.
type CreateLoadUploader struct {
	pkg          types.Package
	targetSpaces []types.UserSpaceDetail
	loadRoots    []string // load root per target space; parallel to targetSpaces
	uploader     *Uploader
	// distlock *distlock.Mutex
	successes []db.AddObjectEntry // upload results awaiting Commit
	lock      sync.Mutex          // guards successes and commited
	commited  bool                // set once Commit or Abort has run
}

// CreateLoadResult is returned by Commit. Objects maps each uploaded
// path to the database object that was created for it.
type CreateLoadResult struct {
	Package types.Package
	Objects map[string]types.Object
}

// Upload streams the given reader to every target space — both into the
// space's shard store and into its public area under the configured load
// root joined with pa — then records the result for the final Commit.
//
// pa is the object path inside the package; size is the stream length in
// bytes as recorded in the database entry. Returns an error if the
// uploader has already been committed/aborted, if the transfer plan
// cannot be built or executed, or if the plan produces an unexpected
// result type.
func (u *CreateLoadUploader) Upload(pa string, size int64, stream io.Reader) error {
	// Reject uploads once Commit or Abort has run; entries appended
	// afterwards would be silently lost.
	u.lock.Lock()
	if u.commited {
		u.lock.Unlock()
		return fmt.Errorf("package already committed")
	}
	u.lock.Unlock()

	uploadTime := time.Now()

	spaceIDs := make([]types.UserSpaceID, 0, len(u.targetSpaces))

	// Build a plan that fans the single input stream out to a shard-store
	// write and a public-area load for every target space.
	ft := ioswitch2.FromTo{}
	fromExec, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream())
	ft.AddFrom(fromExec)
	for i, space := range u.targetSpaces {
		ft.AddTo(ioswitch2.NewToShardStore(*space.MasterHub, space, ioswitch2.RawStream(), "fileHash"))
		ft.AddTo(ioswitch2.NewLoadToPublic(*space.MasterHub, space, path.Join(u.loadRoots[i], pa)))
		spaceIDs = append(spaceIDs, space.UserSpace.UserSpaceID)
	}

	plans := exec.NewPlanBuilder()
	if err := parser.Parse(ft, plans); err != nil {
		return fmt.Errorf("parsing plan: %w", err)
	}

	exeCtx := exec.NewExecContext()
	exec.SetValueByType(exeCtx, u.uploader.stgPool)
	// Named "exe" (not "exec") so the imported exec package is not shadowed.
	exe := plans.Execute(exeCtx)
	exe.BeginWrite(io.NopCloser(stream), hd)
	ret, err := exe.Wait(context.TODO())
	if err != nil {
		return fmt.Errorf("executing plan: %w", err)
	}

	u.lock.Lock()
	defer u.lock.Unlock()

	// Record the upload result. Use the comma-ok assertion so an
	// unexpected result type yields an error instead of a panic.
	shardInfo, ok := ret["fileHash"].(*ops2.ShardInfoValue)
	if !ok {
		return fmt.Errorf("plan result \"fileHash\" has unexpected type %T", ret["fileHash"])
	}
	u.successes = append(u.successes, db.AddObjectEntry{
		Path:         pa,
		Size:         size,
		FileHash:     shardInfo.Hash,
		UploadTime:   uploadTime,
		UserSpaceIDs: spaceIDs,
	})
	return nil
}

// Commit finalizes the upload: all successfully uploaded entries are
// written to the database in a single transaction and the created
// objects are returned keyed by path. A CreateLoadUploader can be
// committed at most once; subsequent calls return an error.
func (u *CreateLoadUploader) Commit() (CreateLoadResult, error) {
	u.lock.Lock()
	defer u.lock.Unlock()

	if u.commited {
		return CreateLoadResult{}, fmt.Errorf("package already committed")
	}
	u.commited = true

	// defer u.distlock.Unlock()

	var addedObjs []types.Object
	err := u.uploader.db.DoTx(func(tx db.SQLContext) error {
		var err error
		addedObjs, err = u.uploader.db.Object().BatchAdd(tx, u.pkg.PackageID, u.successes)
		return err
	})
	if err != nil {
		return CreateLoadResult{}, fmt.Errorf("adding objects: %w", err)
	}

	ret := CreateLoadResult{
		Package: u.pkg,
		Objects: make(map[string]types.Object, len(addedObjs)),
	}
	for _, obj := range addedObjs {
		ret.Objects[obj.Path] = obj
	}
	return ret, nil
}

// Abort cancels the upload. It is a no-op if Commit or Abort has already
// run. Data already written to the storage spaces is not removed.
func (u *CreateLoadUploader) Abort() {
	u.lock.Lock()
	defer u.lock.Unlock()

	if u.commited {
		return
	}
	u.commited = true

	// u.distlock.Unlock()
	// TODO: consider deleting the package record (PackageID) created for this upload.
}