|
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124 |
- package uploader
-
- import (
- "context"
- "fmt"
- "io"
- "path"
- "sync"
- "time"
-
- "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
- "gitlink.org.cn/cloudream/storage2/client/internal/db"
- "gitlink.org.cn/cloudream/storage2/client/types"
- "gitlink.org.cn/cloudream/storage2/common/pkgs/distlock"
- "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2"
- "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2"
- "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/parser"
- )
-
// CreateLoadUploader uploads objects into a package while simultaneously
// loading each object to the public storage of every target user space.
// It buffers successful uploads in memory until Commit persists them.
type CreateLoadUploader struct {
	pkg          types.Package           // package the uploaded objects belong to
	targetSpaces []types.UserSpaceDetail // spaces receiving both a shard copy and a public load
	loadRoots    []string                // public-load root per space; parallel to targetSpaces (indexed together in Upload)
	uploader     *Uploader               // parent uploader providing db access and stgPool
	distlock     *distlock.Mutex         // held for the whole upload; released exactly once by Commit or Abort
	successes    []db.AddObjectEntry     // entries recorded by successful Upload calls, flushed by Commit
	lock         sync.Mutex              // guards successes and commited
	commited     bool                    // set once by Commit/Abort; spelling kept as-is (field is referenced by other methods)
}
-
// CreateLoadResult is returned by Commit and describes the uploaded
// package together with the objects that were added to it.
type CreateLoadResult struct {
	Package types.Package
	Objects map[string]types.Object // object path -> object, built from the BatchAdd results
}
-
- func (u *CreateLoadUploader) Upload(pa string, size int64, stream io.Reader) error {
- uploadTime := time.Now()
- spaceIDs := make([]types.UserSpaceID, 0, len(u.targetSpaces))
-
- ft := ioswitch2.FromTo{}
- fromExec, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream())
- ft.AddFrom(fromExec)
- for i, space := range u.targetSpaces {
- ft.AddTo(ioswitch2.NewToShardStore(*space.MasterHub, space, ioswitch2.RawStream(), "fileHash"))
- ft.AddTo(ioswitch2.NewLoadToPublic(*space.MasterHub, space, path.Join(u.loadRoots[i], pa)))
- spaceIDs = append(spaceIDs, space.UserSpace.UserSpaceID)
- }
-
- plans := exec.NewPlanBuilder()
- err := parser.Parse(ft, plans)
- if err != nil {
- return fmt.Errorf("parsing plan: %w", err)
- }
-
- exeCtx := exec.NewExecContext()
- exec.SetValueByType(exeCtx, u.uploader.stgPool)
- exec := plans.Execute(exeCtx)
- exec.BeginWrite(io.NopCloser(stream), hd)
- ret, err := exec.Wait(context.TODO())
- if err != nil {
- return fmt.Errorf("executing plan: %w", err)
- }
-
- u.lock.Lock()
- defer u.lock.Unlock()
-
- // 记录上传结果
- fileHash := ret["fileHash"].(*ops2.ShardInfoValue).Hash
- u.successes = append(u.successes, db.AddObjectEntry{
- Path: pa,
- Size: size,
- FileHash: fileHash,
- UploadTime: uploadTime,
- UserSpaceIDs: spaceIDs,
- })
- return nil
- }
-
- func (u *CreateLoadUploader) Commit() (CreateLoadResult, error) {
- u.lock.Lock()
- defer u.lock.Unlock()
-
- if u.commited {
- return CreateLoadResult{}, fmt.Errorf("package already commited")
- }
- u.commited = true
-
- defer u.distlock.Unlock()
-
- var addedObjs []types.Object
- err := u.uploader.db.DoTx(func(tx db.SQLContext) error {
- var err error
- addedObjs, err = u.uploader.db.Object().BatchAdd(tx, u.pkg.PackageID, u.successes)
- return err
- })
- if err != nil {
- return CreateLoadResult{}, fmt.Errorf("adding objects: %w", err)
- }
-
- ret := CreateLoadResult{
- Package: u.pkg,
- Objects: make(map[string]types.Object),
- }
-
- for _, entry := range addedObjs {
- ret.Objects[entry.Path] = entry
- }
-
- return ret, nil
- }
-
- func (u *CreateLoadUploader) Abort() {
- u.lock.Lock()
- defer u.lock.Unlock()
-
- if u.commited {
- return
- }
- u.commited = true
-
- u.distlock.Unlock()
-
- // TODO 可以考虑删除PackageID
- }
|