diff --git a/client/internal/accessstat/access_stat.go b/client/internal/accessstat/access_stat.go
index 5e2d417..e3e6af7 100644
--- a/client/internal/accessstat/access_stat.go
+++ b/client/internal/accessstat/access_stat.go
@@ -7,7 +7,7 @@ import (
 	"gitlink.org.cn/cloudream/common/pkgs/async"
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
-	jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 type AccessStatEventChan = async.UnboundChannel[AccessStatEvent]
@@ -30,9 +30,9 @@ type AccessStat struct {
 }
 
 type entryKey struct {
-	objID jcsypes.ObjectID
-	pkgID jcsypes.PackageID
-	spaceID jcsypes.UserSpaceID
+	objID jcstypes.ObjectID
+	pkgID jcstypes.PackageID
+	spaceID jcstypes.UserSpaceID
 }
 
 func NewAccessStat(cfg Config, db *db.DB) *AccessStat {
@@ -44,7 +44,7 @@ func NewAccessStat(cfg Config, db *db.DB) *AccessStat {
 	}
 }
 
-func (p *AccessStat) AddAccessCounter(objID jcsypes.ObjectID, pkgID jcsypes.PackageID, spaceID jcsypes.UserSpaceID, value float64) {
+func (p *AccessStat) AddAccessCounter(objID jcstypes.ObjectID, pkgID jcstypes.PackageID, spaceID jcstypes.UserSpaceID, value float64) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 
diff --git a/client/internal/accesstoken/accesstoken.go b/client/internal/accesstoken/accesstoken.go
index e98a30e..1ce967c 100644
--- a/client/internal/accesstoken/accesstoken.go
+++ b/client/internal/accesstoken/accesstoken.go
@@ -15,7 +15,7 @@ import (
 	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/accesstoken"
 	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc"
 	corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
-	cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 type KeeperEvent interface {
@@ -30,7 +30,7 @@ type ExitEvent struct {
 }
 type Keeper struct {
 	cfg Config
 	enabled bool
-	token cortypes.UserAccessToken
+	token jcstypes.UserAccessToken
 	priKey ed25519.PrivateKey
 	lock sync.RWMutex
 	done chan any
@@ -144,7 +144,7 @@ func (k *Keeper) Stop() {
 	}
 }
 
-func (k *Keeper) GetToken() cortypes.UserAccessToken {
+func (k *Keeper) GetToken() jcstypes.UserAccessToken {
 	k.lock.RLock()
 	defer k.lock.RUnlock()
 
diff --git a/client/internal/cmdline/migrate.go b/client/internal/cmdline/migrate.go
index c9d0686..c0da1ad 100644
--- a/client/internal/cmdline/migrate.go
+++ b/client/internal/cmdline/migrate.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/spf13/cobra"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/config"
-	jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 	"gorm.io/driver/mysql"
 	"gorm.io/gorm"
 )
@@ -38,15 +38,15 @@ func migrate(configPath string) {
 	}
 	db = db.Set("gorm:table_options", "CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci")
 
-	migrateOne(db, jcsypes.Bucket{})
-	migrateOne(db, jcsypes.ObjectAccessStat{})
-	migrateOne(db, jcsypes.ObjectBlock{})
-	migrateOne(db, jcsypes.Object{})
-	migrateOne(db, jcsypes.PackageAccessStat{})
-	migrateOne(db, jcsypes.Package{})
-	migrateOne(db, jcsypes.PinnedObject{})
-	migrateOne(db, jcsypes.UserSpace{})
-	migrateOne(db, jcsypes.SpaceSyncTask{})
+	migrateOne(db, jcstypes.Bucket{})
+	migrateOne(db, jcstypes.ObjectAccessStat{})
+	migrateOne(db, jcstypes.ObjectBlock{})
+	migrateOne(db, jcstypes.Object{})
+	migrateOne(db, jcstypes.PackageAccessStat{})
+	migrateOne(db, jcstypes.Package{})
+	migrateOne(db, jcstypes.PinnedObject{})
+	migrateOne(db, jcstypes.UserSpace{})
+	migrateOne(db, jcstypes.SpaceSyncTask{})
 
 	fmt.Println("migrate success")
 }
diff --git a/client/internal/cmdline/test.go b/client/internal/cmdline/test.go
index 9797ea8..f3bc714 100644
--- a/client/internal/cmdline/test.go
+++ b/client/internal/cmdline/test.go
@@ -27,7 +27,7 @@ import (
 	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock"
 	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
 	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent"
-	jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 	"gitlink.org.cn/cloudream/jcs-pub/common/types/datamap"
 )
 
@@ -61,7 +61,7 @@ func doTest(svc *services.Service) {
 
 	ft = ioswitch2.NewFromTo()
 	ft.AddFrom(ioswitch2.NewFromShardstore("Full1AE5436AF72D8EF93923486E0E167315CEF0C91898064DADFAC22216FFBC5E3D", *space1, ioswitch2.RawStream()))
-	ft.AddTo(ioswitch2.NewToBaseStore(*space2, jcsypes.PathFromComps("test3.txt")))
+	ft.AddTo(ioswitch2.NewToBaseStore(*space2, jcstypes.PathFromComps("test3.txt")))
 	plans := exec.NewPlanBuilder()
 	parser.Parse(ft, plans)
 	fmt.Println(plans)
diff --git a/client/internal/downloader/downloader.go b/client/internal/downloader/downloader.go
index 95d1076..2af4066 100644
--- a/client/internal/downloader/downloader.go
+++ b/client/internal/downloader/downloader.go
@@ -11,7 +11,7 @@ import (
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/speedstats"
 	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/connectivity"
 	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
-	jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 const (
@@ -21,18 +21,18 @@ const (
 type DownloadIterator = iterator.Iterator[*Downloading]
 
 type DownloadReqeust struct {
-	ObjectID jcsypes.ObjectID
+	ObjectID jcstypes.ObjectID
 	Offset int64
 	Length int64
 }
 
 type downloadReqeust2 struct {
-	Detail *jcsypes.ObjectDetail
+	Detail *jcstypes.ObjectDetail
 	Raw DownloadReqeust
 }
 
 type Downloading struct {
-	Object *jcsypes.Object
+	Object *jcstypes.Object
 	File io.ReadCloser // 文件流,如果文件不存在,那么为nil
 	Request DownloadReqeust
 }
@@ -65,7 +65,7 @@ func NewDownloader(cfg Config, conn *connectivity.Collector, stgPool *pool.Pool,
 	}
 }
 func (d *Downloader) DownloadObjects(reqs []DownloadReqeust) DownloadIterator {
-	objIDs := make([]jcsypes.ObjectID, len(reqs))
+	objIDs := make([]jcstypes.ObjectID, len(reqs))
 	for i, req := range reqs {
 		objIDs[i] = req.ObjectID
 	}
@@ -79,7 +79,7 @@ func (d *Downloader) DownloadObjects(reqs []DownloadReqeust) DownloadIterator {
 		return iterator.FuseError[*Downloading](fmt.Errorf("request to db: %w", err))
 	}
 
-	detailsMap := make(map[jcsypes.ObjectID]*jcsypes.ObjectDetail)
+	detailsMap := make(map[jcstypes.ObjectID]*jcstypes.ObjectDetail)
 	for _, detail := range objDetails {
 		d := detail
 		detailsMap[detail.Object.ObjectID] = &d
@@ -96,7 +96,7 @@ func (d *Downloader) DownloadObjects(reqs []DownloadReqeust) DownloadIterator {
 	return NewDownloadObjectIterator(d, req2s)
 }
 
-func (d *Downloader) DownloadObjectByDetail(detail jcsypes.ObjectDetail, off int64, length int64) (*Downloading, error) {
+func (d *Downloader) DownloadObjectByDetail(detail jcstypes.ObjectDetail, off int64, length int64) (*Downloading, error) {
 	req2s := []downloadReqeust2{{
 		Detail: &detail,
 		Raw: DownloadReqeust{
@@ -110,56 +110,56 @@ func (d *Downloader) DownloadObjectByDetail(detail jcsypes.ObjectDetail, off int
 	return iter.MoveNext()
 }
 
-func (d *Downloader) DownloadPackage(pkgID jcsypes.PackageID, prefix string) (jcsypes.Package, DownloadIterator, error) {
-
pkg, details, err := db.DoTx02(d.db, func(tx db.SQLContext) (jcsypes.Package, []jcsypes.ObjectDetail, error) { +func (d *Downloader) DownloadPackage(pkgID jcstypes.PackageID, prefix string) (jcstypes.Package, DownloadIterator, error) { + pkg, details, err := db.DoTx02(d.db, func(tx db.SQLContext) (jcstypes.Package, []jcstypes.ObjectDetail, error) { pkg, err := d.db.Package().GetByID(tx, pkgID) if err != nil { - return jcsypes.Package{}, nil, err + return jcstypes.Package{}, nil, err } - var details []jcsypes.ObjectDetail + var details []jcstypes.ObjectDetail if prefix != "" { objs, err := d.db.Object().GetWithPathPrefix(tx, pkgID, prefix) if err != nil { - return jcsypes.Package{}, nil, err + return jcstypes.Package{}, nil, err } - objIDs := make([]jcsypes.ObjectID, len(objs)) + objIDs := make([]jcstypes.ObjectID, len(objs)) for i, obj := range objs { objIDs[i] = obj.ObjectID } allBlocks, err := d.db.ObjectBlock().BatchGetByObjectID(tx, objIDs) if err != nil { - return jcsypes.Package{}, nil, err + return jcstypes.Package{}, nil, err } allPinnedObjs, err := d.db.PinnedObject().BatchGetByObjectID(tx, objIDs) if err != nil { - return jcsypes.Package{}, nil, err + return jcstypes.Package{}, nil, err } - details = make([]jcsypes.ObjectDetail, 0, len(objs)) + details = make([]jcstypes.ObjectDetail, 0, len(objs)) for _, obj := range objs { - detail := jcsypes.ObjectDetail{ + detail := jcstypes.ObjectDetail{ Object: obj, } details = append(details, detail) } - jcsypes.DetailsFillObjectBlocks(details, allBlocks) - jcsypes.DetailsFillPinnedAt(details, allPinnedObjs) + jcstypes.DetailsFillObjectBlocks(details, allBlocks) + jcstypes.DetailsFillPinnedAt(details, allPinnedObjs) } else { details, err = d.db.Object().GetPackageObjectDetails(tx, pkgID) if err != nil { - return jcsypes.Package{}, nil, err + return jcstypes.Package{}, nil, err } } return pkg, details, nil }) if err != nil { - return jcsypes.Package{}, nil, err + return jcstypes.Package{}, nil, err } req2s := make([]downloadReqeust2, len(details)) @@ -180,11 +180,11 @@ func (d *Downloader) DownloadPackage(pkgID jcsypes.PackageID, prefix string) (jc type ObjectECStrip struct { Data []byte - ObjectFileHash jcsypes.FileHash // 添加这条缓存时,Object的FileHash + ObjectFileHash jcstypes.FileHash // 添加这条缓存时,Object的FileHash } type ECStripKey struct { - ObjectID jcsypes.ObjectID + ObjectID jcstypes.ObjectID StripIndex int64 } diff --git a/client/internal/downloader/lrc_strip_iterator.go b/client/internal/downloader/lrc_strip_iterator.go index 1adf18b..9c9428f 100644 --- a/client/internal/downloader/lrc_strip_iterator.go +++ b/client/internal/downloader/lrc_strip_iterator.go @@ -12,14 +12,14 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc/parser" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type LRCStripIterator struct { downloader *Downloader - object jcsypes.Object + object jcstypes.Object blocks []downloadBlock - red jcsypes.LRCRedundancy + red jcstypes.LRCRedundancy curStripIndex int64 cache *StripCache dataChan chan dataChanEntry @@ -28,7 +28,7 @@ type LRCStripIterator struct { inited bool } -func NewLRCStripIterator(downloder *Downloader, object jcsypes.Object, blocks []downloadBlock, red jcsypes.LRCRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *LRCStripIterator { +func NewLRCStripIterator(downloder *Downloader, 
object jcstypes.Object, blocks []downloadBlock, red jcstypes.LRCRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *LRCStripIterator { if maxPrefetch <= 0 { maxPrefetch = 1 } diff --git a/client/internal/downloader/strategy/selector.go b/client/internal/downloader/strategy/selector.go index fac99b8..bdeb2f7 100644 --- a/client/internal/downloader/strategy/selector.go +++ b/client/internal/downloader/strategy/selector.go @@ -12,13 +12,13 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/metacache" "gitlink.org.cn/cloudream/jcs-pub/common/consts" "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type Request struct { Detail types.ObjectDetail Range math2.Range - DestLocation cortypes.Location + DestLocation jcstypes.Location } type Strategy interface { @@ -113,7 +113,7 @@ type downloadBlock struct { type request2 struct { Detail types.ObjectDetail Range math2.Range - DestLocation cortypes.Location + DestLocation jcstypes.Location } func (s *Selector) selectForNoneOrRep(req request2) (Strategy, error) { diff --git a/client/internal/http/v1/object.go b/client/internal/http/v1/object.go index 63f4113..13cf042 100644 --- a/client/internal/http/v1/object.go +++ b/client/internal/http/v1/object.go @@ -19,7 +19,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/http/types" cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1" "gitlink.org.cn/cloudream/jcs-pub/common/ecode" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type ObjectService struct { @@ -113,9 +113,9 @@ func (s *ObjectService) Upload(ctx *gin.Context) { return } - copyToPath := make([]jcsypes.JPath, 0, len(info.CopyToPath)) + copyToPath := make([]jcstypes.JPath, 0, len(info.CopyToPath)) for _, p := range info.CopyToPath { - copyToPath = append(copyToPath, jcsypes.PathFromJcsPathString(p)) + copyToPath = append(copyToPath, jcstypes.PathFromJcsPathString(p)) } up, err := s.svc.Uploader.BeginUpdate(info.PackageID, info.Affinity, info.CopyTo, copyToPath) @@ -147,7 +147,7 @@ func (s *ObjectService) Upload(ctx *gin.Context) { } path = filepath.ToSlash(path) - err = up.Upload(jcsypes.PathFromJcsPathString(path), file) + err = up.Upload(jcstypes.PathFromJcsPathString(path), file) if err != nil { log.Warnf("uploading file: %s", err.Error()) ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, fmt.Sprintf("uploading file %v: %v", file.FileName(), err))) @@ -163,7 +163,7 @@ func (s *ObjectService) Upload(ctx *gin.Context) { return } - uploadeds := make([]jcsypes.Object, len(pathes)) + uploadeds := make([]jcstypes.Object, len(pathes)) for i := range pathes { uploadeds[i] = ret.Objects[pathes[i]] } @@ -398,7 +398,7 @@ func (s *ObjectService) DeleteByPath(ctx *gin.Context) { return } - err = s.svc.ObjectSvc().Delete([]jcsypes.ObjectID{resp.Objects[0].ObjectID}) + err = s.svc.ObjectSvc().Delete([]jcstypes.ObjectID{resp.Objects[0].ObjectID}) if err != nil { log.Warnf("deleting objects: %s", err.Error()) ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, "delete objects failed")) diff --git a/client/internal/http/v1/package.go b/client/internal/http/v1/package.go index c96b38e..a704442 100644 --- a/client/internal/http/v1/package.go +++ b/client/internal/http/v1/package.go @@ -21,7 +21,7 @@ import ( cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1" 
"gitlink.org.cn/cloudream/jcs-pub/common/ecode" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/iterator" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" ) @@ -151,9 +151,9 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) { return } - copyToPath := make([]jcsypes.JPath, 0, len(info.CopyToPath)) + copyToPath := make([]jcstypes.JPath, 0, len(info.CopyToPath)) for _, p := range info.CopyToPath { - copyToPath = append(copyToPath, jcsypes.PathFromJcsPathString(p)) + copyToPath = append(copyToPath, jcstypes.PathFromJcsPathString(p)) } up, err := s.svc.Uploader.BeginCreateUpload(info.BucketID, info.Name, info.CopyTo, copyToPath) @@ -184,7 +184,7 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) { } path = filepath.ToSlash(path) - err = up.Upload(jcsypes.PathFromJcsPathString(path), file) + err = up.Upload(jcstypes.PathFromJcsPathString(path), file) if err != nil { log.Warnf("uploading file: %s", err.Error()) ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, fmt.Sprintf("uploading file %v: %v", file.FileName(), err))) @@ -200,7 +200,7 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) { return } - objs := make([]jcsypes.Object, len(pathes)) + objs := make([]jcstypes.Object, len(pathes)) for i := range pathes { objs[i] = ret.Objects[pathes[i]] } @@ -233,7 +233,7 @@ func (s *PackageService) Download(ctx *gin.Context) { } } -func (s *PackageService) downloadZip(ctx *gin.Context, req cliapi.PackageDownload, pkg jcsypes.Package, iter downloader.DownloadIterator) { +func (s *PackageService) downloadZip(ctx *gin.Context, req cliapi.PackageDownload, pkg jcstypes.Package, iter downloader.DownloadIterator) { log := logger.WithField("HTTP", "Package.Download") ctx.Header("Content-Disposition", "attachment; filename="+url.PathEscape(pkg.Name)+".zip") @@ -276,7 +276,7 @@ func (s *PackageService) downloadZip(ctx *gin.Context, req cliapi.PackageDownloa } } -func (s *PackageService) downloadTar(ctx *gin.Context, req cliapi.PackageDownload, pkg jcsypes.Package, iter downloader.DownloadIterator) { +func (s *PackageService) downloadTar(ctx *gin.Context, req cliapi.PackageDownload, pkg jcstypes.Package, iter downloader.DownloadIterator) { log := logger.WithField("HTTP", "Package.Download") ctx.Header("Content-Disposition", "attachment; filename="+url.PathEscape(pkg.Name)+".tar") diff --git a/client/internal/http/v1/presigned.go b/client/internal/http/v1/presigned.go index 07f3a1e..c4dca00 100644 --- a/client/internal/http/v1/presigned.go +++ b/client/internal/http/v1/presigned.go @@ -15,7 +15,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/http/types" cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1" "gitlink.org.cn/cloudream/jcs-pub/common/ecode" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type PresignedService struct { @@ -156,9 +156,9 @@ func (s *PresignedService) ObjectUpload(ctx *gin.Context) { return } - copyToPath := make([]jcsypes.JPath, 0, len(req.CopyToPath)) + copyToPath := make([]jcstypes.JPath, 0, len(req.CopyToPath)) for _, p := range req.CopyToPath { - copyToPath = append(copyToPath, jcsypes.PathFromJcsPathString(p)) + copyToPath = append(copyToPath, jcstypes.PathFromJcsPathString(p)) } up, err := s.svc.Uploader.BeginUpdate(req.PackageID, req.Affinity, req.CopyTo, copyToPath) @@ -171,7 +171,7 @@ func (s *PresignedService) ObjectUpload(ctx *gin.Context) { path := filepath.ToSlash(req.Path) - 
err = up.Upload(jcsypes.PathFromJcsPathString(path), ctx.Request.Body) + err = up.Upload(jcstypes.PathFromJcsPathString(path), ctx.Request.Body) if err != nil { log.Warnf("uploading file: %s", err.Error()) ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, fmt.Sprintf("uploading file %v: %v", req.Path, err))) diff --git a/client/internal/http/v1/space_syncer.go b/client/internal/http/v1/space_syncer.go index ed46ac5..5460368 100644 --- a/client/internal/http/v1/space_syncer.go +++ b/client/internal/http/v1/space_syncer.go @@ -8,7 +8,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/http/types" cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1" "gitlink.org.cn/cloudream/jcs-pub/common/ecode" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type SpaceSyncerService struct { @@ -41,21 +41,21 @@ func (s *SpaceSyncerService) CreateTask(ctx *gin.Context) { return } - dests := make([]jcsypes.SpaceSyncDest, 0, len(req.DestUserSpaceIDs)) + dests := make([]jcstypes.SpaceSyncDest, 0, len(req.DestUserSpaceIDs)) for _, id := range req.DestUserSpaceIDs { - dests = append(dests, jcsypes.SpaceSyncDest{ - DestUserSpaceID: jcsypes.UserSpaceID(id), - DestPath: jcsypes.PathFromJcsPathString(req.DestPathes[0]), + dests = append(dests, jcstypes.SpaceSyncDest{ + DestUserSpaceID: jcstypes.UserSpaceID(id), + DestPath: jcstypes.PathFromJcsPathString(req.DestPathes[0]), }) } - info, err := s.svc.SpaceSyncer.CreateTask(jcsypes.SpaceSyncTask{ + info, err := s.svc.SpaceSyncer.CreateTask(jcstypes.SpaceSyncTask{ Trigger: req.Trigger, Mode: req.Mode, Filters: req.Filters, Options: req.Options, SrcUserSpaceID: req.SrcUserSpaceID, - SrcPath: jcsypes.PathFromJcsPathString(req.SrcPath), + SrcPath: jcstypes.PathFromJcsPathString(req.SrcPath), Dests: dests, }) if err != nil { diff --git a/client/internal/http/v1/user_space.go b/client/internal/http/v1/user_space.go index a1257d9..76245ea 100644 --- a/client/internal/http/v1/user_space.go +++ b/client/internal/http/v1/user_space.go @@ -9,7 +9,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/http/types" cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1" "gitlink.org.cn/cloudream/jcs-pub/common/ecode" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type UserSpaceService struct { @@ -52,7 +52,7 @@ func (s *UserSpaceService) CreatePackage(ctx *gin.Context) { return } - pkg, err := s.svc.Uploader.UserSpaceUpload(req.UserSpaceID, jcsypes.PathFromJcsPathString(req.Path), req.BucketID, req.Name, req.SpaceAffinity) + pkg, err := s.svc.Uploader.UserSpaceUpload(req.UserSpaceID, jcstypes.PathFromJcsPathString(req.Path), req.BucketID, req.Name, req.SpaceAffinity) if err != nil { log.Warnf("userspace create package: %s", err.Error()) ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, fmt.Sprintf("userspace create package: %v", err))) diff --git a/client/internal/metacache/connectivity.go b/client/internal/metacache/connectivity.go index 9d863a1..6feb8dc 100644 --- a/client/internal/metacache/connectivity.go +++ b/client/internal/metacache/connectivity.go @@ -8,12 +8,12 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func (m 
*MetaCacheHost) AddConnectivity() *Connectivity { cache := &Connectivity{ - entries: make(map[cortypes.HubID]*ConnectivityEntry), + entries: make(map[jcstypes.HubID]*ConnectivityEntry), } m.caches = append(m.caches, cache) @@ -22,10 +22,10 @@ func (m *MetaCacheHost) AddConnectivity() *Connectivity { type Connectivity struct { lock sync.RWMutex - entries map[cortypes.HubID]*ConnectivityEntry + entries map[jcstypes.HubID]*ConnectivityEntry } -func (c *Connectivity) Get(from cortypes.HubID, to cortypes.HubID) *time.Duration { +func (c *Connectivity) Get(from jcstypes.HubID, to jcstypes.HubID) *time.Duration { for i := 0; i < 2; i++ { c.lock.RLock() entry, ok := c.entries[from] @@ -60,12 +60,12 @@ func (c *Connectivity) ClearOutdated() { } } -func (c *Connectivity) load(hubID cortypes.HubID) { +func (c *Connectivity) load(hubID jcstypes.HubID) { coorCli := stgglb.CoordinatorRPCPool.Get() defer coorCli.Release() - get, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]cortypes.HubID{hubID})) + get, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]jcstypes.HubID{hubID})) if cerr != nil { logger.Warnf("get hub connectivities: %v", cerr) return @@ -76,7 +76,7 @@ func (c *Connectivity) load(hubID cortypes.HubID) { ce := &ConnectivityEntry{ From: hubID, - To: make(map[cortypes.HubID]cortypes.HubConnectivity), + To: make(map[jcstypes.HubID]jcstypes.HubConnectivity), UpdateTime: time.Now(), } @@ -88,7 +88,7 @@ func (c *Connectivity) load(hubID cortypes.HubID) { } type ConnectivityEntry struct { - From cortypes.HubID - To map[cortypes.HubID]cortypes.HubConnectivity + From jcstypes.HubID + To map[jcstypes.HubID]jcstypes.HubConnectivity UpdateTime time.Time } diff --git a/client/internal/metacache/hubmeta.go b/client/internal/metacache/hubmeta.go index 7442b69..c945ebd 100644 --- a/client/internal/metacache/hubmeta.go +++ b/client/internal/metacache/hubmeta.go @@ -7,12 +7,12 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func (m *MetaCacheHost) AddHubMeta() *HubMeta { meta := &HubMeta{} - meta.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[cortypes.HubID, cortypes.Hub]{ + meta.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[jcstypes.HubID, jcstypes.Hub]{ Getter: meta.load, Expire: time.Minute * 5, }) @@ -22,10 +22,10 @@ func (m *MetaCacheHost) AddHubMeta() *HubMeta { } type HubMeta struct { - cache *SimpleMetaCache[cortypes.HubID, cortypes.Hub] + cache *SimpleMetaCache[jcstypes.HubID, jcstypes.Hub] } -func (h *HubMeta) Get(hubID cortypes.HubID) *cortypes.Hub { +func (h *HubMeta) Get(hubID jcstypes.HubID) *jcstypes.Hub { v, ok := h.cache.Get(hubID) if ok { return &v @@ -33,9 +33,9 @@ func (h *HubMeta) Get(hubID cortypes.HubID) *cortypes.Hub { return nil } -func (h *HubMeta) GetMany(hubIDs []cortypes.HubID) []*cortypes.Hub { +func (h *HubMeta) GetMany(hubIDs []jcstypes.HubID) []*jcstypes.Hub { vs, oks := h.cache.GetMany(hubIDs) - ret := make([]*cortypes.Hub, len(vs)) + ret := make([]*jcstypes.Hub, len(vs)) for i := range vs { if oks[i] { ret[i] = &vs[i] @@ -48,8 +48,8 @@ func (h *HubMeta) ClearOutdated() { h.cache.ClearOutdated() } -func (h *HubMeta) load(keys []cortypes.HubID) ([]cortypes.Hub, []bool) { - vs := make([]cortypes.Hub, len(keys)) +func (h *HubMeta) 
load(keys []jcstypes.HubID) ([]jcstypes.Hub, []bool) { + vs := make([]jcstypes.Hub, len(keys)) oks := make([]bool, len(keys)) coorCli := stgglb.CoordinatorRPCPool.Get() diff --git a/client/internal/metacache/user_space_meta.go b/client/internal/metacache/user_space_meta.go index 9abe7db..eb1a9c7 100644 --- a/client/internal/metacache/user_space_meta.go +++ b/client/internal/metacache/user_space_meta.go @@ -8,7 +8,7 @@ import ( stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func (m *MetaCacheHost) AddStorageMeta() *UserSpaceMeta { @@ -78,7 +78,7 @@ func (s *UserSpaceMeta) load(keys []types.UserSpaceID) ([]types.UserSpaceDetail, coorCli := stgglb.CoordinatorRPCPool.Get() defer coorCli.Release() - stgs := make([]cortypes.StorageType, len(spaces)) + stgs := make([]jcstypes.StorageType, len(spaces)) for i := range spaces { stgs[i] = spaces[i].Storage } diff --git a/client/internal/mount/mount_linux.go b/client/internal/mount/mount_linux.go index 7fe3fad..5d2598a 100644 --- a/client/internal/mount/mount_linux.go +++ b/client/internal/mount/mount_linux.go @@ -12,7 +12,7 @@ import ( fuse2 "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs" "gitlink.org.cn/cloudream/jcs-pub/client/internal/uploader" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type Mount struct { @@ -107,14 +107,14 @@ func (m *Mount) StartReclaimSpace() { m.vfs.ReclaimSpace() } -func (m *Mount) NotifyObjectInvalid(obj jcsypes.Object) { +func (m *Mount) NotifyObjectInvalid(obj jcstypes.Object) { } -func (m *Mount) NotifyPackageInvalid(pkg jcsypes.Package) { +func (m *Mount) NotifyPackageInvalid(pkg jcstypes.Package) { } -func (m *Mount) NotifyBucketInvalid(bkt jcsypes.Bucket) { +func (m *Mount) NotifyBucketInvalid(bkt jcstypes.Bucket) { } diff --git a/client/internal/mount/mount_win.go b/client/internal/mount/mount_win.go index 8b112a6..2a68480 100644 --- a/client/internal/mount/mount_win.go +++ b/client/internal/mount/mount_win.go @@ -8,7 +8,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/config" "gitlink.org.cn/cloudream/jcs-pub/client/internal/uploader" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type Mount struct { @@ -43,14 +43,14 @@ func (m *Mount) StartReclaimSpace() { } -func (m *Mount) NotifyObjectInvalid(obj jcsypes.Object) { +func (m *Mount) NotifyObjectInvalid(obj jcstypes.Object) { } -func (m *Mount) NotifyPackageInvalid(pkg jcsypes.Package) { +func (m *Mount) NotifyPackageInvalid(pkg jcstypes.Package) { } -func (m *Mount) NotifyBucketInvalid(bkt jcsypes.Bucket) { +func (m *Mount) NotifyBucketInvalid(bkt jcstypes.Bucket) { } diff --git a/client/internal/mount/vfs/cache/cache.go b/client/internal/mount/vfs/cache/cache.go index 16d5b2a..58d470d 100644 --- a/client/internal/mount/vfs/cache/cache.go +++ b/client/internal/mount/vfs/cache/cache.go @@ -21,7 +21,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/config" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse" "gitlink.org.cn/cloudream/jcs-pub/client/internal/uploader" - jcsypes 
"gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type CacheEntry interface { @@ -196,7 +196,7 @@ func (c *Cache) CreateFile(pathComps []string) *CacheFile { // 尝试加载缓存文件,如果文件不存在,则使用obj的信息创建一个新缓存文件,而如果obj为nil,那么会返回nil。 // // 记得使用Release减少引用计数 -func (c *Cache) LoadFile(pathComps []string, obj *jcsypes.Object) *CacheFile { +func (c *Cache) LoadFile(pathComps []string, obj *jcstypes.Object) *CacheFile { c.lock.Lock() defer c.lock.Unlock() @@ -490,7 +490,7 @@ func (c *Cache) Move(pathComps []string, newPathComps []string) error { type syncPackage struct { bktName string pkgName string - pkg jcsypes.Package + pkg jcstypes.Package upObjs []*uploadingObject } @@ -917,7 +917,7 @@ func (c *Cache) doUpdatingOnly(pkgs []*syncPackage) { pathes := make([]string, 0, len(p.upObjs)) modTimes := make([]time.Time, 0, len(p.upObjs)) for _, obj := range p.upObjs { - pathes = append(pathes, jcsypes.JoinObjectPath(obj.pathComps[2:]...)) + pathes = append(pathes, jcstypes.JoinObjectPath(obj.pathComps[2:]...)) modTimes = append(modTimes, obj.modTime) } @@ -1008,7 +1008,7 @@ func (c *Cache) doUploading(pkgs []*syncPackage) { counter := io2.Counter(&rd) - err = upder.Upload(jcsypes.PathFromComps(o.pathComps[2:]...), counter, uploader.UploadOption{ + err = upder.Upload(jcstypes.PathFromComps(o.pathComps[2:]...), counter, uploader.UploadOption{ CreateTime: o.modTime, }) if err != nil { @@ -1036,8 +1036,8 @@ func (c *Cache) doUploading(pkgs []*syncPackage) { continue } - oldPath := jcsypes.JoinObjectPath(o.pathComps[2:]...) - newPath := jcsypes.JoinObjectPath(o.cache.pathComps[2:]...) + oldPath := jcstypes.JoinObjectPath(o.pathComps[2:]...) + newPath := jcstypes.JoinObjectPath(o.cache.pathComps[2:]...) if o.isDeleted { upder.CancelObject(oldPath) diff --git a/client/internal/mount/vfs/cache/file.go b/client/internal/mount/vfs/cache/file.go index 5d778dc..6c6099a 100644 --- a/client/internal/mount/vfs/cache/file.go +++ b/client/internal/mount/vfs/cache/file.go @@ -14,7 +14,7 @@ import ( "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/common/utils/serder" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type CacheLevel int @@ -102,7 +102,7 @@ type CacheFile struct { cache *Cache pathComps []string info FileInfo - remoteObj *jcsypes.Object + remoteObj *jcstypes.Object rwLock *sync.RWMutex readers []*CacheFileHandle writers []*CacheFileHandle @@ -262,7 +262,7 @@ func loadCacheFile(cache *Cache, pathComps []string) (*CacheFile, error) { return ch, nil } -func newCacheFileFromObject(cache *Cache, pathComps []string, obj *jcsypes.Object) (*CacheFile, error) { +func newCacheFileFromObject(cache *Cache, pathComps []string, obj *jcstypes.Object) (*CacheFile, error) { metaPath := cache.GetCacheMetaPath(pathComps...) dataPath := cache.GetCacheDataPath(pathComps...) 
diff --git a/client/internal/mount/vfs/fuse.go b/client/internal/mount/vfs/fuse.go index 048c8fa..f293463 100644 --- a/client/internal/mount/vfs/fuse.go +++ b/client/internal/mount/vfs/fuse.go @@ -9,7 +9,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs/cache" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" ) @@ -35,7 +35,7 @@ func child(vfs *Vfs, ctx context.Context, parent FuseNode, name string) (fuse.Fs return nil } - objPath := jcsypes.JoinObjectPath(childPathComps[2:]...) + objPath := jcstypes.JoinObjectPath(childPathComps[2:]...) obj, err := d.Object().GetByPath(tx, pkg.PackageID, objPath) if err == nil { ret = newFileFromObject(vfs, childPathComps, obj) @@ -45,7 +45,7 @@ func child(vfs *Vfs, ctx context.Context, parent FuseNode, name string) (fuse.Fs return err } - err = d.Object().HasObjectWithPrefix(tx, pkg.PackageID, objPath+jcsypes.ObjectPathSeparator) + err = d.Object().HasObjectWithPrefix(tx, pkg.PackageID, objPath+jcstypes.ObjectPathSeparator) if err == nil { dir := vfs.cache.LoadDir(childPathComps, &cache.CreateDirOption{ ModTime: time.Now(), @@ -98,10 +98,10 @@ func listChildren(vfs *Vfs, ctx context.Context, parent FuseNode) ([]fuse.FsEntr return err } - objPath := jcsypes.JoinObjectPath(myPathComps[2:]...) + objPath := jcstypes.JoinObjectPath(myPathComps[2:]...) objPrefix := objPath if objPath != "" { - objPrefix += jcsypes.ObjectPathSeparator + objPrefix += jcstypes.ObjectPathSeparator } objs, coms, err := d.Object().GetByPrefixGrouped(tx, pkg.PackageID, objPrefix) @@ -110,8 +110,8 @@ func listChildren(vfs *Vfs, ctx context.Context, parent FuseNode) ([]fuse.FsEntr } for _, dir := range coms { - dir = strings.TrimSuffix(dir, jcsypes.ObjectPathSeparator) - pathComps := lo2.AppendNew(myPathComps, jcsypes.BaseName(dir)) + dir = strings.TrimSuffix(dir, jcstypes.ObjectPathSeparator) + pathComps := lo2.AppendNew(myPathComps, jcstypes.BaseName(dir)) cd := vfs.cache.LoadDir(pathComps, &cache.CreateDirOption{ ModTime: time.Now(), @@ -124,7 +124,7 @@ func listChildren(vfs *Vfs, ctx context.Context, parent FuseNode) ([]fuse.FsEntr } for _, obj := range objs { - pathComps := lo2.AppendNew(myPathComps, jcsypes.BaseName(obj.Path)) + pathComps := lo2.AppendNew(myPathComps, jcstypes.BaseName(obj.Path)) file := newFileFromObject(vfs, pathComps, obj) dbEntries[file.Name()] = file } @@ -179,14 +179,14 @@ func newFile(vfs *Vfs, ctx context.Context, name string, parent FuseNode, flags func removeChild(vfs *Vfs, ctx context.Context, name string, parent FuseNode) error { pathComps := lo2.AppendNew(parent.PathComps(), name) - joinedPath := jcsypes.JoinObjectPath(pathComps[2:]...) + joinedPath := jcstypes.JoinObjectPath(pathComps[2:]...) 
d := vfs.db // TODO 生成系统事件 return vfs.db.DoTx(func(tx db.SQLContext) error { pkg, err := d.Package().GetByFullName(tx, pathComps[0], pathComps[1]) if err == nil { - err := d.Object().HasObjectWithPrefix(tx, pkg.PackageID, joinedPath+jcsypes.ObjectPathSeparator) + err := d.Object().HasObjectWithPrefix(tx, pkg.PackageID, joinedPath+jcstypes.ObjectPathSeparator) if err == nil { return fuse.ErrNotEmpty } @@ -211,7 +211,7 @@ func removeChild(vfs *Vfs, ctx context.Context, name string, parent FuseNode) er func moveChild(vfs *Vfs, ctx context.Context, oldName string, oldParent FuseNode, newName string, newParent FuseNode) error { newParentPath := newParent.PathComps() newChildPath := lo2.AppendNew(newParentPath, newName) - newChildPathJoined := jcsypes.JoinObjectPath(newChildPath[2:]...) + newChildPathJoined := jcstypes.JoinObjectPath(newChildPath[2:]...) // 不允许移动任何内容到Package层级以上 if len(newParentPath) < 2 { @@ -219,7 +219,7 @@ func moveChild(vfs *Vfs, ctx context.Context, oldName string, oldParent FuseNode } oldChildPath := lo2.AppendNew(oldParent.PathComps(), oldName) - oldChildPathJoined := jcsypes.JoinObjectPath(oldChildPath[2:]...) + oldChildPathJoined := jcstypes.JoinObjectPath(oldChildPath[2:]...) // 先更新远程,再更新本地,因为远程使用事务更新,可以回滚,而本地不行 return vfs.db.DoTx(func(tx db.SQLContext) error { @@ -259,7 +259,7 @@ func moveRemote(vfs *Vfs, tx db.SQLContext, oldChildPath []string, newParentPath return fuse.ErrExists } - err = d.Object().HasObjectWithPrefix(tx, newPkg.PackageID, newChildPathJoined+jcsypes.ObjectPathSeparator) + err = d.Object().HasObjectWithPrefix(tx, newPkg.PackageID, newChildPathJoined+jcstypes.ObjectPathSeparator) if err == nil { return fuse.ErrExists } @@ -283,17 +283,17 @@ func moveRemote(vfs *Vfs, tx db.SQLContext, oldChildPath []string, newParentPath oldObj.PackageID = newPkg.PackageID oldObj.Path = newChildPathJoined - return d.Object().BatchUpdate(tx, []jcsypes.Object{oldObj}) + return d.Object().BatchUpdate(tx, []jcstypes.Object{oldObj}) } if err != gorm.ErrRecordNotFound { return err } - err = d.Object().HasObjectWithPrefix(tx, oldPkg.PackageID, oldChildPathJoined+jcsypes.ObjectPathSeparator) + err = d.Object().HasObjectWithPrefix(tx, oldPkg.PackageID, oldChildPathJoined+jcstypes.ObjectPathSeparator) if err == nil { return d.Object().MoveByPrefix(tx, - oldPkg.PackageID, oldChildPathJoined+jcsypes.ObjectPathSeparator, - newPkg.PackageID, newChildPathJoined+jcsypes.ObjectPathSeparator, + oldPkg.PackageID, oldChildPathJoined+jcstypes.ObjectPathSeparator, + newPkg.PackageID, newChildPathJoined+jcstypes.ObjectPathSeparator, ) } if err == gorm.ErrRecordNotFound { diff --git a/client/internal/mount/vfs/fuse_bucket.go b/client/internal/mount/vfs/fuse_bucket.go index f5b6f9c..44af37d 100644 --- a/client/internal/mount/vfs/fuse_bucket.go +++ b/client/internal/mount/vfs/fuse_bucket.go @@ -9,7 +9,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs/cache" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" ) @@ -117,7 +117,7 @@ func (r *FuseBucket) listChildren() ([]fuse.FsEntry, error) { return nil, err } - pkgMap := make(map[string]*jcsypes.Package) + pkgMap := make(map[string]*jcstypes.Package) for _, pkg := range pkgs { p := pkg pkgMap[pkg.Name] = &p diff --git a/client/internal/mount/vfs/fuse_dir.go b/client/internal/mount/vfs/fuse_dir.go index d03bbe4..c519d6b 100644 
--- a/client/internal/mount/vfs/fuse_dir.go +++ b/client/internal/mount/vfs/fuse_dir.go @@ -8,7 +8,7 @@ import ( db2 "gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs/cache" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" ) @@ -108,7 +108,7 @@ func (r *FuseDir) loadCacheDir() *cache.CacheDir { return err } - err = r.vfs.db.Object().HasObjectWithPrefix(tx, pkg.PackageID, jcsypes.JoinObjectPath(r.pathComps[2:]...)) + err = r.vfs.db.Object().HasObjectWithPrefix(tx, pkg.PackageID, jcstypes.JoinObjectPath(r.pathComps[2:]...)) if err == nil { createOpt = &cache.CreateDirOption{ ModTime: time.Now(), diff --git a/client/internal/mount/vfs/fuse_file.go b/client/internal/mount/vfs/fuse_file.go index b3d9fad..8fbef99 100644 --- a/client/internal/mount/vfs/fuse_file.go +++ b/client/internal/mount/vfs/fuse_file.go @@ -6,7 +6,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs/cache" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" ) @@ -28,7 +28,7 @@ func newFileFromCache(info cache.CacheEntryInfo, vfs *Vfs) *FuseFileNode { } } -func newFileFromObject(vfs *Vfs, pathComps []string, obj jcsypes.Object) *FuseFileNode { +func newFileFromObject(vfs *Vfs, pathComps []string, obj jcstypes.Object) *FuseFileNode { return &FuseFileNode{ vfs: vfs, pathComps: pathComps, @@ -117,7 +117,7 @@ func (n *FuseFileNode) loadCacheFile() *cache.CacheFile { return n.vfs.cache.LoadFile(n.pathComps, nil) } - cdsObj, err := n.vfs.db.Object().GetByFullPath(n.vfs.db.DefCtx(), n.pathComps[0], n.pathComps[1], jcsypes.JoinObjectPath(n.pathComps[2:]...)) + cdsObj, err := n.vfs.db.Object().GetByFullPath(n.vfs.db.DefCtx(), n.pathComps[0], n.pathComps[1], jcstypes.JoinObjectPath(n.pathComps[2:]...)) if err == nil { file := n.vfs.cache.LoadFile(n.pathComps, &cdsObj) if file == nil { diff --git a/client/internal/mount/vfs/fuse_root.go b/client/internal/mount/vfs/fuse_root.go index de14595..bd6d527 100644 --- a/client/internal/mount/vfs/fuse_root.go +++ b/client/internal/mount/vfs/fuse_root.go @@ -8,7 +8,7 @@ import ( db2 "gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse" "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs/cache" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" ) @@ -104,7 +104,7 @@ func (r *FuseRoot) listChildren() ([]fuse.FsEntry, error) { return nil, err } - bktMap := make(map[string]*jcsypes.Bucket) + bktMap := make(map[string]*jcstypes.Bucket) for _, bkt := range bkts { b := bkt bktMap[bkt.Name] = &b diff --git a/client/internal/repl/load.go b/client/internal/repl/load.go index 0c7ee1b..23c9b4f 100644 --- a/client/internal/repl/load.go +++ b/client/internal/repl/load.go @@ -8,7 +8,7 @@ import ( "time" "github.com/spf13/cobra" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { @@ -31,7 +31,7 @@ func init() { fmt.Printf("Invalid user space ID: %s\n", args[1]) } - loadByID(cmdCtx, jcsypes.PackageID(pkgID), jcsypes.UserSpaceID(userSpaceID), args[2]) + loadByID(cmdCtx, jcstypes.PackageID(pkgID), 
jcstypes.UserSpaceID(userSpaceID), args[2]) } else { loadByPath(cmdCtx, args[0], args[1], args[2]) } @@ -42,7 +42,7 @@ func init() { } func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string, rootPath string) { - comps := strings.Split(strings.Trim(pkgPath, jcsypes.ObjectPathSeparator), jcsypes.ObjectPathSeparator) + comps := strings.Split(strings.Trim(pkgPath, jcstypes.ObjectPathSeparator), jcstypes.ObjectPathSeparator) if len(comps) != 2 { fmt.Printf("Package path must be in format of /") return @@ -63,7 +63,7 @@ func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string, rootPath loadByID(cmdCtx, pkg.PackageID, stg.StorageID, rootPath) } -func loadByID(cmdCtx *CommandContext, pkgID jcsypes.PackageID, stgID jcsypes.StorageID, rootPath string) { +func loadByID(cmdCtx *CommandContext, pkgID jcstypes.PackageID, stgID jcstypes.StorageID, rootPath string) { startTime := time.Now() err := cmdCtx.Cmdline.Svc.StorageSvc().LoadPackage(pkgID, stgID, rootPath) diff --git a/client/internal/repl/lsp.go b/client/internal/repl/lsp.go index e0fc2bc..f5815d3 100644 --- a/client/internal/repl/lsp.go +++ b/client/internal/repl/lsp.go @@ -7,7 +7,7 @@ import ( "github.com/jedib0t/go-pretty/v6/table" "github.com/spf13/cobra" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { @@ -26,7 +26,7 @@ func init() { return } - lspOneByID(cmdCtx, jcsypes.PackageID(id)) + lspOneByID(cmdCtx, jcstypes.PackageID(id)) } else { lspByPath(cmdCtx, args[0]) } @@ -40,7 +40,7 @@ func init() { func lspByPath(cmdCtx *CommandContext, path string) { db2 := cmdCtx.repl.db - comps := strings.Split(strings.Trim(path, jcsypes.ObjectPathSeparator), jcsypes.ObjectPathSeparator) + comps := strings.Split(strings.Trim(path, jcstypes.ObjectPathSeparator), jcstypes.ObjectPathSeparator) if len(comps) != 2 { fmt.Printf("Package path must be in format of /") return @@ -58,7 +58,7 @@ func lspByPath(cmdCtx *CommandContext, path string) { fmt.Println(wr.Render()) } -func lspOneByID(cmdCtx *CommandContext, id jcsypes.PackageID) { +func lspOneByID(cmdCtx *CommandContext, id jcstypes.PackageID) { db2 := cmdCtx.repl.db pkg, err := db2.Package().GetByID(db2.DefCtx(), id) diff --git a/client/internal/services/user_space.go b/client/internal/services/user_space.go index b06e517..cd2b7d4 100644 --- a/client/internal/services/user_space.go +++ b/client/internal/services/user_space.go @@ -8,7 +8,7 @@ import ( "github.com/samber/lo" "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db" @@ -31,41 +31,41 @@ func (svc *Service) UserSpaceSvc() *UserSpaceService { return &UserSpaceService{Service: svc} } -func (svc *UserSpaceService) Get(userspaceID jcsypes.UserSpaceID) (jcsypes.UserSpace, error) { +func (svc *UserSpaceService) Get(userspaceID jcstypes.UserSpaceID) (jcstypes.UserSpace, error) { return svc.DB.UserSpace().GetByID(svc.DB.DefCtx(), userspaceID) } -func (svc *UserSpaceService) GetByName(name string) (jcsypes.UserSpace, error) { +func (svc *UserSpaceService) GetByName(name string) (jcstypes.UserSpace, error) { return svc.DB.UserSpace().GetByName(svc.DB.DefCtx(), name) } -func (svc *UserSpaceService) GetAll() ([]jcsypes.UserSpace, error) { +func (svc *UserSpaceService) GetAll() ([]jcstypes.UserSpace, error) { return 
svc.DB.UserSpace().GetAll(svc.DB.DefCtx()) } func (svc *UserSpaceService) Create(req cliapi.UserSpaceCreate) (*cliapi.UserSpaceCreateResp, *ecode.CodeError) { db2 := svc.DB - space, err := db.DoTx01(db2, func(tx db.SQLContext) (jcsypes.UserSpace, error) { + space, err := db.DoTx01(db2, func(tx db.SQLContext) (jcstypes.UserSpace, error) { space, err := db2.UserSpace().GetByName(tx, req.Name) if err == nil { - return jcsypes.UserSpace{}, gorm.ErrDuplicatedKey + return jcstypes.UserSpace{}, gorm.ErrDuplicatedKey } if err != gorm.ErrRecordNotFound { - return jcsypes.UserSpace{}, err + return jcstypes.UserSpace{}, err } - space = jcsypes.UserSpace{ + space = jcstypes.UserSpace{ Name: req.Name, Storage: req.Storage, Credential: req.Credential, ShardStore: req.ShardStore, Features: req.Features, - WorkingDir: jcsypes.PathFromJcsPathString(req.WorkingDir), + WorkingDir: jcstypes.PathFromJcsPathString(req.WorkingDir), Revision: 0, } err = db2.UserSpace().Create(tx, &space) if err != nil { - return jcsypes.UserSpace{}, err + return jcstypes.UserSpace{}, err } return space, nil }) @@ -80,19 +80,19 @@ func (svc *UserSpaceService) Create(req cliapi.UserSpaceCreate) (*cliapi.UserSpa func (svc *UserSpaceService) Update(req cliapi.UserSpaceUpdate) (*cliapi.UserSpaceUpdateResp, *ecode.CodeError) { db2 := svc.DB - space, err := db.DoTx01(db2, func(tx db.SQLContext) (jcsypes.UserSpace, error) { + space, err := db.DoTx01(db2, func(tx db.SQLContext) (jcstypes.UserSpace, error) { space, err := db2.UserSpace().GetByID(tx, req.UserSpaceID) if err != nil { - return jcsypes.UserSpace{}, err + return jcstypes.UserSpace{}, err } if space.Name != req.Name { _, err = db2.UserSpace().GetByName(tx, req.Name) if err == nil { - return jcsypes.UserSpace{}, gorm.ErrDuplicatedKey + return jcstypes.UserSpace{}, gorm.ErrDuplicatedKey } if err != gorm.ErrRecordNotFound { - return jcsypes.UserSpace{}, err + return jcstypes.UserSpace{}, err } } @@ -110,7 +110,7 @@ func (svc *UserSpaceService) Update(req cliapi.UserSpaceUpdate) (*cliapi.UserSpa } // 通知元数据缓存无效 - svc.UserSpaceMeta.Drop([]jcsypes.UserSpaceID{req.UserSpaceID}) + svc.UserSpaceMeta.Drop([]jcstypes.UserSpaceID{req.UserSpaceID}) // 通知存储服务组件池停止组件。TODO 对于在Hub上运行的组件,需要一个机制去定时清理 svc.StgPool.Drop(stgglb.UserID, space.UserSpaceID) @@ -155,7 +155,7 @@ func (svc *UserSpaceService) Delete(req cliapi.UserSpaceDelete) (*cliapi.UserSpa } // 通知元数据缓存无效 - svc.UserSpaceMeta.Drop([]jcsypes.UserSpaceID{req.UserSpaceID}) + svc.UserSpaceMeta.Drop([]jcstypes.UserSpaceID{req.UserSpaceID}) // 通知存储服务组件池停止组件。TODO 对于在Hub上运行的组件,需要一个机制去定时清理 svc.StgPool.Drop(stgglb.UserID, req.UserSpaceID) @@ -166,13 +166,13 @@ func (svc *UserSpaceService) Delete(req cliapi.UserSpaceDelete) (*cliapi.UserSpa } func (svc *UserSpaceService) Test(req cliapi.UserSpaceTest) (*cliapi.UserSpaceTestResp, *ecode.CodeError) { - detail := jcsypes.UserSpaceDetail{ + detail := jcstypes.UserSpaceDetail{ UserID: stgglb.UserID, - UserSpace: jcsypes.UserSpace{ + UserSpace: jcstypes.UserSpace{ Name: "test", Storage: req.Storage, Credential: req.Credential, - WorkingDir: jcsypes.PathFromJcsPathString(req.WorikingDir), + WorkingDir: jcstypes.PathFromJcsPathString(req.WorikingDir), }, } blder := factory.GetBuilder(&detail) @@ -189,7 +189,7 @@ func (svc *UserSpaceService) Test(req cliapi.UserSpaceTest) (*cliapi.UserSpaceTe return &cliapi.UserSpaceTestResp{}, nil } -func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, userspaceID jcsypes.UserSpaceID, rootPath string) error { +func (svc *UserSpaceService) 
DownloadPackage(packageID jcstypes.PackageID, userspaceID jcstypes.UserSpaceID, rootPath string) error { destSpace := svc.UserSpaceMeta.Get(userspaceID) if destSpace == nil { return fmt.Errorf("userspace not found: %d", userspaceID) @@ -208,10 +208,10 @@ func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, usersp } defer mutex.Unlock() - rootJPath := jcsypes.PathFromJcsPathString(rootPath) + rootJPath := jcstypes.PathFromJcsPathString(rootPath) dIndex := 0 - var pinned []jcsypes.PinnedObject + var pinned []jcstypes.PinnedObject for dIndex < len(details) { plans := exec.NewPlanBuilder() for i := 0; i < 10 && dIndex < len(details); i++ { @@ -252,7 +252,7 @@ func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, usersp return fmt.Errorf("unsupported download strategy: %T", strg) } - objPath := jcsypes.PathFromJcsPathString(details[dIndex].Object.Path) + objPath := jcstypes.PathFromJcsPathString(details[dIndex].Object.Path) dstPath := rootJPath.ConcatNew(objPath) newDstSpace := *destSpace @@ -265,7 +265,7 @@ func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, usersp // 顺便保存到同存储服务的分片存储中 if destSpace.UserSpace.ShardStore != nil { ft.AddTo(ioswitch2.NewToShardStore(newDstSpace, ioswitch2.RawStream(), "")) - pinned = append(pinned, jcsypes.PinnedObject{ + pinned = append(pinned, jcstypes.PinnedObject{ ObjectID: details[dIndex].Object.ObjectID, UserSpaceID: destSpace.UserSpace.UserSpaceID, CreateTime: time.Now(), @@ -297,7 +297,7 @@ func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, usersp } err = svc.DB.DoTx(func(tx db.SQLContext) error { - objIDs := make([]jcsypes.ObjectID, len(pinned)) + objIDs := make([]jcstypes.ObjectID, len(pinned)) for i, obj := range pinned { objIDs[i] = obj.ObjectID } @@ -307,7 +307,7 @@ func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, usersp return err } - pinned = lo.Filter(pinned, func(p jcsypes.PinnedObject, idx int) bool { return avaiIDs[p.ObjectID] }) + pinned = lo.Filter(pinned, func(p jcstypes.PinnedObject, idx int) bool { return avaiIDs[p.ObjectID] }) return svc.DB.PinnedObject().BatchTryCreate(svc.DB.DefCtx(), pinned) }) if err != nil { diff --git a/client/internal/services/utils.go b/client/internal/services/utils.go index d658396..9b7540c 100644 --- a/client/internal/services/utils.go +++ b/client/internal/services/utils.go @@ -1,22 +1,22 @@ package services import ( - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/types/datamap" ) -func getBlockTypeFromRed(red jcsypes.Redundancy) string { +func getBlockTypeFromRed(red jcstypes.Redundancy) string { switch red.(type) { - case *jcsypes.NoneRedundancy: + case *jcstypes.NoneRedundancy: return datamap.BlockTypeRaw - case *jcsypes.ECRedundancy: + case *jcstypes.ECRedundancy: return datamap.BlockTypeEC - case *jcsypes.LRCRedundancy: + case *jcstypes.LRCRedundancy: return datamap.BlockTypeEC - case *jcsypes.SegmentRedundancy: + case *jcstypes.SegmentRedundancy: return datamap.BlockTypeSegment } return "" diff --git a/client/internal/spacesyncer/execute_diff.go b/client/internal/spacesyncer/execute_diff.go index 5ca9293..d0a1448 100644 --- a/client/internal/spacesyncer/execute_diff.go +++ b/client/internal/spacesyncer/execute_diff.go @@ -12,10 +12,10 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" stgtypes 
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) -func executeDiff(syncer *SpaceSyncer, task *task, mode *jcsypes.SpaceSyncModeDiff) { +func executeDiff(syncer *SpaceSyncer, task *task, mode *jcstypes.SpaceSyncModeDiff) { log := logger.WithField("Mod", logMod).WithField("TaskID", task.Task.TaskID) startTime := time.Now() @@ -112,7 +112,7 @@ func executeDiff(syncer *SpaceSyncer, task *task, mode *jcsypes.SpaceSyncModeDif } var willSync []stgtypes.DirEntry - var willMkdirs []jcsypes.JPath + var willMkdirs []jcstypes.JPath dirTree.Iterate(func(path []string, node *trie.Node[srcDstDirEntry], isWordNode bool) trie.VisitCtrl { if node.Value.src == nil { @@ -123,7 +123,7 @@ func executeDiff(syncer *SpaceSyncer, task *task, mode *jcsypes.SpaceSyncModeDif if node.Value.src.IsDir { if node.Value.dst == nil { if node.IsEmpty() { - willMkdirs = append(willMkdirs, jcsypes.PathFromComps(path...)) + willMkdirs = append(willMkdirs, jcstypes.PathFromComps(path...)) } } } else { @@ -193,7 +193,7 @@ func executeDiff(syncer *SpaceSyncer, task *task, mode *jcsypes.SpaceSyncModeDif } } -func diffCreateSrcNode(tree *trie.Trie[srcDstDirEntry], path jcsypes.JPath, e *stgtypes.DirEntry) { +func diffCreateSrcNode(tree *trie.Trie[srcDstDirEntry], path jcstypes.JPath, e *stgtypes.DirEntry) { var ptr = &tree.Root for _, c := range path.Comps() { if ptr.Value.src != nil && ptr.Value.src.IsDir { @@ -205,7 +205,7 @@ func diffCreateSrcNode(tree *trie.Trie[srcDstDirEntry], path jcsypes.JPath, e *s ptr.Value.src = e } -func diffCreateDstNode(tree *trie.Trie[srcDstDirEntry], path jcsypes.JPath, e *stgtypes.DirEntry) { +func diffCreateDstNode(tree *trie.Trie[srcDstDirEntry], path jcstypes.JPath, e *stgtypes.DirEntry) { var ptr = &tree.Root for _, c := range path.Comps() { if ptr.Value.src != nil && ptr.Value.src.IsDir { @@ -227,7 +227,7 @@ type srcDstDirEntry struct { dst *stgtypes.DirEntry } -func cmpFile(diff *jcsypes.SpaceSyncModeDiff, src, dst *stgtypes.DirEntry) bool { +func cmpFile(diff *jcstypes.SpaceSyncModeDiff, src, dst *stgtypes.DirEntry) bool { if diff.IncludeSize && src.Size != dst.Size { return false } diff --git a/client/internal/spacesyncer/filter.go b/client/internal/spacesyncer/filter.go index 73b02c0..b9efb5d 100644 --- a/client/internal/spacesyncer/filter.go +++ b/client/internal/spacesyncer/filter.go @@ -2,7 +2,7 @@ package spacesyncer import ( stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type FilterFn func(info stgtypes.DirEntry) bool @@ -11,7 +11,7 @@ func buildFilter(task *task) FilterFn { var fns []FilterFn for _, f := range task.Task.Filters { switch f := f.(type) { - case *jcsypes.SpaceSyncFilterSize: + case *jcstypes.SpaceSyncFilterSize: fns = append(fns, filterSize(f)) } } @@ -26,7 +26,7 @@ func buildFilter(task *task) FilterFn { } } -func filterSize(filter *jcsypes.SpaceSyncFilterSize) FilterFn { +func filterSize(filter *jcstypes.SpaceSyncFilterSize) FilterFn { return func(info stgtypes.DirEntry) bool { if filter.MinSize > 0 && info.Size < filter.MinSize { return false diff --git a/client/internal/spacesyncer/space_syncer.go b/client/internal/spacesyncer/space_syncer.go index fb292a8..1b5b52f 100644 --- a/client/internal/spacesyncer/space_syncer.go +++ b/client/internal/spacesyncer/space_syncer.go @@ -10,7 +10,7 @@ import ( 
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/metacache" stgpool "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) const ( @@ -26,7 +26,7 @@ type SpaceSyncer struct { stgPool *stgpool.Pool spaceMeta *metacache.UserSpaceMeta lock sync.Mutex - tasks map[jcsypes.SpaceSyncTaskID]*task + tasks map[jcstypes.SpaceSyncTaskID]*task } func New(db *db.DB, stgPool *stgpool.Pool, spaceMeta *metacache.UserSpaceMeta) *SpaceSyncer { @@ -34,7 +34,7 @@ func New(db *db.DB, stgPool *stgpool.Pool, spaceMeta *metacache.UserSpaceMeta) * db: db, stgPool: stgPool, spaceMeta: spaceMeta, - tasks: make(map[jcsypes.SpaceSyncTaskID]*task), + tasks: make(map[jcstypes.SpaceSyncTaskID]*task), } } @@ -50,7 +50,7 @@ func (s *SpaceSyncer) Start() *async.UnboundChannel[SpaceSyncerEvent] { if err != nil { log.Warnf("load task from db: %v", err) } else { - var rms []jcsypes.SpaceSyncTaskID + var rms []jcstypes.SpaceSyncTaskID for _, t := range allTask { ctx, cancel := context.WithCancel(context.Background()) tsk := task{ @@ -60,14 +60,14 @@ func (s *SpaceSyncer) Start() *async.UnboundChannel[SpaceSyncerEvent] { } switch tr := t.Trigger.(type) { - case *jcsypes.SpaceSyncTriggerOnce: + case *jcstypes.SpaceSyncTriggerOnce: // Once类型的任务没有执行完也不执行了 rms = append(rms, t.TaskID) - case *jcsypes.SpaceSyncTriggerInterval: + case *jcstypes.SpaceSyncTriggerInterval: triggerInterval(s, &tsk, tr) - case *jcsypes.SpaceSyncTriggerAt: + case *jcstypes.SpaceSyncTriggerAt: triggerAt(s, &tsk, tr) } @@ -95,10 +95,10 @@ func (s *SpaceSyncer) Stop() { t.CancelFn() } - s.tasks = make(map[jcsypes.SpaceSyncTaskID]*task) + s.tasks = make(map[jcstypes.SpaceSyncTaskID]*task) } -func (s *SpaceSyncer) CreateTask(t jcsypes.SpaceSyncTask) (*TaskInfo, error) { +func (s *SpaceSyncer) CreateTask(t jcstypes.SpaceSyncTask) (*TaskInfo, error) { log := logger.WithField("Mod", logMod) d := s.db @@ -126,13 +126,13 @@ func (s *SpaceSyncer) CreateTask(t jcsypes.SpaceSyncTask) (*TaskInfo, error) { s.lock.Unlock() switch tr := t.Trigger.(type) { - case *jcsypes.SpaceSyncTriggerOnce: + case *jcstypes.SpaceSyncTriggerOnce: triggerOnce(s, &tsk) - case *jcsypes.SpaceSyncTriggerInterval: + case *jcstypes.SpaceSyncTriggerInterval: triggerInterval(s, &tsk, tr) - case *jcsypes.SpaceSyncTriggerAt: + case *jcstypes.SpaceSyncTriggerAt: triggerAt(s, &tsk, tr) } @@ -143,7 +143,7 @@ func (s *SpaceSyncer) CreateTask(t jcsypes.SpaceSyncTask) (*TaskInfo, error) { }, nil } -func (s *SpaceSyncer) CancelTask(taskID jcsypes.SpaceSyncTaskID) { +func (s *SpaceSyncer) CancelTask(taskID jcstypes.SpaceSyncTaskID) { log := logger.WithField("Mod", logMod) s.lock.Lock() @@ -166,7 +166,7 @@ func (s *SpaceSyncer) CancelTask(taskID jcsypes.SpaceSyncTaskID) { log.Infof("task %v canceled", taskID) } -func (s *SpaceSyncer) GetTask(taskID jcsypes.SpaceSyncTaskID) *jcsypes.SpaceSyncTask { +func (s *SpaceSyncer) GetTask(taskID jcstypes.SpaceSyncTaskID) *jcstypes.SpaceSyncTask { s.lock.Lock() defer s.lock.Unlock() @@ -181,11 +181,11 @@ func (s *SpaceSyncer) GetTask(taskID jcsypes.SpaceSyncTaskID) *jcsypes.SpaceSync } type TaskInfo struct { - Task jcsypes.SpaceSyncTask + Task jcstypes.SpaceSyncTask } type task struct { - Task jcsypes.SpaceSyncTask + Task jcstypes.SpaceSyncTask Context context.Context CancelFn func() } diff --git a/client/internal/spacesyncer/trigger.go b/client/internal/spacesyncer/trigger.go index 
7321929..7aab378 100644 --- a/client/internal/spacesyncer/trigger.go +++ b/client/internal/spacesyncer/trigger.go @@ -5,7 +5,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/utils/sort2" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func triggerOnce(syncer *SpaceSyncer, task *task) { @@ -31,7 +31,7 @@ func triggerOnce(syncer *SpaceSyncer, task *task) { }() } -func triggerInterval(syncer *SpaceSyncer, task *task, trigger *jcsypes.SpaceSyncTriggerInterval) { +func triggerInterval(syncer *SpaceSyncer, task *task, trigger *jcstypes.SpaceSyncTriggerInterval) { go func() { log := logger.WithField("Mod", logMod) @@ -66,7 +66,7 @@ func triggerInterval(syncer *SpaceSyncer, task *task, trigger *jcsypes.SpaceSync }() } -func triggerAt(syncer *SpaceSyncer, task *task, trigger *jcsypes.SpaceSyncTriggerAt) { +func triggerAt(syncer *SpaceSyncer, task *task, trigger *jcstypes.SpaceSyncTriggerAt) { go func() { log := logger.WithField("Mod", logMod) diff --git a/client/internal/speedstats/speedstats.go b/client/internal/speedstats/speedstats.go index 12e898e..01f3cbc 100644 --- a/client/internal/speedstats/speedstats.go +++ b/client/internal/speedstats/speedstats.go @@ -5,7 +5,7 @@ import ( "sync" "time" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) const ( @@ -121,9 +121,9 @@ func (p *SpeedStats) ShouldAtClient(size int64) bool { return v < prob } -func (p *SpeedStats) DumpStatus() jcsypes.SpeedStatsStatus { - return jcsypes.SpeedStatsStatus{ - Below100M: []jcsypes.SpeedStatsStatusEntry{ +func (p *SpeedStats) DumpStatus() jcstypes.SpeedStatsStatus { + return jcstypes.SpeedStatsStatus{ + Below100M: []jcstypes.SpeedStatsStatusEntry{ { TotalSize: p.stats100M[0].TotalSize, TotalTime: p.stats100M[0].TotalTime, @@ -137,7 +137,7 @@ func (p *SpeedStats) DumpStatus() jcsypes.SpeedStatsStatus { LastSpeed: p.stats100M[1].LastSpeed, }, }, - Below1G: []jcsypes.SpeedStatsStatusEntry{ + Below1G: []jcstypes.SpeedStatsStatusEntry{ { TotalSize: p.stats1G[0].TotalSize, TotalTime: p.stats1G[0].TotalTime, @@ -151,7 +151,7 @@ func (p *SpeedStats) DumpStatus() jcsypes.SpeedStatsStatus { LastSpeed: p.stats1G[1].LastSpeed, }, }, - Above1G: []jcsypes.SpeedStatsStatusEntry{ + Above1G: []jcstypes.SpeedStatsStatusEntry{ { TotalSize: p.statsAbove1G[0].TotalSize, TotalTime: p.statsAbove1G[0].TotalTime, diff --git a/client/internal/ticktock/change_redundancy.go b/client/internal/ticktock/change_redundancy.go index 363d86a..304510a 100644 --- a/client/internal/ticktock/change_redundancy.go +++ b/client/internal/ticktock/change_redundancy.go @@ -8,7 +8,7 @@ import ( "gitlink.org.cn/cloudream/common/utils/reflect2" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/types/datamap" ) @@ -34,7 +34,7 @@ func (j *ChangeRedundancy) Execute(t *TickTock) { ctx := &changeRedundancyContext{ ticktock: t, - allUserSpaces: make(map[jcsypes.UserSpaceID]*userSpaceUsageInfo), + allUserSpaces: make(map[jcstypes.UserSpaceID]*userSpaceUsageInfo), } spaceIDs, err := t.db.UserSpace().GetAllIDs(t.db.DefCtx()) @@ -58,7 +58,7 @@ func (j *ChangeRedundancy) Execute(t *TickTock) { return } - lastPkgID := jcsypes.PackageID(0) + lastPkgID := 
jcstypes.PackageID(0) loop: for { @@ -90,16 +90,16 @@ loop: type changeRedundancyContext struct { ticktock *TickTock - allUserSpaces map[jcsypes.UserSpaceID]*userSpaceUsageInfo - mostBlockStgIDs []jcsypes.UserSpaceID + allUserSpaces map[jcstypes.UserSpaceID]*userSpaceUsageInfo + mostBlockStgIDs []jcstypes.UserSpaceID } type userSpaceUsageInfo struct { - UserSpace *jcsypes.UserSpaceDetail + UserSpace *jcstypes.UserSpaceDetail AccessAmount float64 } -func (j *ChangeRedundancy) changeOne(ctx *changeRedundancyContext, pkg jcsypes.PackageDetail) error { +func (j *ChangeRedundancy) changeOne(ctx *changeRedundancyContext, pkg jcstypes.PackageDetail) error { log := logger.WithType[ChangeRedundancy]("TickTock") db2 := ctx.ticktock.db @@ -121,7 +121,7 @@ func (j *ChangeRedundancy) changeOne(ctx *changeRedundancyContext, pkg jcsypes.P info.AccessAmount = stat.Amount } - lastObjID := jcsypes.ObjectID(0) + lastObjID := jcstypes.ObjectID(0) for { objs, err := db.DoTx31(db2, db2.Object().BatchGetDetailsPaged, pkg.Package.PackageID, lastObjID, BatchGetObjectDetailCount) if err != nil { @@ -139,7 +139,7 @@ func (j *ChangeRedundancy) changeOne(ctx *changeRedundancyContext, pkg jcsypes.P ctx.mostBlockStgIDs = j.summaryRepObjectBlockUserSpaces(ctx, objs, 2) - var willShrinks []jcsypes.ObjectDetail + var willShrinks []jcstypes.ObjectDetail for _, obj := range objs { newRed, selectedSpaces := j.chooseRedundancy(ctx, obj) diff --git a/client/internal/ticktock/check_shardstore.go b/client/internal/ticktock/check_shardstore.go index f667333..91a6197 100644 --- a/client/internal/ticktock/check_shardstore.go +++ b/client/internal/ticktock/check_shardstore.go @@ -10,7 +10,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/db" stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) // CheckShardStore 代表一个用于处理代理缓存检查事件的结构体 @@ -52,8 +52,8 @@ func (j *CheckShardStore) Execute(t *TickTock) { } } -func (j *CheckShardStore) checkOne(t *TickTock, space *jcsypes.UserSpaceDetail) error { - // addr, ok := space.RecommendHub.Address.(*cortypes.GRPCAddressInfo) +func (j *CheckShardStore) checkOne(t *TickTock, space *jcstypes.UserSpaceDetail) error { + // addr, ok := space.RecommendHub.Address.(*jcstypes.GRPCAddressInfo) // if !ok { // return fmt.Errorf("master of user space %v has no grpc address", space.UserSpace) // } @@ -80,9 +80,9 @@ func (j *CheckShardStore) checkOne(t *TickTock, space *jcsypes.UserSpaceDetail) return fmt.Errorf("listing all files: %w", err) } - fileHashes := lo.Map(infos, func(info stgtypes.FileInfo, _ int) jcsypes.FileHash { return info.Hash }) + fileHashes := lo.Map(infos, func(info stgtypes.FileInfo, _ int) jcstypes.FileHash { return info.Hash }) - realFileHashes := lo.SliceToMap(fileHashes, func(hash jcsypes.FileHash) (jcsypes.FileHash, bool) { return hash, true }) + realFileHashes := lo.SliceToMap(fileHashes, func(hash jcstypes.FileHash) (jcstypes.FileHash, bool) { return hash, true }) // 在事务中执行缓存更新操作 t.db.DoTx(func(tx db.SQLContext) error { @@ -95,7 +95,7 @@ func (j *CheckShardStore) checkOne(t *TickTock, space *jcsypes.UserSpaceDetail) } // checkPinnedObject 对比PinnedObject表,若实际文件不存在,则进行删除操作 -func (*CheckShardStore) checkPinnedObject(t *TickTock, tx db.SQLContext, space *jcsypes.UserSpaceDetail, realFileHashes map[jcsypes.FileHash]bool) { +func (*CheckShardStore) checkPinnedObject(t *TickTock, tx db.SQLContext, space *jcstypes.UserSpaceDetail, realFileHashes 
map[jcstypes.FileHash]bool) { log := logger.WithType[CheckShardStore]("TickTock") objs, err := t.db.PinnedObject().GetObjectsByUserSpaceID(tx, space.UserSpace.UserSpaceID) @@ -104,7 +104,7 @@ func (*CheckShardStore) checkPinnedObject(t *TickTock, tx db.SQLContext, space * return } - var rms []jcsypes.ObjectID + var rms []jcstypes.ObjectID for _, c := range objs { if realFileHashes[c.FileHash] { continue @@ -121,7 +121,7 @@ func (*CheckShardStore) checkPinnedObject(t *TickTock, tx db.SQLContext, space * } // checkObjectBlock 对比ObjectBlock表,若实际文件不存在,则进行删除操作 -func (*CheckShardStore) checkObjectBlock(t *TickTock, tx db.SQLContext, space *jcsypes.UserSpaceDetail, realFileHashes map[jcsypes.FileHash]bool) { +func (*CheckShardStore) checkObjectBlock(t *TickTock, tx db.SQLContext, space *jcstypes.UserSpaceDetail, realFileHashes map[jcstypes.FileHash]bool) { log := logger.WithType[CheckShardStore]("TickTock") blocks, err := t.db.ObjectBlock().GetByUserSpaceID(tx, space.UserSpace.UserSpaceID) @@ -130,7 +130,7 @@ func (*CheckShardStore) checkObjectBlock(t *TickTock, tx db.SQLContext, space *j return } - var rms []jcsypes.FileHash + var rms []jcstypes.FileHash for _, b := range blocks { if realFileHashes[b.FileHash] { continue diff --git a/client/internal/ticktock/redundancy_recover.go b/client/internal/ticktock/redundancy_recover.go index 7e32635..cf1bd30 100644 --- a/client/internal/ticktock/redundancy_recover.go +++ b/client/internal/ticktock/redundancy_recover.go @@ -16,56 +16,55 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc" lrcparser "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc/parser" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/types/datamap" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" ) -func (t *ChangeRedundancy) chooseRedundancy(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail) (jcsypes.Redundancy, []*userSpaceUsageInfo) { +func (t *ChangeRedundancy) chooseRedundancy(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail) (jcstypes.Redundancy, []*userSpaceUsageInfo) { switch obj.Object.Redundancy.(type) { - case *jcsypes.NoneRedundancy: + case *jcstypes.NoneRedundancy: if obj.Object.Size > ctx.ticktock.cfg.ECFileSizeThreshold { - newStgs := t.chooseNewUserSpacesForEC(ctx, &jcsypes.DefaultECRedundancy) - return &jcsypes.DefaultECRedundancy, newStgs + newStgs := t.chooseNewUserSpacesForEC(ctx, &jcstypes.DefaultECRedundancy) + return &jcstypes.DefaultECRedundancy, newStgs } - return &jcsypes.DefaultRepRedundancy, t.chooseNewUserSpacesForRep(ctx, &jcsypes.DefaultRepRedundancy) + return &jcstypes.DefaultRepRedundancy, t.chooseNewUserSpacesForRep(ctx, &jcstypes.DefaultRepRedundancy) - case *jcsypes.RepRedundancy: + case *jcstypes.RepRedundancy: if obj.Object.Size >= ctx.ticktock.cfg.ECFileSizeThreshold { - newStgs := t.chooseNewUserSpacesForEC(ctx, &jcsypes.DefaultECRedundancy) - return &jcsypes.DefaultECRedundancy, newStgs + newStgs := t.chooseNewUserSpacesForEC(ctx, &jcstypes.DefaultECRedundancy) + return &jcstypes.DefaultECRedundancy, newStgs } - newSpaces := t.rechooseUserSpacesForRep(ctx, &jcsypes.DefaultRepRedundancy) + newSpaces := t.rechooseUserSpacesForRep(ctx, &jcstypes.DefaultRepRedundancy) for _, s := range newSpaces { if !obj.ContainsBlock(0, s.UserSpace.UserSpace.UserSpaceID) && !obj.ContainsPinned(s.UserSpace.UserSpace.UserSpaceID) { - return 
&jcsypes.DefaultRepRedundancy, newSpaces + return &jcstypes.DefaultRepRedundancy, newSpaces } } return nil, nil - case *jcsypes.ECRedundancy: + case *jcstypes.ECRedundancy: if obj.Object.Size < ctx.ticktock.cfg.ECFileSizeThreshold { - return &jcsypes.DefaultRepRedundancy, t.chooseNewUserSpacesForRep(ctx, &jcsypes.DefaultRepRedundancy) + return &jcstypes.DefaultRepRedundancy, t.chooseNewUserSpacesForRep(ctx, &jcstypes.DefaultRepRedundancy) } - newSpaces := t.rechooseUserSpacesForEC(ctx, obj, &jcsypes.DefaultECRedundancy) + newSpaces := t.rechooseUserSpacesForEC(ctx, obj, &jcstypes.DefaultECRedundancy) for i, s := range newSpaces { if !obj.ContainsBlock(i, s.UserSpace.UserSpace.UserSpaceID) { - return &jcsypes.DefaultECRedundancy, newSpaces + return &jcstypes.DefaultECRedundancy, newSpaces } } return nil, nil - case *jcsypes.LRCRedundancy: - newLRCStgs := t.rechooseUserSpacesForLRC(ctx, obj, &jcsypes.DefaultLRCRedundancy) + case *jcstypes.LRCRedundancy: + newLRCStgs := t.rechooseUserSpacesForLRC(ctx, obj, &jcstypes.DefaultLRCRedundancy) for i, s := range newLRCStgs { if !obj.ContainsBlock(i, s.UserSpace.UserSpace.UserSpaceID) { - return &jcsypes.DefaultLRCRedundancy, newLRCStgs + return &jcstypes.DefaultLRCRedundancy, newLRCStgs } } @@ -74,7 +73,7 @@ func (t *ChangeRedundancy) chooseRedundancy(ctx *changeRedundancyContext, obj jc return nil, nil } -func (t *ChangeRedundancy) doChangeRedundancy(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, newRed jcsypes.Redundancy, selectedUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { +func (t *ChangeRedundancy) doChangeRedundancy(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, newRed jcstypes.Redundancy, selectedUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { log := logger.WithType[ChangeRedundancy]("TickTock") var updating *db.UpdatingObjectRedundancy @@ -82,48 +81,48 @@ func (t *ChangeRedundancy) doChangeRedundancy(ctx *changeRedundancyContext, obj var err error switch srcRed := obj.Object.Redundancy.(type) { - case *jcsypes.NoneRedundancy: + case *jcstypes.NoneRedundancy: switch newRed := newRed.(type) { - case *jcsypes.RepRedundancy: + case *jcstypes.RepRedundancy: log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> rep") updating, evt, err = t.noneToRep(ctx, obj, newRed, selectedUserSpaces) - case *jcsypes.ECRedundancy: + case *jcstypes.ECRedundancy: log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> ec") updating, evt, err = t.noneToEC(ctx, obj, newRed, selectedUserSpaces) - case *jcsypes.LRCRedundancy: + case *jcstypes.LRCRedundancy: log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> lrc") updating, evt, err = t.noneToLRC(ctx, obj, newRed, selectedUserSpaces) - case *jcsypes.SegmentRedundancy: + case *jcstypes.SegmentRedundancy: log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> segment") updating, evt, err = t.noneToSeg(ctx, obj, newRed, selectedUserSpaces) } - case *jcsypes.RepRedundancy: + case *jcstypes.RepRedundancy: switch newRed := newRed.(type) { - case *jcsypes.RepRedundancy: + case *jcstypes.RepRedundancy: updating, evt, err = t.repToRep(ctx, obj, srcRed, selectedUserSpaces) - case *jcsypes.ECRedundancy: + case *jcstypes.ECRedundancy: log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: rep -> ec") updating, evt, err = t.repToEC(ctx, obj, newRed, selectedUserSpaces) } - case *jcsypes.ECRedundancy: + case 
*jcstypes.ECRedundancy: switch newRed := newRed.(type) { - case *jcsypes.RepRedundancy: + case *jcstypes.RepRedundancy: log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: ec -> rep") updating, evt, err = t.ecToRep(ctx, obj, srcRed, newRed, selectedUserSpaces) - case *jcsypes.ECRedundancy: + case *jcstypes.ECRedundancy: updating, evt, err = t.ecToEC(ctx, obj, srcRed, newRed, selectedUserSpaces) } - case *jcsypes.LRCRedundancy: + case *jcstypes.LRCRedundancy: switch newRed := newRed.(type) { - case *jcsypes.LRCRedundancy: + case *jcstypes.LRCRedundancy: updating, evt, err = t.lrcToLRC(ctx, obj, srcRed, newRed, selectedUserSpaces) } } @@ -132,16 +131,16 @@ func (t *ChangeRedundancy) doChangeRedundancy(ctx *changeRedundancyContext, obj } // 统计每个对象块所在的节点,选出块最多的不超过userspaceCnt个节点 -func (t *ChangeRedundancy) summaryRepObjectBlockUserSpaces(ctx *changeRedundancyContext, objs []jcsypes.ObjectDetail, userspaceCnt int) []jcsypes.UserSpaceID { +func (t *ChangeRedundancy) summaryRepObjectBlockUserSpaces(ctx *changeRedundancyContext, objs []jcstypes.ObjectDetail, userspaceCnt int) []jcstypes.UserSpaceID { type stgBlocks struct { - UserSpaceID jcsypes.UserSpaceID + UserSpaceID jcstypes.UserSpaceID Count int } - stgBlocksMap := make(map[jcsypes.UserSpaceID]*stgBlocks) + stgBlocksMap := make(map[jcstypes.UserSpaceID]*stgBlocks) for _, obj := range objs { shouldUseEC := obj.Object.Size > ctx.ticktock.cfg.ECFileSizeThreshold - if _, ok := obj.Object.Redundancy.(*jcsypes.RepRedundancy); ok && !shouldUseEC { + if _, ok := obj.Object.Redundancy.(*jcstypes.RepRedundancy); ok && !shouldUseEC { for _, block := range obj.Blocks { if _, ok := stgBlocksMap[block.UserSpaceID]; !ok { stgBlocksMap[block.UserSpaceID] = &stgBlocks{ @@ -159,14 +158,14 @@ func (t *ChangeRedundancy) summaryRepObjectBlockUserSpaces(ctx *changeRedundancy return right.Count - left.Count }) - ids := lo.Map(userspaces, func(item *stgBlocks, idx int) jcsypes.UserSpaceID { return item.UserSpaceID }) + ids := lo.Map(userspaces, func(item *stgBlocks, idx int) jcstypes.UserSpaceID { return item.UserSpaceID }) if len(ids) > userspaceCnt { ids = ids[:userspaceCnt] } return ids } -func (t *ChangeRedundancy) chooseNewUserSpacesForRep(ctx *changeRedundancyContext, red *jcsypes.RepRedundancy) []*userSpaceUsageInfo { +func (t *ChangeRedundancy) chooseNewUserSpacesForRep(ctx *changeRedundancyContext, red *jcstypes.RepRedundancy) []*userSpaceUsageInfo { sortedUserSpaces := sort2.Sort(lo.Values(ctx.allUserSpaces), func(left *userSpaceUsageInfo, right *userSpaceUsageInfo) int { return sort2.Cmp(right.AccessAmount, left.AccessAmount) }) @@ -174,7 +173,7 @@ func (t *ChangeRedundancy) chooseNewUserSpacesForRep(ctx *changeRedundancyContex return t.chooseSoManyUserSpaces(red.RepCount, sortedUserSpaces) } -func (t *ChangeRedundancy) chooseNewUserSpacesForEC(ctx *changeRedundancyContext, red *jcsypes.ECRedundancy) []*userSpaceUsageInfo { +func (t *ChangeRedundancy) chooseNewUserSpacesForEC(ctx *changeRedundancyContext, red *jcstypes.ECRedundancy) []*userSpaceUsageInfo { sortedUserSpaces := sort2.Sort(lo.Values(ctx.allUserSpaces), func(left *userSpaceUsageInfo, right *userSpaceUsageInfo) int { return sort2.Cmp(right.AccessAmount, left.AccessAmount) }) @@ -182,7 +181,7 @@ func (t *ChangeRedundancy) chooseNewUserSpacesForEC(ctx *changeRedundancyContext return t.chooseSoManyUserSpaces(red.N, sortedUserSpaces) } -func (t *ChangeRedundancy) chooseNewUserSpacesForLRC(ctx *changeRedundancyContext, red *jcsypes.LRCRedundancy) []*userSpaceUsageInfo { +func (t 
*ChangeRedundancy) chooseNewUserSpacesForLRC(ctx *changeRedundancyContext, red *jcstypes.LRCRedundancy) []*userSpaceUsageInfo { sortedUserSpaces := sort2.Sort(lo.Values(ctx.allUserSpaces), func(left *userSpaceUsageInfo, right *userSpaceUsageInfo) int { return sort2.Cmp(right.AccessAmount, left.AccessAmount) }) @@ -198,7 +197,7 @@ func (t *ChangeRedundancy) chooseNewUserSpacesForSeg(ctx *changeRedundancyContex return t.chooseSoManyUserSpaces(segCount, sortedUserSpaces) } -func (t *ChangeRedundancy) rechooseUserSpacesForRep(ctx *changeRedundancyContext, red *jcsypes.RepRedundancy) []*userSpaceUsageInfo { +func (t *ChangeRedundancy) rechooseUserSpacesForRep(ctx *changeRedundancyContext, red *jcstypes.RepRedundancy) []*userSpaceUsageInfo { type rechooseUserSpace struct { *userSpaceUsageInfo HasBlock bool @@ -233,7 +232,7 @@ func (t *ChangeRedundancy) rechooseUserSpacesForRep(ctx *changeRedundancyContext return t.chooseSoManyUserSpaces(red.RepCount, lo.Map(sortedStgs, func(userspace *rechooseUserSpace, idx int) *userSpaceUsageInfo { return userspace.userSpaceUsageInfo })) } -func (t *ChangeRedundancy) rechooseUserSpacesForEC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.ECRedundancy) []*userSpaceUsageInfo { +func (t *ChangeRedundancy) rechooseUserSpacesForEC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.ECRedundancy) []*userSpaceUsageInfo { type rechooseStg struct { *userSpaceUsageInfo CachedBlockIndex int @@ -269,7 +268,7 @@ func (t *ChangeRedundancy) rechooseUserSpacesForEC(ctx *changeRedundancyContext, return t.chooseSoManyUserSpaces(red.N, lo.Map(sortedStgs, func(userspace *rechooseStg, idx int) *userSpaceUsageInfo { return userspace.userSpaceUsageInfo })) } -func (t *ChangeRedundancy) rechooseUserSpacesForLRC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.LRCRedundancy) []*userSpaceUsageInfo { +func (t *ChangeRedundancy) rechooseUserSpacesForLRC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.LRCRedundancy) []*userSpaceUsageInfo { type rechooseStg struct { *userSpaceUsageInfo CachedBlockIndex int @@ -322,7 +321,7 @@ func (t *ChangeRedundancy) chooseSoManyUserSpaces(count int, stgs []*userSpaceUs var chosen []*userSpaceUsageInfo for len(chosen) < count { // 在每一轮内都选不同地区的节点,如果节点数不够,那么就再来一轮 - chosenLocations := make(map[cortypes.Location]bool) + chosenLocations := make(map[jcstypes.Location]bool) for i, stg := range extendStgs { if stg == nil { continue @@ -341,7 +340,7 @@ func (t *ChangeRedundancy) chooseSoManyUserSpaces(count int, stgs []*userSpaceUs return chosen } -func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { +func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { if len(obj.Blocks) == 0 { return nil, nil, fmt.Errorf("object is not cached on any userspaces, cannot change its redundancy to rep") } @@ -352,7 +351,7 @@ func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj jcsypes.O } // 如果选择的备份节点都是同一个,那么就只要上传一次 - uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcsypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID }) + uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcstypes.UserSpaceID { return 
item.UserSpace.UserSpace.UserSpaceID }) ft := ioswitch2.NewFromTo() ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace, ioswitch2.RawStream())) @@ -374,11 +373,11 @@ func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj jcsypes.O return nil, nil, fmt.Errorf("executing io plan: %w", err) } - var blocks []jcsypes.ObjectBlock + var blocks []jcstypes.ObjectBlock var blockChgs []datamap.BlockChange for i, stg := range uploadStgs { r := ret.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue) - blocks = append(blocks, jcsypes.ObjectBlock{ + blocks = append(blocks, jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: 0, UserSpaceID: stg.UserSpace.UserSpace.UserSpaceID, @@ -412,7 +411,7 @@ func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj jcsypes.O }, nil } -func (t *ChangeRedundancy) noneToEC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.ECRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { +func (t *ChangeRedundancy) noneToEC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.ECRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { if len(obj.Blocks) == 0 { return nil, nil, fmt.Errorf("object is not cached on any userspaces, cannot change its redundancy to ec") } @@ -441,12 +440,12 @@ func (t *ChangeRedundancy) noneToEC(ctx *changeRedundancyContext, obj jcsypes.Ob return nil, nil, fmt.Errorf("executing io plan: %w", err) } - var blocks []jcsypes.ObjectBlock + var blocks []jcstypes.ObjectBlock var evtTargetBlocks []datamap.Block var evtBlockTrans []datamap.DataTransfer for i := 0; i < red.N; i++ { r := ioRet.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue) - blocks = append(blocks, jcsypes.ObjectBlock{ + blocks = append(blocks, jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: i, UserSpaceID: uploadStgs[i].UserSpace.UserSpace.UserSpaceID, @@ -494,7 +493,7 @@ func (t *ChangeRedundancy) noneToEC(ctx *changeRedundancyContext, obj jcsypes.Ob }, nil } -func (t *ChangeRedundancy) noneToLRC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.LRCRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { +func (t *ChangeRedundancy) noneToLRC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.LRCRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { if len(obj.Blocks) == 0 { return nil, nil, fmt.Errorf("object is not cached on any userspaces, cannot change its redundancy to ec") } @@ -522,12 +521,12 @@ func (t *ChangeRedundancy) noneToLRC(ctx *changeRedundancyContext, obj jcsypes.O return nil, nil, fmt.Errorf("executing io plan: %w", err) } - var blocks []jcsypes.ObjectBlock + var blocks []jcstypes.ObjectBlock var evtTargetBlocks []datamap.Block var evtBlockTrans []datamap.DataTransfer for i := 0; i < red.N; i++ { r := ioRet.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue) - blocks = append(blocks, jcsypes.ObjectBlock{ + blocks = append(blocks, jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: i, UserSpaceID: uploadStgs[i].UserSpace.UserSpace.UserSpaceID, @@ -576,7 +575,7 @@ func (t *ChangeRedundancy) noneToLRC(ctx *changeRedundancyContext, obj jcsypes.O nil } -func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.SegmentRedundancy, uploadStgs []*userSpaceUsageInfo) 
(*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { +func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.SegmentRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { if len(obj.Blocks) == 0 { return nil, nil, fmt.Errorf("object is not cached on any userspaces, cannot change its redundancy to rep") } @@ -587,7 +586,7 @@ func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj jcsypes.O } // 如果选择的备份节点都是同一个,那么就只要上传一次 - uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcsypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID }) + uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcstypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID }) ft := ioswitch2.NewFromTo() ft.SegmentParam = red @@ -610,12 +609,12 @@ func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj jcsypes.O return nil, nil, fmt.Errorf("executing io plan: %w", err) } - var blocks []jcsypes.ObjectBlock + var blocks []jcstypes.ObjectBlock var evtTargetBlocks []datamap.Block var evtBlockTrans []datamap.DataTransfer for i, stg := range uploadStgs { r := ret.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue) - blocks = append(blocks, jcsypes.ObjectBlock{ + blocks = append(blocks, jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: i, UserSpaceID: stg.UserSpace.UserSpace.UserSpaceID, @@ -664,7 +663,7 @@ func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj jcsypes.O nil } -func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { +func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { if len(obj.Blocks) == 0 { return nil, nil, fmt.Errorf("object is not cached on any userspaces, cannot change its redundancy to rep") } @@ -675,7 +674,7 @@ func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj jcsypes.Ob } // 如果选择的备份节点都是同一个,那么就只要上传一次 - uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcsypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID }) + uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcstypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID }) ft := ioswitch2.NewFromTo() ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace, ioswitch2.RawStream())) @@ -697,11 +696,11 @@ func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj jcsypes.Ob return nil, nil, fmt.Errorf("executing io plan: %w", err) } - var blocks []jcsypes.ObjectBlock + var blocks []jcstypes.ObjectBlock var blockChgs []datamap.BlockChange for i, stg := range uploadStgs { r := ret.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue) - blocks = append(blocks, jcsypes.ObjectBlock{ + blocks = append(blocks, jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: 0, UserSpaceID: stg.UserSpace.UserSpace.UserSpaceID, @@ -737,14 +736,14 @@ func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj jcsypes.Ob nil } -func (t *ChangeRedundancy) repToEC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.ECRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { 
+func (t *ChangeRedundancy) repToEC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.ECRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { return t.noneToEC(ctx, obj, red, uploadUserSpaces) } -func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, srcRed *jcsypes.ECRedundancy, tarRed *jcsypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { - var chosenBlocks []jcsypes.GrouppedObjectBlock +func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, srcRed *jcstypes.ECRedundancy, tarRed *jcstypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { + var chosenBlocks []jcstypes.GrouppedObjectBlock var chosenBlockIndexes []int - var chosenBlockStg []jcsypes.UserSpaceDetail + var chosenBlockStg []jcstypes.UserSpaceDetail for _, block := range obj.GroupBlocks() { if len(block.UserSpaceIDs) > 0 { // TODO 考虑选择最优的节点 @@ -768,7 +767,7 @@ func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj jcsypes.Obj } // 如果选择的备份节点都是同一个,那么就只要上传一次 - uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcsypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID }) + uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcstypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID }) planBlder := exec.NewPlanBuilder() ft := ioswitch2.NewFromTo() @@ -795,11 +794,11 @@ func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj jcsypes.Obj return nil, nil, fmt.Errorf("executing io plan: %w", err) } - var blocks []jcsypes.ObjectBlock + var blocks []jcstypes.ObjectBlock for i := range uploadStgs { r := ioRet.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue) - blocks = append(blocks, jcsypes.ObjectBlock{ + blocks = append(blocks, jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: 0, UserSpaceID: uploadStgs[i].UserSpace.UserSpace.UserSpaceID, @@ -866,11 +865,11 @@ func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj jcsypes.Obj nil } -func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, srcRed *jcsypes.ECRedundancy, tarRed *jcsypes.ECRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { +func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, srcRed *jcstypes.ECRedundancy, tarRed *jcstypes.ECRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { grpBlocks := obj.GroupBlocks() - var chosenBlocks []jcsypes.GrouppedObjectBlock - var chosenBlockStg []jcsypes.UserSpaceDetail + var chosenBlocks []jcstypes.GrouppedObjectBlock + var chosenBlockStg []jcstypes.UserSpaceDetail for _, block := range grpBlocks { if len(block.UserSpaceIDs) > 0 { stg, ok := ctx.allUserSpaces[block.UserSpaceIDs[0]] @@ -909,16 +908,16 @@ func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj jcsypes.Obje }) } - var newBlocks []jcsypes.ObjectBlock + var newBlocks []jcstypes.ObjectBlock shouldUpdateBlocks := false for i, stg := range uploadUserSpaces { - newBlock := jcsypes.ObjectBlock{ + newBlock := jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: i, UserSpaceID: stg.UserSpace.UserSpace.UserSpaceID, } - grp, ok := lo.Find(grpBlocks, func(grp jcsypes.GrouppedObjectBlock) bool { return grp.Index == 
i }) + grp, ok := lo.Find(grpBlocks, func(grp jcstypes.GrouppedObjectBlock) bool { return grp.Index == i }) // 如果新选中的节点已经记录在Block表中,那么就不需要任何变更 if ok && lo.Contains(grp.UserSpaceIDs, stg.UserSpace.UserSpace.UserSpaceID) { @@ -988,7 +987,7 @@ func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj jcsypes.Obje var blockChgs []datamap.BlockChange for _, block := range obj.Blocks { - keep := lo.ContainsBy(newBlocks, func(newBlock jcsypes.ObjectBlock) bool { + keep := lo.ContainsBy(newBlocks, func(newBlock jcstypes.ObjectBlock) bool { return newBlock.Index == block.Index && newBlock.UserSpaceID == block.UserSpaceID }) if !keep { @@ -1019,7 +1018,7 @@ func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj jcsypes.Obje nil } -func (t *ChangeRedundancy) lrcToLRC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, srcRed *jcsypes.LRCRedundancy, tarRed *jcsypes.LRCRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { +func (t *ChangeRedundancy) lrcToLRC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, srcRed *jcstypes.LRCRedundancy, tarRed *jcstypes.LRCRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { blocksGrpByIndex := obj.GroupBlocks() @@ -1062,8 +1061,8 @@ func (t *ChangeRedundancy) lrcToLRC(ctx *changeRedundancyContext, obj jcsypes.Ob /* TODO2 修复这一块的代码 - func (t *ChangeRedundancy) groupReconstructLRC(obj jcsypes.ObjectDetail, lostBlocks []int, lostBlockGrps []int, grpedBlocks []jcsypes.GrouppedObjectBlock, red *jcsypes.LRCRedundancy, uploadUserSpaces []*UserSpaceLoadInfo) (*db.UpdatingObjectRedundancy, error) { - grped := make(map[int]jcsypes.GrouppedObjectBlock) + func (t *ChangeRedundancy) groupReconstructLRC(obj jcstypes.ObjectDetail, lostBlocks []int, lostBlockGrps []int, grpedBlocks []jcstypes.GrouppedObjectBlock, red *jcstypes.LRCRedundancy, uploadUserSpaces []*UserSpaceLoadInfo) (*db.UpdatingObjectRedundancy, error) { + grped := make(map[int]jcstypes.GrouppedObjectBlock) for _, b := range grpedBlocks { grped[b.Index] = b } @@ -1098,9 +1097,9 @@ TODO2 修复这一块的代码 return nil, fmt.Errorf("executing io plan: %w", err) } - var newBlocks []jcsypes.ObjectBlock + var newBlocks []jcstypes.ObjectBlock for _, i := range lostBlocks { - newBlocks = append(newBlocks, jcsypes.ObjectBlock{ + newBlocks = append(newBlocks, jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: i, UserSpaceID: uploadUserSpaces[i].UserSpace.UserSpace.UserSpaceID, @@ -1109,7 +1108,7 @@ TODO2 修复这一块的代码 } for _, b := range grpedBlocks { for _, hubID := range b.UserSpaceIDs { - newBlocks = append(newBlocks, jcsypes.ObjectBlock{ + newBlocks = append(newBlocks, jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: b.Index, UserSpaceID: hubID, @@ -1125,9 +1124,9 @@ TODO2 修复这一块的代码 }, nil } */ -func (t *ChangeRedundancy) reconstructLRC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, grpBlocks []jcsypes.GrouppedObjectBlock, red *jcsypes.LRCRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { - var chosenBlocks []jcsypes.GrouppedObjectBlock - var chosenBlockStg []jcsypes.UserSpaceDetail +func (t *ChangeRedundancy) reconstructLRC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, grpBlocks []jcstypes.GrouppedObjectBlock, red *jcstypes.LRCRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) { + var chosenBlocks 
[]jcstypes.GrouppedObjectBlock + var chosenBlockStg []jcstypes.UserSpaceDetail for _, block := range grpBlocks { if len(block.UserSpaceIDs) > 0 && block.Index < red.M() { @@ -1154,16 +1153,16 @@ func (t *ChangeRedundancy) reconstructLRC(ctx *changeRedundancyContext, obj jcsy var froms []ioswitchlrc.From var toes []ioswitchlrc.To - var newBlocks []jcsypes.ObjectBlock + var newBlocks []jcstypes.ObjectBlock shouldUpdateBlocks := false for i, userspace := range uploadUserSpaces { - newBlock := jcsypes.ObjectBlock{ + newBlock := jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: i, UserSpaceID: userspace.UserSpace.UserSpace.UserSpaceID, } - grp, ok := lo.Find(grpBlocks, func(grp jcsypes.GrouppedObjectBlock) bool { return grp.Index == i }) + grp, ok := lo.Find(grpBlocks, func(grp jcstypes.GrouppedObjectBlock) bool { return grp.Index == i }) // 如果新选中的节点已经记录在Block表中,那么就不需要任何变更 if ok && lo.Contains(grp.UserSpaceIDs, userspace.UserSpace.UserSpace.UserSpaceID) { diff --git a/client/internal/ticktock/redundancy_shrink.go b/client/internal/ticktock/redundancy_shrink.go index 1fc5dd0..c5c1c2f 100644 --- a/client/internal/ticktock/redundancy_shrink.go +++ b/client/internal/ticktock/redundancy_shrink.go @@ -21,14 +21,14 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/types/datamap" ) -func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext, pkg jcsypes.PackageDetail, objs []jcsypes.ObjectDetail, reen *publock.Reentrant) ([]db.UpdatingObjectRedundancy, []datamap.SysEventBody, error) { +func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext, pkg jcstypes.PackageDetail, objs []jcstypes.ObjectDetail, reen *publock.Reentrant) ([]db.UpdatingObjectRedundancy, []datamap.SysEventBody, error) { log := logger.WithType[ChangeRedundancy]("TickTock") - var readerStgIDs []jcsypes.UserSpaceID + var readerStgIDs []jcstypes.UserSpaceID for _, space := range execCtx.allUserSpaces { // TODO 可以考虑做成配置 if space.AccessAmount >= float64(pkg.ObjectCount/2) { @@ -37,17 +37,17 @@ func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext, } // 只对ec和rep对象进行处理 - var ecObjects []jcsypes.ObjectDetail - var repObjects []jcsypes.ObjectDetail + var ecObjects []jcstypes.ObjectDetail + var repObjects []jcstypes.ObjectDetail for _, obj := range objs { - if _, ok := obj.Object.Redundancy.(*jcsypes.ECRedundancy); ok { + if _, ok := obj.Object.Redundancy.(*jcstypes.ECRedundancy); ok { ecObjects = append(ecObjects, obj) - } else if _, ok := obj.Object.Redundancy.(*jcsypes.RepRedundancy); ok { + } else if _, ok := obj.Object.Redundancy.(*jcstypes.RepRedundancy); ok { repObjects = append(repObjects, obj) } } - planningStgIDs := make(map[jcsypes.UserSpaceID]bool) + planningStgIDs := make(map[jcstypes.UserSpaceID]bool) var sysEvents []datamap.SysEventBody @@ -80,7 +80,7 @@ func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext, // 对于ec对象,则每个对象单独进行退火算法 var ecObjectsUpdating []db.UpdatingObjectRedundancy for i, obj := range ecObjects { - ecRed := obj.Object.Redundancy.(*jcsypes.ECRedundancy) + ecRed := obj.Object.Redundancy.(*jcstypes.ECRedundancy) solu := t.startAnnealing(execCtx, readerStgIDs, annealingObject{ totalBlockCount: ecRed.N, minBlockCnt: 
ecRed.K, @@ -105,15 +105,15 @@ func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext, return append(repObjectsUpdating, ecObjectsUpdating...), sysEvents, nil } -func (t *ChangeRedundancy) summaryRepObjectBlockNodes(objs []jcsypes.ObjectDetail) []jcsypes.UserSpaceID { +func (t *ChangeRedundancy) summaryRepObjectBlockNodes(objs []jcstypes.ObjectDetail) []jcstypes.UserSpaceID { type stgBlocks struct { - UserSpaceID jcsypes.UserSpaceID + UserSpaceID jcstypes.UserSpaceID Count int } - stgBlocksMap := make(map[jcsypes.UserSpaceID]*stgBlocks) + stgBlocksMap := make(map[jcstypes.UserSpaceID]*stgBlocks) for _, obj := range objs { - cacheBlockStgs := make(map[jcsypes.UserSpaceID]bool) + cacheBlockStgs := make(map[jcstypes.UserSpaceID]bool) for _, block := range obj.Blocks { if _, ok := stgBlocksMap[block.UserSpaceID]; !ok { stgBlocksMap[block.UserSpaceID] = &stgBlocks{ @@ -153,17 +153,17 @@ func (t *ChangeRedundancy) summaryRepObjectBlockNodes(objs []jcsypes.ObjectDetai } } - return lo.Map(stgs, func(item *stgBlocks, idx int) jcsypes.UserSpaceID { return item.UserSpaceID }) + return lo.Map(stgs, func(item *stgBlocks, idx int) jcstypes.UserSpaceID { return item.UserSpaceID }) } type annealingState struct { ctx *changeRedundancyContext - readerStgIDs []jcsypes.UserSpaceID // 近期可能访问此对象的节点 - stgsSortedByReader map[jcsypes.UserSpaceID][]stgDist // 拥有数据的节点到每个可能访问对象的节点按距离排序 - object annealingObject // 进行退火的对象 - blockList []objectBlock // 排序后的块分布情况 - stgBlockBitmaps map[jcsypes.UserSpaceID]*bitmap.Bitmap64 // 用位图的形式表示每一个节点上有哪些块 - stgCombTree combinatorialTree // 节点组合树,用于加速计算容灾度 + readerStgIDs []jcstypes.UserSpaceID // 近期可能访问此对象的节点 + stgsSortedByReader map[jcstypes.UserSpaceID][]stgDist // 拥有数据的节点到每个可能访问对象的节点按距离排序 + object annealingObject // 进行退火的对象 + blockList []objectBlock // 排序后的块分布情况 + stgBlockBitmaps map[jcstypes.UserSpaceID]*bitmap.Bitmap64 // 用位图的形式表示每一个节点上有哪些块 + stgCombTree combinatorialTree // 节点组合树,用于加速计算容灾度 maxScore float64 // 搜索过程中得到过的最大分数 maxScoreRmBlocks []bool // 最大分数对应的删除方案 @@ -178,30 +178,30 @@ type annealingState struct { type objectBlock struct { Index int - UserSpaceID jcsypes.UserSpaceID - HasEntity bool // 节点拥有实际的文件数据块 - HasShadow bool // 如果节点拥有完整文件数据,那么认为这个节点拥有所有块,这些块被称为影子块 - FileHash jcsypes.FileHash // 只有在拥有实际文件数据块时,这个字段才有值 - Size int64 // 块大小 + UserSpaceID jcstypes.UserSpaceID + HasEntity bool // 节点拥有实际的文件数据块 + HasShadow bool // 如果节点拥有完整文件数据,那么认为这个节点拥有所有块,这些块被称为影子块 + FileHash jcstypes.FileHash // 只有在拥有实际文件数据块时,这个字段才有值 + Size int64 // 块大小 } type stgDist struct { - UserSpaceID jcsypes.UserSpaceID + UserSpaceID jcstypes.UserSpaceID Distance float64 } type combinatorialTree struct { nodes []combinatorialTreeNode blocksMaps map[int]bitmap.Bitmap64 - stgIDToLocalStgID map[jcsypes.UserSpaceID]int - localStgIDToStgID []jcsypes.UserSpaceID + stgIDToLocalStgID map[jcstypes.UserSpaceID]int + localStgIDToStgID []jcstypes.UserSpaceID } type annealingObject struct { totalBlockCount int minBlockCnt int - pinnedAt []jcsypes.UserSpaceID - blocks []jcsypes.ObjectBlock + pinnedAt []jcstypes.UserSpaceID + blocks []jcstypes.ObjectBlock } const ( @@ -210,10 +210,10 @@ const ( iterActionBreak = 2 ) -func newCombinatorialTree(stgBlocksMaps map[jcsypes.UserSpaceID]*bitmap.Bitmap64) combinatorialTree { +func newCombinatorialTree(stgBlocksMaps map[jcstypes.UserSpaceID]*bitmap.Bitmap64) combinatorialTree { tree := combinatorialTree{ blocksMaps: make(map[int]bitmap.Bitmap64), - stgIDToLocalStgID: make(map[jcsypes.UserSpaceID]int), + stgIDToLocalStgID: make(map[jcstypes.UserSpaceID]int), 
} tree.nodes = make([]combinatorialTreeNode, (1 << len(stgBlocksMaps))) @@ -271,7 +271,7 @@ func (t *combinatorialTree) GetDepth(index int) int { // 更新某一个算力中心节点的块分布位图,同时更新它对应组合树节点的所有子节点。 // 如果更新到某个节点时,已有K个块,那么就不会再更新它的子节点 -func (t *combinatorialTree) UpdateBitmap(stgID jcsypes.UserSpaceID, mp bitmap.Bitmap64, k int) { +func (t *combinatorialTree) UpdateBitmap(stgID jcstypes.UserSpaceID, mp bitmap.Bitmap64, k int) { t.blocksMaps[t.stgIDToLocalStgID[stgID]] = mp // 首先定义两种遍历树节点时的移动方式: // 1. 竖直移动(深度增加):从一个节点移动到它最左边的子节点。每移动一步,index+1 @@ -416,13 +416,13 @@ type annealingSolution struct { minAccessCost float64 // 本方案的最小访问费用 } -func (t *ChangeRedundancy) startAnnealing(ctx *changeRedundancyContext, readerStgIDs []jcsypes.UserSpaceID, object annealingObject) annealingSolution { +func (t *ChangeRedundancy) startAnnealing(ctx *changeRedundancyContext, readerStgIDs []jcstypes.UserSpaceID, object annealingObject) annealingSolution { state := &annealingState{ ctx: ctx, readerStgIDs: readerStgIDs, - stgsSortedByReader: make(map[jcsypes.UserSpaceID][]stgDist), + stgsSortedByReader: make(map[jcstypes.UserSpaceID][]stgDist), object: object, - stgBlockBitmaps: make(map[jcsypes.UserSpaceID]*bitmap.Bitmap64), + stgBlockBitmaps: make(map[jcstypes.UserSpaceID]*bitmap.Bitmap64), } t.initBlockList(state) @@ -486,7 +486,7 @@ func (t *ChangeRedundancy) startAnnealing(ctx *changeRedundancyContext, readerSt } func (t *ChangeRedundancy) initBlockList(ctx *annealingState) { - blocksMap := make(map[jcsypes.UserSpaceID][]objectBlock) + blocksMap := make(map[jcstypes.UserSpaceID][]objectBlock) // 先生成所有的影子块 for _, pinned := range ctx.object.pinnedAt { @@ -680,7 +680,7 @@ func (t *ChangeRedundancy) alwaysAccept(curTemp float64, dScore float64, cooling return v > rand.Float64() } -func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, solu annealingSolution, obj jcsypes.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[jcsypes.UserSpaceID]bool) db.UpdatingObjectRedundancy { +func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, solu annealingSolution, obj jcstypes.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[jcstypes.UserSpaceID]bool) db.UpdatingObjectRedundancy { entry := db.UpdatingObjectRedundancy{ ObjectID: obj.Object.ObjectID, FileHash: obj.Object.FileHash, @@ -694,8 +694,8 @@ func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, s ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *fromStg, ioswitch2.RawStream())) for i, f := range solu.rmBlocks { - hasCache := lo.ContainsBy(obj.Blocks, func(b jcsypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) || - lo.ContainsBy(obj.PinnedAt, func(n jcsypes.UserSpaceID) bool { return n == solu.blockList[i].UserSpaceID }) + hasCache := lo.ContainsBy(obj.Blocks, func(b jcstypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) || + lo.ContainsBy(obj.PinnedAt, func(n jcstypes.UserSpaceID) bool { return n == solu.blockList[i].UserSpaceID }) willRm := f if !willRm { @@ -706,7 +706,7 @@ func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, s planningHubIDs[solu.blockList[i].UserSpaceID] = true } - entry.Blocks = append(entry.Blocks, jcsypes.ObjectBlock{ + entry.Blocks = append(entry.Blocks, jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: solu.blockList[i].Index, UserSpaceID: solu.blockList[i].UserSpaceID, @@ -724,12 +724,12 @@ func (t *ChangeRedundancy) makePlansForRepObject(ctx 
*changeRedundancyContext, s return entry } -func (t *ChangeRedundancy) generateSysEventForRepObject(solu annealingSolution, obj jcsypes.ObjectDetail) []datamap.SysEventBody { +func (t *ChangeRedundancy) generateSysEventForRepObject(solu annealingSolution, obj jcstypes.ObjectDetail) []datamap.SysEventBody { var blockChgs []datamap.BlockChange for i, f := range solu.rmBlocks { - hasCache := lo.ContainsBy(obj.Blocks, func(b jcsypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) || - lo.ContainsBy(obj.PinnedAt, func(n jcsypes.UserSpaceID) bool { return n == solu.blockList[i].UserSpaceID }) + hasCache := lo.ContainsBy(obj.Blocks, func(b jcstypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) || + lo.ContainsBy(obj.PinnedAt, func(n jcstypes.UserSpaceID) bool { return n == solu.blockList[i].UserSpaceID }) willRm := f if !willRm { @@ -782,7 +782,7 @@ func (t *ChangeRedundancy) generateSysEventForRepObject(solu annealingSolution, return []datamap.SysEventBody{transEvt, distEvt} } -func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, solu annealingSolution, obj jcsypes.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[jcsypes.UserSpaceID]bool) db.UpdatingObjectRedundancy { +func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, solu annealingSolution, obj jcstypes.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[jcstypes.UserSpaceID]bool) db.UpdatingObjectRedundancy { entry := db.UpdatingObjectRedundancy{ ObjectID: obj.Object.ObjectID, FileHash: obj.Object.FileHash, @@ -790,11 +790,11 @@ func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, so Redundancy: obj.Object.Redundancy, } - reconstrct := make(map[jcsypes.UserSpaceID]*[]int) + reconstrct := make(map[jcstypes.UserSpaceID]*[]int) for i, f := range solu.rmBlocks { block := solu.blockList[i] if !f { - entry.Blocks = append(entry.Blocks, jcsypes.ObjectBlock{ + entry.Blocks = append(entry.Blocks, jcstypes.ObjectBlock{ ObjectID: obj.Object.ObjectID, Index: block.Index, UserSpaceID: block.UserSpaceID, @@ -815,7 +815,7 @@ func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, so } } - ecRed := obj.Object.Redundancy.(*jcsypes.ECRedundancy) + ecRed := obj.Object.Redundancy.(*jcstypes.ECRedundancy) for id, idxs := range reconstrct { // 依次生成每个节点上的执行计划,因为如果放到一个计划里一起生成,不能保证每个节点上的块用的都是本节点上的副本 @@ -838,10 +838,10 @@ func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, so return entry } -func (t *ChangeRedundancy) generateSysEventForECObject(solu annealingSolution, obj jcsypes.ObjectDetail) []datamap.SysEventBody { +func (t *ChangeRedundancy) generateSysEventForECObject(solu annealingSolution, obj jcstypes.ObjectDetail) []datamap.SysEventBody { var blockChgs []datamap.BlockChange - reconstrct := make(map[jcsypes.UserSpaceID]*[]int) + reconstrct := make(map[jcstypes.UserSpaceID]*[]int) for i, f := range solu.rmBlocks { block := solu.blockList[i] if !f { @@ -917,7 +917,7 @@ func (t *ChangeRedundancy) generateSysEventForECObject(solu annealingSolution, o return []datamap.SysEventBody{transEvt, distEvt} } -func (t *ChangeRedundancy) executePlans(ctx *changeRedundancyContext, planBld *exec.PlanBuilder, planningSpaceIDs map[jcsypes.UserSpaceID]bool, reen *publock.Reentrant) (exec.PlanResult, error) { +func (t *ChangeRedundancy) executePlans(ctx *changeRedundancyContext, planBld *exec.PlanBuilder, planningSpaceIDs map[jcstypes.UserSpaceID]bool, reen *publock.Reentrant) 
(exec.PlanResult, error) { reqBlder := reqbuilder.NewBuilder() for id, _ := range planningSpaceIDs { reqBlder.UserSpace().Buzy(id) @@ -955,7 +955,7 @@ func (t *ChangeRedundancy) executePlans(ctx *changeRedundancyContext, planBld *e return ioSwRets, nil } -func (t *ChangeRedundancy) populateECObjectEntry(entry *db.UpdatingObjectRedundancy, obj jcsypes.ObjectDetail, ioRets exec.PlanResult) { +func (t *ChangeRedundancy) populateECObjectEntry(entry *db.UpdatingObjectRedundancy, obj jcstypes.ObjectDetail, ioRets exec.PlanResult) { for i := range entry.Blocks { if entry.Blocks[i].FileHash != "" { continue diff --git a/client/internal/uploader/uploader.go b/client/internal/uploader/uploader.go index dee260a..7a325b5 100644 --- a/client/internal/uploader/uploader.go +++ b/client/internal/uploader/uploader.go @@ -22,7 +22,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type Uploader struct { @@ -43,7 +43,7 @@ func NewUploader(pubLock *publock.Service, connectivity *connectivity.Collector, } } -func (u *Uploader) BeginUpdate(pkgID jcsypes.PackageID, affinity jcsypes.UserSpaceID, copyTo []jcsypes.UserSpaceID, copyToPath []jcsypes.JPath) (*UpdateUploader, error) { +func (u *Uploader) BeginUpdate(pkgID jcstypes.PackageID, affinity jcstypes.UserSpaceID, copyTo []jcstypes.UserSpaceID, copyToPath []jcstypes.JPath) (*UpdateUploader, error) { spaceIDs, err := u.db.UserSpace().GetAllIDs(u.db.DefCtx()) if err != nil { return nil, fmt.Errorf("getting user space ids: %w", err) @@ -84,9 +84,9 @@ func (u *Uploader) BeginUpdate(pkgID jcsypes.PackageID, affinity jcsypes.UserSpa return nil, fmt.Errorf("user no available userspaces") } - copyToSpaces := make([]jcsypes.UserSpaceDetail, len(copyTo)) + copyToSpaces := make([]jcstypes.UserSpaceDetail, len(copyTo)) for i, spaceID := range copyTo { - space, ok := lo.Find(spaceDetails, func(space *jcsypes.UserSpaceDetail) bool { + space, ok := lo.Find(spaceDetails, func(space *jcstypes.UserSpaceDetail) bool { return space.UserSpace.UserSpaceID == spaceID }) if !ok { @@ -118,7 +118,7 @@ func (u *Uploader) BeginUpdate(pkgID jcsypes.PackageID, affinity jcsypes.UserSpa // 1. 选择设置了亲和性的节点 // 2. 从与当前客户端相同地域的节点中随机选一个 // 3. 
没有的话从所有节点选择延迟最低的节点 -func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity jcsypes.UserSpaceID) UploadSpaceInfo { +func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity jcstypes.UserSpaceID) UploadSpaceInfo { if spaceAffinity > 0 { aff, ok := lo.Find(spaces, func(space UploadSpaceInfo) bool { return space.Space.UserSpace.UserSpaceID == spaceAffinity }) if ok { @@ -137,10 +137,10 @@ func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity j return spaces[0] } -func (u *Uploader) BeginCreateUpload(bktID jcsypes.BucketID, pkgName string, copyTo []jcsypes.UserSpaceID, copyToPath []jcsypes.JPath) (*CreateUploader, error) { +func (u *Uploader) BeginCreateUpload(bktID jcstypes.BucketID, pkgName string, copyTo []jcstypes.UserSpaceID, copyToPath []jcstypes.JPath) (*CreateUploader, error) { getSpaces := u.spaceMeta.GetMany(copyTo) - spacesStgs := make([]jcsypes.UserSpaceDetail, len(copyTo)) + spacesStgs := make([]jcstypes.UserSpaceDetail, len(copyTo)) for i, stg := range getSpaces { if stg == nil { return nil, fmt.Errorf("storage %v not found", copyTo[i]) @@ -148,10 +148,10 @@ func (u *Uploader) BeginCreateUpload(bktID jcsypes.BucketID, pkgName string, cop spacesStgs[i] = *stg } - pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (jcsypes.Package, error) { + pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (jcstypes.Package, error) { _, err := u.db.Bucket().GetByID(tx, bktID) if err != nil { - return jcsypes.Package{}, err + return jcstypes.Package{}, err } return u.db.Package().Create(u.db.DefCtx(), bktID, pkgName, time.Now()) @@ -178,19 +178,19 @@ func (u *Uploader) BeginCreateUpload(bktID jcsypes.BucketID, pkgName string, cop }, nil } -func (u *Uploader) UploadPart(objID jcsypes.ObjectID, index int, stream io.Reader) error { +func (u *Uploader) UploadPart(objID jcstypes.ObjectID, index int, stream io.Reader) error { detail, err := u.db.Object().GetDetail(u.db.DefCtx(), objID) if err != nil { return fmt.Errorf("getting object detail: %w", err) } objDe := detail - _, ok := objDe.Object.Redundancy.(*jcsypes.MultipartUploadRedundancy) + _, ok := objDe.Object.Redundancy.(*jcstypes.MultipartUploadRedundancy) if !ok { return fmt.Errorf("object %v is not a multipart upload", objID) } - var space jcsypes.UserSpaceDetail + var space jcstypes.UserSpaceDetail if len(objDe.Blocks) > 0 { cstg := u.spaceMeta.Get(objDe.Blocks[0].UserSpaceID) if cstg == nil { @@ -272,7 +272,7 @@ func (u *Uploader) UploadPart(objID jcsypes.ObjectID, index int, stream io.Reade shardInfo := ret.Get("shard").(*ops2.FileInfoValue) err = u.db.DoTx(func(tx db.SQLContext) error { - return u.db.Object().AppendPart(tx, jcsypes.ObjectBlock{ + return u.db.Object().AppendPart(tx, jcstypes.ObjectBlock{ ObjectID: objID, Index: index, UserSpaceID: space.UserSpace.UserSpaceID, diff --git a/client/internal/uploader/user_space_upload.go b/client/internal/uploader/user_space_upload.go index 8453080..68846d1 100644 --- a/client/internal/uploader/user_space_upload.go +++ b/client/internal/uploader/user_space_upload.go @@ -17,20 +17,19 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder" corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) -func (u *Uploader) UserSpaceUpload(userSpaceID 
jcsypes.UserSpaceID, rootPath jcsypes.JPath, targetBktID jcsypes.BucketID, newPkgName string, uploadAffinity jcsypes.UserSpaceID) (*jcsypes.Package, error) { +func (u *Uploader) UserSpaceUpload(userSpaceID jcstypes.UserSpaceID, rootPath jcstypes.JPath, targetBktID jcstypes.BucketID, newPkgName string, uploadAffinity jcstypes.UserSpaceID) (*jcstypes.Package, error) { srcSpace := u.spaceMeta.Get(userSpaceID) if srcSpace == nil { return nil, fmt.Errorf("user space %d not found", userSpaceID) } - pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (jcsypes.Package, error) { + pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (jcstypes.Package, error) { _, err := u.db.Bucket().GetByID(tx, targetBktID) if err != nil { - return jcsypes.Package{}, err + return jcstypes.Package{}, err } return u.db.Package().Create(tx, targetBktID, newPkgName, time.Now()) @@ -49,7 +48,7 @@ func (u *Uploader) UserSpaceUpload(userSpaceID jcsypes.UserSpaceID, rootPath jcs } spaceDetails := u.spaceMeta.GetMany(spaceIDs) - spaceDetails = lo.Filter(spaceDetails, func(e *jcsypes.UserSpaceDetail, i int) bool { + spaceDetails = lo.Filter(spaceDetails, func(e *jcstypes.UserSpaceDetail, i int) bool { return e != nil && e.UserSpace.ShardStore != nil }) @@ -59,13 +58,13 @@ func (u *Uploader) UserSpaceUpload(userSpaceID jcsypes.UserSpaceID, rootPath jcs coorCli := stgglb.CoordinatorRPCPool.Get() defer coorCli.Release() - resp, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]cortypes.HubID{srcSpace.RecommendHub.HubID})) + resp, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]jcstypes.HubID{srcSpace.RecommendHub.HubID})) if cerr != nil { delPkg() return nil, fmt.Errorf("getting hub connectivities: %w", cerr.ToError()) } - cons := make(map[cortypes.HubID]cortypes.HubConnectivity) + cons := make(map[jcstypes.HubID]jcstypes.HubConnectivity) for _, c := range resp.Connectivities { cons[c.ToHubID] = c } @@ -153,7 +152,7 @@ func (u *Uploader) UserSpaceUpload(userSpaceID jcsypes.UserSpaceID, rootPath jcs return &pkg, nil } -func (u *Uploader) uploadFromBaseStore(srcSpace *jcsypes.UserSpaceDetail, targetSpace *jcsypes.UserSpaceDetail, entries []types.DirEntry, rootPath jcsypes.JPath) ([]db.AddObjectEntry, error) { +func (u *Uploader) uploadFromBaseStore(srcSpace *jcstypes.UserSpaceDetail, targetSpace *jcstypes.UserSpaceDetail, entries []types.DirEntry, rootPath jcstypes.JPath) ([]db.AddObjectEntry, error) { ft := ioswitch2.FromTo{} for _, e := range entries { @@ -198,7 +197,7 @@ func (u *Uploader) uploadFromBaseStore(srcSpace *jcsypes.UserSpaceDetail, target Size: info.Size, FileHash: info.Hash, CreateTime: time.Now(), - UserSpaceIDs: []jcsypes.UserSpaceID{targetSpace.UserSpace.UserSpaceID}, + UserSpaceIDs: []jcstypes.UserSpaceID{targetSpace.UserSpace.UserSpaceID}, }) } diff --git a/client/sdk/api/v1/bucket.go b/client/sdk/api/v1/bucket.go index bf8e48e..91a0770 100644 --- a/client/sdk/api/v1/bucket.go +++ b/client/sdk/api/v1/bucket.go @@ -4,7 +4,7 @@ import ( "net/http" "gitlink.org.cn/cloudream/common/sdks" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type BucketService struct { @@ -18,7 +18,7 @@ func (c *Client) Bucket() *BucketService { const BucketGetPath = "/bucket/get" type BucketGet struct { - BucketID jcsypes.BucketID `json:"bucketID" binding:"required"` + BucketID jcstypes.BucketID `json:"bucketID" binding:"required"` } func (r *BucketGet) MakeParam() *sdks.RequestParam 
{ @@ -26,7 +26,7 @@ func (r *BucketGet) MakeParam() *sdks.RequestParam { } type BucketGetResp struct { - Bucket jcsypes.Bucket `json:"bucket"` + Bucket jcstypes.Bucket `json:"bucket"` } func (r *BucketGetResp) ParseResponse(resp *http.Response) error { @@ -48,7 +48,7 @@ func (r *BucketGetByName) MakeParam() *sdks.RequestParam { } type BucketGetByNameResp struct { - Bucket jcsypes.Bucket `json:"bucket"` + Bucket jcstypes.Bucket `json:"bucket"` } func (r *BucketGetByNameResp) ParseResponse(resp *http.Response) error { @@ -70,7 +70,7 @@ func (r *BucketCreate) MakeParam() *sdks.RequestParam { } type BucketCreateResp struct { - Bucket jcsypes.Bucket `json:"bucket"` + Bucket jcstypes.Bucket `json:"bucket"` } func (r *BucketCreateResp) ParseResponse(resp *http.Response) error { @@ -84,7 +84,7 @@ func (c *BucketService) Create(req BucketCreate) (*BucketCreateResp, error) { const BucketDeletePath = "/bucket/delete" type BucketDelete struct { - BucketID jcsypes.BucketID `json:"bucketID" binding:"required"` + BucketID jcstypes.BucketID `json:"bucketID" binding:"required"` } func (r *BucketDelete) MakeParam() *sdks.RequestParam { @@ -111,7 +111,7 @@ func (r *BucketListAll) MakeParam() *sdks.RequestParam { } type BucketListAllResp struct { - Buckets []jcsypes.Bucket `json:"buckets"` + Buckets []jcstypes.Bucket `json:"buckets"` } func (r *BucketListAllResp) ParseResponse(resp *http.Response) error { diff --git a/client/sdk/api/v1/package.go b/client/sdk/api/v1/package.go index db2e78a..fef593c 100644 --- a/client/sdk/api/v1/package.go +++ b/client/sdk/api/v1/package.go @@ -13,7 +13,7 @@ import ( "gitlink.org.cn/cloudream/common/sdks" "gitlink.org.cn/cloudream/common/utils/http2" "gitlink.org.cn/cloudream/common/utils/serder" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type PackageService struct { @@ -27,7 +27,7 @@ func (c *Client) Package() *PackageService { const PackageGetPath = "/package/get" type PackageGet struct { - PackageID jcsypes.PackageID `form:"packageID" url:"packageID" binding:"required"` + PackageID jcstypes.PackageID `form:"packageID" url:"packageID" binding:"required"` } func (r *PackageGet) MakeParam() *sdks.RequestParam { @@ -35,7 +35,7 @@ func (r *PackageGet) MakeParam() *sdks.RequestParam { } type PackageGetResp struct { - Package jcsypes.Package `json:"package"` + Package jcstypes.Package `json:"package"` } func (r *PackageGetResp) ParseResponse(resp *http.Response) error { @@ -58,7 +58,7 @@ func (r *PackageGetByFullName) MakeParam() *sdks.RequestParam { } type PackageGetByFullNameResp struct { - Package jcsypes.Package `json:"package"` + Package jcstypes.Package `json:"package"` } func (r *PackageGetByFullNameResp) ParseResponse(resp *http.Response) error { @@ -72,8 +72,8 @@ func (c *PackageService) GetByFullName(req PackageGetByFullName) (*PackageGetByF const PackageCreatePath = "/package/create" type PackageCreate struct { - BucketID jcsypes.BucketID `json:"bucketID"` - Name string `json:"name"` + BucketID jcstypes.BucketID `json:"bucketID"` + Name string `json:"name"` } func (r *PackageCreate) MakeParam() *sdks.RequestParam { @@ -81,7 +81,7 @@ func (r *PackageCreate) MakeParam() *sdks.RequestParam { } type PackageCreateResp struct { - Package jcsypes.Package `json:"package"` + Package jcstypes.Package `json:"package"` } func (r *PackageCreateResp) ParseResponse(resp *http.Response) error { @@ -99,14 +99,14 @@ type PackageCreateUpload struct { Files UploadObjectIterator `json:"-"` } type 
PackageCreateUploadInfo struct { - BucketID jcsypes.BucketID `json:"bucketID" binding:"required"` - Name string `json:"name" binding:"required"` - CopyTo []jcsypes.UserSpaceID `json:"copyTo"` - CopyToPath []string `json:"copyToPath"` + BucketID jcstypes.BucketID `json:"bucketID" binding:"required"` + Name string `json:"name" binding:"required"` + CopyTo []jcstypes.UserSpaceID `json:"copyTo"` + CopyToPath []string `json:"copyToPath"` } type PackageCreateUploadResp struct { - Package jcsypes.Package `json:"package"` - Objects []jcsypes.Object `json:"objects"` + Package jcstypes.Package `json:"package"` + Objects []jcstypes.Object `json:"objects"` } func (c *PackageService) CreateUpload(req PackageCreateUpload) (*PackageCreateUploadResp, error) { @@ -148,10 +148,10 @@ func (c *PackageService) CreateUpload(req PackageCreateUpload) (*PackageCreateUp const PackageDownloadPath = "/package/download" type PackageDownload struct { - PackageID jcsypes.PackageID `url:"packageID" form:"packageID" binding:"required"` - Prefix string `url:"prefix" form:"prefix"` - NewPrefix *string `url:"newPrefix,omitempty" form:"newPrefix"` - Zip bool `url:"zip,omitempty" form:"zip"` + PackageID jcstypes.PackageID `url:"packageID" form:"packageID" binding:"required"` + Prefix string `url:"prefix" form:"prefix"` + NewPrefix *string `url:"newPrefix,omitempty" form:"newPrefix"` + Zip bool `url:"zip,omitempty" form:"zip"` } func (r *PackageDownload) MakeParam() *sdks.RequestParam { @@ -208,7 +208,7 @@ func (c *PackageService) Download(req PackageDownload) (*DownloadingPackage, err const PackageDeletePath = "/package/delete" type PackageDelete struct { - PackageID jcsypes.PackageID `json:"packageID" binding:"required"` + PackageID jcstypes.PackageID `json:"packageID" binding:"required"` } func (r *PackageDelete) MakeParam() *sdks.RequestParam { @@ -228,9 +228,9 @@ func (c *PackageService) Delete(req PackageDelete) error { const PackageClonePath = "/package/clone" type PackageClone struct { - PackageID jcsypes.PackageID `json:"packageID" binding:"required"` - BucketID jcsypes.BucketID `json:"bucketID" binding:"required"` - Name string `json:"name" binding:"required"` + PackageID jcstypes.PackageID `json:"packageID" binding:"required"` + BucketID jcstypes.BucketID `json:"bucketID" binding:"required"` + Name string `json:"name" binding:"required"` } func (r *PackageClone) MakeParam() *sdks.RequestParam { @@ -238,7 +238,7 @@ func (r *PackageClone) MakeParam() *sdks.RequestParam { } type PackageCloneResp struct { - Package jcsypes.Package `json:"package"` + Package jcstypes.Package `json:"package"` } func (r *PackageCloneResp) ParseResponse(resp *http.Response) error { @@ -252,7 +252,7 @@ func (c *PackageService) Clone(req PackageClone) (*PackageCloneResp, error) { const PackageListBucketPackagesPath = "/package/listBucketPackages" type PackageListBucketPackages struct { - BucketID jcsypes.BucketID `form:"bucketID" url:"bucketID" binding:"required"` + BucketID jcstypes.BucketID `form:"bucketID" url:"bucketID" binding:"required"` } func (r *PackageListBucketPackages) MakeParam() *sdks.RequestParam { @@ -260,7 +260,7 @@ func (r *PackageListBucketPackages) MakeParam() *sdks.RequestParam { } type PackageListBucketPackagesResp struct { - Packages []jcsypes.Package `json:"packages"` + Packages []jcstypes.Package `json:"packages"` } func (r *PackageListBucketPackagesResp) ParseResponse(resp *http.Response) error { diff --git a/client/sdk/api/v1/presigned.go b/client/sdk/api/v1/presigned.go index ec42615..ffd5077 100644 --- 
a/client/sdk/api/v1/presigned.go +++ b/client/sdk/api/v1/presigned.go @@ -7,7 +7,7 @@ import ( "github.com/google/go-querystring/query" "gitlink.org.cn/cloudream/jcs-pub/client/sdk/signer" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type PresignedService struct { @@ -33,10 +33,10 @@ func (c *PresignedService) ObjectListByPath(req PresignedObjectListByPath, expir const PresignedObjectDownloadByPathPath = "/presigned/object/downloadByPath" type PresignedObjectDownloadByPath struct { - PackageID jcsypes.PackageID `form:"packageID" url:"packageID" binding:"required"` - Path string `form:"path" url:"path" binding:"required"` - Offset int64 `form:"offset" url:"offset,omitempty"` - Length *int64 `form:"length" url:"length,omitempty"` + PackageID jcstypes.PackageID `form:"packageID" url:"packageID" binding:"required"` + Path string `form:"path" url:"path" binding:"required"` + Offset int64 `form:"offset" url:"offset,omitempty"` + Length *int64 `form:"length" url:"length,omitempty"` } func (c *PresignedService) ObjectDownloadByPath(req PresignedObjectDownloadByPath, expireIn int) (string, error) { @@ -46,9 +46,9 @@ func (c *PresignedService) ObjectDownloadByPath(req PresignedObjectDownloadByPat const PresignedObjectDownloadPath = "/presigned/object/download" type PresignedObjectDownload struct { - ObjectID jcsypes.ObjectID `form:"objectID" url:"objectID" binding:"required"` - Offset int64 `form:"offset" url:"offset,omitempty"` - Length *int64 `form:"length" url:"length,omitempty"` + ObjectID jcstypes.ObjectID `form:"objectID" url:"objectID" binding:"required"` + Offset int64 `form:"offset" url:"offset,omitempty"` + Length *int64 `form:"length" url:"length,omitempty"` } func (c *PresignedService) ObjectDownload(req PresignedObjectDownload, expireIn int) (string, error) { @@ -58,15 +58,15 @@ func (c *PresignedService) ObjectDownload(req PresignedObjectDownload, expireIn const PresignedObjectUploadPath = "/presigned/object/upload" type PresignedObjectUpload struct { - PackageID jcsypes.PackageID `form:"packageID" binding:"required" url:"packageID"` - Path string `form:"path" binding:"required" url:"path"` - Affinity jcsypes.UserSpaceID `form:"affinity" url:"affinity,omitempty"` - CopyTo []jcsypes.UserSpaceID `form:"copyTo" url:"copyTo,omitempty"` - CopyToPath []string `form:"copyToPath" url:"copyToPath,omitempty"` + PackageID jcstypes.PackageID `form:"packageID" binding:"required" url:"packageID"` + Path string `form:"path" binding:"required" url:"path"` + Affinity jcstypes.UserSpaceID `form:"affinity" url:"affinity,omitempty"` + CopyTo []jcstypes.UserSpaceID `form:"copyTo" url:"copyTo,omitempty"` + CopyToPath []string `form:"copyToPath" url:"copyToPath,omitempty"` } type PresignedObjectUploadResp struct { - Object jcsypes.Object `json:"object"` + Object jcstypes.Object `json:"object"` } func (c *PresignedService) ObjectUpload(req PresignedObjectUpload, expireIn int) (string, error) { @@ -76,12 +76,12 @@ func (c *PresignedService) ObjectUpload(req PresignedObjectUpload, expireIn int) const PresignedObjectNewMultipartUploadPath = "/presigned/object/newMultipartUpload" type PresignedObjectNewMultipartUpload struct { - PackageID jcsypes.PackageID `form:"packageID" binding:"required" url:"packageID"` - Path string `form:"path" binding:"required" url:"path"` + PackageID jcstypes.PackageID `form:"packageID" binding:"required" url:"packageID"` + Path string `form:"path" binding:"required" url:"path"` } type 
PresignedObjectNewMultipartUploadResp struct { - Object jcsypes.Object `json:"object"` + Object jcstypes.Object `json:"object"` } func (c *PresignedService) ObjectNewMultipartUpload(req PresignedObjectNewMultipartUpload, expireIn int) (string, error) { @@ -91,8 +91,8 @@ func (c *PresignedService) ObjectNewMultipartUpload(req PresignedObjectNewMultip const PresignedObjectUploadPartPath = "/presigned/object/uploadPart" type PresignedObjectUploadPart struct { - ObjectID jcsypes.ObjectID `form:"objectID" binding:"required" url:"objectID"` - Index int `form:"index" binding:"required" url:"index"` + ObjectID jcstypes.ObjectID `form:"objectID" binding:"required" url:"objectID"` + Index int `form:"index" binding:"required" url:"index"` } type PresignedUploadPartResp struct{} @@ -104,12 +104,12 @@ func (c *PresignedService) ObjectUploadPart(req PresignedObjectUploadPart, expir const PresignedObjectCompleteMultipartUploadPath = "/presigned/object/completeMultipartUpload" type PresignedObjectCompleteMultipartUpload struct { - ObjectID jcsypes.ObjectID `form:"objectID" binding:"required" url:"objectID"` - Indexes []int `form:"indexes" binding:"required" url:"indexes"` + ObjectID jcstypes.ObjectID `form:"objectID" binding:"required" url:"objectID"` + Indexes []int `form:"indexes" binding:"required" url:"indexes"` } type PresignedObjectCompleteMultipartUploadResp struct { - Object jcsypes.Object `json:"object"` + Object jcstypes.Object `json:"object"` } func (c *PresignedService) ObjectCompleteMultipartUpload(req PresignedObjectCompleteMultipartUpload, expireIn int) (string, error) { diff --git a/client/sdk/api/v1/space_syncer.go b/client/sdk/api/v1/space_syncer.go index 1adf080..297d370 100644 --- a/client/sdk/api/v1/space_syncer.go +++ b/client/sdk/api/v1/space_syncer.go @@ -4,7 +4,7 @@ import ( "net/http" "gitlink.org.cn/cloudream/common/sdks" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type SpaceSyncerService struct { @@ -20,14 +20,14 @@ func (c *Client) SpaceSyncer() *SpaceSyncerService { const SpaceSyncerCreateTaskPath = "/spaceSyncer/createTask" type SpaceSyncerCreateTask struct { - Trigger jcsypes.SpaceSyncTrigger `json:"trigger" binding:"required"` - Mode jcsypes.SpaceSyncMode `json:"mode" binding:"required"` - Filters []jcsypes.SpaceSyncFilter `json:"filters"` - Options jcsypes.SpaceSyncOptions `json:"options" binding:"required"` - SrcUserSpaceID jcsypes.UserSpaceID `json:"srcUserSpaceID" binding:"required"` - SrcPath string `json:"srcPath"` - DestUserSpaceIDs []jcsypes.UserSpaceID `json:"destUserSpaceIDs" binding:"required"` - DestPathes []string `json:"destPathes" binding:"required"` + Trigger jcstypes.SpaceSyncTrigger `json:"trigger" binding:"required"` + Mode jcstypes.SpaceSyncMode `json:"mode" binding:"required"` + Filters []jcstypes.SpaceSyncFilter `json:"filters"` + Options jcstypes.SpaceSyncOptions `json:"options" binding:"required"` + SrcUserSpaceID jcstypes.UserSpaceID `json:"srcUserSpaceID" binding:"required"` + SrcPath string `json:"srcPath"` + DestUserSpaceIDs []jcstypes.UserSpaceID `json:"destUserSpaceIDs" binding:"required"` + DestPathes []string `json:"destPathes" binding:"required"` } func (r *SpaceSyncerCreateTask) MakeParam() *sdks.RequestParam { @@ -35,7 +35,7 @@ func (r *SpaceSyncerCreateTask) MakeParam() *sdks.RequestParam { } type SpaceSyncerCreateTaskResp struct { - Task jcsypes.SpaceSyncTask `json:"task"` + Task jcstypes.SpaceSyncTask `json:"task"` } func (r *SpaceSyncerCreateTaskResp) 
ParseResponse(resp *http.Response) error { @@ -49,7 +49,7 @@ func (c *SpaceSyncerService) CreateTask(req SpaceSyncerCreateTask) (*SpaceSyncer const SpaceSyncerGetTaskPath = "/spaceSyncer/getTask" type SpaceSyncerGetTask struct { - TaskID jcsypes.SpaceSyncTaskID `url:"taskID" binding:"required"` + TaskID jcstypes.SpaceSyncTaskID `url:"taskID" binding:"required"` } func (r *SpaceSyncerGetTask) MakeParam() *sdks.RequestParam { @@ -57,7 +57,7 @@ func (r *SpaceSyncerGetTask) MakeParam() *sdks.RequestParam { } type SpaceSyncerGetTaskResp struct { - Task jcsypes.SpaceSyncTask `json:"task"` + Task jcstypes.SpaceSyncTask `json:"task"` } func (r *SpaceSyncerGetTaskResp) ParseResponse(resp *http.Response) error { @@ -71,7 +71,7 @@ func (c *SpaceSyncerService) GetTask(req SpaceSyncerGetTask) (*SpaceSyncerGetTas const SpaceSyncerCancelTaskPath = "/spaceSyncer/cancelTask" type SpaceSyncerCancelTask struct { - TaskID jcsypes.SpaceSyncTaskID `json:"taskID" binding:"required"` + TaskID jcstypes.SpaceSyncTaskID `json:"taskID" binding:"required"` } func (r *SpaceSyncerCancelTask) MakeParam() *sdks.RequestParam { diff --git a/client/sdk/api/v1/storage_test.go b/client/sdk/api/v1/storage_test.go index 12736ca..2e0fa57 100644 --- a/client/sdk/api/v1/storage_test.go +++ b/client/sdk/api/v1/storage_test.go @@ -10,7 +10,7 @@ import ( . "github.com/smartystreets/goconvey/convey" "gitlink.org.cn/cloudream/common/pkgs/iterator" "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func Test_PackageGet(t *testing.T) { @@ -74,7 +74,7 @@ func Test_Object(t *testing.T) { fileData[i] = byte(i) } - stgAff := jcsypes.UserSpaceID(2) + stgAff := jcstypes.UserSpaceID(2) pkgName := uuid.NewString() createResp, err := cli.Package().Create(PackageCreate{ diff --git a/client/sdk/api/v1/system.go b/client/sdk/api/v1/system.go index c6afc9e..bb6325f 100644 --- a/client/sdk/api/v1/system.go +++ b/client/sdk/api/v1/system.go @@ -4,7 +4,7 @@ import ( "net/http" "gitlink.org.cn/cloudream/common/sdks" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type SystemService struct { @@ -25,7 +25,7 @@ func (r *SystemStatus) MakeParam() *sdks.RequestParam { } type SystemStatusResp struct { - SpeedStats jcsypes.SpeedStatsStatus `json:"speedStats"` + SpeedStats jcstypes.SpeedStatsStatus `json:"speedStats"` } func (r *SystemStatusResp) ParseResponse(resp *http.Response) error { diff --git a/client/sdk/api/v1/user_space.go b/client/sdk/api/v1/user_space.go index b78e22a..3e1cda7 100644 --- a/client/sdk/api/v1/user_space.go +++ b/client/sdk/api/v1/user_space.go @@ -4,8 +4,7 @@ import ( "net/http" "gitlink.org.cn/cloudream/common/sdks" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type UserSpaceService struct { @@ -19,9 +18,9 @@ func (c *Client) UserSpace() *UserSpaceService { const UserSpaceDownloadPackagePath = "/userSpace/downloadPackage" type UserSpaceDownloadPackageReq struct { - PackageID jcsypes.PackageID `json:"packageID" binding:"required"` - UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID" binding:"required"` - RootPath string `json:"rootPath"` + PackageID jcstypes.PackageID `json:"packageID" binding:"required"` + UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID" binding:"required"` + RootPath string `json:"rootPath"` } 
func (r *UserSpaceDownloadPackageReq) MakeParam() *sdks.RequestParam { @@ -41,11 +40,11 @@ func (c *UserSpaceService) DownloadPackage(req UserSpaceDownloadPackageReq) (*Us const UserSpaceCreatePackagePath = "/userSpace/createPackage" type UserSpaceCreatePackageReq struct { - UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID" binding:"required"` - Path string `json:"path" binding:"required"` - BucketID jcsypes.BucketID `json:"bucketID" binding:"required"` - Name string `json:"name" binding:"required"` - SpaceAffinity jcsypes.UserSpaceID `json:"spaceAffinity"` + UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID" binding:"required"` + Path string `json:"path" binding:"required"` + BucketID jcstypes.BucketID `json:"bucketID" binding:"required"` + Name string `json:"name" binding:"required"` + SpaceAffinity jcstypes.UserSpaceID `json:"spaceAffinity"` } func (r *UserSpaceCreatePackageReq) MakeParam() *sdks.RequestParam { @@ -53,7 +52,7 @@ func (r *UserSpaceCreatePackageReq) MakeParam() *sdks.RequestParam { } type UserSpaceCreatePackageResp struct { - Package jcsypes.Package `json:"package"` + Package jcstypes.Package `json:"package"` } func (r *UserSpaceCreatePackageResp) ParseResponse(resp *http.Response) error { @@ -67,7 +66,7 @@ func (c *UserSpaceService) CreatePackage(req UserSpaceCreatePackageReq) (*UserSp const UserSpaceGetPath = "/userSpace/get" type UserSpaceGet struct { - UserSpaceID jcsypes.UserSpaceID `form:"userSpaceID" url:"userSpaceID" binding:"required"` + UserSpaceID jcstypes.UserSpaceID `form:"userSpaceID" url:"userSpaceID" binding:"required"` } func (r *UserSpaceGet) MakeParam() *sdks.RequestParam { @@ -75,7 +74,7 @@ func (r *UserSpaceGet) MakeParam() *sdks.RequestParam { } type UserSpaceGetResp struct { - UserSpace jcsypes.UserSpace `json:"userSpace"` + UserSpace jcstypes.UserSpace `json:"userSpace"` } func (r *UserSpaceGetResp) ParseResponse(resp *http.Response) error { @@ -97,7 +96,7 @@ func (r *UserSpaceGetByName) MakeParam() *sdks.RequestParam { } type UserSpaceGetByNameResp struct { - UserSpace jcsypes.UserSpace `json:"userSpace"` + UserSpace jcstypes.UserSpace `json:"userSpace"` } func (r *UserSpaceGetByNameResp) ParseResponse(resp *http.Response) error { @@ -117,7 +116,7 @@ func (r *UserSpaceGetAll) MakeParam() *sdks.RequestParam { } type UserSpaceGetAllResp struct { - UserSpaces []jcsypes.UserSpace `json:"userSpaces"` + UserSpaces []jcstypes.UserSpace `json:"userSpaces"` } func (r *UserSpaceGetAllResp) ParseResponse(resp *http.Response) error { @@ -134,10 +133,10 @@ const UserSpaceCreatePath = "/userSpace/create" type UserSpaceCreate struct { Name string `json:"name" binding:"required"` - Storage cortypes.StorageType `json:"storage" binding:"required"` - Credential cortypes.StorageCredential `json:"credential" binding:"required"` - ShardStore *cortypes.ShardStoreUserConfig `json:"shardStore"` - Features []cortypes.StorageFeature `json:"features"` + Storage jcstypes.StorageType `json:"storage" binding:"required"` + Credential jcstypes.StorageCredential `json:"credential" binding:"required"` + ShardStore *jcstypes.ShardStoreUserConfig `json:"shardStore"` + Features []jcstypes.StorageFeature `json:"features"` WorkingDir string `json:"workingDir"` } @@ -146,7 +145,7 @@ func (r *UserSpaceCreate) MakeParam() *sdks.RequestParam { } type UserSpaceCreateResp struct { - UserSpace jcsypes.UserSpace `json:"userSpace"` + UserSpace jcstypes.UserSpace `json:"userSpace"` } func (r *UserSpaceCreateResp) ParseResponse(resp *http.Response) error { @@ -161,10 +160,10 @@ func (c 
*UserSpaceService) Create(req UserSpaceCreate) (*UserSpaceCreateResp, er const UserSpaceUpdatePath = "/userSpace/update" type UserSpaceUpdate struct { - UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID" binding:"required"` + UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID" binding:"required"` Name string `json:"name" binding:"required"` - Credential cortypes.StorageCredential `json:"credential" binding:"required"` - Features []cortypes.StorageFeature `json:"features"` + Credential jcstypes.StorageCredential `json:"credential" binding:"required"` + Features []jcstypes.StorageFeature `json:"features"` } func (r *UserSpaceUpdate) MakeParam() *sdks.RequestParam { @@ -172,7 +171,7 @@ func (r *UserSpaceUpdate) MakeParam() *sdks.RequestParam { } type UserSpaceUpdateResp struct { - UserSpace jcsypes.UserSpace `json:"userSpace"` + UserSpace jcstypes.UserSpace `json:"userSpace"` } func (r *UserSpaceUpdateResp) ParseResponse(resp *http.Response) error { @@ -187,7 +186,7 @@ func (c *UserSpaceService) Update(req UserSpaceUpdate) (*UserSpaceUpdateResp, er const UserSpaceDeletePath = "/userSpace/delete" type UserSpaceDelete struct { - UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID" binding:"required"` + UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID" binding:"required"` } func (r *UserSpaceDelete) MakeParam() *sdks.RequestParam { @@ -208,8 +207,8 @@ func (c *UserSpaceService) Delete(req UserSpaceDelete) (*UserSpaceDeleteResp, er const UserSpaceTestPath = "/userSpace/test" type UserSpaceTest struct { - Storage cortypes.StorageType `json:"storage" binding:"required"` - Credential cortypes.StorageCredential `json:"credential" binding:"required"` + Storage jcstypes.StorageType `json:"storage" binding:"required"` + Credential jcstypes.StorageCredential `json:"credential" binding:"required"` WorikingDir string `json:"workingDir"` } diff --git a/common/globals/globals.go b/common/globals/globals.go index ea8634c..e4c80fc 100644 --- a/common/globals/globals.go +++ b/common/globals/globals.go @@ -1,7 +1,7 @@ package stgglb import ( - "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type LocalMachineInfo struct { diff --git a/common/globals/utils.go b/common/globals/utils.go index 87ad9e7..cddd346 100644 --- a/common/globals/utils.go +++ b/common/globals/utils.go @@ -1,9 +1,9 @@ package stgglb -import cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" +import jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" // 根据当前节点与目标地址的距离关系,选择合适的地址 -func SelectGRPCAddress(hub *cortypes.Hub, addr *cortypes.GRPCAddressInfo) (string, int) { +func SelectGRPCAddress(hub *jcstypes.Hub, addr *jcstypes.GRPCAddressInfo) (string, int) { // TODO 重新设计选择LocalIP的策略 return addr.ExternalIP, addr.ExternalGRPCPort } diff --git a/common/pkgs/accesstoken/accesstoken.go b/common/pkgs/accesstoken/accesstoken.go index 0c6692c..67f3249 100644 --- a/common/pkgs/accesstoken/accesstoken.go +++ b/common/pkgs/accesstoken/accesstoken.go @@ -10,7 +10,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/async" "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type CacheEvent interface { @@ -23,17 +23,17 @@ type ExitEvent struct { } type CacheKey struct { - UserID cortypes.UserID - TokenID cortypes.AccessTokenID + UserID jcstypes.UserID + TokenID jcstypes.AccessTokenID } var ErrTokenNotFound = 
fmt.Errorf("token not found") -type AccessTokenLoader func(key CacheKey) (cortypes.UserAccessToken, error) +type AccessTokenLoader func(key CacheKey) (jcstypes.UserAccessToken, error) type CacheEntry struct { IsTokenValid bool - Token cortypes.UserAccessToken + Token jcstypes.UserAccessToken PublicKey ed25519.PublicKey LoadedAt time.Time LastUsedAt time.Time @@ -227,6 +227,6 @@ func (mc *Cache) Verify(authInfo rpc.AccessTokenAuthInfo) bool { return ed25519.Verify(token.PublicKey, []byte(MakeStringToSign(authInfo.UserID, authInfo.AccessTokenID, authInfo.Nonce)), []byte(sig)) } -func MakeStringToSign(userID cortypes.UserID, tokenID cortypes.AccessTokenID, nonce string) string { +func MakeStringToSign(userID jcstypes.UserID, tokenID jcstypes.AccessTokenID, nonce string) string { return fmt.Sprintf("%v.%v.%v", userID, tokenID, nonce) } diff --git a/common/pkgs/connectivity/collector.go b/common/pkgs/connectivity/collector.go index 99bb321..9d3429d 100644 --- a/common/pkgs/connectivity/collector.go +++ b/common/pkgs/connectivity/collector.go @@ -11,7 +11,7 @@ import ( stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type CollectorEvent interface { @@ -28,7 +28,7 @@ type CollectedEvent struct { } type Connectivity struct { - ToHubID cortypes.HubID + ToHubID jcstypes.HubID Latency *time.Duration TestTime time.Time } @@ -38,7 +38,7 @@ type Collector struct { enabled bool collectNow chan any done chan any - connectivities map[cortypes.HubID]Connectivity + connectivities map[jcstypes.HubID]Connectivity lock *sync.RWMutex } @@ -48,7 +48,7 @@ func NewEnabled(cfg Config) *Collector { enabled: true, collectNow: make(chan any, 1), done: make(chan any, 1), - connectivities: make(map[cortypes.HubID]Connectivity), + connectivities: make(map[jcstypes.HubID]Connectivity), lock: &sync.RWMutex{}, } return &rpt @@ -58,16 +58,16 @@ func NewDisabled() *Collector { enabled: false, collectNow: make(chan any, 1), done: make(chan any, 1), - connectivities: make(map[cortypes.HubID]Connectivity), + connectivities: make(map[jcstypes.HubID]Connectivity), lock: &sync.RWMutex{}, } } -func (r *Collector) GetAll() map[cortypes.HubID]Connectivity { +func (r *Collector) GetAll() map[jcstypes.HubID]Connectivity { r.lock.RLock() defer r.lock.RUnlock() - ret := make(map[cortypes.HubID]Connectivity) + ret := make(map[jcstypes.HubID]Connectivity) for k, v := range r.connectivities { ret[k] = v } @@ -170,7 +170,7 @@ func (r *Collector) testing() bool { r.lock.Lock() // 删除所有hub的记录,然后重建,避免hub数量变化时导致残余数据 - r.connectivities = make(map[cortypes.HubID]Connectivity) + r.connectivities = make(map[jcstypes.HubID]Connectivity) for _, con := range cons { r.connectivities[con.ToHubID] = con } @@ -179,13 +179,13 @@ func (r *Collector) testing() bool { return true } -func (r *Collector) ping(hub cortypes.Hub) Connectivity { +func (r *Collector) ping(hub jcstypes.Hub) Connectivity { log := logger.WithType[Collector]("").WithField("HubID", hub.HubID) var ip string var port int switch addr := hub.Address.(type) { - case *cortypes.GRPCAddressInfo: + case *jcstypes.GRPCAddressInfo: // TODO 重新设计选择LocalIP的策略 ip = addr.ExternalIP port = addr.ExternalGRPCPort diff --git a/common/pkgs/ioswitch2/fromto.go b/common/pkgs/ioswitch2/fromto.go index c42269c..df51d86 100644 --- 
a/common/pkgs/ioswitch2/fromto.go +++ b/common/pkgs/ioswitch2/fromto.go @@ -4,7 +4,7 @@ import ( "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type From interface { @@ -69,9 +69,9 @@ type FromTos []FromTo type FromTo struct { // 如果输入或者输出用到了EC编码的流,则需要提供EC参数。 - ECParam *jcsypes.ECRedundancy + ECParam *jcstypes.ECRedundancy // 同上 - SegmentParam *jcsypes.SegmentRedundancy + SegmentParam *jcstypes.SegmentRedundancy Froms []From Toes []To } @@ -110,12 +110,12 @@ func (f *FromDriver) GetStreamIndex() StreamIndex { } type FromShardStore struct { - FileHash jcsypes.FileHash - UserSpace jcsypes.UserSpaceDetail + FileHash jcstypes.FileHash + UserSpace jcstypes.UserSpaceDetail StreamIndex StreamIndex } -func NewFromShardstore(fileHash jcsypes.FileHash, space jcsypes.UserSpaceDetail, strIdx StreamIndex) *FromShardStore { +func NewFromShardstore(fileHash jcstypes.FileHash, space jcstypes.UserSpaceDetail, strIdx StreamIndex) *FromShardStore { return &FromShardStore{ FileHash: fileHash, UserSpace: space, @@ -128,11 +128,11 @@ func (f *FromShardStore) GetStreamIndex() StreamIndex { } type FromBaseStore struct { - UserSpace jcsypes.UserSpaceDetail - Path jcsypes.JPath + UserSpace jcstypes.UserSpaceDetail + Path jcstypes.JPath } -func NewFromBaseStore(space jcsypes.UserSpaceDetail, path jcsypes.JPath) *FromBaseStore { +func NewFromBaseStore(space jcstypes.UserSpaceDetail, path jcstypes.JPath) *FromBaseStore { return &FromBaseStore{ UserSpace: space, Path: path, @@ -177,13 +177,13 @@ func (t *ToDriver) GetRange() math2.Range { } type ToShardStore struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail StreamIndex StreamIndex Range math2.Range ResultStoreKey string } -func NewToShardStore(space jcsypes.UserSpaceDetail, strIdx StreamIndex, retStoreKey string) *ToShardStore { +func NewToShardStore(space jcstypes.UserSpaceDetail, strIdx StreamIndex, retStoreKey string) *ToShardStore { return &ToShardStore{ UserSpace: space, StreamIndex: strIdx, @@ -191,7 +191,7 @@ func NewToShardStore(space jcsypes.UserSpaceDetail, strIdx StreamIndex, retStore } } -func NewToShardStoreWithRange(space jcsypes.UserSpaceDetail, streamIndex StreamIndex, retStoreKey string, rng math2.Range) *ToShardStore { +func NewToShardStoreWithRange(space jcstypes.UserSpaceDetail, streamIndex StreamIndex, retStoreKey string, rng math2.Range) *ToShardStore { return &ToShardStore{ UserSpace: space, StreamIndex: streamIndex, @@ -209,12 +209,12 @@ func (t *ToShardStore) GetRange() math2.Range { } type ToBaseStore struct { - UserSpace jcsypes.UserSpaceDetail - ObjectPath jcsypes.JPath + UserSpace jcstypes.UserSpaceDetail + ObjectPath jcstypes.JPath Option types.WriteOption } -func NewToBaseStore(space jcsypes.UserSpaceDetail, objectPath jcsypes.JPath) *ToBaseStore { +func NewToBaseStore(space jcstypes.UserSpaceDetail, objectPath jcstypes.JPath) *ToBaseStore { return &ToBaseStore{ UserSpace: space, ObjectPath: objectPath, diff --git a/common/pkgs/ioswitch2/http_hub_worker.go b/common/pkgs/ioswitch2/http_hub_worker.go index c90b350..31721d3 100644 --- a/common/pkgs/ioswitch2/http_hub_worker.go +++ b/common/pkgs/ioswitch2/http_hub_worker.go @@ -9,16 +9,16 @@ import ( "gitlink.org.cn/cloudream/common/utils/io2" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" 
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" hubapi "gitlink.org.cn/cloudream/jcs-pub/hub/sdk/api" ) type HttpHubWorker struct { - Hub cortypes.Hub + Hub jcstypes.Hub } func (w *HttpHubWorker) NewClient() (exec.WorkerClient, error) { - addressInfo := w.Hub.Address.(*cortypes.HttpAddressInfo) + addressInfo := w.Hub.Address.(*jcstypes.HttpAddressInfo) baseUrl := "http://" + addressInfo.ExternalIP + ":" + strconv.Itoa(addressInfo.Port) config := hubapi.Config{ URL: baseUrl, @@ -47,7 +47,7 @@ func (w *HttpHubWorker) Equals(worker exec.WorkerInfo) bool { } type HttpHubWorkerClient struct { - hubID cortypes.HubID + hubID jcstypes.HubID cli *hubapi.Client } diff --git a/common/pkgs/ioswitch2/hub_worker.go b/common/pkgs/ioswitch2/hub_worker.go index ab819d8..44db251 100644 --- a/common/pkgs/ioswitch2/hub_worker.go +++ b/common/pkgs/ioswitch2/hub_worker.go @@ -11,7 +11,7 @@ import ( stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) var _ = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[exec.WorkerInfo]( @@ -20,8 +20,8 @@ var _ = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[exec.Wo ))) type HubWorker struct { - Hub cortypes.Hub - Address cortypes.GRPCAddressInfo + Hub jcstypes.Hub + Address jcstypes.GRPCAddressInfo } func (w *HubWorker) NewClient() (exec.WorkerClient, error) { @@ -43,7 +43,7 @@ func (w *HubWorker) Equals(worker exec.WorkerInfo) bool { } type HubWorkerClient struct { - hubID cortypes.HubID + hubID jcstypes.HubID cli *hubrpc.Client } diff --git a/common/pkgs/ioswitch2/ops2/base_store.go b/common/pkgs/ioswitch2/ops2/base_store.go index 2a51427..46d84d5 100644 --- a/common/pkgs/ioswitch2/ops2/base_store.go +++ b/common/pkgs/ioswitch2/ops2/base_store.go @@ -13,7 +13,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) const ( @@ -43,8 +43,8 @@ func (v *BaseReadStatsValue) Clone() exec.VarValue { type BaseRead struct { Output exec.VarID - UserSpace jcsypes.UserSpaceDetail - Path jcsypes.JPath + UserSpace jcstypes.UserSpaceDetail + Path jcstypes.JPath Option types.OpenOption } @@ -101,7 +101,7 @@ func (o *BaseRead) String() string { } type BaseReadDyn struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail Output exec.VarID FileInfo exec.VarID Option types.OpenOption @@ -167,8 +167,8 @@ func (o *BaseReadDyn) String() string { type BaseWrite struct { Input exec.VarID - UserSpace jcsypes.UserSpaceDetail - Path jcsypes.JPath + UserSpace jcstypes.UserSpaceDetail + Path jcstypes.JPath FileInfo exec.VarID Option types.WriteOption } @@ -213,12 +213,12 @@ func (o *BaseWrite) String() string { type BaseReadNode struct { dag.NodeBase From ioswitch2.From - UserSpace jcsypes.UserSpaceDetail - Path jcsypes.JPath + UserSpace jcstypes.UserSpaceDetail + Path jcstypes.JPath Option types.OpenOption } -func (b *GraphNodeBuilder) NewBaseRead(from ioswitch2.From, userSpace jcsypes.UserSpaceDetail, path 
jcsypes.JPath, opt types.OpenOption) *BaseReadNode { +func (b *GraphNodeBuilder) NewBaseRead(from ioswitch2.From, userSpace jcstypes.UserSpaceDetail, path jcstypes.JPath, opt types.OpenOption) *BaseReadNode { node := &BaseReadNode{ From: from, UserSpace: userSpace, @@ -254,11 +254,11 @@ func (t *BaseReadNode) GenerateOp() (exec.Op, error) { type BaseReadDynNode struct { dag.NodeBase From ioswitch2.From - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail Option types.OpenOption } -func (b *GraphNodeBuilder) NewBaseReadDyn(from ioswitch2.From, userSpace jcsypes.UserSpaceDetail, opt types.OpenOption) *BaseReadDynNode { +func (b *GraphNodeBuilder) NewBaseReadDyn(from ioswitch2.From, userSpace jcstypes.UserSpaceDetail, opt types.OpenOption) *BaseReadDynNode { node := &BaseReadDynNode{ From: from, UserSpace: userSpace, @@ -301,12 +301,12 @@ func (t *BaseReadDynNode) GenerateOp() (exec.Op, error) { type BaseWriteNode struct { dag.NodeBase To ioswitch2.To - UserSpace jcsypes.UserSpaceDetail - Path jcsypes.JPath + UserSpace jcstypes.UserSpaceDetail + Path jcstypes.JPath Option types.WriteOption } -func (b *GraphNodeBuilder) NewBaseWrite(to ioswitch2.To, userSpace jcsypes.UserSpaceDetail, path jcsypes.JPath, opt types.WriteOption) *BaseWriteNode { +func (b *GraphNodeBuilder) NewBaseWrite(to ioswitch2.To, userSpace jcstypes.UserSpaceDetail, path jcstypes.JPath, opt types.WriteOption) *BaseWriteNode { node := &BaseWriteNode{ To: to, UserSpace: userSpace, diff --git a/common/pkgs/ioswitch2/ops2/bypass.go b/common/pkgs/ioswitch2/ops2/bypass.go index 6397022..098ddb7 100644 --- a/common/pkgs/ioswitch2/ops2/bypass.go +++ b/common/pkgs/ioswitch2/ops2/bypass.go @@ -7,7 +7,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { @@ -17,8 +17,8 @@ func init() { // 旁路Http读取 type GetShardHTTPRequest struct { - UserSpace jcsypes.UserSpaceDetail - FileHash jcsypes.FileHash + UserSpace jcstypes.UserSpaceDetail + FileHash jcstypes.FileHash Output exec.VarID } @@ -64,11 +64,11 @@ func (o *GetShardHTTPRequest) String() string { // 旁路Http读取 type GetShardHTTPRequestNode struct { dag.NodeBase - UserSpace jcsypes.UserSpaceDetail - FileHash jcsypes.FileHash + UserSpace jcstypes.UserSpaceDetail + FileHash jcstypes.FileHash } -func (b *GraphNodeBuilder) NewGetShardHTTPRequest(userSpace jcsypes.UserSpaceDetail, fileHash jcsypes.FileHash) *GetShardHTTPRequestNode { +func (b *GraphNodeBuilder) NewGetShardHTTPRequest(userSpace jcstypes.UserSpaceDetail, fileHash jcstypes.FileHash) *GetShardHTTPRequestNode { node := &GetShardHTTPRequestNode{ UserSpace: userSpace, FileHash: fileHash, diff --git a/common/pkgs/ioswitch2/ops2/ec.go b/common/pkgs/ioswitch2/ops2/ec.go index f4480d9..bf0b78c 100644 --- a/common/pkgs/ioswitch2/ops2/ec.go +++ b/common/pkgs/ioswitch2/ops2/ec.go @@ -13,7 +13,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/utils" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { @@ -146,7 +146,7 @@ func (o *ECMultiply) String() string { } type CallECMultiplier struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace 
jcstypes.UserSpaceDetail Coef [][]byte Inputs []exec.VarID Outputs []exec.VarID @@ -203,12 +203,12 @@ func (o *CallECMultiplier) String() string { type ECMultiplyNode struct { dag.NodeBase - EC jcsypes.ECRedundancy + EC jcstypes.ECRedundancy InputIndexes []int OutputIndexes []int } -func (b *GraphNodeBuilder) NewECMultiply(ec jcsypes.ECRedundancy) *ECMultiplyNode { +func (b *GraphNodeBuilder) NewECMultiply(ec jcstypes.ECRedundancy) *ECMultiplyNode { node := &ECMultiplyNode{ EC: ec, } @@ -257,13 +257,13 @@ func (t *ECMultiplyNode) GenerateOp() (exec.Op, error) { type CallECMultiplierNode struct { dag.NodeBase - UserSpace jcsypes.UserSpaceDetail - EC jcsypes.ECRedundancy + UserSpace jcstypes.UserSpaceDetail + EC jcstypes.ECRedundancy InputIndexes []int OutputIndexes []int } -func (b *GraphNodeBuilder) NewCallECMultiplier(userSpace jcsypes.UserSpaceDetail) *CallECMultiplierNode { +func (b *GraphNodeBuilder) NewCallECMultiplier(userSpace jcstypes.UserSpaceDetail) *CallECMultiplierNode { node := &CallECMultiplierNode{ UserSpace: userSpace, } diff --git a/common/pkgs/ioswitch2/ops2/multipart.go b/common/pkgs/ioswitch2/ops2/multipart.go index 438ec44..6dbe782 100644 --- a/common/pkgs/ioswitch2/ops2/multipart.go +++ b/common/pkgs/ioswitch2/ops2/multipart.go @@ -9,7 +9,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { @@ -40,7 +40,7 @@ func (v *UploadedPartInfoValue) Clone() exec.VarValue { } type MultipartInitiator struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail UploadArgs exec.VarID UploadedParts []exec.VarID FileOutput exec.VarID // 分片上传之后的临时文件的路径 @@ -99,7 +99,7 @@ func (o *MultipartInitiator) String() string { } type MultipartUpload struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail UploadArgs exec.VarID UploadResult exec.VarID PartStream exec.VarID @@ -149,10 +149,10 @@ func (o *MultipartUpload) String() string { type MultipartInitiatorNode struct { dag.NodeBase - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail } -func (b *GraphNodeBuilder) NewMultipartInitiator(userSpace jcsypes.UserSpaceDetail) *MultipartInitiatorNode { +func (b *GraphNodeBuilder) NewMultipartInitiator(userSpace jcstypes.UserSpaceDetail) *MultipartInitiatorNode { node := &MultipartInitiatorNode{ UserSpace: userSpace, } @@ -194,12 +194,12 @@ func (n *MultipartInitiatorNode) GenerateOp() (exec.Op, error) { type MultipartUploadNode struct { dag.NodeBase - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail PartNumber int PartSize int64 } -func (b *GraphNodeBuilder) NewMultipartUpload(userSpace jcsypes.UserSpaceDetail, partNumber int, partSize int64) *MultipartUploadNode { +func (b *GraphNodeBuilder) NewMultipartUpload(userSpace jcstypes.UserSpaceDetail, partNumber int, partSize int64) *MultipartUploadNode { node := &MultipartUploadNode{ UserSpace: userSpace, PartNumber: partNumber, diff --git a/common/pkgs/ioswitch2/ops2/s2s.go b/common/pkgs/ioswitch2/ops2/s2s.go index 090f123..6b54bb8 100644 --- a/common/pkgs/ioswitch2/ops2/s2s.go +++ b/common/pkgs/ioswitch2/ops2/s2s.go @@ -6,7 +6,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" 
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { @@ -15,10 +15,10 @@ func init() { } type S2STransfer struct { - SrcSpace jcsypes.UserSpaceDetail - SrcPath jcsypes.JPath - DstSpace jcsypes.UserSpaceDetail - DstPath jcsypes.JPath + SrcSpace jcstypes.UserSpaceDetail + SrcPath jcstypes.JPath + DstSpace jcstypes.UserSpaceDetail + DstPath jcstypes.JPath Output exec.VarID } @@ -55,10 +55,10 @@ func (o *S2STransfer) String() string { } type S2STransferDyn struct { - SrcSpace jcsypes.UserSpaceDetail + SrcSpace jcstypes.UserSpaceDetail SrcFileInfo exec.VarID - DstSpace jcsypes.UserSpaceDetail - DstPath jcsypes.JPath + DstSpace jcstypes.UserSpaceDetail + DstPath jcstypes.JPath Output exec.VarID } @@ -101,13 +101,13 @@ func (o *S2STransferDyn) String() string { type S2STransferNode struct { dag.NodeBase - SrcSpace jcsypes.UserSpaceDetail - SrcPath jcsypes.JPath - DstSpace jcsypes.UserSpaceDetail - DstPath jcsypes.JPath + SrcSpace jcstypes.UserSpaceDetail + SrcPath jcstypes.JPath + DstSpace jcstypes.UserSpaceDetail + DstPath jcstypes.JPath } -func (b *GraphNodeBuilder) NewS2STransfer(srcSpace jcsypes.UserSpaceDetail, srcPath jcsypes.JPath, dstSpace jcsypes.UserSpaceDetail, dstPath jcsypes.JPath) *S2STransferNode { +func (b *GraphNodeBuilder) NewS2STransfer(srcSpace jcstypes.UserSpaceDetail, srcPath jcstypes.JPath, dstSpace jcstypes.UserSpaceDetail, dstPath jcstypes.JPath) *S2STransferNode { n := &S2STransferNode{ SrcSpace: srcSpace, SrcPath: srcPath, @@ -139,12 +139,12 @@ func (n *S2STransferNode) GenerateOp() (exec.Op, error) { type S2STransferDynNode struct { dag.NodeBase - SrcSpace jcsypes.UserSpaceDetail - DstSpace jcsypes.UserSpaceDetail - DstPath jcsypes.JPath + SrcSpace jcstypes.UserSpaceDetail + DstSpace jcstypes.UserSpaceDetail + DstPath jcstypes.JPath } -func (b *GraphNodeBuilder) NewS2STransferDyn(srcSpace jcsypes.UserSpaceDetail, dstSpace jcsypes.UserSpaceDetail, dstPath jcsypes.JPath) *S2STransferDynNode { +func (b *GraphNodeBuilder) NewS2STransferDyn(srcSpace jcstypes.UserSpaceDetail, dstSpace jcstypes.UserSpaceDetail, dstPath jcstypes.JPath) *S2STransferDynNode { n := &S2STransferDynNode{ SrcSpace: srcSpace, DstSpace: dstSpace, diff --git a/common/pkgs/ioswitch2/ops2/shard_store.go b/common/pkgs/ioswitch2/ops2/shard_store.go index 4a5d610..8c07720 100644 --- a/common/pkgs/ioswitch2/ops2/shard_store.go +++ b/common/pkgs/ioswitch2/ops2/shard_store.go @@ -6,7 +6,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { @@ -15,8 +15,8 @@ func init() { } type GetShardInfo struct { - UserSpace jcsypes.UserSpaceDetail - FileHash jcsypes.FileHash + UserSpace jcstypes.UserSpaceDetail + FileHash jcstypes.FileHash ShardInfo exec.VarID } @@ -46,7 +46,7 @@ func (o *GetShardInfo) String() string { } type StoreShard struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail FileInfo exec.VarID ShardInfo exec.VarID } @@ -84,11 +84,11 @@ func (o *StoreShard) String() string { type GetShardInfoNode struct { dag.NodeBase - UserSpace jcsypes.UserSpaceDetail - FileHash jcsypes.FileHash + UserSpace jcstypes.UserSpaceDetail + FileHash jcstypes.FileHash } -func (b 
*GraphNodeBuilder) NewGetShardInfo(userSpace jcsypes.UserSpaceDetail, fileHash jcsypes.FileHash) *GetShardInfoNode { +func (b *GraphNodeBuilder) NewGetShardInfo(userSpace jcstypes.UserSpaceDetail, fileHash jcstypes.FileHash) *GetShardInfoNode { node := &GetShardInfoNode{ UserSpace: userSpace, FileHash: fileHash, @@ -116,11 +116,11 @@ func (n *GetShardInfoNode) GenerateOp() (exec.Op, error) { type StoreShardNode struct { dag.NodeBase - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail ShardInfoKey string } -func (b *GraphNodeBuilder) NewStoreShard(userSpace jcsypes.UserSpaceDetail, shardInfoKey string) *StoreShardNode { +func (b *GraphNodeBuilder) NewStoreShard(userSpace jcstypes.UserSpaceDetail, shardInfoKey string) *StoreShardNode { node := &StoreShardNode{ UserSpace: userSpace, ShardInfoKey: shardInfoKey, diff --git a/common/pkgs/ioswitch2/parser/gen/generator.go b/common/pkgs/ioswitch2/parser/gen/generator.go index 0c5cbaa..251b8d5 100644 --- a/common/pkgs/ioswitch2/parser/gen/generator.go +++ b/common/pkgs/ioswitch2/parser/gen/generator.go @@ -12,8 +12,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser/state" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) // 检查使用不同编码时参数是否设置到位 @@ -385,17 +384,17 @@ func buildToNode(ctx *state.GenerateState, t ioswitch2.To) (ops2.ToNode, error) } } -func setEnvBySpace(n dag.Node, space *jcsypes.UserSpaceDetail) error { +func setEnvBySpace(n dag.Node, space *jcstypes.UserSpaceDetail) error { if space.RecommendHub == nil { n.Env().ToEnvDriver(true) return nil } switch addr := space.RecommendHub.Address.(type) { - case *cortypes.HttpAddressInfo: + case *jcstypes.HttpAddressInfo: n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: *space.RecommendHub}, true) - case *cortypes.GRPCAddressInfo: + case *jcstypes.GRPCAddressInfo: n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: *space.RecommendHub, Address: *addr}, true) default: diff --git a/common/pkgs/ioswitch2/parser/opt/utils.go b/common/pkgs/ioswitch2/parser/opt/utils.go index b631c37..3844895 100644 --- a/common/pkgs/ioswitch2/parser/opt/utils.go +++ b/common/pkgs/ioswitch2/parser/opt/utils.go @@ -5,21 +5,20 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) -func setEnvBySpace(n dag.Node, space *jcsypes.UserSpaceDetail) error { +func setEnvBySpace(n dag.Node, space *jcstypes.UserSpaceDetail) error { if space.RecommendHub == nil { n.Env().ToEnvDriver(true) return nil } switch addr := space.RecommendHub.Address.(type) { - case *cortypes.HttpAddressInfo: + case *jcstypes.HttpAddressInfo: n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: *space.RecommendHub}, true) - case *cortypes.GRPCAddressInfo: + case *jcstypes.GRPCAddressInfo: n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: *space.RecommendHub, Address: *addr}, true) default: diff --git a/common/pkgs/ioswitch2/plans/complete_multipart.go b/common/pkgs/ioswitch2/plans/complete_multipart.go index 0e0c494..d344fd8 100644 --- a/common/pkgs/ioswitch2/plans/complete_multipart.go +++ 
b/common/pkgs/ioswitch2/plans/complete_multipart.go @@ -8,10 +8,10 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/plan" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) -func CompleteMultipart(blocks []jcsypes.ObjectBlock, blockSpaces []jcsypes.UserSpaceDetail, targetSpace jcsypes.UserSpaceDetail, shardInfoKey string, blder *exec.PlanBuilder) error { +func CompleteMultipart(blocks []jcstypes.ObjectBlock, blockSpaces []jcstypes.UserSpaceDetail, targetSpace jcstypes.UserSpaceDetail, shardInfoKey string, blder *exec.PlanBuilder) error { da := ops2.NewGraphNodeBuilder() sizes := make([]int64, len(blocks)) diff --git a/common/pkgs/ioswitch2/plans/utils.go b/common/pkgs/ioswitch2/plans/utils.go index 5efd89c..3c6af0f 100644 --- a/common/pkgs/ioswitch2/plans/utils.go +++ b/common/pkgs/ioswitch2/plans/utils.go @@ -5,21 +5,20 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) -func setEnvBySpace(n dag.Node, space *jcsypes.UserSpaceDetail) error { +func setEnvBySpace(n dag.Node, space *jcstypes.UserSpaceDetail) error { if space.RecommendHub == nil { n.Env().ToEnvDriver(true) return nil } switch addr := space.RecommendHub.Address.(type) { - case *cortypes.HttpAddressInfo: + case *jcstypes.HttpAddressInfo: n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: *space.RecommendHub}, true) - case *cortypes.GRPCAddressInfo: + case *jcstypes.GRPCAddressInfo: n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: *space.RecommendHub, Address: *addr}, true) default: diff --git a/common/pkgs/ioswitchlrc/fromto.go b/common/pkgs/ioswitchlrc/fromto.go index 080a4eb..91ebba6 100644 --- a/common/pkgs/ioswitchlrc/fromto.go +++ b/common/pkgs/ioswitchlrc/fromto.go @@ -4,7 +4,7 @@ import ( "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type From interface { @@ -39,12 +39,12 @@ func (f *FromDriver) GetDataIndex() int { } type FromNode struct { - FileHash jcsypes.FileHash - UserSpace jcsypes.UserSpaceDetail + FileHash jcstypes.FileHash + UserSpace jcstypes.UserSpaceDetail DataIndex int } -func NewFromStorage(fileHash jcsypes.FileHash, space jcsypes.UserSpaceDetail, dataIndex int) *FromNode { +func NewFromStorage(fileHash jcstypes.FileHash, space jcstypes.UserSpaceDetail, dataIndex int) *FromNode { return &FromNode{ FileHash: fileHash, DataIndex: dataIndex, @@ -88,14 +88,14 @@ func (t *ToDriver) GetRange() math2.Range { } type ToNode struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail DataIndex int Range math2.Range FileHashStoreKey string Option types.WriteOption } -func NewToStorage(space jcsypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string) *ToNode { +func NewToStorage(space jcstypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string) *ToNode { return &ToNode{ UserSpace: space, DataIndex: dataIndex, @@ -103,7 +103,7 @@ func NewToStorage(space jcsypes.UserSpaceDetail, dataIndex int, 
fileHashStoreKey } } -func NewToStorageWithRange(space jcsypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string, rng math2.Range) *ToNode { +func NewToStorageWithRange(space jcstypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string, rng math2.Range) *ToNode { return &ToNode{ UserSpace: space, DataIndex: dataIndex, diff --git a/common/pkgs/ioswitchlrc/hub_worker.go b/common/pkgs/ioswitchlrc/hub_worker.go index c557dd1..f3b1bd8 100644 --- a/common/pkgs/ioswitchlrc/hub_worker.go +++ b/common/pkgs/ioswitchlrc/hub_worker.go @@ -7,7 +7,7 @@ import ( stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) // var _ = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[exec.WorkerInfo]( @@ -15,8 +15,8 @@ import ( // ))) type HubWorker struct { - Hub cortypes.Hub - Address cortypes.GRPCAddressInfo + Hub jcstypes.Hub + Address jcstypes.GRPCAddressInfo } func (w *HubWorker) NewClient() (exec.WorkerClient, error) { diff --git a/common/pkgs/ioswitchlrc/ops2/base_store.go b/common/pkgs/ioswitchlrc/ops2/base_store.go index d56d224..e04924f 100644 --- a/common/pkgs/ioswitchlrc/ops2/base_store.go +++ b/common/pkgs/ioswitchlrc/ops2/base_store.go @@ -12,7 +12,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { @@ -23,8 +23,8 @@ func init() { type BaseRead struct { Output exec.VarID - UserSpace jcsypes.UserSpaceDetail - Path jcsypes.JPath + UserSpace jcstypes.UserSpaceDetail + Path jcstypes.JPath Option types.OpenOption } @@ -67,7 +67,7 @@ func (o *BaseRead) String() string { } type BaseReadDyn struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail Output exec.VarID Path exec.VarID Option types.OpenOption @@ -118,8 +118,8 @@ func (o *BaseReadDyn) String() string { type BaseWrite struct { Input exec.VarID - UserSpace jcsypes.UserSpaceDetail - Path jcsypes.JPath + UserSpace jcstypes.UserSpaceDetail + Path jcstypes.JPath WriteResult exec.VarID Option types.WriteOption } @@ -164,12 +164,12 @@ func (o *BaseWrite) String() string { type BaseReadNode struct { dag.NodeBase From ioswitchlrc.From - UserSpace jcsypes.UserSpaceDetail - Path jcsypes.JPath + UserSpace jcstypes.UserSpaceDetail + Path jcstypes.JPath Option types.OpenOption } -func (b *GraphNodeBuilder) NewBaseRead(from ioswitchlrc.From, userSpace jcsypes.UserSpaceDetail, path jcsypes.JPath, opt types.OpenOption) *BaseReadNode { +func (b *GraphNodeBuilder) NewBaseRead(from ioswitchlrc.From, userSpace jcstypes.UserSpaceDetail, path jcstypes.JPath, opt types.OpenOption) *BaseReadNode { node := &BaseReadNode{ From: from, UserSpace: userSpace, @@ -205,11 +205,11 @@ func (t *BaseReadNode) GenerateOp() (exec.Op, error) { type BaseReadDynNode struct { dag.NodeBase From ioswitchlrc.From - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail Option types.OpenOption } -func (b *GraphNodeBuilder) NewBaseReadDyn(from ioswitchlrc.From, userSpace jcsypes.UserSpaceDetail, opt types.OpenOption) *BaseReadDynNode { +func (b *GraphNodeBuilder) NewBaseReadDyn(from ioswitchlrc.From, userSpace 
jcstypes.UserSpaceDetail, opt types.OpenOption) *BaseReadDynNode { node := &BaseReadDynNode{ From: from, UserSpace: userSpace, @@ -252,12 +252,12 @@ func (t *BaseReadDynNode) GenerateOp() (exec.Op, error) { type BaseWriteNode struct { dag.NodeBase To ioswitchlrc.To - UserSpace jcsypes.UserSpaceDetail - Path jcsypes.JPath + UserSpace jcstypes.UserSpaceDetail + Path jcstypes.JPath Option types.WriteOption } -func (b *GraphNodeBuilder) NewBaseWrite(to ioswitchlrc.To, userSpace jcsypes.UserSpaceDetail, path jcsypes.JPath, opt types.WriteOption) *BaseWriteNode { +func (b *GraphNodeBuilder) NewBaseWrite(to ioswitchlrc.To, userSpace jcstypes.UserSpaceDetail, path jcstypes.JPath, opt types.WriteOption) *BaseWriteNode { node := &BaseWriteNode{ To: to, UserSpace: userSpace, diff --git a/common/pkgs/ioswitchlrc/ops2/ec.go b/common/pkgs/ioswitchlrc/ops2/ec.go index 55e0e32..4b120ee 100644 --- a/common/pkgs/ioswitchlrc/ops2/ec.go +++ b/common/pkgs/ioswitchlrc/ops2/ec.go @@ -12,7 +12,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/utils" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { @@ -114,12 +114,12 @@ func (o *GalMultiply) String() string { type LRCConstructAnyNode struct { dag.NodeBase - LRC jcsypes.LRCRedundancy + LRC jcstypes.LRCRedundancy InputIndexes []int OutputIndexes []int } -func (b *GraphNodeBuilder) NewLRCConstructAny(lrc jcsypes.LRCRedundancy) *LRCConstructAnyNode { +func (b *GraphNodeBuilder) NewLRCConstructAny(lrc jcstypes.LRCRedundancy) *LRCConstructAnyNode { node := &LRCConstructAnyNode{ LRC: lrc, } @@ -168,11 +168,11 @@ func (t *LRCConstructAnyNode) GenerateOp() (exec.Op, error) { type LRCConstructGroupNode struct { dag.NodeBase - LRC jcsypes.LRCRedundancy + LRC jcstypes.LRCRedundancy TargetBlockIndex int } -func (b *GraphNodeBuilder) NewLRCConstructGroup(lrc jcsypes.LRCRedundancy) *LRCConstructGroupNode { +func (b *GraphNodeBuilder) NewLRCConstructGroup(lrc jcstypes.LRCRedundancy) *LRCConstructGroupNode { node := &LRCConstructGroupNode{ LRC: lrc, } diff --git a/common/pkgs/ioswitchlrc/ops2/shard_store.go b/common/pkgs/ioswitchlrc/ops2/shard_store.go index ecd53b9..c0865e8 100644 --- a/common/pkgs/ioswitchlrc/ops2/shard_store.go +++ b/common/pkgs/ioswitchlrc/ops2/shard_store.go @@ -6,7 +6,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { @@ -15,8 +15,8 @@ func init() { } type GetShardInfo struct { - UserSpace jcsypes.UserSpaceDetail - FileHash jcsypes.FileHash + UserSpace jcstypes.UserSpaceDetail + FileHash jcstypes.FileHash ShardInfo exec.VarID } @@ -46,7 +46,7 @@ func (o *GetShardInfo) String() string { } type StoreShard struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail FileInfo exec.VarID ShardInfo exec.VarID } @@ -84,11 +84,11 @@ func (o *StoreShard) String() string { type GetShardInfoNode struct { dag.NodeBase - UserSpace jcsypes.UserSpaceDetail - FileHash jcsypes.FileHash + UserSpace jcstypes.UserSpaceDetail + FileHash jcstypes.FileHash } -func (b *GraphNodeBuilder) NewGetShardInfo(userSpace jcsypes.UserSpaceDetail, fileHash jcsypes.FileHash) 
*GetShardInfoNode { +func (b *GraphNodeBuilder) NewGetShardInfo(userSpace jcstypes.UserSpaceDetail, fileHash jcstypes.FileHash) *GetShardInfoNode { node := &GetShardInfoNode{ UserSpace: userSpace, FileHash: fileHash, @@ -116,11 +116,11 @@ func (n *GetShardInfoNode) GenerateOp() (exec.Op, error) { type StoreShardNode struct { dag.NodeBase - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail ShardInfoKey string } -func (b *GraphNodeBuilder) NewStoreShard(userSpace jcsypes.UserSpaceDetail, shardInfoKey string) *StoreShardNode { +func (b *GraphNodeBuilder) NewStoreShard(userSpace jcstypes.UserSpaceDetail, shardInfoKey string) *StoreShardNode { node := &StoreShardNode{ UserSpace: userSpace, ShardInfoKey: shardInfoKey, diff --git a/common/pkgs/ioswitchlrc/parser/generator.go b/common/pkgs/ioswitchlrc/parser/generator.go index af90dc9..0802bff 100644 --- a/common/pkgs/ioswitchlrc/parser/generator.go +++ b/common/pkgs/ioswitchlrc/parser/generator.go @@ -9,11 +9,11 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/plan" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc/ops2" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type GenerateContext struct { - LRC jcsypes.LRCRedundancy + LRC jcstypes.LRCRedundancy DAG *ops2.GraphNodeBuilder To []ioswitchlrc.To ToNodes map[ioswitchlrc.To]ops2.ToNode @@ -27,7 +27,7 @@ func Encode(fr ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec.PlanBuilder) } ctx := GenerateContext{ - LRC: jcsypes.DefaultLRCRedundancy, + LRC: jcstypes.DefaultLRCRedundancy, DAG: ops2.NewGraphNodeBuilder(), To: toes, ToNodes: make(map[ioswitchlrc.To]ops2.ToNode), @@ -124,7 +124,7 @@ func buildDAGEncode(ctx *GenerateContext, fr ioswitchlrc.From, toes []ioswitchlr // 提供数据块+编码块中的k个块,重建任意块,包括完整文件。 func ReconstructAny(frs []ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec.PlanBuilder) error { ctx := GenerateContext{ - LRC: jcsypes.DefaultLRCRedundancy, + LRC: jcstypes.DefaultLRCRedundancy, DAG: ops2.NewGraphNodeBuilder(), To: toes, ToNodes: make(map[ioswitchlrc.To]ops2.ToNode), @@ -245,7 +245,7 @@ func buildDAGReconstructAny(ctx *GenerateContext, frs []ioswitchlrc.From, toes [ // 输入同一组的多个块,恢复出剩下缺少的一个块。 func ReconstructGroup(frs []ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec.PlanBuilder) error { ctx := GenerateContext{ - LRC: jcsypes.DefaultLRCRedundancy, + LRC: jcstypes.DefaultLRCRedundancy, DAG: ops2.NewGraphNodeBuilder(), To: toes, ToNodes: make(map[ioswitchlrc.To]ops2.ToNode), diff --git a/common/pkgs/ioswitchlrc/parser/utils.go b/common/pkgs/ioswitchlrc/parser/utils.go index ae8eff7..2d52e5b 100644 --- a/common/pkgs/ioswitchlrc/parser/utils.go +++ b/common/pkgs/ioswitchlrc/parser/utils.go @@ -5,21 +5,20 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) -func setEnvBySpace(n dag.Node, space *jcsypes.UserSpaceDetail) error { +func setEnvBySpace(n dag.Node, space *jcstypes.UserSpaceDetail) error { if space.RecommendHub == nil { n.Env().ToEnvDriver(true) return nil } switch addr := space.RecommendHub.Address.(type) { - case *cortypes.HttpAddressInfo: + case *jcstypes.HttpAddressInfo: n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: 
*space.RecommendHub}, true) - case *cortypes.GRPCAddressInfo: + case *jcstypes.GRPCAddressInfo: n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: *space.RecommendHub, Address: *addr}, true) default: diff --git a/common/pkgs/publock/reqbuilder/user_space.go b/common/pkgs/publock/reqbuilder/user_space.go index 8c89b59..b008d83 100644 --- a/common/pkgs/publock/reqbuilder/user_space.go +++ b/common/pkgs/publock/reqbuilder/user_space.go @@ -5,7 +5,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/lockprovider" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type UserSpaceLockReqBuilder struct { @@ -15,7 +15,7 @@ type UserSpaceLockReqBuilder struct { func (b *LockRequestBuilder) UserSpace() *UserSpaceLockReqBuilder { return &UserSpaceLockReqBuilder{LockRequestBuilder: b} } -func (b *UserSpaceLockReqBuilder) Buzy(spaceID jcsypes.UserSpaceID) *UserSpaceLockReqBuilder { +func (b *UserSpaceLockReqBuilder) Buzy(spaceID jcstypes.UserSpaceID) *UserSpaceLockReqBuilder { b.locks = append(b.locks, types.Lock{ Path: b.makePath(spaceID), Name: lockprovider.UserSpaceBuzyLock, @@ -24,7 +24,7 @@ func (b *UserSpaceLockReqBuilder) Buzy(spaceID jcsypes.UserSpaceID) *UserSpaceLo return b } -func (b *UserSpaceLockReqBuilder) GC(spaceID jcsypes.UserSpaceID) *UserSpaceLockReqBuilder { +func (b *UserSpaceLockReqBuilder) GC(spaceID jcstypes.UserSpaceID) *UserSpaceLockReqBuilder { b.locks = append(b.locks, types.Lock{ Path: b.makePath(spaceID), Name: lockprovider.UserSpaceGCLock, @@ -33,6 +33,6 @@ func (b *UserSpaceLockReqBuilder) GC(spaceID jcsypes.UserSpaceID) *UserSpaceLock return b } -func (b *UserSpaceLockReqBuilder) makePath(hubID jcsypes.UserSpaceID) []string { +func (b *UserSpaceLockReqBuilder) makePath(hubID jcstypes.UserSpaceID) []string { return []string{lockprovider.UserSpaceLockPathPrefix, strconv.FormatInt(int64(hubID), 10)} } diff --git a/common/pkgs/rpc/auth.go b/common/pkgs/rpc/auth.go index d360c44..1577b76 100644 --- a/common/pkgs/rpc/auth.go +++ b/common/pkgs/rpc/auth.go @@ -5,7 +5,7 @@ import ( "fmt" "strconv" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -27,8 +27,8 @@ const ( ) type AccessTokenAuthInfo struct { - UserID cortypes.UserID - AccessTokenID cortypes.AccessTokenID + UserID jcstypes.UserID + AccessTokenID jcstypes.AccessTokenID Nonce string Signature string } @@ -125,8 +125,8 @@ func (s *ServerBase) authUnary( } authInfo := AccessTokenAuthInfo{ - UserID: cortypes.UserID(userID), - AccessTokenID: cortypes.AccessTokenID(accessTokenIDs[0]), + UserID: jcstypes.UserID(userID), + AccessTokenID: jcstypes.AccessTokenID(accessTokenIDs[0]), Nonce: nonce[0], Signature: signature[0], } @@ -200,8 +200,8 @@ func (s *ServerBase) authStream( } authInfo := AccessTokenAuthInfo{ - UserID: cortypes.UserID(userID), - AccessTokenID: cortypes.AccessTokenID(accessTokenIDs[0]), + UserID: jcstypes.UserID(userID), + AccessTokenID: jcstypes.AccessTokenID(accessTokenIDs[0]), Nonce: nonce[0], Signature: signature[0], } diff --git a/common/pkgs/rpc/coordinator/hub.go b/common/pkgs/rpc/coordinator/hub.go index 9b26bca..e1b6f31 100644 --- a/common/pkgs/rpc/coordinator/hub.go +++ b/common/pkgs/rpc/coordinator/hub.go @@ -4,7 +4,7 @@ import ( context "context" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" - cortypes 
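The `setEnvBySpace` hunk above keeps the same routing after the alias change: a node drops to the local driver when no hub is recommended, otherwise the concrete address type decides which hub worker client wraps it. Below is a stripped-down sketch of that dispatch under stand-in names (`pickWorker`, `Hub`, and the address structs are illustrative, not the real `ioswitch2`/`jcstypes` types; the default branch is assumed here to reject unknown address types).

```go
package main

import "fmt"

type AddressInfo interface{ isAddress() }

type HttpAddressInfo struct{ URL string }

type GRPCAddressInfo struct {
	Host string
	Port int
}

func (*HttpAddressInfo) isAddress() {}
func (*GRPCAddressInfo) isAddress() {}

type Hub struct {
	Name    string
	Address AddressInfo
}

// pickWorker mirrors the shape of the switch in setEnvBySpace: no recommended
// hub means "run in the local driver", otherwise the address type decides
// which worker client to use.
func pickWorker(recommend *Hub) (string, error) {
	if recommend == nil {
		return "driver", nil
	}
	switch addr := recommend.Address.(type) {
	case *HttpAddressInfo:
		return "http worker at " + addr.URL, nil
	case *GRPCAddressInfo:
		return fmt.Sprintf("grpc worker at %s:%d", addr.Host, addr.Port), nil
	default:
		return "", fmt.Errorf("unsupported address type %T", addr)
	}
}

func main() {
	w, _ := pickWorker(&Hub{Name: "hub1", Address: &GRPCAddressInfo{Host: "10.0.0.1", Port: 5010}})
	fmt.Println(w)
}
```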
"gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type HubService interface { @@ -18,18 +18,18 @@ type HubService interface { } type GetHubConfig struct { - HubID cortypes.HubID `json:"hubID"` + HubID jcstypes.HubID `json:"hubID"` } type GetHubConfigResp struct { - Hub cortypes.Hub `json:"hub"` + Hub jcstypes.Hub `json:"hub"` } -func ReqGetHubConfig(hubID cortypes.HubID) *GetHubConfig { +func ReqGetHubConfig(hubID jcstypes.HubID) *GetHubConfig { return &GetHubConfig{ HubID: hubID, } } -func RespGetHubConfig(hub cortypes.Hub) *GetHubConfigResp { +func RespGetHubConfig(hub jcstypes.Hub) *GetHubConfigResp { return &GetHubConfigResp{ Hub: hub, } @@ -43,23 +43,23 @@ func (s *Server) GetHubConfig(ctx context.Context, req *rpc.Request) (*rpc.Respo // 获取指定节点的信息。如果HubIDs为nil,则返回所有Hub type GetHubs struct { - HubIDs []cortypes.HubID `json:"hubIDs"` + HubIDs []jcstypes.HubID `json:"hubIDs"` } type GetHubsResp struct { - Hubs []*cortypes.Hub `json:"hubs"` + Hubs []*jcstypes.Hub `json:"hubs"` } -func NewGetHubs(hubIDs []cortypes.HubID) *GetHubs { +func NewGetHubs(hubIDs []jcstypes.HubID) *GetHubs { return &GetHubs{ HubIDs: hubIDs, } } -func NewGetHubsResp(hubs []*cortypes.Hub) *GetHubsResp { +func NewGetHubsResp(hubs []*jcstypes.Hub) *GetHubsResp { return &GetHubsResp{ Hubs: hubs, } } -func (r *GetHubsResp) GetHub(id cortypes.HubID) *cortypes.Hub { +func (r *GetHubsResp) GetHub(id jcstypes.HubID) *jcstypes.Hub { for _, n := range r.Hubs { if n.HubID == id { return n @@ -78,18 +78,18 @@ func (s *Server) GetHubs(ctx context.Context, req *rpc.Request) (*rpc.Response, // 获取节点连通性信息 type GetHubConnectivities struct { - HubIDs []cortypes.HubID `json:"hubIDs"` + HubIDs []jcstypes.HubID `json:"hubIDs"` } type GetHubConnectivitiesResp struct { - Connectivities []cortypes.HubConnectivity `json:"hubs"` + Connectivities []jcstypes.HubConnectivity `json:"hubs"` } -func ReqGetHubConnectivities(hubIDs []cortypes.HubID) *GetHubConnectivities { +func ReqGetHubConnectivities(hubIDs []jcstypes.HubID) *GetHubConnectivities { return &GetHubConnectivities{ HubIDs: hubIDs, } } -func RespGetHubConnectivities(cons []cortypes.HubConnectivity) *GetHubConnectivitiesResp { +func RespGetHubConnectivities(cons []jcstypes.HubConnectivity) *GetHubConnectivitiesResp { return &GetHubConnectivitiesResp{ Connectivities: cons, } @@ -103,7 +103,7 @@ func (s *Server) GetHubConnectivities(ctx context.Context, req *rpc.Request) (*r // 上报节点连通性信息 type ReportHubConnectivity struct { - Connecttivities []cortypes.HubConnectivity + Connecttivities []jcstypes.HubConnectivity } type ReportHubConnectivityResp struct { } diff --git a/common/pkgs/rpc/coordinator/storage.go b/common/pkgs/rpc/coordinator/storage.go index 19c27bd..76c02dd 100644 --- a/common/pkgs/rpc/coordinator/storage.go +++ b/common/pkgs/rpc/coordinator/storage.go @@ -4,7 +4,7 @@ import ( context "context" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type StorageService interface { @@ -13,10 +13,10 @@ type StorageService interface { // 为指定的Storage选择一个适合通信的Hub type SelectStorageHub struct { - Storages []cortypes.StorageType + Storages []jcstypes.StorageType } type SelectStorageHubResp struct { - Hubs []*cortypes.Hub + Hubs []*jcstypes.Hub } var _ = TokenAuth(Coordinator_SelectStorageHub_FullMethodName) diff --git a/common/pkgs/rpc/coordinator/user.go b/common/pkgs/rpc/coordinator/user.go index 
2da8c67..12f289f 100644 --- a/common/pkgs/rpc/coordinator/user.go +++ b/common/pkgs/rpc/coordinator/user.go @@ -4,7 +4,7 @@ import ( context "context" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type UserService interface { @@ -23,7 +23,7 @@ type UserLogin struct { Password string } type UserLoginResp struct { - Token cortypes.UserAccessToken + Token jcstypes.UserAccessToken PrivateKey string } @@ -42,7 +42,7 @@ func (s *Server) UserLogin(ctx context.Context, req *rpc.Request) (*rpc.Response // 客户端刷新Token,原始Token会继续有效。 type UserRefreshToken struct{} type UserRefreshTokenResp struct { - Token cortypes.UserAccessToken + Token jcstypes.UserAccessToken PrivateKey string } @@ -76,12 +76,12 @@ func (s *Server) UserLogout(ctx context.Context, req *rpc.Request) (*rpc.Respons // Hub服务加载AccessToken type HubLoadAccessToken struct { - HubID cortypes.HubID - UserID cortypes.UserID - TokenID cortypes.AccessTokenID + HubID jcstypes.HubID + UserID jcstypes.UserID + TokenID jcstypes.AccessTokenID } type HubLoadAccessTokenResp struct { - Token cortypes.UserAccessToken + Token jcstypes.UserAccessToken } func (c *Client) HubLoadAccessToken(ctx context.Context, msg *HubLoadAccessToken) (*HubLoadAccessTokenResp, *rpc.CodeError) { diff --git a/common/pkgs/rpc/hub/cache.go b/common/pkgs/rpc/hub/cache.go index 9e3bfc4..7fe1573 100644 --- a/common/pkgs/rpc/hub/cache.go +++ b/common/pkgs/rpc/hub/cache.go @@ -4,7 +4,7 @@ package hubrpc import ( "context" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" ) @@ -15,10 +15,10 @@ type CacheSvc interface { // 获取Cache中文件列表 type CheckCache struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail } type CheckCacheResp struct { - FileHashes []jcsypes.FileHash + FileHashes []jcstypes.FileHash } func (c *Client) CheckCache(ctx context.Context, req *CheckCache) (*CheckCacheResp, *rpc.CodeError) { @@ -33,8 +33,8 @@ func (s *Server) CheckCache(ctx context.Context, req *rpc.Request) (*rpc.Respons // 清理Cache中不用的文件 type CacheGC struct { - UserSpace jcsypes.UserSpaceDetail - Availables []jcsypes.FileHash + UserSpace jcstypes.UserSpaceDetail + Availables []jcstypes.FileHash } type CacheGCResp struct{} diff --git a/common/pkgs/rpc/hub/user.go b/common/pkgs/rpc/hub/user.go index d067f6e..5e0d2d7 100644 --- a/common/pkgs/rpc/hub/user.go +++ b/common/pkgs/rpc/hub/user.go @@ -4,7 +4,7 @@ import ( context "context" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type UserSvc interface { @@ -13,8 +13,8 @@ type UserSvc interface { // 通知用户的Token登出 type NotifyUserAccessTokenInvalid struct { - UserID cortypes.UserID - TokenID cortypes.AccessTokenID + UserID jcstypes.UserID + TokenID jcstypes.AccessTokenID } type NotifyUserAccessTokenInvalidResp struct{} diff --git a/common/pkgs/rpc/hub/user_space.go b/common/pkgs/rpc/hub/user_space.go index 994ff77..dbc50e5 100644 --- a/common/pkgs/rpc/hub/user_space.go +++ b/common/pkgs/rpc/hub/user_space.go @@ -4,7 +4,7 @@ package hubrpc import ( "context" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" stgtypes 
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" ) @@ -16,7 +16,7 @@ type UserSpaceSvc interface { // 列出指定BaseStore的指定位置内的所有文件 type BaseStoreListAll struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail Path string } type BaseStoreListAllResp struct { @@ -35,7 +35,7 @@ func (s *Server) BaseStoreListAll(ctx context.Context, req *rpc.Request) (*rpc.R // 批量在指定BaseStore中创建文件夹 type BaseStoreMkdirs struct { - UserSpace jcsypes.UserSpaceDetail + UserSpace jcstypes.UserSpaceDetail Pathes []string } diff --git a/common/pkgs/servicestats/hub_strorage_transfer.go b/common/pkgs/servicestats/hub_strorage_transfer.go index 72c02d1..9a7d79a 100644 --- a/common/pkgs/servicestats/hub_strorage_transfer.go +++ b/common/pkgs/servicestats/hub_strorage_transfer.go @@ -6,22 +6,22 @@ import ( "time" "gitlink.org.cn/cloudream/common/utils/math2" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type HubStorageTransferStats struct { data HubStorageTransferStatsData - fromHubID cortypes.HubID + fromHubID jcstypes.HubID lock *sync.Mutex } type HubStorageTransferStatsData struct { - Entries map[cortypes.StorageID]*HubStorageTransferStatsEntry + Entries map[jcstypes.StorageID]*HubStorageTransferStatsEntry StartTime time.Time } type HubStorageTransferStatsEntry struct { - DestStorageID cortypes.StorageID + DestStorageID jcstypes.StorageID OutputBytes int64 MaxOutputBytes int64 @@ -36,7 +36,7 @@ type HubStorageTransferStatsEntry struct { SuccessInput int64 } -func (s *HubStorageTransferStats) RecordUpload(dstStorageID cortypes.StorageID, transferBytes int64, isSuccess bool) { +func (s *HubStorageTransferStats) RecordUpload(dstStorageID jcstypes.StorageID, transferBytes int64, isSuccess bool) { s.lock.Lock() defer s.lock.Unlock() @@ -58,7 +58,7 @@ func (s *HubStorageTransferStats) RecordUpload(dstStorageID cortypes.StorageID, e.TotalOutput++ } -func (s *HubStorageTransferStats) RecordDownload(dstStorageID cortypes.StorageID, transferBytes int64, isSuccess bool) { +func (s *HubStorageTransferStats) RecordDownload(dstStorageID jcstypes.StorageID, transferBytes int64, isSuccess bool) { s.lock.Lock() defer s.lock.Unlock() @@ -83,7 +83,7 @@ func (s *HubStorageTransferStats) Reset() time.Time { s.lock.Lock() defer s.lock.Unlock() - s.data.Entries = make(map[cortypes.StorageID]*HubStorageTransferStatsEntry) + s.data.Entries = make(map[jcstypes.StorageID]*HubStorageTransferStatsEntry) s.data.StartTime = time.Now() return s.data.StartTime } @@ -93,7 +93,7 @@ func (s *HubStorageTransferStats) DumpData() HubStorageTransferStatsData { defer s.lock.Unlock() data := s.data - data.Entries = make(map[cortypes.StorageID]*HubStorageTransferStatsEntry) + data.Entries = make(map[jcstypes.StorageID]*HubStorageTransferStatsEntry) for k, v := range s.data.Entries { v2 := *v data.Entries[k] = &v2 diff --git a/common/pkgs/servicestats/hub_transfter.go b/common/pkgs/servicestats/hub_transfter.go index 774c233..b5a447a 100644 --- a/common/pkgs/servicestats/hub_transfter.go +++ b/common/pkgs/servicestats/hub_transfter.go @@ -6,22 +6,22 @@ import ( "time" "gitlink.org.cn/cloudream/common/utils/math2" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type HubTransferStats struct { data HubTransferStatsData - fromHubID cortypes.HubID + fromHubID jcstypes.HubID lock *sync.Mutex } type HubTransferStatsData struct { - Entries map[cortypes.HubID]*HubTransferStatsEntry + 
Entries map[jcstypes.HubID]*HubTransferStatsEntry StartTime time.Time } type HubTransferStatsEntry struct { - DestHubID cortypes.HubID + DestHubID jcstypes.HubID OutputBytes int64 MaxOutputBytes int64 @@ -36,7 +36,7 @@ type HubTransferStatsEntry struct { SuccessInput int64 } -func (s *HubTransferStats) RecordOutput(dstHubID cortypes.HubID, transferBytes int64, isSuccess bool) { +func (s *HubTransferStats) RecordOutput(dstHubID jcstypes.HubID, transferBytes int64, isSuccess bool) { s.lock.Lock() defer s.lock.Unlock() @@ -58,7 +58,7 @@ func (s *HubTransferStats) RecordOutput(dstHubID cortypes.HubID, transferBytes i e.TotalOutput++ } -func (s *HubTransferStats) RecordInput(dstHubID cortypes.HubID, transferBytes int64, isSuccess bool) { +func (s *HubTransferStats) RecordInput(dstHubID jcstypes.HubID, transferBytes int64, isSuccess bool) { s.lock.Lock() defer s.lock.Unlock() @@ -85,7 +85,7 @@ func (s *HubTransferStats) Reset() time.Time { defer s.lock.Unlock() s.data.StartTime = time.Now() - s.data.Entries = make(map[cortypes.HubID]*HubTransferStatsEntry) + s.data.Entries = make(map[jcstypes.HubID]*HubTransferStatsEntry) return s.data.StartTime } @@ -94,7 +94,7 @@ func (s *HubTransferStats) DumpData() HubTransferStatsData { defer s.lock.Unlock() data := s.data - data.Entries = make(map[cortypes.HubID]*HubTransferStatsEntry) + data.Entries = make(map[jcstypes.HubID]*HubTransferStatsEntry) for k, v := range s.data.Entries { v2 := *v data.Entries[k] = &v2 diff --git a/common/pkgs/servicestats/service_stats.go b/common/pkgs/servicestats/service_stats.go index 996afed..b242bfd 100644 --- a/common/pkgs/servicestats/service_stats.go +++ b/common/pkgs/servicestats/service_stats.go @@ -4,7 +4,7 @@ import ( "sync" "time" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type StatsHost struct { @@ -14,24 +14,24 @@ type StatsHost struct { HubStorageTransfer *HubStorageTransferStats } -func (h *StatsHost) SetupHubTransfer(fromHubID cortypes.HubID) { +func (h *StatsHost) SetupHubTransfer(fromHubID jcstypes.HubID) { h.HubTransfer = &HubTransferStats{ fromHubID: fromHubID, lock: &sync.Mutex{}, data: HubTransferStatsData{ StartTime: time.Now(), - Entries: make(map[cortypes.HubID]*HubTransferStatsEntry), + Entries: make(map[jcstypes.HubID]*HubTransferStatsEntry), }, } } -func (h *StatsHost) SetupHubStorageTransfer(fromHubID cortypes.HubID) { +func (h *StatsHost) SetupHubStorageTransfer(fromHubID jcstypes.HubID) { h.HubStorageTransfer = &HubStorageTransferStats{ fromHubID: fromHubID, lock: &sync.Mutex{}, data: HubStorageTransferStatsData{ StartTime: time.Now(), - Entries: make(map[cortypes.StorageID]*HubStorageTransferStatsEntry), + Entries: make(map[jcstypes.StorageID]*HubStorageTransferStatsEntry), }, } } diff --git a/common/pkgs/storage/efile/ec_multiplier.go b/common/pkgs/storage/efile/ec_multiplier.go index 8e90cfc..27986b7 100644 --- a/common/pkgs/storage/efile/ec_multiplier.go +++ b/common/pkgs/storage/efile/ec_multiplier.go @@ -10,14 +10,13 @@ import ( "gitlink.org.cn/cloudream/common/utils/os2" "gitlink.org.cn/cloudream/common/utils/serder" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type ECMultiplier struct { blder *builder url string - feat *cortypes.ECMultiplierFeature + feat *jcstypes.ECMultiplierFeature outputs []string } @@ 
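`HubTransferStats` above guards a map of per-destination entries with a mutex and, in `DumpData`, copies each entry before returning it so callers never alias the live map. A minimal stand-alone sketch of that record/copy-on-dump pattern (stand-in `Stats`/`Entry` types, plain int64 keys in place of `jcstypes.HubID`):

```go
package main

import (
	"fmt"
	"sync"
)

type Entry struct {
	OutputBytes   int64
	TotalOutput   int64
	SuccessOutput int64
}

type Stats struct {
	mu      sync.Mutex
	entries map[int64]*Entry
}

func NewStats() *Stats {
	return &Stats{entries: make(map[int64]*Entry)}
}

// RecordOutput updates the entry for dst under the lock, creating it on first use.
func (s *Stats) RecordOutput(dst int64, bytes int64, ok bool) {
	s.mu.Lock()
	defer s.mu.Unlock()

	e, found := s.entries[dst]
	if !found {
		e = &Entry{}
		s.entries[dst] = e
	}
	e.OutputBytes += bytes
	if ok {
		e.SuccessOutput++
	}
	e.TotalOutput++
}

// Dump returns a deep copy, so the caller can read it without racing RecordOutput.
func (s *Stats) Dump() map[int64]*Entry {
	s.mu.Lock()
	defer s.mu.Unlock()

	out := make(map[int64]*Entry, len(s.entries))
	for k, v := range s.entries {
		v2 := *v
		out[k] = &v2
	}
	return out
}

func main() {
	st := NewStats()
	st.RecordOutput(1, 1024, true)
	fmt.Println(st.Dump()[1])
}
```

Copying `*v` into a fresh struct is what makes the dump safe to read while `RecordOutput` keeps mutating the originals.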
-98,9 +97,9 @@ func (m *ECMultiplier) Multiply(coef [][]byte, inputs []types.HTTPRequest, chunk for i, data := range r.Data { ret[i] = types.FileInfo{ // TODO 要确认一下output的格式 - Path: jcsypes.PathFromJcsPathString(m.outputs[i]), + Path: jcstypes.PathFromJcsPathString(m.outputs[i]), Size: data.Size, - Hash: jcsypes.NewFullHashFromString(data.Sha256), + Hash: jcstypes.NewFullHashFromString(data.Sha256), } } diff --git a/common/pkgs/storage/efile/efile.go b/common/pkgs/storage/efile/efile.go index 5eec93e..16a36bf 100644 --- a/common/pkgs/storage/efile/efile.go +++ b/common/pkgs/storage/efile/efile.go @@ -10,12 +10,11 @@ import ( "gitlink.org.cn/cloudream/common/utils/serder" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { - reg.RegisterBuilder[*cortypes.EFileType](func(detail *jcsypes.UserSpaceDetail) types.StorageBuilder { + reg.RegisterBuilder[*jcstypes.EFileType](func(detail *jcstypes.UserSpaceDetail) types.StorageBuilder { return &builder{ detail: detail, } @@ -24,15 +23,15 @@ func init() { type builder struct { types.EmptyBuilder - detail *jcsypes.UserSpaceDetail + detail *jcstypes.UserSpaceDetail token string tokenLock sync.Mutex getTokenTime time.Time } func (b *builder) getToken() (string, error) { - stgType := b.detail.UserSpace.Storage.(*cortypes.EFileType) - cred := b.detail.UserSpace.Credential.(*cortypes.EFileCred) + stgType := b.detail.UserSpace.Storage.(*jcstypes.EFileType) + cred := b.detail.UserSpace.Credential.(*jcstypes.EFileCred) b.tokenLock.Lock() defer b.tokenLock.Unlock() @@ -91,12 +90,12 @@ func (b *builder) getToken() (string, error) { } func (b *builder) CreateECMultiplier(typeOnly bool) (types.ECMultiplier, error) { - feat := types.FindFeature[*cortypes.ECMultiplierFeature](b.detail) + feat := types.FindFeature[*jcstypes.ECMultiplierFeature](b.detail) if feat == nil { return nil, fmt.Errorf("feature ECMultiplier not found") } - cred, ok := b.detail.UserSpace.Credential.(*cortypes.EFileCred) + cred, ok := b.detail.UserSpace.Credential.(*jcstypes.EFileCred) if !ok { return nil, fmt.Errorf("invalid storage credential type %T for efile storage", b.detail.UserSpace.Credential) } diff --git a/common/pkgs/storage/factory/factory.go b/common/pkgs/storage/factory/factory.go index ac6769d..aa94dce 100644 --- a/common/pkgs/storage/factory/factory.go +++ b/common/pkgs/storage/factory/factory.go @@ -6,12 +6,12 @@ import ( _ "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) // 此函数永远不会返回nil。如果找不到对应的Builder,则会返回EmptyBuilder, // 此Builder的所有函数都会返回否定值或者封装后的ErrUnsupported错误(需要使用errors.Is检查) -func GetBuilder(detail *jcsypes.UserSpaceDetail) types.StorageBuilder { +func GetBuilder(detail *jcstypes.UserSpaceDetail) types.StorageBuilder { typ := reflect.TypeOf(detail.UserSpace.Storage) ctor, ok := reg.StorageBuilders[typ] diff --git a/common/pkgs/storage/factory/reg/reg.go b/common/pkgs/storage/factory/reg/reg.go index f8c6c77..6cb643d 100644 --- a/common/pkgs/storage/factory/reg/reg.go +++ b/common/pkgs/storage/factory/reg/reg.go @@ -5,23 +5,22 @@ import ( 
"gitlink.org.cn/cloudream/common/utils/reflect2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) -type BuilderCtor func(detail *jcsypes.UserSpaceDetail) types.StorageBuilder +type BuilderCtor func(detail *jcstypes.UserSpaceDetail) types.StorageBuilder var StorageBuilders = make(map[reflect.Type]BuilderCtor) // 注册针对指定存储服务类型的Builder -func RegisterBuilder[T cortypes.StorageType](ctor BuilderCtor) { +func RegisterBuilder[T jcstypes.StorageType](ctor BuilderCtor) { StorageBuilders[reflect2.TypeOf[T]()] = ctor } // 注:此函数只给storage包内部使用,外部包请使用外层的factory.GetBuilder // 此函数永远不会返回nil。如果找不到对应的Builder,则会返回EmptyBuilder, // 此Builder的所有函数都会返回否定值或者封装后的ErrUnsupported错误(需要使用errors.Is检查) -func GetBuilderInternal(detail *jcsypes.UserSpaceDetail) types.StorageBuilder { +func GetBuilderInternal(detail *jcstypes.UserSpaceDetail) types.StorageBuilder { typ := reflect.TypeOf(detail.UserSpace.Storage) ctor, ok := StorageBuilders[typ] diff --git a/common/pkgs/storage/local/base_store.go b/common/pkgs/storage/local/base_store.go index 25fb4df..42a863e 100644 --- a/common/pkgs/storage/local/base_store.go +++ b/common/pkgs/storage/local/base_store.go @@ -9,22 +9,22 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/utils/io2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type BaseStore struct { root string - detail *jcsypes.UserSpaceDetail + detail *jcstypes.UserSpaceDetail } -func NewBaseStore(root string, detail *jcsypes.UserSpaceDetail) (*BaseStore, error) { +func NewBaseStore(root string, detail *jcstypes.UserSpaceDetail) (*BaseStore, error) { return &BaseStore{ root: root, detail: detail, }, nil } -func (s *BaseStore) Write(pat jcsypes.JPath, stream io.Reader, opt types.WriteOption) (types.FileInfo, error) { +func (s *BaseStore) Write(pat jcstypes.JPath, stream io.Reader, opt types.WriteOption) (types.FileInfo, error) { log := s.getLogger() absObjPath := filepath.Join(s.root, pat.String()) @@ -58,11 +58,11 @@ func (s *BaseStore) Write(pat jcsypes.JPath, stream io.Reader, opt types.WriteOp return types.FileInfo{ Path: pat, Size: counter.Count(), - Hash: jcsypes.NewFullHash(hasher.Sum()), + Hash: jcstypes.NewFullHash(hasher.Sum()), }, nil } -func (s *BaseStore) Read(objPath jcsypes.JPath, opt types.OpenOption) (io.ReadCloser, error) { +func (s *BaseStore) Read(objPath jcstypes.JPath, opt types.OpenOption) (io.ReadCloser, error) { absObjPath := filepath.Join(s.root, objPath.JoinOSPath()) file, err := os.Open(absObjPath) if err != nil { @@ -86,7 +86,7 @@ func (s *BaseStore) Read(objPath jcsypes.JPath, opt types.OpenOption) (io.ReadCl return ret, nil } -func (s *BaseStore) Mkdir(path jcsypes.JPath) error { +func (s *BaseStore) Mkdir(path jcstypes.JPath) error { absObjPath := filepath.Join(s.root, path.JoinOSPath()) err := os.MkdirAll(absObjPath, 0755) if err != nil { @@ -96,7 +96,7 @@ func (s *BaseStore) Mkdir(path jcsypes.JPath) error { return nil } -func (s *BaseStore) ReadDir(pat jcsypes.JPath) types.DirReader { +func (s *BaseStore) ReadDir(pat jcstypes.JPath) types.DirReader { return &DirReader{ absRootPath: filepath.Join(s.root, pat.JoinOSPath()), rootJPath: pat.Clone(), diff --git a/common/pkgs/storage/local/dir_reader.go 
b/common/pkgs/storage/local/dir_reader.go index 55866e2..e2539aa 100644 --- a/common/pkgs/storage/local/dir_reader.go +++ b/common/pkgs/storage/local/dir_reader.go @@ -6,14 +6,14 @@ import ( "path/filepath" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type DirReader struct { // 完整的根路径(包括ReadDir的path参数),比如包括了盘符 absRootPath string // ReadDir函数传递进来的path参数 - rootJPath jcsypes.JPath + rootJPath jcstypes.JPath init bool curEntries []dirEntry } @@ -42,7 +42,7 @@ func (r *DirReader) Next() (types.DirEntry, error) { for _, e := range es { r.curEntries = append(r.curEntries, dirEntry{ - dir: jcsypes.JPath{}, + dir: jcstypes.JPath{}, entry: e, }) } @@ -103,7 +103,7 @@ func (r *DirReader) Close() { } type dirEntry struct { - dir jcsypes.JPath + dir jcstypes.JPath entry os.DirEntry } diff --git a/common/pkgs/storage/local/local.go b/common/pkgs/storage/local/local.go index 6725b85..477a950 100644 --- a/common/pkgs/storage/local/local.go +++ b/common/pkgs/storage/local/local.go @@ -5,12 +5,11 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { - reg.RegisterBuilder[*cortypes.LocalType](func(detail *jcsypes.UserSpaceDetail) types.StorageBuilder { + reg.RegisterBuilder[*jcstypes.LocalType](func(detail *jcstypes.UserSpaceDetail) types.StorageBuilder { return &builder{ detail: detail, } @@ -19,7 +18,7 @@ func init() { type builder struct { types.EmptyBuilder - detail *jcsypes.UserSpaceDetail + detail *jcstypes.UserSpaceDetail } func (b *builder) FeatureDesc() types.FeatureDesc { @@ -27,7 +26,7 @@ func (b *builder) FeatureDesc() types.FeatureDesc { } func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) { - cred, ok := b.detail.UserSpace.Credential.(*cortypes.LocalCred) + cred, ok := b.detail.UserSpace.Credential.(*jcstypes.LocalCred) if !ok { return nil, fmt.Errorf("invalid storage credential type %T for local storage", b.detail.UserSpace.Credential) } @@ -40,7 +39,7 @@ func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) { } func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) { - cred, ok := b.detail.UserSpace.Credential.(*cortypes.LocalCred) + cred, ok := b.detail.UserSpace.Credential.(*jcstypes.LocalCred) if !ok { return nil, fmt.Errorf("invalid storage credential type %T for local storage", b.detail.UserSpace.Credential) } @@ -53,9 +52,9 @@ func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) { } func (b *builder) CreateMultiparter(typeOnly bool) (types.Multiparter, error) { - feat := types.FindFeature[*cortypes.MultipartUploadFeature](b.detail) + feat := types.FindFeature[*jcstypes.MultipartUploadFeature](b.detail) if feat == nil { - return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{}) + return nil, fmt.Errorf("feature %T not found", jcstypes.MultipartUploadFeature{}) } if typeOnly { @@ -68,9 +67,9 @@ func (b *builder) CreateMultiparter(typeOnly bool) (types.Multiparter, error) { } func (b *builder) CreateS2STransfer(typeOnly bool) (types.S2STransfer, error) { - feat := types.FindFeature[*cortypes.S2STransferFeature](b.detail) + feat := 
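The local builder above guards every constructor with a credential type assertion and reports the actual concrete type via `%T` when it does not match. A tiny sketch of that guard with stand-in credential types (`newLocalStore` and the `/mnt/jcs` path are illustrative only):

```go
package main

import "fmt"

type Credential interface{ isCredential() }

type LocalCred struct{ RootDir string }
type S3Cred struct{ AK, SK string }

func (LocalCred) isCredential() {}
func (S3Cred) isCredential()    {}

// newLocalStore only accepts a LocalCred; any other credential is reported
// with its concrete type via %T, the same error shape as the builders above.
func newLocalStore(cred Credential) (string, error) {
	lc, ok := cred.(LocalCred)
	if !ok {
		return "", fmt.Errorf("invalid storage credential type %T for local storage", cred)
	}
	return lc.RootDir, nil
}

func main() {
	if _, err := newLocalStore(S3Cred{}); err != nil {
		fmt.Println(err)
	}
	root, _ := newLocalStore(LocalCred{RootDir: "/mnt/jcs"})
	fmt.Println(root)
}
```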
types.FindFeature[*jcstypes.S2STransferFeature](b.detail) if feat == nil { - return nil, fmt.Errorf("feature %T not found", cortypes.S2STransferFeature{}) + return nil, fmt.Errorf("feature %T not found", jcstypes.S2STransferFeature{}) } if typeOnly { diff --git a/common/pkgs/storage/local/multipart_upload.go b/common/pkgs/storage/local/multipart_upload.go index 27ebd32..c664141 100644 --- a/common/pkgs/storage/local/multipart_upload.go +++ b/common/pkgs/storage/local/multipart_upload.go @@ -13,14 +13,13 @@ import ( "gitlink.org.cn/cloudream/common/utils/os2" "gitlink.org.cn/cloudream/common/utils/sort2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type Multiparter struct { - detail *jcsypes.UserSpaceDetail - localStg *cortypes.LocalCred - feat *cortypes.MultipartUploadFeature + detail *jcstypes.UserSpaceDetail + localStg *jcstypes.LocalCred + feat *jcstypes.MultipartUploadFeature } func (*Multiparter) MinPartSize() int64 { @@ -80,7 +79,7 @@ type MultipartTask struct { absTempDir string // 应该要是绝对路径 tempFileName string tempPartsDir string - joinedFileJPath jcsypes.JPath + joinedFileJPath jcstypes.JPath absJoinedFilePath string uploadID string } @@ -118,7 +117,7 @@ func (i *MultipartTask) JoinParts(ctx context.Context, parts []types.UploadedPar return types.FileInfo{ Path: i.joinedFileJPath, Size: size, - Hash: jcsypes.NewFullHash(h), + Hash: jcstypes.NewFullHash(h), }, nil } diff --git a/common/pkgs/storage/local/s2s.go b/common/pkgs/storage/local/s2s.go index dd4ae79..bd78525 100644 --- a/common/pkgs/storage/local/s2s.go +++ b/common/pkgs/storage/local/s2s.go @@ -7,24 +7,23 @@ import ( "path/filepath" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type S2STransfer struct { - feat *cortypes.S2STransferFeature - detail *jcsypes.UserSpaceDetail - localStg *cortypes.LocalCred - dstPath jcsypes.JPath + feat *jcstypes.S2STransferFeature + detail *jcstypes.UserSpaceDetail + localStg *jcstypes.LocalCred + dstPath jcstypes.JPath } // 只有同一个机器的存储之间才可以进行数据直传 -func (*S2STransfer) CanTransfer(src, dst *jcsypes.UserSpaceDetail) bool { - if types.FindFeature[*cortypes.S2STransferFeature](dst) == nil { +func (*S2STransfer) CanTransfer(src, dst *jcstypes.UserSpaceDetail) bool { + if types.FindFeature[*jcstypes.S2STransferFeature](dst) == nil { return false } - _, ok := src.UserSpace.Storage.(*cortypes.LocalType) + _, ok := src.UserSpace.Storage.(*jcstypes.LocalType) if !ok { return false } @@ -37,7 +36,7 @@ func (*S2STransfer) CanTransfer(src, dst *jcsypes.UserSpaceDetail) bool { } // 执行数据直传 -func (s *S2STransfer) Transfer(ctx context.Context, src *jcsypes.UserSpaceDetail, srcPath jcsypes.JPath, dstPath jcsypes.JPath) (types.FileInfo, error) { +func (s *S2STransfer) Transfer(ctx context.Context, src *jcstypes.UserSpaceDetail, srcPath jcstypes.JPath, dstPath jcstypes.JPath) (types.FileInfo, error) { s.dstPath = dstPath copy, err := os.OpenFile(filepath.Join(s.localStg.RootDir, s.dstPath.JoinOSPath()), os.O_WRONLY|os.O_CREATE, 0644) diff --git a/common/pkgs/storage/local/shard_store.go b/common/pkgs/storage/local/shard_store.go index b9b179c..c727da0 100644 --- a/common/pkgs/storage/local/shard_store.go +++ 
b/common/pkgs/storage/local/shard_store.go @@ -10,18 +10,18 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type ShardStore struct { - detail *jcsypes.UserSpaceDetail + detail *jcstypes.UserSpaceDetail stgRoot string storeAbsRoot string lock sync.Mutex done chan any } -func NewShardStore(root string, detail *jcsypes.UserSpaceDetail) (*ShardStore, error) { +func NewShardStore(root string, detail *jcstypes.UserSpaceDetail) (*ShardStore, error) { storeAbsRoot, err := filepath.Abs(filepath.Join(root, detail.UserSpace.WorkingDir.JoinOSPath(), types.ShardStoreWorkingDir)) if err != nil { return nil, fmt.Errorf("get abs root: %w", err) @@ -43,7 +43,7 @@ func (s *ShardStore) Stop() { s.getLogger().Infof("component stop") } -func (s *ShardStore) Store(path jcsypes.JPath, hash jcsypes.FileHash, size int64) (types.FileInfo, error) { +func (s *ShardStore) Store(path jcstypes.JPath, hash jcstypes.FileHash, size int64) (types.FileInfo, error) { fullTempPath := filepath.Join(s.stgRoot, path.JoinOSPath()) s.lock.Lock() @@ -81,7 +81,7 @@ func (s *ShardStore) Store(path jcsypes.JPath, hash jcsypes.FileHash, size int64 }, nil } -func (s *ShardStore) Info(hash jcsypes.FileHash) (types.FileInfo, error) { +func (s *ShardStore) Info(hash jcstypes.FileHash) (types.FileInfo, error) { s.lock.Lock() defer s.lock.Unlock() @@ -118,7 +118,7 @@ func (s *ShardStore) ListAll() ([]types.FileInfo, error) { return err } - fileHash, err := jcsypes.ParseHash(filepath.Base(info.Name())) + fileHash, err := jcstypes.ParseHash(filepath.Base(info.Name())) if err != nil { return nil } @@ -137,11 +137,11 @@ func (s *ShardStore) ListAll() ([]types.FileInfo, error) { return infos, nil } -func (s *ShardStore) GC(avaiables []jcsypes.FileHash) error { +func (s *ShardStore) GC(avaiables []jcstypes.FileHash) error { s.lock.Lock() defer s.lock.Unlock() - avais := make(map[jcsypes.FileHash]bool) + avais := make(map[jcstypes.FileHash]bool) for _, hash := range avaiables { avais[hash] = true } @@ -162,7 +162,7 @@ func (s *ShardStore) GC(avaiables []jcsypes.FileHash) error { return err } - fileHash, err := jcsypes.ParseHash(filepath.Base(info.Name())) + fileHash, err := jcstypes.ParseHash(filepath.Base(info.Name())) if err != nil { return nil } @@ -199,14 +199,14 @@ func (s *ShardStore) getLogger() logger.Logger { return logger.WithField("ShardStore", "Local").WithField("Storage", s.detail.UserSpace.Storage.String()) } -func (s *ShardStore) getFileDirFromHash(hash jcsypes.FileHash) string { +func (s *ShardStore) getFileDirFromHash(hash jcstypes.FileHash) string { return filepath.Join(s.storeAbsRoot, hash.GetHashPrefix(2)) } -func (s *ShardStore) getFilePathFromHash(hash jcsypes.FileHash) string { +func (s *ShardStore) getFilePathFromHash(hash jcstypes.FileHash) string { return filepath.Join(s.storeAbsRoot, hash.GetHashPrefix(2), string(hash)) } -func (s *ShardStore) getJPathFromHash(hash jcsypes.FileHash) jcsypes.JPath { +func (s *ShardStore) getJPathFromHash(hash jcstypes.FileHash) jcstypes.JPath { return s.detail.UserSpace.WorkingDir.ConcatCompsNew(types.ShardStoreWorkingDir, hash.GetHashPrefix(2), string(hash)) } diff --git a/common/pkgs/storage/obs/obs.go b/common/pkgs/storage/obs/obs.go index be13ce2..17819d9 100644 --- a/common/pkgs/storage/obs/obs.go +++ b/common/pkgs/storage/obs/obs.go @@ -9,20 +9,19 @@ import ( 
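`ShardStore.GC` above turns the `avaiables` list into a set and deletes every stored file whose hash is missing from it. The same keep-set sweep, reduced to an in-memory map (the `gc` helper and the sample hashes are stand-ins; the real store walks the filesystem, or the bucket listing in the S3 variant further down):

```go
package main

import "fmt"

type FileHash string

// gc keeps only the files whose hash appears in availables and reports how
// many entries were dropped — the same keep-set idea as ShardStore.GC above.
func gc(files map[FileHash]int64, availables []FileHash) int {
	avais := make(map[FileHash]bool, len(availables))
	for _, h := range availables {
		avais[h] = true
	}

	removed := 0
	for h := range files {
		if !avais[h] {
			delete(files, h)
			removed++
		}
	}
	return removed
}

func main() {
	files := map[FileHash]int64{"aa11": 4, "bb22": 8, "cc33": 15}
	fmt.Println(gc(files, []FileHash{"aa11"}), files)
}
```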
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg" s3stg "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/s3" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { - reg.RegisterBuilder[*cortypes.OBSType](newBuilder) + reg.RegisterBuilder[*jcstypes.OBSType](newBuilder) } type builder struct { types.EmptyBuilder - detail *jcsypes.UserSpaceDetail + detail *jcstypes.UserSpaceDetail } -func newBuilder(detail *jcsypes.UserSpaceDetail) types.StorageBuilder { +func newBuilder(detail *jcstypes.UserSpaceDetail) types.StorageBuilder { return &builder{ detail: detail, } @@ -33,8 +32,8 @@ func (b *builder) FeatureDesc() types.FeatureDesc { } func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) { - stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType) - cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred) + stgType := b.detail.UserSpace.Storage.(*jcstypes.OBSType) + cred, ok := b.detail.UserSpace.Credential.(*jcstypes.OBSCred) if !ok { return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) } @@ -52,8 +51,8 @@ func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) { } func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) { - stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType) - cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred) + stgType := b.detail.UserSpace.Storage.(*jcstypes.OBSType) + cred, ok := b.detail.UserSpace.Credential.(*jcstypes.OBSCred) if !ok { return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) } @@ -70,7 +69,7 @@ func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) { return s3stg.NewBaseStore(b.detail, cli, bucket, s3stg.BaseStoreOption{UseAWSSha256: false}) } -func createClient(stgType *cortypes.OBSType, cred *cortypes.OBSCred) (*s3.Client, string, error) { +func createClient(stgType *jcstypes.OBSType, cred *jcstypes.OBSCred) (*s3.Client, string, error) { awsConfig := aws.Config{} cre := aws.Credentials{ @@ -91,17 +90,17 @@ func createClient(stgType *cortypes.OBSType, cred *cortypes.OBSCred) (*s3.Client } func (b *builder) CreateMultiparter(typeOnly bool) (types.Multiparter, error) { - stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType) - feat := types.FindFeature[*cortypes.MultipartUploadFeature](b.detail) + stgType := b.detail.UserSpace.Storage.(*jcstypes.OBSType) + feat := types.FindFeature[*jcstypes.MultipartUploadFeature](b.detail) if feat == nil { - return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{}) + return nil, fmt.Errorf("feature %T not found", jcstypes.MultipartUploadFeature{}) } if typeOnly { return (*s3stg.Multiparter)(nil), nil } - cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred) + cred, ok := b.detail.UserSpace.Credential.(*jcstypes.OBSCred) if !ok { return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) } @@ -120,17 +119,17 @@ func (b *builder) CreateMultiparter(typeOnly bool) (types.Multiparter, error) { } func (b *builder) CreateS2STransfer(typeOnly bool) (types.S2STransfer, error) { - stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType) - feat := types.FindFeature[*cortypes.S2STransferFeature](b.detail) + stgType := 
b.detail.UserSpace.Storage.(*jcstypes.OBSType) + feat := types.FindFeature[*jcstypes.S2STransferFeature](b.detail) if feat == nil { - return nil, fmt.Errorf("feature %T not found", cortypes.S2STransferFeature{}) + return nil, fmt.Errorf("feature %T not found", jcstypes.S2STransferFeature{}) } if typeOnly { return (*S2STransfer)(nil), nil } - cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred) + cred, ok := b.detail.UserSpace.Credential.(*jcstypes.OBSCred) if !ok { return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) } diff --git a/common/pkgs/storage/obs/obs_test.go b/common/pkgs/storage/obs/obs_test.go index 7590a7f..75ac29c 100644 --- a/common/pkgs/storage/obs/obs_test.go +++ b/common/pkgs/storage/obs/obs_test.go @@ -5,40 +5,39 @@ import ( "testing" . "github.com/smartystreets/goconvey/convey" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func Test_S2S(t *testing.T) { Convey("OBS", t, func() { s2s := S2STransfer{ - stgType: &cortypes.OBSType{ + stgType: &jcstypes.OBSType{ Region: "cn-north-4", Endpoint: "obs.cn-north-4.myhuaweicloud.com", Bucket: "pcm3-bucket3", ProjectID: "", }, - cred: &cortypes.OBSCred{ + cred: &jcstypes.OBSCred{ AK: "", SK: "", }, - feat: &cortypes.S2STransferFeature{}, + feat: &jcstypes.S2STransferFeature{}, } - _, err := s2s.Transfer(context.TODO(), &jcsypes.UserSpaceDetail{ - UserSpace: jcsypes.UserSpace{ - Storage: &cortypes.OBSType{ + _, err := s2s.Transfer(context.TODO(), &jcstypes.UserSpaceDetail{ + UserSpace: jcstypes.UserSpace{ + Storage: &jcstypes.OBSType{ Region: "cn-north-4", Endpoint: "obs.cn-north-4.myhuaweicloud.com", Bucket: "pcm2-bucket2", ProjectID: "", }, - Credential: &cortypes.OBSCred{ + Credential: &jcstypes.OBSCred{ AK: "", SK: "", }, }, - }, jcsypes.PathFromComps("test_data/test03.txt"), jcsypes.PathFromComps("atest.txt")) + }, jcstypes.PathFromComps("test_data/test03.txt"), jcstypes.PathFromComps("atest.txt")) defer s2s.Close() So(err, ShouldEqual, nil) diff --git a/common/pkgs/storage/obs/s2s.go b/common/pkgs/storage/obs/s2s.go index a9c0116..002e8cb 100644 --- a/common/pkgs/storage/obs/s2s.go +++ b/common/pkgs/storage/obs/s2s.go @@ -15,20 +15,19 @@ import ( "gitlink.org.cn/cloudream/common/utils/os2" stgs3 "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/s3" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type S2STransfer struct { - detail *jcsypes.UserSpaceDetail - stgType *cortypes.OBSType - cred *cortypes.OBSCred - feat *cortypes.S2STransferFeature + detail *jcstypes.UserSpaceDetail + stgType *jcstypes.OBSType + cred *jcstypes.OBSCred + feat *jcstypes.S2STransferFeature taskID *int64 omsCli *oms.OmsClient } -func NewS2STransfer(detail *jcsypes.UserSpaceDetail, stgType *cortypes.OBSType, cred *cortypes.OBSCred, feat *cortypes.S2STransferFeature) *S2STransfer { +func NewS2STransfer(detail *jcstypes.UserSpaceDetail, stgType *jcstypes.OBSType, cred *jcstypes.OBSCred, feat *jcstypes.S2STransferFeature) *S2STransfer { return &S2STransfer{ detail: detail, stgType: stgType, @@ -38,13 +37,13 @@ func NewS2STransfer(detail *jcsypes.UserSpaceDetail, stgType *cortypes.OBSType, } // 判断是否能从指定的源存储中直传到当前存储的目的路径 -func (*S2STransfer) CanTransfer(src, dst 
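The storage builders in these hunks (local and obs above, s3 further down) look up optional capabilities with `types.FindFeature[*jcstypes.MultipartUploadFeature](b.detail)` and treat a nil result as the feature not being configured. Below is a stand-alone sketch of such a generic first-of-type lookup over a feature slice; the `Feature` interface, the two feature structs, and the 5 MiB part size are stand-ins, not the real `storage/types` API.

```go
package main

import "fmt"

type Feature interface{ isFeature() }

type MultipartUploadFeature struct{ MinPartSize int64 }
type S2STransferFeature struct{}

func (*MultipartUploadFeature) isFeature() {}
func (*S2STransferFeature) isFeature()     {}

type SpaceDetail struct {
	Features []Feature
}

// FindFeature returns the first feature of type T, or the zero value (nil for
// pointer types) when the space does not declare it.
func FindFeature[T Feature](d *SpaceDetail) T {
	for _, f := range d.Features {
		if t, ok := f.(T); ok {
			return t
		}
	}
	var zero T
	return zero
}

func main() {
	d := &SpaceDetail{Features: []Feature{&MultipartUploadFeature{MinPartSize: 5 << 20}}}

	if feat := FindFeature[*MultipartUploadFeature](d); feat != nil {
		fmt.Println("multipart enabled, min part:", feat.MinPartSize)
	}
	if FindFeature[*S2STransferFeature](d) == nil {
		fmt.Println("s2s not configured")
	}
}
```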
*jcsypes.UserSpaceDetail) bool { - req := makeRequest(src, jcsypes.JPath{}) +func (*S2STransfer) CanTransfer(src, dst *jcstypes.UserSpaceDetail) bool { + req := makeRequest(src, jcstypes.JPath{}) return req != nil } // 执行数据直传。返回传输后的文件路径 -func (s *S2STransfer) Transfer(ctx context.Context, src *jcsypes.UserSpaceDetail, srcPath jcsypes.JPath, dstPath jcsypes.JPath) (types.FileInfo, error) { +func (s *S2STransfer) Transfer(ctx context.Context, src *jcstypes.UserSpaceDetail, srcPath jcstypes.JPath, dstPath jcstypes.JPath) (types.FileInfo, error) { req := makeRequest(src, srcPath) if req == nil { return types.FileInfo{}, fmt.Errorf("unsupported source storage type: %T", src.UserSpace.Storage) @@ -178,12 +177,12 @@ func (s *S2STransfer) Close() { } } -func makeRequest(srcStg *jcsypes.UserSpaceDetail, srcPath jcsypes.JPath) *model.SrcNodeReq { +func makeRequest(srcStg *jcstypes.UserSpaceDetail, srcPath jcstypes.JPath) *model.SrcNodeReq { switch srcType := srcStg.UserSpace.Storage.(type) { - case *cortypes.OBSType: + case *jcstypes.OBSType: cloudType := "HuaweiCloud" - cred, ok := srcStg.UserSpace.Credential.(*cortypes.OBSCred) + cred, ok := srcStg.UserSpace.Credential.(*jcstypes.OBSCred) if !ok { return nil } diff --git a/common/pkgs/storage/obs/shard_store.go b/common/pkgs/storage/obs/shard_store.go index 4efe576..0042c58 100644 --- a/common/pkgs/storage/obs/shard_store.go +++ b/common/pkgs/storage/obs/shard_store.go @@ -5,17 +5,16 @@ import ( "github.com/huaweicloud/huaweicloud-sdk-go-obs/obs" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/s3" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type ShardStore struct { *s3.ShardStore - stgType *cortypes.OBSType - cred *cortypes.OBSCred + stgType *jcstypes.OBSType + cred *jcstypes.OBSCred } -func NewShardStore(detail *jcsypes.UserSpaceDetail, stgType *cortypes.OBSType, cred *cortypes.OBSCred, s3Cli *awss3.Client, bkt string) (*ShardStore, error) { +func NewShardStore(detail *jcstypes.UserSpaceDetail, stgType *jcstypes.OBSType, cred *jcstypes.OBSCred, s3Cli *awss3.Client, bkt string) (*ShardStore, error) { sd := ShardStore{ stgType: stgType, cred: cred, @@ -32,7 +31,7 @@ func NewShardStore(detail *jcsypes.UserSpaceDetail, stgType *cortypes.OBSType, c return &sd, nil } -func (s *ShardStore) MakeHTTPReadRequest(fileHash jcsypes.FileHash) (types.HTTPRequest, error) { +func (s *ShardStore) MakeHTTPReadRequest(fileHash jcstypes.FileHash) (types.HTTPRequest, error) { cli, err := obs.New(s.cred.AK, s.cred.SK, s.stgType.Endpoint) if err != nil { return types.HTTPRequest{}, err diff --git a/common/pkgs/storage/pool/pool.go b/common/pkgs/storage/pool/pool.go index d0b646e..d7cab3c 100644 --- a/common/pkgs/storage/pool/pool.go +++ b/common/pkgs/storage/pool/pool.go @@ -6,12 +6,11 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/async" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type userSpace struct { - detail *jcsypes.UserSpaceDetail + detail *jcstypes.UserSpaceDetail store types.ShardStore } @@ -23,8 +22,8 @@ func (u *userSpace) Drop() { } type userSpaceKey struct { - UserID cortypes.UserID - 
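Both `S2STransfer.CanTransfer` implementations above gate the direct-transfer fast path the same way: the destination space must carry the S2S feature and the source storage must be a type the engine can pull from. A reduced sketch of that two-part check with stand-in value types (the real code switches on pointer types and, for local storage, additionally requires both sides to be on the same machine):

```go
package main

import "fmt"

type StorageType interface{ isStorageType() }

type OBSType struct{ Region string }
type LocalType struct{}

func (OBSType) isStorageType()   {}
func (LocalType) isStorageType() {}

type Feature interface{ isFeature() }

type S2STransferFeature struct{}

func (S2STransferFeature) isFeature() {}

type SpaceDetail struct {
	Storage  StorageType
	Features []Feature
}

func hasS2SFeature(d *SpaceDetail) bool {
	for _, f := range d.Features {
		if _, ok := f.(S2STransferFeature); ok {
			return true
		}
	}
	return false
}

// canTransfer mirrors the shape of CanTransfer above: the destination must
// enable S2S, and the source storage type must be one this engine can pull from.
func canTransfer(src, dst *SpaceDetail) bool {
	if !hasS2SFeature(dst) {
		return false
	}
	_, ok := src.Storage.(OBSType)
	return ok
}

func main() {
	src := &SpaceDetail{Storage: OBSType{Region: "cn-north-4"}}
	dst := &SpaceDetail{Storage: OBSType{}, Features: []Feature{S2STransferFeature{}}}
	fmt.Println(canTransfer(src, dst), canTransfer(dst, &SpaceDetail{}))
}
```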
UserSpaceID jcsypes.UserSpaceID + UserID jcstypes.UserID + UserSpaceID jcstypes.UserSpaceID } type Pool struct { @@ -40,7 +39,7 @@ func NewPool() *Pool { } } -func (p *Pool) Drop(userID cortypes.UserID, spaceID jcsypes.UserSpaceID) { +func (p *Pool) Drop(userID jcstypes.UserID, spaceID jcstypes.UserSpaceID) { p.lock.Lock() defer p.lock.Unlock() @@ -56,7 +55,7 @@ func (p *Pool) Drop(userID cortypes.UserID, spaceID jcsypes.UserSpaceID) { delete(p.spaces, key) } -func (p *Pool) GetShardStore(spaceDetail *jcsypes.UserSpaceDetail) (types.ShardStore, error) { +func (p *Pool) GetShardStore(spaceDetail *jcstypes.UserSpaceDetail) (types.ShardStore, error) { p.lock.Lock() defer p.lock.Unlock() @@ -91,18 +90,18 @@ func (p *Pool) GetShardStore(spaceDetail *jcsypes.UserSpaceDetail) (types.ShardS return space.store, nil } -func (p *Pool) GetBaseStore(spaceDetail *jcsypes.UserSpaceDetail) (types.BaseStore, error) { +func (p *Pool) GetBaseStore(spaceDetail *jcstypes.UserSpaceDetail) (types.BaseStore, error) { return factory.GetBuilder(spaceDetail).CreateBaseStore(false) } -func (p *Pool) GetMultiparter(spaceDetail *jcsypes.UserSpaceDetail) (types.Multiparter, error) { +func (p *Pool) GetMultiparter(spaceDetail *jcstypes.UserSpaceDetail) (types.Multiparter, error) { return factory.GetBuilder(spaceDetail).CreateMultiparter(false) } -func (p *Pool) GetS2STransfer(spaceDetail *jcsypes.UserSpaceDetail) (types.S2STransfer, error) { +func (p *Pool) GetS2STransfer(spaceDetail *jcstypes.UserSpaceDetail) (types.S2STransfer, error) { return factory.GetBuilder(spaceDetail).CreateS2STransfer(false) } -func (p *Pool) GetECMultiplier(spaceDetail *jcsypes.UserSpaceDetail) (types.ECMultiplier, error) { +func (p *Pool) GetECMultiplier(spaceDetail *jcstypes.UserSpaceDetail) (types.ECMultiplier, error) { return factory.GetBuilder(spaceDetail).CreateECMultiplier(false) } diff --git a/common/pkgs/storage/s3/base_store.go b/common/pkgs/storage/s3/base_store.go index b53de79..32b3888 100644 --- a/common/pkgs/storage/s3/base_store.go +++ b/common/pkgs/storage/s3/base_store.go @@ -16,7 +16,7 @@ import ( "gitlink.org.cn/cloudream/common/utils/io2" "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) const ( @@ -24,7 +24,7 @@ const ( ) type BaseStore struct { - Detail *jcsypes.UserSpaceDetail + Detail *jcstypes.UserSpaceDetail Bucket string cli *s3.Client opt BaseStoreOption @@ -34,7 +34,7 @@ type BaseStoreOption struct { UseAWSSha256 bool // 能否直接使用AWS提供的SHA256校验,如果不行,则使用本地计算。默认使用本地计算。 } -func NewBaseStore(detail *jcsypes.UserSpaceDetail, cli *s3.Client, bkt string, opt BaseStoreOption) (*BaseStore, error) { +func NewBaseStore(detail *jcstypes.UserSpaceDetail, cli *s3.Client, bkt string, opt BaseStoreOption) (*BaseStore, error) { return &BaseStore{ Detail: detail, Bucket: bkt, @@ -43,7 +43,7 @@ func NewBaseStore(detail *jcsypes.UserSpaceDetail, cli *s3.Client, bkt string, o }, nil } -func (s *BaseStore) Write(pat jcsypes.JPath, stream io.Reader, opt types.WriteOption) (types.FileInfo, error) { +func (s *BaseStore) Write(pat jcstypes.JPath, stream io.Reader, opt types.WriteOption) (types.FileInfo, error) { key := pat meta := make(map[string]string) if opt.ModTime.IsZero() { @@ -81,7 +81,7 @@ func (s *BaseStore) Write(pat jcsypes.JPath, stream io.Reader, opt types.WriteOp return types.FileInfo{ Path: key, - Hash: jcsypes.NewFullHash(hash), + Hash: 
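`Pool` above caches one store per user/space pair by using a small struct as the map key, and `Drop` evicts a single entry under the lock. A reduced sketch of that composite-key cache; `Get`, `store`, and the `/data/...` root are stand-ins (the real pool builds stores through the storage factory):

```go
package main

import (
	"fmt"
	"sync"
)

type UserID int64
type UserSpaceID int64

type userSpaceKey struct {
	UserID      UserID
	UserSpaceID UserSpaceID
}

type store struct{ root string }

type Pool struct {
	mu     sync.Mutex
	spaces map[userSpaceKey]*store
}

func NewPool() *Pool {
	return &Pool{spaces: make(map[userSpaceKey]*store)}
}

// Get returns the cached store for the pair, creating it on first use.
func (p *Pool) Get(uid UserID, sid UserSpaceID) *store {
	p.mu.Lock()
	defer p.mu.Unlock()

	key := userSpaceKey{UserID: uid, UserSpaceID: sid}
	if s, ok := p.spaces[key]; ok {
		return s
	}
	s := &store{root: fmt.Sprintf("/data/%d/%d", uid, sid)}
	p.spaces[key] = s
	return s
}

// Drop evicts one entry, the way Pool.Drop above does before a space is reconfigured.
func (p *Pool) Drop(uid UserID, sid UserSpaceID) {
	p.mu.Lock()
	defer p.mu.Unlock()
	delete(p.spaces, userSpaceKey{UserID: uid, UserSpaceID: sid})
}

func main() {
	p := NewPool()
	fmt.Println(p.Get(1, 2).root)
	p.Drop(1, 2)
	fmt.Println(len(p.spaces))
}
```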
jcstypes.NewFullHash(hash), Size: counter.Count(), }, nil } @@ -99,12 +99,12 @@ func (s *BaseStore) Write(pat jcsypes.JPath, stream io.Reader, opt types.WriteOp return types.FileInfo{ Path: key, - Hash: jcsypes.NewFullHash(hashStr.Sum()), + Hash: jcstypes.NewFullHash(hashStr.Sum()), Size: counter.Count(), }, nil } -func (s *BaseStore) Read(pat jcsypes.JPath, opt types.OpenOption) (io.ReadCloser, error) { +func (s *BaseStore) Read(pat jcstypes.JPath, opt types.OpenOption) (io.ReadCloser, error) { key := pat input := &s3.GetObjectInput{ @@ -128,7 +128,7 @@ func (s *BaseStore) Read(pat jcsypes.JPath, opt types.OpenOption) (io.ReadCloser return resp.Body, nil } -func (s *BaseStore) Mkdir(path jcsypes.JPath) error { +func (s *BaseStore) Mkdir(path jcstypes.JPath) error { _, err := s.cli.PutObject(context.TODO(), &s3.PutObjectInput{ Bucket: aws.String(s.Bucket), Key: aws.String(path.String() + "/"), @@ -137,7 +137,7 @@ func (s *BaseStore) Mkdir(path jcsypes.JPath) error { return err } -func (s *BaseStore) ReadDir(path jcsypes.JPath) types.DirReader { +func (s *BaseStore) ReadDir(path jcstypes.JPath) types.DirReader { return &DirReader{ cli: s.cli, bucket: s.Bucket, diff --git a/common/pkgs/storage/s3/dir_reader.go b/common/pkgs/storage/s3/dir_reader.go index 368cf97..4f9d193 100644 --- a/common/pkgs/storage/s3/dir_reader.go +++ b/common/pkgs/storage/s3/dir_reader.go @@ -7,13 +7,13 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type DirReader struct { cli *s3.Client bucket string - rootPath jcsypes.JPath + rootPath jcstypes.JPath marker *string curInfos []types.DirEntry eof bool @@ -39,7 +39,7 @@ func (r *DirReader) Next() (types.DirEntry, error) { } for _, obj := range resp.Contents { - key := jcsypes.PathFromJcsPathString(*obj.Key) + key := jcstypes.PathFromJcsPathString(*obj.Key) r.curInfos = append(r.curInfos, types.DirEntry{ Path: key, diff --git a/common/pkgs/storage/s3/multipart_upload.go b/common/pkgs/storage/s3/multipart_upload.go index 9ac7fac..cc46b93 100644 --- a/common/pkgs/storage/s3/multipart_upload.go +++ b/common/pkgs/storage/s3/multipart_upload.go @@ -12,18 +12,17 @@ import ( "gitlink.org.cn/cloudream/common/utils/os2" "gitlink.org.cn/cloudream/common/utils/sort2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type Multiparter struct { - detail *jcsypes.UserSpaceDetail - feat *cortypes.MultipartUploadFeature + detail *jcstypes.UserSpaceDetail + feat *jcstypes.MultipartUploadFeature bucket string cli *s3.Client } -func NewMultiparter(detail *jcsypes.UserSpaceDetail, feat *cortypes.MultipartUploadFeature, bkt string, cli *s3.Client) *Multiparter { +func NewMultiparter(detail *jcstypes.UserSpaceDetail, feat *jcstypes.MultipartUploadFeature, bkt string, cli *s3.Client) *Multiparter { return &Multiparter{ detail: detail, feat: feat, @@ -89,9 +88,9 @@ func (m *Multiparter) UploadPart(ctx context.Context, init types.MultipartInitSt type MultipartTask struct { cli *s3.Client bucket string - tempDir jcsypes.JPath + tempDir jcstypes.JPath tempFileName string - tempFilePath jcsypes.JPath + tempFilePath jcstypes.JPath uploadID string } @@ -140,7 +139,7 @@ func (i *MultipartTask) 
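Both `BaseStore.Write` implementations above stream the upload exactly once while counting bytes and hashing, then return `FileInfo{Path, Size, Hash}`. A minimal stdlib-only sketch of that single-pass count-and-hash; the `FileInfo` struct and hex-encoded hash here are stand-ins for `jcstypes.NewFullHash`:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

type FileInfo struct {
	Path string
	Size int64
	Hash string
}

// write drains stream exactly once, feeding a SHA-256 hasher on the way and
// counting the bytes, the way the local/S3 BaseStore.Write hunks above do.
func write(path string, stream io.Reader, sink io.Writer) (FileInfo, error) {
	hasher := sha256.New()
	n, err := io.Copy(sink, io.TeeReader(stream, hasher))
	if err != nil {
		return FileInfo{}, fmt.Errorf("write %s: %w", path, err)
	}
	return FileInfo{
		Path: path,
		Size: n,
		Hash: hex.EncodeToString(hasher.Sum(nil)),
	}, nil
}

func main() {
	info, err := write("test3.txt", strings.NewReader("hello"), io.Discard)
	fmt.Println(info, err)
}
```

`io.TeeReader` feeds the hasher as the copy drains the stream, so the data is read once even though both the size and the hash come out of the same pass.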
JoinParts(ctx context.Context, parts []types.UploadedPar return types.FileInfo{}, err } - hash := jcsypes.CalculateCompositeHash(partHashes) + hash := jcstypes.CalculateCompositeHash(partHashes) return types.FileInfo{ Path: i.tempFilePath, diff --git a/common/pkgs/storage/s3/s3.go b/common/pkgs/storage/s3/s3.go index 20680e7..fa1b3c9 100644 --- a/common/pkgs/storage/s3/s3.go +++ b/common/pkgs/storage/s3/s3.go @@ -8,20 +8,19 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func init() { - reg.RegisterBuilder[*cortypes.S3Type](newBuilder) + reg.RegisterBuilder[*jcstypes.S3Type](newBuilder) } type builder struct { types.EmptyBuilder - detail *jcsypes.UserSpaceDetail + detail *jcstypes.UserSpaceDetail } -func newBuilder(detail *jcsypes.UserSpaceDetail) types.StorageBuilder { +func newBuilder(detail *jcstypes.UserSpaceDetail) types.StorageBuilder { return &builder{ detail: detail, } @@ -32,8 +31,8 @@ func (b *builder) FeatureDesc() types.FeatureDesc { } func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) { - stgType := b.detail.UserSpace.Storage.(*cortypes.S3Type) - s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred) + stgType := b.detail.UserSpace.Storage.(*jcstypes.S3Type) + s3Cred, ok := b.detail.UserSpace.Credential.(*jcstypes.S3Cred) if !ok { return nil, fmt.Errorf("invalid storage credential type %T for s3 storage", b.detail.UserSpace.Credential) } @@ -51,8 +50,8 @@ func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) { } func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) { - stgType := b.detail.UserSpace.Storage.(*cortypes.S3Type) - s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred) + stgType := b.detail.UserSpace.Storage.(*jcstypes.S3Type) + s3Cred, ok := b.detail.UserSpace.Credential.(*jcstypes.S3Cred) if !ok { return nil, fmt.Errorf("invalid storage credential type %T for s3 storage", b.detail.UserSpace.Credential) } @@ -69,7 +68,7 @@ func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) { return NewBaseStore(b.detail, cli, bkt, BaseStoreOption{UseAWSSha256: false}) } -func createClient(stgType *cortypes.S3Type, cred *cortypes.S3Cred) (*s3.Client, string, error) { +func createClient(stgType *jcstypes.S3Type, cred *jcstypes.S3Cred) (*s3.Client, string, error) { awsConfig := aws.Config{} if cred.AK != "" && cred.SK != "" { @@ -92,13 +91,13 @@ func createClient(stgType *cortypes.S3Type, cred *cortypes.S3Cred) (*s3.Client, } func (b *builder) CreateMultiparter(typeOnly bool) (types.Multiparter, error) { - stgType := b.detail.UserSpace.Storage.(*cortypes.S3Type) - feat := types.FindFeature[*cortypes.MultipartUploadFeature](b.detail) + stgType := b.detail.UserSpace.Storage.(*jcstypes.S3Type) + feat := types.FindFeature[*jcstypes.MultipartUploadFeature](b.detail) if feat == nil { - return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{}) + return nil, fmt.Errorf("feature %T not found", jcstypes.MultipartUploadFeature{}) } - s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred) + s3Cred, ok := b.detail.UserSpace.Credential.(*jcstypes.S3Cred) if !ok { return nil, fmt.Errorf("invalid storage credential type %T for s3 base store", 
b.detail.UserSpace.Credential) } diff --git a/common/pkgs/storage/s3/shard_store.go b/common/pkgs/storage/s3/shard_store.go index 48328e6..fddc6a4 100644 --- a/common/pkgs/storage/s3/shard_store.go +++ b/common/pkgs/storage/s3/shard_store.go @@ -10,7 +10,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type ShardStoreOption struct { @@ -18,15 +18,15 @@ type ShardStoreOption struct { } type ShardStore struct { - Detail *jcsypes.UserSpaceDetail + Detail *jcstypes.UserSpaceDetail Bucket string - workingDir jcsypes.JPath + workingDir jcstypes.JPath cli *s3.Client opt ShardStoreOption lock sync.Mutex } -func NewShardStore(detail *jcsypes.UserSpaceDetail, cli *s3.Client, bkt string, opt ShardStoreOption) (*ShardStore, error) { +func NewShardStore(detail *jcstypes.UserSpaceDetail, cli *s3.Client, bkt string, opt ShardStoreOption) (*ShardStore, error) { wd := detail.UserSpace.WorkingDir.Clone() wd.Push(types.ShardStoreWorkingDir) return &ShardStore{ @@ -46,7 +46,7 @@ func (s *ShardStore) Stop() { s.getLogger().Infof("component stop") } -func (s *ShardStore) Store(path jcsypes.JPath, hash jcsypes.FileHash, size int64) (types.FileInfo, error) { +func (s *ShardStore) Store(path jcstypes.JPath, hash jcstypes.FileHash, size int64) (types.FileInfo, error) { s.lock.Lock() defer s.lock.Unlock() @@ -73,7 +73,7 @@ func (s *ShardStore) Store(path jcsypes.JPath, hash jcsypes.FileHash, size int64 }, nil } -func (s *ShardStore) Info(hash jcsypes.FileHash) (types.FileInfo, error) { +func (s *ShardStore) Info(hash jcstypes.FileHash) (types.FileInfo, error) { s.lock.Lock() defer s.lock.Unlock() @@ -116,7 +116,7 @@ func (s *ShardStore) ListAll() ([]types.FileInfo, error) { for _, obj := range resp.Contents { key := BaseKey(*obj.Key) - fileHash, err := jcsypes.ParseHash(key) + fileHash, err := jcstypes.ParseHash(key) if err != nil { continue } @@ -124,7 +124,7 @@ func (s *ShardStore) ListAll() ([]types.FileInfo, error) { infos = append(infos, types.FileInfo{ Hash: fileHash, Size: *obj.Size, - Path: jcsypes.PathFromJcsPathString(*obj.Key), + Path: jcstypes.PathFromJcsPathString(*obj.Key), }) } @@ -138,11 +138,11 @@ func (s *ShardStore) ListAll() ([]types.FileInfo, error) { return infos, nil } -func (s *ShardStore) GC(avaiables []jcsypes.FileHash) error { +func (s *ShardStore) GC(avaiables []jcstypes.FileHash) error { s.lock.Lock() defer s.lock.Unlock() - avais := make(map[jcsypes.FileHash]bool) + avais := make(map[jcstypes.FileHash]bool) for _, hash := range avaiables { avais[hash] = true } @@ -163,7 +163,7 @@ func (s *ShardStore) GC(avaiables []jcsypes.FileHash) error { for _, obj := range resp.Contents { key := BaseKey(*obj.Key) - fileHash, err := jcsypes.ParseHash(key) + fileHash, err := jcstypes.ParseHash(key) if err != nil { continue } @@ -216,13 +216,13 @@ func (s *ShardStore) getLogger() logger.Logger { return logger.WithField("ShardStore", "S3").WithField("UserSpace", s.Detail) } -func (s *ShardStore) GetFileDirFromHash(hash jcsypes.FileHash) jcsypes.JPath { +func (s *ShardStore) GetFileDirFromHash(hash jcstypes.FileHash) jcstypes.JPath { p := s.workingDir.Clone() p.Push(hash.GetHashPrefix(2)) return p } -func (s *ShardStore) GetFilePathFromHash(hash jcsypes.FileHash) jcsypes.JPath { +func (s *ShardStore) GetFilePathFromHash(hash jcstypes.FileHash) jcstypes.JPath { p := 
s.workingDir.Clone() p.Push(hash.GetHashPrefix(2)) p.Push(string(hash)) diff --git a/common/pkgs/storage/types/base_store.go b/common/pkgs/storage/types/base_store.go index 63400d2..84722c4 100644 --- a/common/pkgs/storage/types/base_store.go +++ b/common/pkgs/storage/types/base_store.go @@ -5,11 +5,11 @@ import ( "io" "time" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type DirEntry struct { - Path jcsypes.JPath + Path jcstypes.JPath Size int64 ModTime time.Time IsDir bool @@ -22,13 +22,13 @@ type DirReader interface { } type BaseStore interface { - Write(path jcsypes.JPath, stream io.Reader, opt WriteOption) (FileInfo, error) - Read(path jcsypes.JPath, opt OpenOption) (io.ReadCloser, error) + Write(path jcstypes.JPath, stream io.Reader, opt WriteOption) (FileInfo, error) + Read(path jcstypes.JPath, opt OpenOption) (io.ReadCloser, error) // 创建指定路径的文件夹。对于不支持空文件夹的存储系统来说,可以采用创建以/结尾的对象的方式来模拟文件夹。 - Mkdir(path jcsypes.JPath) error + Mkdir(path jcstypes.JPath) error // 返回指定路径下的所有文件,文件路径是包含path在内的完整路径。返回结果的第一条一定是路径本身,可能是文件,也可能是目录,路径不存在时,Next应该直接返回io.EOF。 // Next必须按照目录的层级关系返回,但不一定要按照文件名排序。 - ReadDir(path jcsypes.JPath) DirReader + ReadDir(path jcstypes.JPath) DirReader // 清空临时目录。只应该在此存储服务未被使用时调用 CleanTemps() // 测试存储服务是否可用 diff --git a/common/pkgs/storage/types/bypass.go b/common/pkgs/storage/types/bypass.go index 6180b7f..9bcf945 100644 --- a/common/pkgs/storage/types/bypass.go +++ b/common/pkgs/storage/types/bypass.go @@ -1,13 +1,13 @@ package types import ( - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) // 能通过一个Http请求直接访问文件 // 仅用于分片存储。 type HTTPShardRead interface { - MakeHTTPReadRequest(fileHash jcsypes.FileHash) (HTTPRequest, error) + MakeHTTPReadRequest(fileHash jcstypes.FileHash) (HTTPRequest, error) } type HTTPRequest struct { diff --git a/common/pkgs/storage/types/empty_builder.go b/common/pkgs/storage/types/empty_builder.go index 0d78f8a..84e5781 100644 --- a/common/pkgs/storage/types/empty_builder.go +++ b/common/pkgs/storage/types/empty_builder.go @@ -3,11 +3,11 @@ package types import ( "fmt" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type EmptyBuilder struct { - Detail jcsypes.UserSpaceDetail + Detail jcstypes.UserSpaceDetail } func (b *EmptyBuilder) FeatureDesc() FeatureDesc { diff --git a/common/pkgs/storage/types/s2s.go b/common/pkgs/storage/types/s2s.go index 902d719..ccd4850 100644 --- a/common/pkgs/storage/types/s2s.go +++ b/common/pkgs/storage/types/s2s.go @@ -3,13 +3,13 @@ package types import ( "context" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type S2STransfer interface { // 【静态方法】判断是否能从指定的源存储中直传到当前存储的目的路径。仅在生成计划时使用 - CanTransfer(src, dst *jcsypes.UserSpaceDetail) bool + CanTransfer(src, dst *jcstypes.UserSpaceDetail) bool // 从远端获取文件并保存到本地路径 - Transfer(ctx context.Context, src *jcsypes.UserSpaceDetail, srcPath jcsypes.JPath, dstPath jcsypes.JPath) (FileInfo, error) + Transfer(ctx context.Context, src *jcstypes.UserSpaceDetail, srcPath jcstypes.JPath, dstPath jcstypes.JPath) (FileInfo, error) Close() } diff --git a/common/pkgs/storage/types/shard_store.go b/common/pkgs/storage/types/shard_store.go index 95806da..74cfb32 100644 --- a/common/pkgs/storage/types/shard_store.go +++ b/common/pkgs/storage/types/shard_store.go @@ -1,7 +1,7 @@ package types import ( - jcsypes 
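
The renamed BaseStore contract keeps its documented semantics: ReadDir walks the tree in directory order and Next signals exhaustion (or a missing path) with io.EOF. The DirReader interface body is outside these hunks, so the sketch below assumes only the `Next() (DirEntry, error)` shape that the S3 `DirReader` above implements; it is an illustrative consumer, not code from the patch.

```go
package example

import (
	"io"

	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
)

// collectEntries drains a DirReader under the io.EOF convention described in
// the BaseStore comments: Next keeps yielding entries and returns io.EOF when
// nothing is left (or the requested path does not exist).
func collectEntries(r types.DirReader) ([]types.DirEntry, error) {
	var entries []types.DirEntry
	for {
		entry, err := r.Next()
		if err == io.EOF {
			return entries, nil
		}
		if err != nil {
			return nil, err
		}
		entries = append(entries, entry)
	}
}
```
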
"gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) const ShardStoreWorkingDir = "shards" @@ -10,13 +10,13 @@ type ShardStore interface { Start(ch *StorageEventChan) Stop() // 将存储系统中已有的文件作为分片纳入管理范围 - Store(path jcsypes.JPath, hash jcsypes.FileHash, size int64) (FileInfo, error) + Store(path jcstypes.JPath, hash jcstypes.FileHash, size int64) (FileInfo, error) // 获得指定文件信息 - Info(fileHash jcsypes.FileHash) (FileInfo, error) + Info(fileHash jcstypes.FileHash) (FileInfo, error) // 获取所有文件信息,尽量保证操作是原子的 ListAll() ([]FileInfo, error) // 垃圾清理。只保留availables中的文件,删除其他文件 - GC(avaiables []jcsypes.FileHash) error + GC(avaiables []jcstypes.FileHash) error // 获得存储系统信息 Stats() Stats } diff --git a/common/pkgs/storage/types/types.go b/common/pkgs/storage/types/types.go index fd70d4d..b8fee85 100644 --- a/common/pkgs/storage/types/types.go +++ b/common/pkgs/storage/types/types.go @@ -4,7 +4,7 @@ import ( "errors" "gitlink.org.cn/cloudream/common/pkgs/async" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) const TempWorkingDir = "temp" @@ -43,9 +43,9 @@ type FeatureDesc struct{} type FileInfo struct { // 分片在存储系统中的路径,可以通过BaseStore读取的 - Path jcsypes.JPath + Path jcstypes.JPath // 文件大小 Size int64 // 分片的哈希值,不一定有值,根据来源不同,可能为空 - Hash jcsypes.FileHash + Hash jcstypes.FileHash } diff --git a/common/pkgs/storage/types/utils.go b/common/pkgs/storage/types/utils.go index 512ac0f..6d22f24 100644 --- a/common/pkgs/storage/types/utils.go +++ b/common/pkgs/storage/types/utils.go @@ -1,11 +1,10 @@ package types import ( - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) -func FindFeature[T cortypes.StorageFeature](detail *jcsypes.UserSpaceDetail) T { +func FindFeature[T jcstypes.StorageFeature](detail *jcstypes.UserSpaceDetail) T { for _, f := range detail.UserSpace.Features { f2, ok := f.(T) if ok { @@ -17,7 +16,7 @@ func FindFeature[T cortypes.StorageFeature](detail *jcsypes.UserSpaceDetail) T { return def } -func MakeTempDirPath(detail *jcsypes.UserSpaceDetail, comps ...string) jcsypes.JPath { +func MakeTempDirPath(detail *jcstypes.UserSpaceDetail, comps ...string) jcstypes.JPath { p := detail.UserSpace.WorkingDir.Clone() p.Push(TempWorkingDir) p.ConcatComps(comps) diff --git a/common/types/types.go b/common/types/client.go similarity index 93% rename from common/types/types.go rename to common/types/client.go index deca657..24c88d4 100644 --- a/common/types/types.go +++ b/common/types/client.go @@ -6,7 +6,6 @@ import ( "github.com/samber/lo" "gitlink.org.cn/cloudream/common/utils/sort2" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" ) const ( @@ -75,13 +74,13 @@ type UserSpace struct { // 用户空间名称 Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"` // 用户空间所在的存储服务配置 - Storage cortypes.StorageType `gorm:"column:Storage; type:json; not null; serializer:union" json:"storage"` + Storage StorageType `gorm:"column:Storage; type:json; not null; serializer:union" json:"storage"` // 用户在指定存储节点的凭证信息,比如用户账户,AK/SK等 - Credential cortypes.StorageCredential `gorm:"column:Credential; type:json; not null; serializer:union" json:"credential"` + Credential StorageCredential `gorm:"column:Credential; type:json; not null; serializer:union" json:"credential"` // 用户空间的分片存储配置,如果为空,则表示不使用分片存储 - ShardStore *cortypes.ShardStoreUserConfig 
`gorm:"column:ShardStore; type:json; serializer:json" json:"shardStore"` + ShardStore *ShardStoreUserConfig `gorm:"column:ShardStore; type:json; serializer:json" json:"shardStore"` // 存储服务特性功能的配置 - Features []cortypes.StorageFeature `json:"features" gorm:"column:Features; type:json; serializer:union"` + Features []StorageFeature `json:"features" gorm:"column:Features; type:json; serializer:union"` // 各种组件保存数据的根目录。组件工作过程中都会以这个目录为根(除了BaseStore)。 WorkingDir JPath `gorm:"column:WorkingDir; type:varchar(1024); not null; serializer:string" json:"workingDir"` // 工作目录在存储系统中的真实路径。当工作路径在挂载点内时,这个字段记录的是挂载背后的真实路径。部分直接与存储系统交互的组件需要知道真实路径。 @@ -238,9 +237,9 @@ func (o *ObjectDetail) ContainsPinned(userSpaceID UserSpaceID) bool { } type UserSpaceDetail struct { - UserID cortypes.UserID + UserID UserID UserSpace UserSpace - RecommendHub *cortypes.Hub + RecommendHub *Hub } func (d UserSpaceDetail) String() string { diff --git a/coordinator/types/types.go b/common/types/coordinator.go similarity index 100% rename from coordinator/types/types.go rename to common/types/coordinator.go diff --git a/common/types/datamap/datamap.go b/common/types/datamap/datamap.go index d7c2b59..52b4365 100644 --- a/common/types/datamap/datamap.go +++ b/common/types/datamap/datamap.go @@ -6,8 +6,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/types" "gitlink.org.cn/cloudream/common/utils/serder" - jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) // 系统事件 @@ -52,7 +51,7 @@ func (s *SourceCoordinator) String() string { type SourceHub struct { serder.Metadata `union:"Hub"` Type string `json:"type"` - HubID cortypes.HubID `json:"hubID"` + HubID jcstypes.HubID `json:"hubID"` HubName string `json:"hubName"` } @@ -71,7 +70,7 @@ func (s *SourceHub) String() string { type SourceClient struct { serder.Metadata `union:"Client"` Type string `json:"type"` - UserID cortypes.UserID `json:"userID"` + UserID jcstypes.UserID `json:"userID"` } func (s *SourceClient) GetSourceType() string { @@ -123,7 +122,7 @@ var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[SysEven type BodyNewHub struct { serder.Metadata `union:"NewHub"` Type string `json:"type"` - Info cortypes.Hub `json:"info"` + Info jcstypes.Hub `json:"info"` } func (b *BodyNewHub) GetBodyType() string { @@ -138,7 +137,7 @@ func (b *BodyNewHub) OnUnionSerializing() { type BodyHubUpdated struct { serder.Metadata `union:"HubUpdated"` Type string `json:"type"` - Info cortypes.Hub `json:"info"` + Info jcstypes.Hub `json:"info"` } func (b *BodyHubUpdated) GetBodyType() string { @@ -153,7 +152,7 @@ func (b *BodyHubUpdated) OnUnionSerializing() { type BodyHubDeleted struct { serder.Metadata `union:"HubDeleted"` Type string `json:"type"` - HubID cortypes.HubID `json:"hubID"` + HubID jcstypes.HubID `json:"hubID"` } func (b *BodyHubDeleted) GetBodyType() string { @@ -168,8 +167,8 @@ func (b *BodyHubDeleted) OnUnionSerializing() { // 新增Storage的事件 type BodyNewUserSpace struct { serder.Metadata `union:"NewUserSpace"` - Info jcsypes.UserSpace `json:"info"` - Type string `json:"type"` + Info jcstypes.UserSpace `json:"info"` + Type string `json:"type"` } func (b *BodyNewUserSpace) GetBodyType() string { @@ -183,8 +182,8 @@ func (b *BodyNewUserSpace) OnUnionSerializing() { // Storage信息更新的事件 type BodyUserSpaceUpdated struct { serder.Metadata `union:"UserSpaceUpdated"` - Type string `json:"type"` - Info jcsypes.UserSpace `json:"info"` + Type string 
`json:"type"` + Info jcstypes.UserSpace `json:"info"` } func (b *BodyUserSpaceUpdated) GetBodyType() string { @@ -198,8 +197,8 @@ func (b *BodyUserSpaceUpdated) OnUnionSerializing() { // Storage删除的事件 type BodyUserSpaceDeleted struct { serder.Metadata `union:"UserSpaceDeleted"` - Type string `json:"type"` - UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID"` + Type string `json:"type"` + UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID"` } func (b *BodyUserSpaceDeleted) GetBodyType() string { @@ -215,7 +214,7 @@ func (b *BodyUserSpaceDeleted) OnUnionSerializing() { type BodyStorageStats struct { serder.Metadata `union:"StorageStats"` Type string `json:"type"` - StorageID jcsypes.StorageID `json:"storageID"` + StorageID jcstypes.StorageID `json:"storageID"` DataCount int64 `json:"dataCount"` } @@ -231,8 +230,8 @@ func (b *BodyStorageStats) OnUnionSerializing() { type BodyHubTransferStats struct { serder.Metadata `union:"HubTransferStats"` Type string `json:"type"` - SourceHubID cortypes.HubID `json:"sourceHubID"` - TargetHubID cortypes.HubID `json:"targetHubID"` + SourceHubID jcstypes.HubID `json:"sourceHubID"` + TargetHubID jcstypes.HubID `json:"targetHubID"` Send DataTrans `json:"send"` StartTimestamp time.Time `json:"startTimestamp"` EndTimestamp time.Time `json:"endTimestamp"` @@ -259,8 +258,8 @@ type DataTrans struct { type BodyHubStorageTransferStats struct { serder.Metadata `union:"HubStorageTransferStats"` Type string `json:"type"` - HubID cortypes.HubID `json:"hubID"` - StorageID jcsypes.StorageID `json:"storageID"` + HubID jcstypes.HubID `json:"hubID"` + StorageID jcstypes.StorageID `json:"storageID"` Send DataTrans `json:"send"` Receive DataTrans `json:"receive"` StartTimestamp time.Time `json:"startTimestamp"` @@ -279,10 +278,10 @@ func (b *BodyHubStorageTransferStats) OnUnionSerializing() { // 块传输的事件 type BodyBlockTransfer struct { serder.Metadata `union:"BlockTransfer"` - Type string `json:"type"` - ObjectID jcsypes.ObjectID `json:"objectID"` - PackageID jcsypes.PackageID `json:"packageID"` - BlockChanges []BlockChange `json:"blockChanges"` + Type string `json:"type"` + ObjectID jcstypes.ObjectID `json:"objectID"` + PackageID jcstypes.PackageID `json:"packageID"` + BlockChanges []BlockChange `json:"blockChanges"` } func (b *BodyBlockTransfer) GetBodyType() string { @@ -311,24 +310,24 @@ const ( ) type Block struct { - BlockType string `json:"blockType"` - Index int `json:"index"` - UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID"` + BlockType string `json:"blockType"` + Index int `json:"index"` + UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID"` } type DataTransfer struct { - SourceUserSpaceID jcsypes.UserSpaceID `json:"sourceUserSpaceID"` - TargetUserSpaceID jcsypes.UserSpaceID `json:"targetUserSpaceID"` - TransferBytes int64 `json:"transferBytes"` + SourceUserSpaceID jcstypes.UserSpaceID `json:"sourceUserSpaceID"` + TargetUserSpaceID jcstypes.UserSpaceID `json:"targetUserSpaceID"` + TransferBytes int64 `json:"transferBytes"` } type BlockChangeClone struct { serder.Metadata `union:"Clone"` - Type string `json:"type"` - BlockType string `json:"blockType"` - Index int `json:"index"` - SourceUserSpaceID jcsypes.UserSpaceID `json:"sourceUserSpaceID"` - TargetUserSpaceID jcsypes.UserSpaceID `json:"targetUserSpaceID"` - TransferBytes int64 `json:"transferBytes"` + Type string `json:"type"` + BlockType string `json:"blockType"` + Index int `json:"index"` + SourceUserSpaceID jcstypes.UserSpaceID `json:"sourceUserSpaceID"` + TargetUserSpaceID jcstypes.UserSpaceID 
`json:"targetUserSpaceID"` + TransferBytes int64 `json:"transferBytes"` } func (b *BlockChangeClone) GetBlockChangeType() string { @@ -341,9 +340,9 @@ func (b *BlockChangeClone) OnUnionSerializing() { type BlockChangeDeleted struct { serder.Metadata `union:"Deleted"` - Type string `json:"type"` - Index int `json:"index"` - UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID"` + Type string `json:"type"` + Index int `json:"index"` + UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID"` } func (b *BlockChangeDeleted) GetBlockChangeType() string { @@ -374,11 +373,11 @@ func (b *BlockChangeEnDecode) OnUnionSerializing() { type BodyBlockDistribution struct { serder.Metadata `union:"BlockDistribution"` Type string `json:"type"` - ObjectID jcsypes.ObjectID `json:"objectID"` - PackageID jcsypes.PackageID `json:"packageID"` + ObjectID jcstypes.ObjectID `json:"objectID"` + PackageID jcstypes.PackageID `json:"packageID"` Path string `json:"path"` Size int64 `json:"size"` - FileHash jcsypes.FileHash `json:"fileHash"` + FileHash jcstypes.FileHash `json:"fileHash"` FaultTolerance float64 `json:"faultTolerance"` Redundancy float64 `json:"redundancy"` AvgAccessCost float64 `json:"avgAccessCost"` @@ -395,16 +394,16 @@ func (b *BodyBlockDistribution) OnUnionSerializing() { } type BlockDistributionObjectInfo struct { - BlockType string `json:"type"` - Index int `json:"index"` - UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID"` + BlockType string `json:"type"` + Index int `json:"index"` + UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID"` } // 新增或者重新上传Object的事件 type BodyNewOrUpdateObject struct { serder.Metadata `union:"NewOrUpdateObject"` Type string `json:"type"` - Info jcsypes.Object `json:"info"` + Info jcstypes.Object `json:"info"` BlockDistribution []BlockDistributionObjectInfo `json:"blockDistribution"` } @@ -419,8 +418,8 @@ func (b *BodyNewOrUpdateObject) OnUnionSerializing() { // Object的基本信息更新的事件 type BodyObjectInfoUpdated struct { serder.Metadata `union:"ObjectInfoUpdated"` - Type string `json:"type"` - Object jcsypes.Object `json:"object"` + Type string `json:"type"` + Object jcstypes.Object `json:"object"` } func (b *BodyObjectInfoUpdated) GetBodyType() string { @@ -434,8 +433,8 @@ func (b *BodyObjectInfoUpdated) OnUnionSerializing() { // Object删除的事件 type BodyObjectDeleted struct { serder.Metadata `union:"ObjectDeleted"` - Type string `json:"type"` - ObjectID jcsypes.ObjectID `json:"objectID"` + Type string `json:"type"` + ObjectID jcstypes.ObjectID `json:"objectID"` } func (b *BodyObjectDeleted) GetBodyType() string { @@ -449,8 +448,8 @@ func (b *BodyObjectDeleted) OnUnionSerializing() { // 新增Package的事件 type BodyNewPackage struct { serder.Metadata `union:"NewPackage"` - Type string `json:"type"` - Info jcsypes.Package `json:"info"` + Type string `json:"type"` + Info jcstypes.Package `json:"info"` } func (b *BodyNewPackage) GetBodyType() string { @@ -464,11 +463,11 @@ func (b *BodyNewPackage) OnUnionSerializing() { // Package克隆的事件 type BodyPackageCloned struct { serder.Metadata `union:"PackageCloned"` - Type string `json:"type"` - SourcePackageID jcsypes.PackageID `json:"sourcePackageID"` - NewPackage jcsypes.Package `json:"newPackage"` - SourceObjectIDs []jcsypes.ObjectID `json:"sourceObjectIDs"` // 原本的ObjectID - NewObjectIDs []jcsypes.ObjectID `json:"newObjectIDs"` // 复制后的新ObjectID,与SourceObjectIDs一一对应 + Type string `json:"type"` + SourcePackageID jcstypes.PackageID `json:"sourcePackageID"` + NewPackage jcstypes.Package `json:"newPackage"` + SourceObjectIDs []jcstypes.ObjectID 
`json:"sourceObjectIDs"` // 原本的ObjectID + NewObjectIDs []jcstypes.ObjectID `json:"newObjectIDs"` // 复制后的新ObjectID,与SourceObjectIDs一一对应 } func (b *BodyPackageCloned) GetBodyType() string { @@ -482,8 +481,8 @@ func (b *BodyPackageCloned) OnUnionSerializing() { // Package删除的事件 type BodyPackageDeleted struct { serder.Metadata `union:"PackageDeleted"` - Type string `json:"type"` - PackageID jcsypes.PackageID `json:"packageID"` + Type string `json:"type"` + PackageID jcstypes.PackageID `json:"packageID"` } func (b *BodyPackageDeleted) GetBodyType() string { @@ -497,8 +496,8 @@ func (b *BodyPackageDeleted) OnUnionSerializing() { // 新增Bucket的事件 type BodyNewBucket struct { serder.Metadata `union:"NewBucket"` - Type string `json:"type"` - Info jcsypes.Bucket `json:"info"` + Type string `json:"type"` + Info jcstypes.Bucket `json:"info"` } func (b *BodyNewBucket) GetBodyType() string { @@ -512,8 +511,8 @@ func (b *BodyNewBucket) OnUnionSerializing() { // Bucket删除的事件 type BodyBucketDeleted struct { serder.Metadata `union:"BucketDeleted"` - Type string `json:"type"` - BucketID jcsypes.BucketID `json:"bucketID"` + Type string `json:"type"` + BucketID jcstypes.BucketID `json:"bucketID"` } func (b *BodyBucketDeleted) GetBodyType() string { diff --git a/coordinator/types/location.go b/common/types/location.go similarity index 100% rename from coordinator/types/location.go rename to common/types/location.go diff --git a/coordinator/types/storage.go b/common/types/storage.go similarity index 100% rename from coordinator/types/storage.go rename to common/types/storage.go diff --git a/coordinator/types/storage_credential.go b/common/types/storage_credential.go similarity index 100% rename from coordinator/types/storage_credential.go rename to common/types/storage_credential.go diff --git a/coordinator/types/storage_feature.go b/common/types/storage_feature.go similarity index 100% rename from coordinator/types/storage_feature.go rename to common/types/storage_feature.go diff --git a/coordinator/internal/accesstoken/accesstoken.go b/coordinator/internal/accesstoken/accesstoken.go index 5c3b785..30ad3ec 100644 --- a/coordinator/internal/accesstoken/accesstoken.go +++ b/coordinator/internal/accesstoken/accesstoken.go @@ -2,8 +2,8 @@ package accesstoken import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/accesstoken" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/coordinator/internal/db" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" "gorm.io/gorm" ) @@ -25,13 +25,13 @@ func New(db *db.DB) *Cache { return c } -func (c *Cache) load(key accesstoken.CacheKey) (cortypes.UserAccessToken, error) { +func (c *Cache) load(key accesstoken.CacheKey) (jcstypes.UserAccessToken, error) { token, err := c.db.UserAccessToken().GetByID(c.db.DefCtx(), key.UserID, key.TokenID) if err == gorm.ErrRecordNotFound { - return cortypes.UserAccessToken{}, accesstoken.ErrTokenNotFound + return jcstypes.UserAccessToken{}, accesstoken.ErrTokenNotFound } if err != nil { - return cortypes.UserAccessToken{}, err + return jcstypes.UserAccessToken{}, err } return token, nil diff --git a/coordinator/internal/cmd/migrate.go b/coordinator/internal/cmd/migrate.go index e86d3ac..74df83c 100644 --- a/coordinator/internal/cmd/migrate.go +++ b/coordinator/internal/cmd/migrate.go @@ -5,8 +5,8 @@ import ( "os" "github.com/spf13/cobra" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/coordinator/internal/config" - cortypes 
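
Taken together with the file renames above (coordinator/types/*.go moving under common/types), the change is a mechanical consolidation: the former `cortypes` import collapses into `common/types`, and the old `jcsypes` alias becomes `jcstypes`. For downstream code the migration is an import-path swap plus an alias substitution; a minimal sketch, with a placeholder package name and variable that are not part of the patch:

```go
package migrationexample

// Before the consolidation, coordinator-side types were imported separately:
//
//	import cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
//	var hub cortypes.Hub
//
// After it, the same declaration resolves through the merged package, and the
// old jcsypes alias is spelled jcstypes everywhere.

import jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"

// hub exists only to show the alias swap compiling; it is not from the patch.
var hub jcstypes.Hub
```
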
"gitlink.org.cn/cloudream/jcs-pub/coordinator/types" "gorm.io/driver/mysql" "gorm.io/gorm" ) @@ -38,12 +38,12 @@ func migrate(configPath string) { } db = db.Set("gorm:table_options", "CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci") - migrateOne(db, cortypes.HubConnectivity{}) - migrateOne(db, cortypes.Hub{}) - migrateOne(db, cortypes.HubLocation{}) - migrateOne(db, cortypes.User{}) - migrateOne(db, cortypes.UserAccessToken{}) - migrateOne(db, cortypes.LoadedAccessToken{}) + migrateOne(db, jcstypes.HubConnectivity{}) + migrateOne(db, jcstypes.Hub{}) + migrateOne(db, jcstypes.HubLocation{}) + migrateOne(db, jcstypes.User{}) + migrateOne(db, jcstypes.UserAccessToken{}) + migrateOne(db, jcstypes.LoadedAccessToken{}) fmt.Println("migrate success") } diff --git a/coordinator/internal/cmd/serve.go b/coordinator/internal/cmd/serve.go index 24fe6ce..b912e8b 100644 --- a/coordinator/internal/cmd/serve.go +++ b/coordinator/internal/cmd/serve.go @@ -58,7 +58,7 @@ func serve(configPath string) { } // 初始化系统事件发布器 - // evtPub, err := sysevent.NewPublisher(sysevent.ConfigFromMQConfig(config.Cfg().RabbitMQ), &cortypes.SourceCoordinator{}) + // evtPub, err := sysevent.NewPublisher(sysevent.ConfigFromMQConfig(config.Cfg().RabbitMQ), &jcstypes.SourceCoordinator{}) // if err != nil { // logger.Errorf("new sysevent publisher: %v", err) // os.Exit(1) diff --git a/coordinator/internal/db/hub.go b/coordinator/internal/db/hub.go index 767d701..e2460be 100644 --- a/coordinator/internal/db/hub.go +++ b/coordinator/internal/db/hub.go @@ -3,7 +3,7 @@ package db import ( "time" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type HubDB struct { @@ -14,31 +14,31 @@ func (db *DB) Hub() *HubDB { return &HubDB{DB: db} } -func (*HubDB) GetAllHubs(ctx SQLContext) ([]cortypes.Hub, error) { - var ret []cortypes.Hub +func (*HubDB) GetAllHubs(ctx SQLContext) ([]jcstypes.Hub, error) { + var ret []jcstypes.Hub err := ctx.Table("Hub").Find(&ret).Error return ret, err } -func (*HubDB) GetByID(ctx SQLContext, hubID cortypes.HubID) (cortypes.Hub, error) { - var ret cortypes.Hub +func (*HubDB) GetByID(ctx SQLContext, hubID jcstypes.HubID) (jcstypes.Hub, error) { + var ret jcstypes.Hub err := ctx.Table("Hub").Where("HubID = ?", hubID).Find(&ret).Error return ret, err } -func (*HubDB) BatchGetByID(ctx SQLContext, hubIDs []cortypes.HubID) ([]cortypes.Hub, error) { - var ret []cortypes.Hub +func (*HubDB) BatchGetByID(ctx SQLContext, hubIDs []jcstypes.HubID) ([]jcstypes.Hub, error) { + var ret []jcstypes.Hub err := ctx.Table("Hub").Where("HubID IN (?)", hubIDs).Find(&ret).Error return ret, err } // UpdateState 更新状态,并且设置上次上报时间为现在 -func (*HubDB) UpdateState(ctx SQLContext, hubID cortypes.HubID, state string) error { +func (*HubDB) UpdateState(ctx SQLContext, hubID jcstypes.HubID, state string) error { err := ctx. - Model(&cortypes.Hub{}). + Model(&jcstypes.Hub{}). Where("HubID = ?", hubID). 
Updates(map[string]interface{}{ "State": state, diff --git a/coordinator/internal/db/hub_connectivity.go b/coordinator/internal/db/hub_connectivity.go index 62945f4..efbf840 100644 --- a/coordinator/internal/db/hub_connectivity.go +++ b/coordinator/internal/db/hub_connectivity.go @@ -1,7 +1,7 @@ package db import ( - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm/clause" ) @@ -13,18 +13,18 @@ func (db *DB) HubConnectivity() *HubConnectivityDB { return &HubConnectivityDB{DB: db} } -func (db *HubConnectivityDB) BatchGetByFromHub(ctx SQLContext, fromHubIDs []cortypes.HubID) ([]cortypes.HubConnectivity, error) { +func (db *HubConnectivityDB) BatchGetByFromHub(ctx SQLContext, fromHubIDs []jcstypes.HubID) ([]jcstypes.HubConnectivity, error) { if len(fromHubIDs) == 0 { return nil, nil } - var ret []cortypes.HubConnectivity + var ret []jcstypes.HubConnectivity err := ctx.Table("HubConnectivity").Where("FromHubID IN (?)", fromHubIDs).Find(&ret).Error return ret, err } -func (db *HubConnectivityDB) BatchUpdateOrCreate(ctx SQLContext, cons []cortypes.HubConnectivity) error { +func (db *HubConnectivityDB) BatchUpdateOrCreate(ctx SQLContext, cons []jcstypes.HubConnectivity) error { if len(cons) == 0 { return nil } diff --git a/coordinator/internal/db/hub_location.go b/coordinator/internal/db/hub_location.go index 3a90e26..2f3bce7 100644 --- a/coordinator/internal/db/hub_location.go +++ b/coordinator/internal/db/hub_location.go @@ -1,7 +1,7 @@ package db import ( - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type HubLocationDB struct { @@ -12,14 +12,14 @@ func (db *DB) HubLocation() *HubLocationDB { return &HubLocationDB{DB: db} } -func (*HubLocationDB) GetByHubID(ctx SQLContext, id cortypes.HubID) ([]cortypes.HubLocation, error) { - var ret []cortypes.HubLocation +func (*HubLocationDB) GetByHubID(ctx SQLContext, id jcstypes.HubID) ([]jcstypes.HubLocation, error) { + var ret []jcstypes.HubLocation err := ctx.Where("HubID = ?", id).Find(&ret).Error return ret, err } -func (*HubLocationDB) GetAll(ctx SQLContext) ([]cortypes.HubLocation, error) { - var ret []cortypes.HubLocation +func (*HubLocationDB) GetAll(ctx SQLContext) ([]jcstypes.HubLocation, error) { + var ret []jcstypes.HubLocation err := ctx.Find(&ret).Error return ret, err } diff --git a/coordinator/internal/db/loaded_access_token.go b/coordinator/internal/db/loaded_access_token.go index ab6a562..afd6b98 100644 --- a/coordinator/internal/db/loaded_access_token.go +++ b/coordinator/internal/db/loaded_access_token.go @@ -3,7 +3,7 @@ package db import ( "time" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm/clause" ) @@ -15,21 +15,21 @@ func (db *DB) LoadedAccessToken() *LoadedAccessTokenDB { return &LoadedAccessTokenDB{DB: db} } -func (db *LoadedAccessTokenDB) GetByUserIDAndTokenID(ctx SQLContext, userID cortypes.UserID, tokenID cortypes.AccessTokenID) ([]cortypes.LoadedAccessToken, error) { - var ret []cortypes.LoadedAccessToken +func (db *LoadedAccessTokenDB) GetByUserIDAndTokenID(ctx SQLContext, userID jcstypes.UserID, tokenID jcstypes.AccessTokenID) ([]jcstypes.LoadedAccessToken, error) { + var ret []jcstypes.LoadedAccessToken err := ctx.Table("LoadedAccessToken").Where("UserID = ? 
AND TokenID = ?", userID, tokenID).Find(&ret).Error return ret, err } -func (*LoadedAccessTokenDB) CreateOrUpdate(ctx SQLContext, token cortypes.LoadedAccessToken) error { +func (*LoadedAccessTokenDB) CreateOrUpdate(ctx SQLContext, token jcstypes.LoadedAccessToken) error { return ctx.Clauses(clause.OnConflict{ Columns: []clause.Column{{Name: "UserID"}, {Name: "TokenID"}, {Name: "HubID"}}, DoUpdates: clause.AssignmentColumns([]string{"LoadedAt"}), }).Create(token).Error } -func (*LoadedAccessTokenDB) GetExpired(ctx SQLContext, expireAt time.Time) ([]cortypes.LoadedAccessToken, error) { - var ret []cortypes.LoadedAccessToken +func (*LoadedAccessTokenDB) GetExpired(ctx SQLContext, expireAt time.Time) ([]jcstypes.LoadedAccessToken, error) { + var ret []jcstypes.LoadedAccessToken err := ctx.Table("LoadedAccessToken"). Select("LoadedAccessToken.*"). Joins("join UserAccessToken on UserAccessToken.UserID = LoadedAccessToken.UserID and UserAccessToken.TokenID = LoadedAccessToken.TokenID"). @@ -41,9 +41,9 @@ func (*LoadedAccessTokenDB) GetExpired(ctx SQLContext, expireAt time.Time) ([]co func (*LoadedAccessTokenDB) DeleteExpired(ctx SQLContext, expireAt time.Time) error { return ctx.Table("LoadedAccessToken"). Where("UserID in (select UserID from UserAccessToken where ExpiresAt < ?)", expireAt). - Delete(&cortypes.LoadedAccessToken{}).Error + Delete(&jcstypes.LoadedAccessToken{}).Error } -func (db *LoadedAccessTokenDB) DeleteAllByUserIDAndTokenID(ctx SQLContext, userID cortypes.UserID, tokenID cortypes.AccessTokenID) error { - return ctx.Table("LoadedAccessToken").Where("UserID = ? AND TokenID = ?", userID, tokenID).Delete(&cortypes.LoadedAccessToken{}).Error +func (db *LoadedAccessTokenDB) DeleteAllByUserIDAndTokenID(ctx SQLContext, userID jcstypes.UserID, tokenID jcstypes.AccessTokenID) error { + return ctx.Table("LoadedAccessToken").Where("UserID = ? 
AND TokenID = ?", userID, tokenID).Delete(&jcstypes.LoadedAccessToken{}).Error } diff --git a/coordinator/internal/db/user.go b/coordinator/internal/db/user.go index 08ceea7..01ceb77 100644 --- a/coordinator/internal/db/user.go +++ b/coordinator/internal/db/user.go @@ -1,7 +1,7 @@ package db import ( - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" ) @@ -13,32 +13,32 @@ func (db *DB) User() *UserDB { return &UserDB{DB: db} } -func (db *UserDB) GetByID(ctx SQLContext, userID cortypes.UserID) (cortypes.User, error) { - var ret cortypes.User +func (db *UserDB) GetByID(ctx SQLContext, userID jcstypes.UserID) (jcstypes.User, error) { + var ret jcstypes.User err := ctx.Table("User").Where("UserID = ?", userID).First(&ret).Error return ret, err } -func (db *UserDB) GetByAccount(ctx SQLContext, account string) (cortypes.User, error) { - var ret cortypes.User +func (db *UserDB) GetByAccount(ctx SQLContext, account string) (jcstypes.User, error) { + var ret jcstypes.User err := ctx.Table("User").Where("Account = ?", account).First(&ret).Error return ret, err } -func (db *UserDB) Create(ctx SQLContext, account string, password string, nickName string) (cortypes.User, error) { +func (db *UserDB) Create(ctx SQLContext, account string, password string, nickName string) (jcstypes.User, error) { _, err := db.GetByAccount(ctx, account) if err == nil { - return cortypes.User{}, gorm.ErrDuplicatedKey + return jcstypes.User{}, gorm.ErrDuplicatedKey } if err != gorm.ErrRecordNotFound { - return cortypes.User{}, err + return jcstypes.User{}, err } - user := cortypes.User{NickName: nickName, Account: account, Password: password} + user := jcstypes.User{NickName: nickName, Account: account, Password: password} err = ctx.Table("User").Create(&user).Error return user, err } -func (*UserDB) Delete(ctx SQLContext, userID cortypes.UserID) error { - return ctx.Table("User").Delete(&cortypes.User{UserID: userID}).Error +func (*UserDB) Delete(ctx SQLContext, userID jcstypes.UserID) error { + return ctx.Table("User").Delete(&jcstypes.User{UserID: userID}).Error } diff --git a/coordinator/internal/db/user_access_token.go b/coordinator/internal/db/user_access_token.go index a263055..a800d7a 100644 --- a/coordinator/internal/db/user_access_token.go +++ b/coordinator/internal/db/user_access_token.go @@ -3,7 +3,7 @@ package db import ( "time" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type UserAccessTokenDB struct { @@ -14,20 +14,20 @@ func (db *DB) UserAccessToken() *UserAccessTokenDB { return &UserAccessTokenDB{DB: db} } -func (db *UserAccessTokenDB) GetByID(ctx SQLContext, userID cortypes.UserID, tokenID cortypes.AccessTokenID) (cortypes.UserAccessToken, error) { - var ret cortypes.UserAccessToken +func (db *UserAccessTokenDB) GetByID(ctx SQLContext, userID jcstypes.UserID, tokenID jcstypes.AccessTokenID) (jcstypes.UserAccessToken, error) { + var ret jcstypes.UserAccessToken err := ctx.Table("UserAccessToken").Where("UserID = ? 
AND TokenID = ?", userID, tokenID).First(&ret).Error return ret, err } -func (*UserAccessTokenDB) Create(ctx SQLContext, token *cortypes.UserAccessToken) error { +func (*UserAccessTokenDB) Create(ctx SQLContext, token *jcstypes.UserAccessToken) error { return ctx.Table("UserAccessToken").Create(token).Error } -func (db *UserAccessTokenDB) DeleteByID(ctx SQLContext, userID cortypes.UserID, tokenID cortypes.AccessTokenID) error { - return ctx.Table("UserAccessToken").Where("UserID = ? AND TokenID = ?", userID, tokenID).Delete(&cortypes.UserAccessToken{}).Error +func (db *UserAccessTokenDB) DeleteByID(ctx SQLContext, userID jcstypes.UserID, tokenID jcstypes.AccessTokenID) error { + return ctx.Table("UserAccessToken").Where("UserID = ? AND TokenID = ?", userID, tokenID).Delete(&jcstypes.UserAccessToken{}).Error } func (*UserAccessTokenDB) DeleteExpired(ctx SQLContext, expireTime time.Time) error { - return ctx.Table("UserAccessToken").Where("ExpiresAt < ?", expireTime).Delete(&cortypes.UserAccessToken{}).Error + return ctx.Table("UserAccessToken").Where("ExpiresAt < ?", expireTime).Delete(&jcstypes.UserAccessToken{}).Error } diff --git a/coordinator/internal/repl/user.go b/coordinator/internal/repl/user.go index 7645e63..c619e04 100644 --- a/coordinator/internal/repl/user.go +++ b/coordinator/internal/repl/user.go @@ -10,9 +10,9 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/coordinator/internal/accesstoken" "gitlink.org.cn/cloudream/jcs-pub/coordinator/internal/db" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" "golang.org/x/crypto/bcrypt" "golang.org/x/term" "gorm.io/gorm" @@ -40,7 +40,7 @@ func init() { Short: "logout from a user account", Args: cobra.ExactArgs(2), Run: func(cmd *cobra.Command, args []string) { - userLogout(GetCmdCtx(cmd), args[0], cortypes.AccessTokenID(args[1])) + userLogout(GetCmdCtx(cmd), args[0], jcstypes.AccessTokenID(args[1])) }, } userCmd.AddCommand(logoutCmd) @@ -66,7 +66,7 @@ func userCreate(ctx *CommandContext, account string, nickName string) { return } - user, err := db.DoTx02(ctx.repl.db, func(tx db.SQLContext) (cortypes.User, error) { + user, err := db.DoTx02(ctx.repl.db, func(tx db.SQLContext) (jcstypes.User, error) { return ctx.repl.db.User().Create(tx, account, hex.EncodeToString(passHash), nickName) }) if err != nil { @@ -77,7 +77,7 @@ func userCreate(ctx *CommandContext, account string, nickName string) { fmt.Printf("user %s created\n", user.Account) } -func userLogout(ctx *CommandContext, account string, tokenID cortypes.AccessTokenID) { +func userLogout(ctx *CommandContext, account string, tokenID jcstypes.AccessTokenID) { acc, err := ctx.repl.db.User().GetByAccount(ctx.repl.db.DefCtx(), account) if err != nil { fmt.Printf("user %s not found\n", account) @@ -87,7 +87,7 @@ func userLogout(ctx *CommandContext, account string, tokenID cortypes.AccessToke log := logger.WithField("UserID", acc.UserID).WithField("TokenID", tokenID) d := ctx.repl.db - loaded, err := db.DoTx02(d, func(tx db.SQLContext) ([]cortypes.LoadedAccessToken, error) { + loaded, err := db.DoTx02(d, func(tx db.SQLContext) ([]jcstypes.LoadedAccessToken, error) { token, err := d.UserAccessToken().GetByID(tx, acc.UserID, tokenID) if err != nil { return nil, err @@ -124,7 +124,7 @@ func userLogout(ctx *CommandContext, account string, tokenID 
cortypes.AccessToke TokenID: tokenID, }) - var loadedHubIDs []cortypes.HubID + var loadedHubIDs []jcstypes.HubID for _, l := range loaded { loadedHubIDs = append(loadedHubIDs, l.HubID) } @@ -132,7 +132,7 @@ func userLogout(ctx *CommandContext, account string, tokenID cortypes.AccessToke notifyLoadedHubs(ctx, acc.UserID, tokenID, loadedHubIDs) } -func notifyLoadedHubs(ctx *CommandContext, userID cortypes.UserID, tokenID cortypes.AccessTokenID, loadedHubIDs []cortypes.HubID) { +func notifyLoadedHubs(ctx *CommandContext, userID jcstypes.UserID, tokenID jcstypes.AccessTokenID, loadedHubIDs []jcstypes.HubID) { log := logger.WithField("UserID", userID).WithField("TokenID", tokenID) d := ctx.repl.db @@ -144,7 +144,7 @@ func notifyLoadedHubs(ctx *CommandContext, userID cortypes.UserID, tokenID corty } for _, l := range loadedHubs { - addr, ok := l.Address.(*cortypes.GRPCAddressInfo) + addr, ok := l.Address.(*jcstypes.GRPCAddressInfo) if !ok { continue } diff --git a/coordinator/internal/rpc/hub.go b/coordinator/internal/rpc/hub.go index 813d089..75d9a33 100644 --- a/coordinator/internal/rpc/hub.go +++ b/coordinator/internal/rpc/hub.go @@ -8,7 +8,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func (svc *Service) GetHubConfig(ctx context.Context, msg *corrpc.GetHubConfig) (*corrpc.GetHubConfigResp, *rpc.CodeError) { @@ -24,7 +24,7 @@ func (svc *Service) GetHubConfig(ctx context.Context, msg *corrpc.GetHubConfig) } func (svc *Service) GetHubs(ctx context.Context, msg *corrpc.GetHubs) (*corrpc.GetHubsResp, *rpc.CodeError) { - var hubs []*cortypes.Hub + var hubs []*jcstypes.Hub if msg.HubIDs == nil { get, err := svc.db.Hub().GetAllHubs(svc.db.DefCtx()) @@ -45,7 +45,7 @@ func (svc *Service) GetHubs(ctx context.Context, msg *corrpc.GetHubs) (*corrpc.G return nil, rpc.Failed(errorcode.OperationFailed, fmt.Sprintf("batch get hubs by id: %v", err)) } - getMp := make(map[cortypes.HubID]cortypes.Hub) + getMp := make(map[jcstypes.HubID]jcstypes.Hub) for _, hub := range get { getMp[hub.HubID] = hub } diff --git a/coordinator/internal/rpc/storage.go b/coordinator/internal/rpc/storage.go index 9a230f6..2b3c612 100644 --- a/coordinator/internal/rpc/storage.go +++ b/coordinator/internal/rpc/storage.go @@ -7,23 +7,23 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/coordinator/internal/db" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" ) func (svc *Service) SelectStorageHub(ctx context.Context, msg *corrpc.SelectStorageHub) (*corrpc.SelectStorageHubResp, *rpc.CodeError) { d := svc.db - resp, err := db.DoTx02(d, func(tx db.SQLContext) ([]*cortypes.Hub, error) { + resp, err := db.DoTx02(d, func(tx db.SQLContext) ([]*jcstypes.Hub, error) { allLoc, err := d.HubLocation().GetAll(tx) if err != nil { return nil, err } - stgHubIDs := make([]cortypes.HubID, 0, len(msg.Storages)) + stgHubIDs := make([]jcstypes.HubID, 0, len(msg.Storages)) for _, stg := range msg.Storages { stgLoc := stg.GetLocation() - var matchedHubID cortypes.HubID + var matchedHubID jcstypes.HubID var matchedScore int for _, loc := range allLoc { sc := 
matchLocation(stgLoc, loc) @@ -41,13 +41,13 @@ func (svc *Service) SelectStorageHub(ctx context.Context, msg *corrpc.SelectStor return nil, err } - hubMap := make(map[cortypes.HubID]*cortypes.Hub) + hubMap := make(map[jcstypes.HubID]*jcstypes.Hub) for _, hub := range hubs { h := hub hubMap[hub.HubID] = &h } - resp := make([]*cortypes.Hub, len(msg.Storages)) + resp := make([]*jcstypes.Hub, len(msg.Storages)) for i := range msg.Storages { resp[i] = hubMap[stgHubIDs[i]] } @@ -68,7 +68,7 @@ func (svc *Service) SelectStorageHub(ctx context.Context, msg *corrpc.SelectStor // 1. 按照StorageName、Location顺序检查StorageLocation和HubLocation // 2. "*"代表通配符,匹配任意值,如果匹配到了通配,那么就直接结束匹配 // 3. 匹配越精确,分数越高 -func matchLocation(loc cortypes.Location, hubLoc cortypes.HubLocation) int { +func matchLocation(loc jcstypes.Location, hubLoc jcstypes.HubLocation) int { if hubLoc.StorageName == "*" { return 1 } diff --git a/coordinator/internal/rpc/user.go b/coordinator/internal/rpc/user.go index a64a519..bbe6aec 100644 --- a/coordinator/internal/rpc/user.go +++ b/coordinator/internal/rpc/user.go @@ -15,8 +15,8 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/coordinator/internal/db" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" "golang.org/x/crypto/bcrypt" "gorm.io/gorm" ) @@ -54,9 +54,9 @@ func (svc *Service) UserLogin(ctx context.Context, msg *corrpc.UserLogin) (*corr pubKeyStr := hex.EncodeToString(pubKey) nowTime := time.Now() - token := cortypes.UserAccessToken{ + token := jcstypes.UserAccessToken{ UserID: user.UserID, - TokenID: cortypes.AccessTokenID(uuid.NewString()), + TokenID: jcstypes.AccessTokenID(uuid.NewString()), PublicKey: pubKeyStr, ExpiresAt: nowTime.Add(time.Hour), CreatedAt: nowTime, @@ -92,9 +92,9 @@ func (svc *Service) UserRefreshToken(ctx context.Context, msg *corrpc.UserRefres pubKeyStr := hex.EncodeToString(pubKey) nowTime := time.Now() - token := cortypes.UserAccessToken{ + token := jcstypes.UserAccessToken{ UserID: authInfo.UserID, - TokenID: cortypes.AccessTokenID(uuid.NewString()), + TokenID: jcstypes.AccessTokenID(uuid.NewString()), PublicKey: pubKeyStr, ExpiresAt: nowTime.Add(time.Hour), CreatedAt: nowTime, @@ -122,7 +122,7 @@ func (svc *Service) UserLogout(ctx context.Context, msg *corrpc.UserLogout) (*co log := logger.WithField("UserID", authInfo.UserID).WithField("TokenID", authInfo.AccessTokenID) - loaded, err := db.DoTx02(svc.db, func(tx db.SQLContext) ([]cortypes.LoadedAccessToken, error) { + loaded, err := db.DoTx02(svc.db, func(tx db.SQLContext) ([]jcstypes.LoadedAccessToken, error) { token, err := svc.db.UserAccessToken().GetByID(tx, authInfo.UserID, authInfo.AccessTokenID) if err != nil { return nil, err @@ -159,7 +159,7 @@ func (svc *Service) UserLogout(ctx context.Context, msg *corrpc.UserLogout) (*co TokenID: authInfo.AccessTokenID, }) - var loadedHubIDs []cortypes.HubID + var loadedHubIDs []jcstypes.HubID for _, l := range loaded { loadedHubIDs = append(loadedHubIDs, l.HubID) } @@ -169,7 +169,7 @@ func (svc *Service) UserLogout(ctx context.Context, msg *corrpc.UserLogout) (*co return &corrpc.UserLogoutResp{}, nil } -func (svc *Service) notifyLoadedHubs(userID cortypes.UserID, tokenID cortypes.AccessTokenID, loadedHubIDs []cortypes.HubID) { +func (svc *Service) notifyLoadedHubs(userID jcstypes.UserID, tokenID 
jcstypes.AccessTokenID, loadedHubIDs []jcstypes.HubID) { log := logger.WithField("UserID", userID).WithField("TokenID", tokenID) loadedHubs, err := svc.db.Hub().BatchGetByID(svc.db.DefCtx(), loadedHubIDs) @@ -179,7 +179,7 @@ func (svc *Service) notifyLoadedHubs(userID cortypes.UserID, tokenID cortypes.Ac } for _, l := range loadedHubs { - addr, ok := l.Address.(*cortypes.GRPCAddressInfo) + addr, ok := l.Address.(*jcstypes.GRPCAddressInfo) if !ok { continue } @@ -195,20 +195,20 @@ func (svc *Service) notifyLoadedHubs(userID cortypes.UserID, tokenID cortypes.Ac } func (svc *Service) HubLoadAccessToken(ctx context.Context, msg *corrpc.HubLoadAccessToken) (*corrpc.HubLoadAccessTokenResp, *rpc.CodeError) { - token, err := db.DoTx02(svc.db, func(tx db.SQLContext) (cortypes.UserAccessToken, error) { + token, err := db.DoTx02(svc.db, func(tx db.SQLContext) (jcstypes.UserAccessToken, error) { token, err := svc.db.UserAccessToken().GetByID(tx, msg.UserID, msg.TokenID) if err != nil { - return cortypes.UserAccessToken{}, err + return jcstypes.UserAccessToken{}, err } - err = svc.db.LoadedAccessToken().CreateOrUpdate(tx, cortypes.LoadedAccessToken{ + err = svc.db.LoadedAccessToken().CreateOrUpdate(tx, jcstypes.LoadedAccessToken{ UserID: msg.UserID, TokenID: msg.TokenID, HubID: msg.HubID, LoadedAt: time.Now(), }) if err != nil { - return cortypes.UserAccessToken{}, fmt.Errorf("creating access token loaded record: %v", err) + return jcstypes.UserAccessToken{}, fmt.Errorf("creating access token loaded record: %v", err) } return token, nil diff --git a/coordinator/internal/ticktock/check_hub_state.go b/coordinator/internal/ticktock/check_hub_state.go index dea4f79..7c23155 100644 --- a/coordinator/internal/ticktock/check_hub_state.go +++ b/coordinator/internal/ticktock/check_hub_state.go @@ -10,7 +10,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/consts" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type CheckHubState struct { @@ -42,10 +42,10 @@ func (j *CheckHubState) Execute(t *TickTock) { } } -func (j *CheckHubState) checkOne(t *TickTock, hub cortypes.Hub) error { +func (j *CheckHubState) checkOne(t *TickTock, hub jcstypes.Hub) error { log := logger.WithType[CheckHubState]("TickTock") - addr, ok := hub.Address.(*cortypes.GRPCAddressInfo) + addr, ok := hub.Address.(*jcstypes.GRPCAddressInfo) if !ok { return fmt.Errorf("hub has no grpc address") } diff --git a/coordinator/internal/ticktock/clear_expired_access_token.go b/coordinator/internal/ticktock/clear_expired_access_token.go index 0aaa802..28db0c0 100644 --- a/coordinator/internal/ticktock/clear_expired_access_token.go +++ b/coordinator/internal/ticktock/clear_expired_access_token.go @@ -12,7 +12,7 @@ import ( hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" "gitlink.org.cn/cloudream/jcs-pub/coordinator/internal/db" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type ClearExpiredAccessToken struct { @@ -30,7 +30,7 @@ func (j *ClearExpiredAccessToken) Execute(t *TickTock) { log.Infof("job end, time: %v", time.Since(startTime)) }() - expired, err := db.DoTx02(t.db, func(tx db.SQLContext) ([]cortypes.LoadedAccessToken, error) { + expired, err := db.DoTx02(t.db, func(tx db.SQLContext) ([]jcstypes.LoadedAccessToken, error) { nowTime := time.Now() expired, err := 
t.db.LoadedAccessToken().GetExpired(tx, nowTime) if err != nil { @@ -71,7 +71,7 @@ func (j *ClearExpiredAccessToken) Execute(t *TickTock) { // 通知所有加载了失效Token的Hub - var loadedHubIDs []cortypes.HubID + var loadedHubIDs []jcstypes.HubID for _, e := range expired { loadedHubIDs = append(loadedHubIDs, e.HubID) } @@ -82,7 +82,7 @@ func (j *ClearExpiredAccessToken) Execute(t *TickTock) { return } - hubMap := make(map[cortypes.HubID]cortypes.Hub) + hubMap := make(map[jcstypes.HubID]jcstypes.Hub) for _, h := range loadedHubs { hubMap[h.HubID] = h } @@ -92,7 +92,7 @@ func (j *ClearExpiredAccessToken) Execute(t *TickTock) { if !ok { continue } - addr, ok := h.Address.(*cortypes.GRPCAddressInfo) + addr, ok := h.Address.(*jcstypes.GRPCAddressInfo) if !ok { continue } diff --git a/hub/internal/accesstoken/accesstoken.go b/hub/internal/accesstoken/accesstoken.go index ff82c7e..72a3e41 100644 --- a/hub/internal/accesstoken/accesstoken.go +++ b/hub/internal/accesstoken/accesstoken.go @@ -7,7 +7,7 @@ import ( stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/accesstoken" corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type ExitEvent = accesstoken.ExitEvent @@ -15,11 +15,11 @@ type ExitEvent = accesstoken.ExitEvent type CacheKey = accesstoken.CacheKey type Cache struct { - localHubID cortypes.HubID + localHubID jcstypes.HubID *accesstoken.Cache } -func New(localHubID cortypes.HubID) *Cache { +func New(localHubID jcstypes.HubID) *Cache { c := &Cache{ localHubID: localHubID, } @@ -28,7 +28,7 @@ func New(localHubID cortypes.HubID) *Cache { return c } -func (c *Cache) load(key accesstoken.CacheKey) (cortypes.UserAccessToken, error) { +func (c *Cache) load(key accesstoken.CacheKey) (jcstypes.UserAccessToken, error) { corCli := stgglb.CoordinatorRPCPool.Get() defer corCli.Release() @@ -39,10 +39,10 @@ func (c *Cache) load(key accesstoken.CacheKey) (cortypes.UserAccessToken, error) }) if cerr != nil { if cerr.Code == errorcode.DataNotFound { - return cortypes.UserAccessToken{}, accesstoken.ErrTokenNotFound + return jcstypes.UserAccessToken{}, accesstoken.ErrTokenNotFound } - return cortypes.UserAccessToken{}, cerr.ToError() + return jcstypes.UserAccessToken{}, cerr.ToError() } return tokenResp.Token, nil diff --git a/hub/internal/cmd/serve.go b/hub/internal/cmd/serve.go index 110ae9b..4beb60b 100644 --- a/hub/internal/cmd/serve.go +++ b/hub/internal/cmd/serve.go @@ -18,8 +18,8 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec" hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/types/datamap" - cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" "gitlink.org.cn/cloudream/jcs-pub/hub/internal/config" "gitlink.org.cn/cloudream/jcs-pub/hub/internal/ticktock" @@ -224,7 +224,7 @@ func downloadHubConfig() corrpc.GetHubConfigResp { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() - cfgResp, cerr := coorCli.GetHubConfig(ctx, corrpc.ReqGetHubConfig(cortypes.HubID(config.Cfg().ID))) + cfgResp, cerr := coorCli.GetHubConfig(ctx, corrpc.ReqGetHubConfig(jcstypes.HubID(config.Cfg().ID))) if cerr != nil { logger.Errorf("getting hub config: %v", cerr) os.Exit(1) diff --git a/hub/internal/config/config.go 
b/hub/internal/config/config.go
index 6de9797..ce54af5 100644
--- a/hub/internal/config/config.go
+++ b/hub/internal/config/config.go
@@ -8,13 +8,13 @@ import (
 corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
 hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
 "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent"
- cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/hub/internal/http"
 "gitlink.org.cn/cloudream/jcs-pub/hub/internal/ticktock"
 )
 type Config struct {
- ID cortypes.HubID `json:"id"`
+ ID jcstypes.HubID `json:"id"`
 Local stgglb.LocalMachineInfo `json:"local"`
 RPC rpc.Config `json:"rpc"`
 HTTP *http.Config `json:"http"`
diff --git a/hub/internal/rpc/cache.go b/hub/internal/rpc/cache.go
index 8b30130..7d67441 100644
--- a/hub/internal/rpc/cache.go
+++ b/hub/internal/rpc/cache.go
@@ -6,7 +6,7 @@ import (
 "fmt"
 "gitlink.org.cn/cloudream/common/consts/errorcode"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc"
 hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
 )
@@ -22,7 +22,7 @@ func (svc *Service) CheckCache(context context.Context, msg *hubrpc.CheckCache)
 return nil, rpc.Failed(errorcode.OperationFailed, fmt.Sprintf("listting file in shard store: %v", err))
 }
- var fileHashes []jcsypes.FileHash
+ var fileHashes []jcstypes.FileHash
 for _, info := range infos {
 fileHashes = append(fileHashes, info.Hash)
 }
diff --git a/hub/internal/ticktock/test_hub_connectivities.go b/hub/internal/ticktock/test_hub_connectivities.go
index 3ed1baf..cdcf348 100644
--- a/hub/internal/ticktock/test_hub_connectivities.go
+++ b/hub/internal/ticktock/test_hub_connectivities.go
@@ -10,11 +10,11 @@ import (
 stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
 corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
 hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
- cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 type TestHubConnectivities struct {
- myHubID cortypes.HubID
+ myHubID jcstypes.HubID
 }
 func (t *TestHubConnectivities) Name() string {
@@ -37,13 +37,13 @@ func (j *TestHubConnectivities) Execute(t *TickTock) {
 return
 }
- tests := make([]cortypes.HubConnectivity, len(getHubs.Hubs))
+ tests := make([]jcstypes.HubConnectivity, len(getHubs.Hubs))
 wg := sync.WaitGroup{}
 for i, hub := range getHubs.Hubs {
 wg.Add(1)
- go func(hub *cortypes.Hub, i int) {
+ go func(hub *jcstypes.Hub, i int) {
 defer wg.Done()
 tests[i] = j.testOne(hub)
@@ -59,12 +59,12 @@ func (j *TestHubConnectivities) Execute(t *TickTock) {
 }
 }
-func (j *TestHubConnectivities) testOne(hub *cortypes.Hub) cortypes.HubConnectivity {
+func (j *TestHubConnectivities) testOne(hub *jcstypes.Hub) jcstypes.HubConnectivity {
 log := logger.WithType[TestHubConnectivities]("TickTock")
- rpcAddr, ok := hub.Address.(*cortypes.GRPCAddressInfo)
+ rpcAddr, ok := hub.Address.(*jcstypes.GRPCAddressInfo)
 if !ok {
- return cortypes.HubConnectivity{
+ return jcstypes.HubConnectivity{
 FromHubID: j.myHubID,
 ToHubID: hub.HubID,
 Latency: nil,
@@ -81,7 +81,7 @@ func (j *TestHubConnectivities) testOne(hub *cortypes.Hub) cortypes.HubConnectiv
 _, cerr := hubCli.Ping(context.Background(), &hubrpc.Ping{})
 if cerr != nil {
 log.Warnf("ping %v: %v", hub.String(), cerr)
- return cortypes.HubConnectivity{
+ return jcstypes.HubConnectivity{
 FromHubID: j.myHubID,
 ToHubID: hub.HubID,
 Latency: nil,
@@ -98,7 +98,7 @@ func (j *TestHubConnectivities) testOne(hub *cortypes.Hub) cortypes.HubConnectiv
 latency := avgLatency / 3
 latencyMs := float32(latency.Microseconds()) / 1000
- return cortypes.HubConnectivity{
+ return jcstypes.HubConnectivity{
 FromHubID: j.myHubID,
 ToHubID: hub.HubID,
 Latency: &latencyMs,
diff --git a/hub/internal/ticktock/ticktock.go b/hub/internal/ticktock/ticktock.go
index 8fdb39a..67df20a 100644
--- a/hub/internal/ticktock/ticktock.go
+++ b/hub/internal/ticktock/ticktock.go
@@ -6,7 +6,7 @@ import (
 "github.com/go-co-op/gocron/v2"
 "gitlink.org.cn/cloudream/common/pkgs/logger"
 "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
- cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 type Job interface {
@@ -24,11 +24,11 @@ type TickTock struct {
 sch gocron.Scheduler
 jobs map[string]cronJob
- myHubID cortypes.HubID
+ myHubID jcstypes.HubID
 stgPool *pool.Pool
 }
-func New(cfg Config, myHubID cortypes.HubID, stgPool *pool.Pool) *TickTock {
+func New(cfg Config, myHubID jcstypes.HubID, stgPool *pool.Pool) *TickTock {
 sch, _ := gocron.NewScheduler()
 t := &TickTock{
 cfg: cfg,
diff --git a/jcsctl/cmd/bucket/delete.go b/jcsctl/cmd/bucket/delete.go
index 80955f4..e70c795 100644
--- a/jcsctl/cmd/bucket/delete.go
+++ b/jcsctl/cmd/bucket/delete.go
@@ -6,7 +6,7 @@ import (
 "github.com/spf13/cobra"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -30,13 +30,13 @@ type deleteOpt struct {
 }
 func delete(c *cobra.Command, ctx *cmd.CommandContext, opt deleteOpt, args []string) error {
- var bktID jcsypes.BucketID
+ var bktID jcstypes.BucketID
 if opt.UseID {
 id, err := strconv.ParseInt(args[0], 10, 64)
 if err != nil {
 return fmt.Errorf("invalid bucket ID: %v", args[0])
 }
- bktID = jcsypes.BucketID(id)
+ bktID = jcstypes.BucketID(id)
 } else {
 bktName := args[0]
 bkt, err := ctx.Client.Bucket().GetByName(cliapi.BucketGetByName{
diff --git a/jcsctl/cmd/bucket/utils.go b/jcsctl/cmd/bucket/utils.go
index 35099c6..a5d909f 100644
--- a/jcsctl/cmd/bucket/utils.go
+++ b/jcsctl/cmd/bucket/utils.go
@@ -4,10 +4,10 @@ import (
 "fmt"
 "github.com/jedib0t/go-pretty/v6/table"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
-func printOneBucket(bkt jcsypes.Bucket) {
+func printOneBucket(bkt jcstypes.Bucket) {
 tb := table.NewWriter()
 tb.AppendHeader(table.Row{"ID", "Name", "CreateTime"})
 tb.AppendRow(table.Row{bkt.BucketID, bkt.Name, bkt.CreateTime})
diff --git a/jcsctl/cmd/geto/geto.go b/jcsctl/cmd/geto/geto.go
index 126c8dc..d9af15f 100644
--- a/jcsctl/cmd/geto/geto.go
+++ b/jcsctl/cmd/geto/geto.go
@@ -11,7 +11,7 @@ import (
 "github.com/inhies/go-bytesize"
 "github.com/spf13/cobra"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -43,7 +43,7 @@ type option struct {
 }
 func geto(c *cobra.Command, ctx *cmd.CommandContext, opt option, args []string) error {
- var obj jcsypes.Object
+ var obj jcstypes.Object
 if opt.UseID {
 id, err := strconv.ParseInt(args[0], 10, 64)
 if err != nil {
@@ -51,7 +51,7 @@ func geto(c *cobra.Command, ctx *cmd.CommandContext, opt option, args []string)
 }
 resp, err := ctx.Client.Object().ListByIDs(cliapi.ObjectListByIDs{
- ObjectIDs: []jcsypes.ObjectID{jcsypes.ObjectID(id)},
+ ObjectIDs: []jcstypes.ObjectID{jcstypes.ObjectID(id)},
 })
 if err != nil {
 return fmt.Errorf("list objects by ids: %v", err)
@@ -96,7 +96,7 @@ func geto(c *cobra.Command, ctx *cmd.CommandContext, opt option, args []string)
 if opt.Output != "" {
 filePath = filepath.Join(filePath, opt.Output)
 } else {
- filePath = filepath.Join(filePath, jcsypes.BaseName(obj.Path))
+ filePath = filepath.Join(filePath, jcstypes.BaseName(obj.Path))
 }
 flag := os.O_CREATE | os.O_WRONLY
diff --git a/jcsctl/cmd/getp/getp.go b/jcsctl/cmd/getp/getp.go
index a122b3c..99dc303 100644
--- a/jcsctl/cmd/getp/getp.go
+++ b/jcsctl/cmd/getp/getp.go
@@ -13,7 +13,7 @@ import (
 "github.com/inhies/go-bytesize"
 "github.com/spf13/cobra"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -45,14 +45,14 @@ type option struct {
 }
 func getp(c *cobra.Command, ctx *cmd.CommandContext, opt option, args []string) error {
- var pkgID jcsypes.PackageID
+ var pkgID jcstypes.PackageID
 if opt.UseID {
 id, err := strconv.ParseInt(args[0], 10, 64)
 if err != nil {
 return fmt.Errorf("invalid package id")
 }
- pkgID = jcsypes.PackageID(id)
+ pkgID = jcstypes.PackageID(id)
 } else {
 comps := strings.Split(args[0], "/")
 if len(comps) != 2 {
diff --git a/jcsctl/cmd/ls/ls_object.go b/jcsctl/cmd/ls/ls_object.go
index c981753..38c3f34 100644
--- a/jcsctl/cmd/ls/ls_object.go
+++ b/jcsctl/cmd/ls/ls_object.go
@@ -5,14 +5,14 @@ import (
 "github.com/jedib0t/go-pretty/v6/table"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
 func lsObject(ctx *cmd.CommandContext, opt option, bktName string, pkgName string, objPath string) error {
- var pkgID jcsypes.PackageID
+ var pkgID jcstypes.PackageID
 if opt.PackageID != 0 {
- pkgID = jcsypes.PackageID(opt.PackageID)
+ pkgID = jcstypes.PackageID(opt.PackageID)
 } else {
 resp, err := ctx.Client.Package().GetByFullName(cliapi.PackageGetByFullName{
 BucketName: bktName,
@@ -24,7 +24,7 @@ func lsObject(ctx *cmd.CommandContext, opt option, bktName string, pkgName strin
 pkgID = resp.Package.PackageID
 }
- var objs []jcsypes.Object
+ var objs []jcstypes.Object
 var commonPrefixes []string
 req := cliapi.ObjectListByPath{
diff --git a/jcsctl/cmd/ls/ls_package.go b/jcsctl/cmd/ls/ls_package.go
index 7740185..456e30e 100644
--- a/jcsctl/cmd/ls/ls_package.go
+++ b/jcsctl/cmd/ls/ls_package.go
@@ -5,12 +5,12 @@ import (
 "github.com/jedib0t/go-pretty/v6/table"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
 func lsPackage(ctx *cmd.CommandContext, opt option, bktName string) error {
- var bktID jcsypes.BucketID
+ var bktID jcstypes.BucketID
 if opt.BucketID == 0 {
 bktResp, err := ctx.Client.Bucket().GetByName(cliapi.BucketGetByName{
 Name: bktName,
@@ -20,7 +20,7 @@ func lsPackage(ctx *cmd.CommandContext, opt option, bktName string) error {
 }
 bktID = bktResp.Bucket.BucketID
 } else {
- bktID = jcsypes.BucketID(opt.BucketID)
+ bktID = jcstypes.BucketID(opt.BucketID)
 }
 pkgResp, err := ctx.Client.Package().ListBucketPackages(cliapi.PackageListBucketPackages{
diff --git a/jcsctl/cmd/package/delete.go b/jcsctl/cmd/package/delete.go
index 092a3b9..8e60328 100644
--- a/jcsctl/cmd/package/delete.go
+++ b/jcsctl/cmd/package/delete.go
@@ -7,7 +7,7 @@ import (
 "github.com/spf13/cobra"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -31,13 +31,13 @@ type deleteOpt struct {
 }
 func delete(c *cobra.Command, ctx *cmd.CommandContext, opt deleteOpt, args []string) error {
- var pkgID jcsypes.PackageID
+ var pkgID jcstypes.PackageID
 if opt.UseID {
 id, err := strconv.ParseInt(args[0], 10, 64)
 if err != nil {
 return fmt.Errorf("invalid package ID: %v", args[0])
 }
- pkgID = jcsypes.PackageID(id)
+ pkgID = jcstypes.PackageID(id)
 } else {
 comps := strings.Split(args[0], "/")
 if len(comps) != 2 {
diff --git a/jcsctl/cmd/package/new.go b/jcsctl/cmd/package/new.go
index af5eaa3..c9c2439 100644
--- a/jcsctl/cmd/package/new.go
+++ b/jcsctl/cmd/package/new.go
@@ -6,7 +6,7 @@ import (
 "github.com/spf13/cobra"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -31,7 +31,7 @@ type newOpt struct {
 func new(c *cobra.Command, ctx *cmd.CommandContext, opt newOpt, args []string) error {
 if opt.BucketID != 0 {
 resp, err := ctx.Client.Package().Create(cliapi.PackageCreate{
- BucketID: jcsypes.BucketID(opt.BucketID),
+ BucketID: jcstypes.BucketID(opt.BucketID),
 Name: args[0],
 })
 if err != nil {
diff --git a/jcsctl/cmd/package/utils.go b/jcsctl/cmd/package/utils.go
index 8599e74..633c053 100644
--- a/jcsctl/cmd/package/utils.go
+++ b/jcsctl/cmd/package/utils.go
@@ -4,10 +4,10 @@ import (
 "fmt"
 "github.com/jedib0t/go-pretty/v6/table"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
-func printOnePackage(pkg jcsypes.Package) {
+func printOnePackage(pkg jcstypes.Package) {
 tb := table.NewWriter()
 tb.AppendHeader(table.Row{"Package ID", "Bucket ID", "Name", "CreateTime"})
 tb.AppendRow(table.Row{pkg.PackageID, pkg.BucketID, pkg.Name, pkg.CreateTime})
diff --git a/jcsctl/cmd/puto/puto.go b/jcsctl/cmd/puto/puto.go
index 6a37376..b4e1f43 100644
--- a/jcsctl/cmd/puto/puto.go
+++ b/jcsctl/cmd/puto/puto.go
@@ -10,7 +10,7 @@ import (
 "github.com/spf13/cobra"
 "gitlink.org.cn/cloudream/common/pkgs/iterator"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -34,7 +34,7 @@ type option struct {
 }
 func puto(c *cobra.Command, ctx *cmd.CommandContext, opt option, args []string) error {
- var pkgID jcsypes.PackageID
+ var pkgID jcstypes.PackageID
 var objPath string
 if opt.UseID {
@@ -44,7 +44,7 @@ func puto(c *cobra.Command, ctx *cmd.CommandContext, opt option, args []string)
 }
 resp, err := ctx.Client.Object().ListByIDs(cliapi.ObjectListByIDs{
- ObjectIDs: []jcsypes.ObjectID{jcsypes.ObjectID(id)},
+ ObjectIDs: []jcstypes.ObjectID{jcstypes.ObjectID(id)},
 })
 if err != nil {
 return fmt.Errorf("list objects by ids: %v", err)
diff --git a/jcsctl/cmd/putp/file_iterator.go b/jcsctl/cmd/putp/file_iterator.go
index 9c0b6fd..a8680d1 100644
--- a/jcsctl/cmd/putp/file_iterator.go
+++ b/jcsctl/cmd/putp/file_iterator.go
@@ -9,12 +9,12 @@ import (
 "github.com/inhies/go-bytesize"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
 "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/iterator"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 type FileIterator struct {
 absRootPath string
- jpathRoot jcsypes.JPath
+ jpathRoot jcstypes.JPath
 init bool
 curEntries []dirEntry
 lastStartTime time.Time
@@ -31,7 +31,7 @@ func (i *FileIterator) MoveNext() (*cliapi.UploadingObject, error) {
 for _, e := range es {
 i.curEntries = append(i.curEntries, dirEntry{
- dir: jcsypes.JPath{},
+ dir: jcstypes.JPath{},
 entry: e,
 })
 }
@@ -101,6 +101,6 @@ func (i *FileIterator) Close() {
 }
 type dirEntry struct {
- dir jcsypes.JPath
+ dir jcstypes.JPath
 entry os.DirEntry
 }
diff --git a/jcsctl/cmd/putp/putp.go b/jcsctl/cmd/putp/putp.go
index 9c21c8c..e064d2e 100644
--- a/jcsctl/cmd/putp/putp.go
+++ b/jcsctl/cmd/putp/putp.go
@@ -15,7 +15,7 @@ import (
 "gitlink.org.cn/cloudream/common/sdks"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
 "gitlink.org.cn/cloudream/jcs-pub/common/ecode"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -55,13 +55,13 @@ func putp(c *cobra.Command, ctx *cmd.CommandContext, opt option, args []string)
 return err
 }
- var pkgID jcsypes.PackageID
+ var pkgID jcstypes.PackageID
 if opt.UseID {
 id, err := strconv.ParseInt(args[1], 10, 64)
 if err != nil {
 return err
 }
- pkgID = jcsypes.PackageID(id)
+ pkgID = jcstypes.PackageID(id)
 _, err = ctx.Client.Package().Get(cliapi.PackageGet{
 PackageID: pkgID,
@@ -134,7 +134,7 @@ func putp(c *cobra.Command, ctx *cmd.CommandContext, opt option, args []string)
 _, err = ctx.Client.Object().Upload(cliapi.ObjectUpload{
 Info: cliapi.ObjectUploadInfo{
 PackageID: pkgID,
- Affinity: jcsypes.UserSpaceID(opt.Affinity),
+ Affinity: jcstypes.UserSpaceID(opt.Affinity),
 },
 Files: iterator.Array(&cliapi.UploadingObject{
 Path: pat,
@@ -152,14 +152,14 @@ func putp(c *cobra.Command, ctx *cmd.CommandContext, opt option, args []string)
 iter := &FileIterator{
 absRootPath: absLocal,
- jpathRoot: jcsypes.PathFromJcsPathString(opt.Prefix),
+ jpathRoot: jcstypes.PathFromJcsPathString(opt.Prefix),
 }
 startTime := time.Now()
 _, err = ctx.Client.Object().Upload(cliapi.ObjectUpload{
 Info: cliapi.ObjectUploadInfo{
 PackageID: pkgID,
- Affinity: jcsypes.UserSpaceID(opt.Affinity),
+ Affinity: jcstypes.UserSpaceID(opt.Affinity),
 },
 Files: iter,
 })
diff --git a/jcsctl/cmd/userspace/create.go b/jcsctl/cmd/userspace/create.go
index b2794cf..b04d983 100644
--- a/jcsctl/cmd/userspace/create.go
+++ b/jcsctl/cmd/userspace/create.go
@@ -9,7 +9,7 @@ import (
 "github.com/spf13/cobra"
 "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -147,14 +147,14 @@ func (userSpace *MyUserSpace) collectLocalConfig(rl *readline.Instance) error {
 return err
 }
- userSpace.Storage = &cortypes.LocalType{
+ userSpace.Storage = &jcstypes.LocalType{
 Type: "Local",
- Location: cortypes.Location{
+ Location: jcstypes.Location{
 StorageName: storageName,
 Location: location,
 },
 }
- userSpace.Credential = &cortypes.LocalCred{
+ userSpace.Credential = &jcstypes.LocalCred{
 Type: "Local",
 RootDir: rootDir,
 }
@@ -199,14 +199,14 @@ func (userSpace *MyUserSpace) collectObsConfig(rl *readline.Instance) error {
 }
 secretKey := string(secretBytes)
- userSpace.Storage = &cortypes.OBSType{
+ userSpace.Storage = &jcstypes.OBSType{
 Type: "OBS",
 Region: region,
 Endpoint: endpoint,
 Bucket: bucket,
 ProjectID: projectID,
 }
- userSpace.Credential = &cortypes.OBSCred{
+ userSpace.Credential = &jcstypes.OBSCred{
 Type: "OBS",
 AK: accessKey,
 SK: secretKey,
@@ -221,7 +221,7 @@ func (userSpace *MyUserSpace) collectObsConfig(rl *readline.Instance) error {
 switch strings.ToLower(strings.TrimSpace(input)) {
 case "y", "yes":
- userSpace.Features = append(userSpace.Features, &cortypes.S2STransferFeature{
+ userSpace.Features = append(userSpace.Features, &jcstypes.S2STransferFeature{
 Type: "S2STransfer",
 })
 return nil
@@ -266,12 +266,12 @@ func (userSpace *MyUserSpace) collectOssConfig(rl *readline.Instance) error {
 }
 secretKey := string(secretBytes)
- userSpace.Storage = &cortypes.OSSType{
+ userSpace.Storage = &jcstypes.OSSType{
 Region: region,
 Endpoint: endpoint,
 Bucket: bucket,
 }
- userSpace.Credential = &cortypes.OSSCred{
+ userSpace.Credential = &jcstypes.OSSCred{
 Type: "OSS",
 AK: accessKey,
 SK: secretKey,
@@ -311,13 +311,13 @@ func (userSpace *MyUserSpace) collectCosConfig(rl *readline.Instance) error {
 }
 secretKey := string(secretBytes)
- userSpace.Storage = &cortypes.COSType{
+ userSpace.Storage = &jcstypes.COSType{
 Type: "COS",
 Region: region,
 Endpoint: endpoint,
 Bucket: bucket,
 }
- userSpace.Credential = &cortypes.COSCred{
+ userSpace.Credential = &jcstypes.COSCred{
 Type: "COS",
 AK: accessKey,
 SK: secretKey,
@@ -387,11 +387,11 @@ func (userSpace *MyUserSpace) collectEfileConfig(rl *readline.Instance) error {
 return err
 }
- userSpace.Storage = &cortypes.EFileType{
+ userSpace.Storage = &jcstypes.EFileType{
 Type: "EFile",
 ClusterID: clusterID,
 }
- userSpace.Credential = &cortypes.EFileCred{
+ userSpace.Credential = &jcstypes.EFileCred{
 Type: "EFile",
 TokenURL: tokenURL,
 APIURL: apiURL,
@@ -410,7 +410,7 @@ func (userSpace *MyUserSpace) collectEfileConfig(rl *readline.Instance) error {
 switch strings.ToLower(strings.TrimSpace(input)) {
 case "y", "yes":
- userSpace.Features = append(userSpace.Features, &cortypes.ECMultiplierFeature{
+ userSpace.Features = append(userSpace.Features, &jcstypes.ECMultiplierFeature{
 Type: "ECMultiplier",
 })
 return nil
@@ -455,13 +455,13 @@ func (userSpace *MyUserSpace) collectS3Config(rl *readline.Instance) error {
 }
 secretKey := string(secretBytes)
- userSpace.Storage = &cortypes.S3Type{
+ userSpace.Storage = &jcstypes.S3Type{
 Type: "S3",
 Region: region,
 Endpoint: endpoint,
 Bucket: bucket,
 }
- userSpace.Credential = &cortypes.S3Cred{
+ userSpace.Credential = &jcstypes.S3Cred{
 Type: "S3",
 AK: accessKey,
 SK: secretKey,
@@ -476,7 +476,7 @@ func (userSpace *MyUserSpace) collectS3Config(rl *readline.Instance) error {
 switch strings.ToLower(strings.TrimSpace(input)) {
 case "y", "yes":
- userSpace.Features = append(userSpace.Features, &cortypes.MultipartUploadFeature{
+ userSpace.Features = append(userSpace.Features, &jcstypes.MultipartUploadFeature{
 Type: "MultipartUpload",
 })
 return nil
@@ -519,7 +519,7 @@ func (userSpace *MyUserSpace) collectShardStore(rl *readline.Instance) error {
 fmt.Printf("\033[31m错误:%d 不是正整数,请输入大于 0 的整数\033[0m\n", maxSize)
 continue
 }
- userSpace.ShardStore = &cortypes.ShardStoreUserConfig{
+ userSpace.ShardStore = &jcstypes.ShardStoreUserConfig{
 MaxSize: maxSize,
 }
 return nil
diff --git a/jcsctl/cmd/userspace/delete.go b/jcsctl/cmd/userspace/delete.go
index ecac52f..8d1f23e 100644
--- a/jcsctl/cmd/userspace/delete.go
+++ b/jcsctl/cmd/userspace/delete.go
@@ -8,7 +8,7 @@ import (
 "github.com/chzyer/readline"
 "github.com/spf13/cobra"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -44,7 +44,7 @@ func delete(c *cobra.Command, ctx *cmd.CommandContext) {
 return
 }
- var userSpace jcsypes.UserSpace
+ var userSpace jcstypes.UserSpace
 trimmed := strings.TrimSpace(line)
 switch trimmed {
 case "1":
@@ -61,7 +61,7 @@ func delete(c *cobra.Command, ctx *cmd.CommandContext) {
 }
 resp, err := ctx.Client.UserSpace().Get(cliapi.UserSpaceGet{
- UserSpaceID: jcsypes.UserSpaceID(id),
+ UserSpaceID: jcstypes.UserSpaceID(id),
 })
 if err != nil {
 fmt.Printf("\033[31m保存配置失败: %v\033[0m", err)
diff --git a/jcsctl/cmd/userspace/ls.go b/jcsctl/cmd/userspace/ls.go
index f77e836..a4b2f67 100644
--- a/jcsctl/cmd/userspace/ls.go
+++ b/jcsctl/cmd/userspace/ls.go
@@ -7,7 +7,7 @@ import (
 "github.com/jedib0t/go-pretty/v6/table"
 "github.com/spf13/cobra"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -51,7 +51,7 @@ func ls(c *cobra.Command, ctx *cmd.CommandContext, opt lsOpt, args []string) err
 }
 searchKey := args[0]
- var userSpace *jcsypes.UserSpace
+ var userSpace *jcstypes.UserSpace
 if opt.ByID {
 id, err := strconv.Atoi(searchKey)
 if err != nil {
@@ -59,7 +59,7 @@ func ls(c *cobra.Command, ctx *cmd.CommandContext, opt lsOpt, args []string) err
 }
 result, err := ctx.Client.UserSpace().Get(cliapi.UserSpaceGet{
- UserSpaceID: jcsypes.UserSpaceID(id),
+ UserSpaceID: jcstypes.UserSpaceID(id),
 })
 if err != nil {
 return err
diff --git a/jcsctl/cmd/userspace/update.go b/jcsctl/cmd/userspace/update.go
index ac011e3..48bec42 100644
--- a/jcsctl/cmd/userspace/update.go
+++ b/jcsctl/cmd/userspace/update.go
@@ -8,8 +8,7 @@ import (
 "github.com/chzyer/readline"
 "github.com/spf13/cobra"
 cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
- jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
- cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+ jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd"
 )
@@ -50,7 +49,7 @@ func update(c *cobra.Command, ctx *cmd.CommandContext) {
 }
 resp, err := ctx.Client.UserSpace().Get(cliapi.UserSpaceGet{
- UserSpaceID: jcsypes.UserSpaceID(id),
+ UserSpaceID: jcstypes.UserSpaceID(id),
 })
 if err != nil {
 fmt.Printf("\033[31m云存储id=%d 不存在: %v\033[0m\n", id, err)
@@ -64,7 +63,7 @@ func update(c *cobra.Command, ctx *cmd.CommandContext) {
 }
 var userSpaceUpdate UserSpaceUpdate
- userSpaceUpdate.UserSpaceID = jcsypes.UserSpaceID(id)
+ userSpaceUpdate.UserSpaceID = jcstypes.UserSpaceID(id)
 userSpaceUpdate.Name = name
 storageType := resp.UserSpace.Storage.GetStorageType()
@@ -101,7 +100,7 @@ func (userSpace *UserSpaceUpdate) collectLocalConfig(rl *readline.Instance) erro
 return err
 }
- userSpace.Credential = &cortypes.LocalCred{
+ userSpace.Credential = &jcstypes.LocalCred{
 Type: "Local",
 RootDir: rootDir,
 }
@@ -121,7 +120,7 @@ func (userSpace *UserSpaceUpdate) collectObsConfig(rl *readline.Instance) error
 }
 secretKey := string(secretBytes)
- userSpace.Credential = &cortypes.OBSCred{
+ userSpace.Credential = &jcstypes.OBSCred{
 Type: "OBS",
 AK: accessKey,
 SK: secretKey,
@@ -136,7 +135,7 @@ func (userSpace *UserSpaceUpdate) collectObsConfig(rl *readline.Instance) error
 switch strings.ToLower(strings.TrimSpace(input)) {
 case "y", "yes":
- userSpace.Features = append(userSpace.Features, &cortypes.S2STransferFeature{
+ userSpace.Features = append(userSpace.Features, &jcstypes.S2STransferFeature{
 Type: "S2STransfer",
 })
 return nil
@@ -162,7 +161,7 @@ func (userSpace *UserSpaceUpdate) collectOssConfig(rl *readline.Instance) error
 }
 secretKey := string(secretBytes)
- userSpace.Credential = &cortypes.OSSCred{
+ userSpace.Credential = &jcstypes.OSSCred{
 Type: "OSS",
 AK: accessKey,
 SK: secretKey,
@@ -183,7 +182,7 @@ func (userSpace *UserSpaceUpdate) collectCosConfig(rl *readline.Instance) error
 }
 secretKey := string(secretBytes)
- userSpace.Credential = &cortypes.COSCred{
+ userSpace.Credential = &jcstypes.COSCred{
 Type: "COS",
 AK: accessKey,
 SK: secretKey,
@@ -246,7 +245,7 @@ func (userSpace *UserSpaceUpdate) collectEfileConfig(rl *readline.Instance) erro
 return err
 }
- userSpace.Credential = &cortypes.EFileCred{
+ userSpace.Credential = &jcstypes.EFileCred{
 Type: "EFile",
 TokenURL: tokenURL,
 APIURL: apiURL,
@@ -265,7 +264,7 @@ func (userSpace *UserSpaceUpdate) collectEfileConfig(rl *readline.Instance) erro
 switch strings.ToLower(strings.TrimSpace(input)) {
 case "y", "yes":
- userSpace.Features = append(userSpace.Features, &cortypes.ECMultiplierFeature{
+ userSpace.Features = append(userSpace.Features, &jcstypes.ECMultiplierFeature{
 Type: "ECMultiplier",
 })
 return nil
@@ -291,7 +290,7 @@ func (userSpace *UserSpaceUpdate) collectS3Config(rl *readline.Instance) error {
 }
 secretKey := string(secretBytes)
- userSpace.Credential = &cortypes.S3Cred{
+ userSpace.Credential = &jcstypes.S3Cred{
 Type: "S3",
 AK: accessKey,
 SK: secretKey,
@@ -306,7 +305,7 @@ func (userSpace *UserSpaceUpdate) collectS3Config(rl *readline.Instance) error {
 switch strings.ToLower(strings.TrimSpace(input)) {
 case "y", "yes":
- userSpace.Features = append(userSpace.Features, &cortypes.MultipartUploadFeature{
+ userSpace.Features = append(userSpace.Features, &jcstypes.MultipartUploadFeature{
 Type: "MultipartUpload",
 })
 return nil