@@ -46,8 +46,8 @@ func doTest(svc *services.Service) {
 space1 := svc.UserSpaceMeta.Get(3)
 space2 := svc.UserSpaceMeta.Get(4)
-// ft.AddFrom(ioswitch2.NewFromPublicStore(*space1.MasterHub, *space1, "space3/blocks/1A/Full1AE5436AF72D8EF93923486E0E167315CEF0C91898064DADFAC22216FFBC5E3D"))
-// ft.AddTo(ioswitch2.NewToPublicStore(*space2.MasterHub, *space2, "block"))
+// ft.AddFrom(ioswitch2.NewFromBaseStore(*space1.MasterHub, *space1, "space3/blocks/1A/Full1AE5436AF72D8EF93923486E0E167315CEF0C91898064DADFAC22216FFBC5E3D"))
+// ft.AddTo(ioswitch2.NewToBaseStore(*space2.MasterHub, *space2, "block"))
 // plans := exec.NewPlanBuilder()
 // parser.Parse(ft, plans)
 // fmt.Println(plans)
@@ -56,8 +56,8 @@ func doTest(svc *services.Service) {
 // fmt.Println(err)
 ft = ioswitch2.NewFromTo()
-ft.AddFrom(ioswitch2.NewFromShardstore("Full1AE5436AF72D8EF93923486E0E167315CEF0C91898064DADFAC22216FFBC5E3D", *space1.MasterHub, *space1, ioswitch2.RawStream()))
-ft.AddTo(ioswitch2.NewToPublicStore(*space2.MasterHub, *space2, "test3.txt"))
+ft.AddFrom(ioswitch2.NewFromShardstore("Full1AE5436AF72D8EF93923486E0E167315CEF0C91898064DADFAC22216FFBC5E3D", *space1, ioswitch2.RawStream()))
+ft.AddTo(ioswitch2.NewToBaseStore(*space2, "test3.txt"))
 plans := exec.NewPlanBuilder()
 parser.Parse(ft, plans)
 fmt.Println(plans)
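For reviewers who want the new call shape at a glance, here is a hedged sketch of how a transfer plan is assembled after this change: the `*MasterHub` argument is dropped from the From/To constructors and `NewToPublicStore` becomes `NewToBaseStore`. Only the identifiers visible in this diff are real; the surrounding variables and error handling are illustrative assumptions, not verified against the repository.

	// Sketch only; names are taken from this diff, everything else is assumed.
	space1 := svc.UserSpaceMeta.Get(3) // *UserSpaceDetail, now carries its own hub info
	space2 := svc.UserSpaceMeta.Get(4)

	ft := ioswitch2.NewFromTo()
	// Source: an object in space1's shard store, addressed by its file hash.
	ft.AddFrom(ioswitch2.NewFromShardstore(fileHash, *space1, ioswitch2.RawStream()))
	// Destination: a path in space2's base store (formerly "public store").
	ft.AddTo(ioswitch2.NewToBaseStore(*space2, "test3.txt"))

	plans := exec.NewPlanBuilder()
	if err := parser.Parse(ft, plans); err != nil {
		// handle planning failure
	}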
@@ -109,7 +109,7 @@ func (i *DownloadObjectIterator) Close() {
 }
 func (i *DownloadObjectIterator) downloadDirect(req downloadReqeust2, strg strategy.DirectStrategy) (io.ReadCloser, error) {
-logger.Debugf("downloading object %v from storage %v", req.Raw.ObjectID, strg.UserSpace.Storage.String())
+logger.Debugf("downloading object %v from storage %v", req.Raw.ObjectID, strg.UserSpace.UserSpace.Storage.String())
 var strHandle *exec.DriverReadStream
 ft := ioswitch2.NewFromTo()
@@ -123,7 +123,7 @@ func (i *DownloadObjectIterator) downloadDirect(req downloadReqeust2, strg strat
 toExec.Range.Length = &len
 }
-ft.AddFrom(ioswitch2.NewFromShardstore(req.Detail.Object.FileHash, *strg.UserSpace.MasterHub, strg.UserSpace, ioswitch2.RawStream())).AddTo(toExec)
+ft.AddFrom(ioswitch2.NewFromShardstore(req.Detail.Object.FileHash, strg.UserSpace, ioswitch2.RawStream())).AddTo(toExec)
 strHandle = handle
 plans := exec.NewPlanBuilder()
@@ -146,7 +146,7 @@ func (i *DownloadObjectIterator) downloadECReconstruct(req downloadReqeust2, str
 logStrs = append(logStrs, ", ")
 }
-logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Index, strg.UserSpaces[i].Storage.String()))
+logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Index, strg.UserSpaces[i].UserSpace.Storage.String()))
 }
 logger.Debug(logStrs...)
@@ -18,7 +18,7 @@ func (iter *DownloadObjectIterator) downloadLRCReconstruct(req downloadReqeust2,
 logStrs = append(logStrs, ", ")
 }
-logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Index, strg.Spaces[i].Storage.String()))
+logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Index, strg.Spaces[i].UserSpace.Storage.String()))
 }
 logger.Debug(logStrs...)
@@ -99,7 +99,7 @@ func (s *LRCStripIterator) downloading() {
 var froms []ioswitchlrc.From
 for _, b := range s.blocks {
 space := b.Space
-froms = append(froms, ioswitchlrc.NewFromStorage(b.Block.FileHash, *space.MasterHub, space, b.Block.Index))
+froms = append(froms, ioswitchlrc.NewFromStorage(b.Block.FileHash, space, b.Block.Index))
 }
 toExec, hd := ioswitchlrc.NewToDriverWithRange(-1, math2.Range{
@@ -62,7 +62,7 @@ func (s *LRCReconstructStrategy) GetDetail() types.ObjectDetail {
 type Selector struct {
 cfg Config
-storageMeta *metacache.UserSpaceMeta
+spaceMeta *metacache.UserSpaceMeta
 hubMeta *metacache.HubMeta
 connectivity *metacache.Connectivity
 }
@@ -70,7 +70,7 @@ type Selector struct {
 func NewSelector(cfg Config, storageMeta *metacache.UserSpaceMeta, hubMeta *metacache.HubMeta, connectivity *metacache.Connectivity) *Selector {
 return &Selector{
 cfg: cfg,
-storageMeta: storageMeta,
+spaceMeta: storageMeta,
 hubMeta: hubMeta,
 connectivity: connectivity,
 }
@@ -232,8 +232,8 @@ func (s *Selector) sortDownloadStorages(req request2) []*downloadSpaceInfo {
 for _, id := range req.Detail.PinnedAt {
 storage, ok := downloadSpaceMap[id]
 if !ok {
-mod := s.storageMeta.Get(id)
-if mod == nil || mod.MasterHub == nil {
+mod := s.spaceMeta.Get(id)
+if mod == nil {
 continue
 }
@@ -251,8 +251,8 @@ func (s *Selector) sortDownloadStorages(req request2) []*downloadSpaceInfo {
 for _, b := range req.Detail.Blocks {
 space, ok := downloadSpaceMap[b.UserSpaceID]
 if !ok {
-mod := s.storageMeta.Get(b.UserSpaceID)
-if mod == nil || mod.MasterHub == nil {
+mod := s.spaceMeta.Get(b.UserSpaceID)
+if mod == nil {
 continue
 }
@@ -273,15 +273,15 @@ func (s *Selector) sortDownloadStorages(req request2) []*downloadSpaceInfo {
 func (s *Selector) getStorageDistance(req request2, src types.UserSpaceDetail) float64 {
 if req.DestHub != nil {
-if src.MasterHub.HubID == req.DestHub.HubID {
+if src.RecommendHub.HubID == req.DestHub.HubID {
 return consts.StorageDistanceSameStorage
 }
-if src.MasterHub.LocationID == req.DestHub.LocationID {
+if src.RecommendHub.LocationID == req.DestHub.LocationID {
 return consts.StorageDistanceSameLocation
 }
-latency := s.connectivity.Get(src.MasterHub.HubID, req.DestHub.HubID)
+latency := s.connectivity.Get(src.RecommendHub.HubID, req.DestHub.HubID)
 if latency == nil || *latency > time.Duration(float64(time.Millisecond)*s.cfg.HighLatencyHubMs) {
 return consts.HubDistanceHighLatencyHub
 }
@@ -290,7 +290,7 @@ func (s *Selector) getStorageDistance(req request2, src types.UserSpaceDetail) f
 }
 if req.DestLocation != 0 {
-if src.MasterHub.LocationID == req.DestLocation {
+if src.RecommendHub.LocationID == req.DestLocation {
 return consts.StorageDistanceSameLocation
 }
 }
@@ -202,7 +202,7 @@ func (s *StripIterator) readStrip(stripIndex int64, buf []byte) (int, error) {
 ft.ECParam = &s.red
 for _, b := range s.blocks {
 space := b.Space
-ft.AddFrom(ioswitch2.NewFromShardstore(b.Block.FileHash, *space.MasterHub, space, ioswitch2.ECStream(b.Block.Index)))
+ft.AddFrom(ioswitch2.NewFromShardstore(b.Block.FileHash, space, ioswitch2.ECStream(b.Block.Index)))
 }
 toExec, hd := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), math2.Range{
@@ -65,24 +65,25 @@ func (s *UserSpaceMeta) load(keys []types.UserSpaceID) ([]types.UserSpaceDetail,
 coorCli := stgglb.CoordinatorRPCPool.Get()
 defer coorCli.Release()
-stgIDs := make([]cortypes.StorageID, len(spaces))
+stgs := make([]cortypes.StorageType, len(spaces))
 for i := range spaces {
-stgIDs[i] = spaces[i].StorageID
+stgs[i] = spaces[i].Storage
 }
-getStgs, cerr := coorCli.GetStorageDetails(context.Background(), corrpc.ReqGetStorageDetails(stgIDs))
+selectHubs, cerr := coorCli.SelectStorageHub(context.Background(), &corrpc.SelectStorageHub{
+Storages: stgs,
+})
 if cerr != nil {
 logger.Warnf("get storage details: %v", cerr)
 return vs, oks
 }
 for i := range spaces {
-if getStgs.Storage[i] != nil {
+if selectHubs.Hubs[i] != nil {
 vs[i] = types.UserSpaceDetail{
-UserID: stgglb.Local.UserID,
-UserSpace: spaces[i],
-Storage: getStgs.Storage[i].Storage,
-MasterHub: getStgs.Storage[i].MasterHub,
+UserID: stgglb.Local.UserID,
+UserSpace: spaces[i],
+RecommendHub: *selectHubs.Hubs[i],
 }
 oks[i] = true
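A hedged sketch of what callers of `UserSpaceDetail` look like after this change: `RecommendHub` is a value filled in via `SelectStorageHub` rather than an optional `*MasterHub` pointer, which is why the nil checks are removed throughout this diff, and the hub is taken by address when an RPC client is built. Field and helper names are taken from this diff; the control flow and messages below are illustrative only.

	// Sketch, assuming the types and helpers shown in this diff.
	detail := spaceMeta.Get(spaceID) // nil only when the user space itself is unknown
	if detail == nil {
		return fmt.Errorf("userspace not found: %d", spaceID)
	}
	// RecommendHub is a value field, so there is no separate "has no master hub" case.
	addr, ok := detail.RecommendHub.Address.(*cortypes.GRPCAddressInfo)
	if !ok {
		return fmt.Errorf("hub of user space %v has no grpc address", detail.UserSpace)
	}
	cli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(&detail.RecommendHub, addr))
	defer cli.Release()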
@@ -714,7 +714,7 @@ func (svc *ObjectService) CompleteMultipartUpload(objectID types.ObjectID, index
 return types.Object{}, err
 }
-shardInfo := ret["shard"].(*ops2.ShardInfoValue)
+shardInfo := ret["shard"].(*ops2.FileInfoValue)
 err = db.DoTx10(svc.DB, svc.DB.Object().BatchUpdateRedundancy, []db.UpdatingObjectRedundancy{
 {
@@ -47,9 +47,6 @@ func (svc *UserSpaceService) DownloadPackage(packageID clitypes.PackageID, users
 if destStg == nil {
 return fmt.Errorf("userspace not found: %d", userspaceID)
 }
-if destStg.MasterHub == nil {
-return fmt.Errorf("userspace %v has no master hub", userspaceID)
-}
 details, err := db.DoTx11(svc.DB, svc.DB.Object().GetPackageObjectDetails, packageID)
 if err != nil {
@@ -61,7 +58,7 @@ func (svc *UserSpaceService) DownloadPackage(packageID clitypes.PackageID, users
 for _, obj := range details {
 strg, err := svc.StrategySelector.Select(strategy.Request{
 Detail: obj,
-DestHub: destStg.MasterHub.HubID,
+DestHub: destStg.RecommendHub.HubID,
 })
 if err != nil {
 return fmt.Errorf("select download strategy: %w", err)
@@ -70,21 +67,21 @@ func (svc *UserSpaceService) DownloadPackage(packageID clitypes.PackageID, users
 ft := ioswitch2.NewFromTo()
 switch strg := strg.(type) {
 case *strategy.DirectStrategy:
-ft.AddFrom(ioswitch2.NewFromShardstore(strg.Detail.Object.FileHash, *strg.UserSpace.MasterHub, strg.UserSpace, ioswitch2.RawStream()))
+ft.AddFrom(ioswitch2.NewFromShardstore(strg.Detail.Object.FileHash, strg.UserSpace, ioswitch2.RawStream()))
 case *strategy.ECReconstructStrategy:
 for i, b := range strg.Blocks {
-ft.AddFrom(ioswitch2.NewFromShardstore(b.FileHash, *strg.UserSpaces[i].MasterHub, strg.UserSpaces[i], ioswitch2.ECStream(b.Index)))
+ft.AddFrom(ioswitch2.NewFromShardstore(b.FileHash, strg.UserSpaces[i], ioswitch2.ECStream(b.Index)))
 ft.ECParam = &strg.Redundancy
 }
 default:
 return fmt.Errorf("unsupported download strategy: %T", strg)
 }
-ft.AddTo(ioswitch2.NewToPublicStore(*destStg.MasterHub, *destStg, path.Join(rootPath, obj.Object.Path)))
+ft.AddTo(ioswitch2.NewToBaseStore(*destStg, path.Join(rootPath, obj.Object.Path)))
 // Also save a copy into the shard store of the same storage service while we are at it
 if destStg.UserSpace.ShardStore != nil {
-ft.AddTo(ioswitch2.NewToShardStore(*destStg.MasterHub, *destStg, ioswitch2.RawStream(), ""))
+ft.AddTo(ioswitch2.NewToShardStore(*destStg, ioswitch2.RawStream(), ""))
 pinned = append(pinned, obj.Object.ObjectID)
 }
@@ -121,29 +118,24 @@ func (svc *UserSpaceService) SpaceToSpace(srcSpaceID clitypes.UserSpaceID, srcPa
 if srcSpace == nil {
 return clitypes.SpaceToSpaceResult{}, fmt.Errorf("source userspace not found: %d", srcSpaceID)
 }
-if srcSpace.MasterHub == nil {
-return clitypes.SpaceToSpaceResult{}, fmt.Errorf("source userspace %v has no master hub", srcSpaceID)
-}
-srcAddr, ok := srcSpace.MasterHub.Address.(*cortypes.GRPCAddressInfo)
+srcAddr, ok := srcSpace.RecommendHub.Address.(*cortypes.GRPCAddressInfo)
 if !ok {
 return clitypes.SpaceToSpaceResult{}, fmt.Errorf("source userspace %v has no grpc address", srcSpaceID)
 }
-srcSpaceCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(srcSpace.MasterHub, srcAddr))
+srcSpaceCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(&srcSpace.RecommendHub, srcAddr))
 defer srcSpaceCli.Release()
 dstSpace := svc.UserSpaceMeta.Get(dstSpaceID)
 if dstSpace == nil {
 return clitypes.SpaceToSpaceResult{}, fmt.Errorf("destination userspace not found: %d", dstSpaceID)
 }
-if dstSpace.MasterHub == nil {
-return clitypes.SpaceToSpaceResult{}, fmt.Errorf("destination userspace %v has no master hub", dstSpaceID)
-}
-dstAddr, ok := dstSpace.MasterHub.Address.(*cortypes.GRPCAddressInfo)
+dstAddr, ok := dstSpace.RecommendHub.Address.(*cortypes.GRPCAddressInfo)
 if !ok {
 return clitypes.SpaceToSpaceResult{}, fmt.Errorf("destination userspace %v has no grpc address", srcSpaceID)
 }
-dstSpaceCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(dstSpace.MasterHub, dstAddr))
+dstSpaceCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(&dstSpace.RecommendHub, dstAddr))
 defer dstSpaceCli.Release()
 srcPath = strings.Trim(srcPath, cdssdk.ObjectPathSeparator)
@@ -157,7 +149,7 @@ func (svc *UserSpaceService) SpaceToSpace(srcSpaceID clitypes.UserSpaceID, srcPa
 return clitypes.SpaceToSpaceResult{}, fmt.Errorf("destination path is empty")
 }
-listAllResp, cerr := srcSpaceCli.PublicStoreListAll(context.Background(), &hubrpc.PublicStoreListAll{
+listAllResp, cerr := srcSpaceCli.BaseStoreListAll(context.Background(), &hubrpc.BaseStoreListAll{
 UserSpace: *srcSpace,
 Path: srcPath,
 })
@@ -168,7 +160,7 @@ func (svc *UserSpaceService) SpaceToSpace(srcSpaceID clitypes.UserSpaceID, srcPa
 srcPathComps := clitypes.SplitObjectPath(srcPath)
 srcDirCompLen := len(srcPathComps) - 1
-entryTree := trie.NewTrie[*types.PublicStoreEntry]()
+entryTree := trie.NewTrie[*types.BaseStoreEntry]()
 for _, e := range listAllResp.Entries {
 pa, ok := strings.CutSuffix(e.Path, clitypes.ObjectPathSeparator)
 comps := clitypes.SplitObjectPath(pa)
@@ -179,7 +171,7 @@ func (svc *UserSpaceService) SpaceToSpace(srcSpaceID clitypes.UserSpaceID, srcPa
 e2.IsDir = e2.IsDir || ok
 }
-entryTree.Iterate(func(path []string, node *trie.Node[*types.PublicStoreEntry], isWordNode bool) trie.VisitCtrl {
+entryTree.Iterate(func(path []string, node *trie.Node[*types.BaseStoreEntry], isWordNode bool) trie.VisitCtrl {
 if node.Value == nil {
 return trie.VisitContinue
 }
@@ -198,7 +190,7 @@ func (svc *UserSpaceService) SpaceToSpace(srcSpaceID clitypes.UserSpaceID, srcPa
 var filePathes []string
 var dirPathes []string
-entryTree.Iterate(func(path []string, node *trie.Node[*types.PublicStoreEntry], isWordNode bool) trie.VisitCtrl {
+entryTree.Iterate(func(path []string, node *trie.Node[*types.BaseStoreEntry], isWordNode bool) trie.VisitCtrl {
 if node.Value == nil {
 return trie.VisitContinue
 }
@@ -219,8 +211,8 @@ func (svc *UserSpaceService) SpaceToSpace(srcSpaceID clitypes.UserSpaceID, srcPa
 newPath := strings.Replace(f, srcPath, dstPath, 1)
 ft := ioswitch2.NewFromTo()
-ft.AddFrom(ioswitch2.NewFromPublicStore(*srcSpace.MasterHub, *srcSpace, f))
-ft.AddTo(ioswitch2.NewToPublicStore(*dstSpace.MasterHub, *dstSpace, newPath))
+ft.AddFrom(ioswitch2.NewFromBaseStore(*srcSpace, f))
+ft.AddTo(ioswitch2.NewToBaseStore(*dstSpace, newPath))
 plans := exec.NewPlanBuilder()
 err := parser.Parse(ft, plans)
@@ -245,7 +237,7 @@ func (svc *UserSpaceService) SpaceToSpace(srcSpaceID clitypes.UserSpaceID, srcPa
 newDirPathes = append(newDirPathes, strings.Replace(dirPathes[i], srcPath, dstPath, 1))
 }
-mkdirResp, err := dstSpaceCli.PublicStoreMkdirs(context.Background(), &hubrpc.PublicStoreMkdirs{
+mkdirResp, err := dstSpaceCli.BaseStoreMkdirs(context.Background(), &hubrpc.BaseStoreMkdirs{
 UserSpace: *dstSpace,
 Pathes: newDirPathes,
 })
@@ -48,9 +48,6 @@ func (j *ChangeRedundancy) Execute(t *TickTock) {
 if space == nil {
 continue
 }
-if space.MasterHub == nil {
-continue
-}
 ctx.allUserSpaces[space.UserSpace.UserSpaceID] = &userSpaceUsageInfo{
 UserSpace: space,
@@ -56,18 +56,11 @@ func (j *CheckShardStore) Execute(t *TickTock) {
 }
 func (j *CheckShardStore) checkOne(t *TickTock, space *clitypes.UserSpaceDetail) error {
-log := logger.WithType[CheckShardStore]("TickTock")
-if space.MasterHub == nil {
-log.Infof("user space %v has no master hub", space.UserSpace)
-return nil
-}
-addr, ok := space.MasterHub.Address.(*cortypes.GRPCAddressInfo)
+addr, ok := space.RecommendHub.Address.(*cortypes.GRPCAddressInfo)
 if !ok {
 return fmt.Errorf("master of user space %v has no grpc address", space.UserSpace)
 }
-agtCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(space.MasterHub, addr))
+agtCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(&space.RecommendHub, addr))
 defer agtCli.Release()
 ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
@@ -307,12 +307,12 @@ func (t *ChangeRedundancy) chooseSoManyUserSpaces(count int, stgs []*userSpaceUs
 continue
 }
-if chosenLocations[stg.UserSpace.MasterHub.LocationID] {
+if chosenLocations[stg.UserSpace.RecommendHub.LocationID] {
 continue
 }
 chosen = append(chosen, stg)
-chosenLocations[stg.UserSpace.MasterHub.LocationID] = true
+chosenLocations[stg.UserSpace.RecommendHub.LocationID] = true
 extendStgs[i] = nil
 }
 }
@@ -329,17 +329,14 @@ func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj clitypes.
 if !ok {
 return nil, nil, fmt.Errorf("userspace %v not found", obj.Blocks[0].UserSpaceID)
 }
-if srcStg.UserSpace.MasterHub == nil {
-return nil, nil, fmt.Errorf("userspace %v has no master hub", obj.Blocks[0].UserSpaceID)
-}
 // If the chosen backup nodes are all the same one, only a single upload is needed
 uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) clitypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
 ft := ioswitch2.NewFromTo()
-ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace.MasterHub, *srcStg.UserSpace, ioswitch2.RawStream()))
+ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace, ioswitch2.RawStream()))
 for i, stg := range uploadStgs {
-ft.AddTo(ioswitch2.NewToShardStore(*stg.UserSpace.MasterHub, *stg.UserSpace, ioswitch2.RawStream(), fmt.Sprintf("%d", i)))
+ft.AddTo(ioswitch2.NewToShardStore(*stg.UserSpace, ioswitch2.RawStream(), fmt.Sprintf("%d", i)))
 }
 plans := exec.NewPlanBuilder()
@@ -359,7 +356,7 @@ func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj clitypes.
 var blocks []clitypes.ObjectBlock
 var blockChgs []datamap.BlockChange
 for i, stg := range uploadStgs {
-r := ret[fmt.Sprintf("%d", i)].(*ops2.ShardInfoValue)
+r := ret[fmt.Sprintf("%d", i)].(*ops2.FileInfoValue)
 blocks = append(blocks, clitypes.ObjectBlock{
 ObjectID: obj.Object.ObjectID,
 Index: 0,
@@ -403,15 +400,12 @@ func (t *ChangeRedundancy) noneToEC(ctx *changeRedundancyContext, obj clitypes.O
 if !ok {
 return nil, nil, fmt.Errorf("userspace %v not found", obj.Blocks[0].UserSpaceID)
 }
-if srcStg.UserSpace.MasterHub == nil {
-return nil, nil, fmt.Errorf("userspace %v has no master hub", obj.Blocks[0].UserSpaceID)
-}
 ft := ioswitch2.NewFromTo()
 ft.ECParam = red
-ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace.MasterHub, *srcStg.UserSpace, ioswitch2.RawStream()))
+ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace, ioswitch2.RawStream()))
 for i := 0; i < red.N; i++ {
-ft.AddTo(ioswitch2.NewToShardStore(*uploadStgs[i].UserSpace.MasterHub, *uploadStgs[i].UserSpace, ioswitch2.ECStream(i), fmt.Sprintf("%d", i)))
+ft.AddTo(ioswitch2.NewToShardStore(*uploadStgs[i].UserSpace, ioswitch2.ECStream(i), fmt.Sprintf("%d", i)))
 }
 plans := exec.NewPlanBuilder()
 err := parser.Parse(ft, plans)
@@ -430,7 +424,7 @@ func (t *ChangeRedundancy) noneToEC(ctx *changeRedundancyContext, obj clitypes.O
 var evtTargetBlocks []datamap.Block
 var evtBlockTrans []datamap.DataTransfer
 for i := 0; i < red.N; i++ {
-r := ioRet[fmt.Sprintf("%d", i)].(*ops2.ShardInfoValue)
+r := ioRet[fmt.Sprintf("%d", i)].(*ops2.FileInfoValue)
 blocks = append(blocks, clitypes.ObjectBlock{
 ObjectID: obj.Object.ObjectID,
 Index: i,
@@ -488,17 +482,14 @@ func (t *ChangeRedundancy) noneToLRC(ctx *changeRedundancyContext, obj clitypes.
 if !ok {
 return nil, nil, fmt.Errorf("userspace %v not found", obj.Blocks[0].UserSpaceID)
 }
-if srcStg.UserSpace.MasterHub == nil {
-return nil, nil, fmt.Errorf("userspace %v has no master hub", obj.Blocks[0].UserSpaceID)
-}
 var toes []ioswitchlrc.To
 for i := 0; i < red.N; i++ {
-toes = append(toes, ioswitchlrc.NewToStorage(*uploadStgs[i].UserSpace.MasterHub, *uploadStgs[i].UserSpace, i, fmt.Sprintf("%d", i)))
+toes = append(toes, ioswitchlrc.NewToStorage(*uploadStgs[i].UserSpace, i, fmt.Sprintf("%d", i)))
 }
 plans := exec.NewPlanBuilder()
-err := lrcparser.Encode(ioswitchlrc.NewFromStorage(obj.Object.FileHash, *srcStg.UserSpace.MasterHub, *srcStg.UserSpace, -1), toes, plans)
+err := lrcparser.Encode(ioswitchlrc.NewFromStorage(obj.Object.FileHash, *srcStg.UserSpace, -1), toes, plans)
 if err != nil {
 return nil, nil, fmt.Errorf("parsing plan: %w", err)
 }
@@ -514,7 +505,7 @@ func (t *ChangeRedundancy) noneToLRC(ctx *changeRedundancyContext, obj clitypes.
 var evtTargetBlocks []datamap.Block
 var evtBlockTrans []datamap.DataTransfer
 for i := 0; i < red.N; i++ {
-r := ioRet[fmt.Sprintf("%d", i)].(*ops2.ShardInfoValue)
+r := ioRet[fmt.Sprintf("%d", i)].(*ops2.FileInfoValue)
 blocks = append(blocks, clitypes.ObjectBlock{
 ObjectID: obj.Object.ObjectID,
 Index: i,
@@ -573,18 +564,15 @@ func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj clitypes.
 if !ok {
 return nil, nil, fmt.Errorf("userspace %v not found", obj.Blocks[0].UserSpaceID)
 }
-if srcStg.UserSpace.MasterHub == nil {
-return nil, nil, fmt.Errorf("userspace %v has no master hub", obj.Blocks[0].UserSpaceID)
-}
 // If the chosen backup nodes are all the same one, only a single upload is needed
 uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) clitypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
 ft := ioswitch2.NewFromTo()
 ft.SegmentParam = red
-ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace.MasterHub, *srcStg.UserSpace, ioswitch2.RawStream()))
+ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace, ioswitch2.RawStream()))
 for i, stg := range uploadStgs {
-ft.AddTo(ioswitch2.NewToShardStore(*stg.UserSpace.MasterHub, *stg.UserSpace, ioswitch2.SegmentStream(i), fmt.Sprintf("%d", i)))
+ft.AddTo(ioswitch2.NewToShardStore(*stg.UserSpace, ioswitch2.SegmentStream(i), fmt.Sprintf("%d", i)))
 }
 plans := exec.NewPlanBuilder()
@@ -605,7 +593,7 @@ func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj clitypes.
 var evtTargetBlocks []datamap.Block
 var evtBlockTrans []datamap.DataTransfer
 for i, stg := range uploadStgs {
-r := ret[fmt.Sprintf("%d", i)].(*ops2.ShardInfoValue)
+r := ret[fmt.Sprintf("%d", i)].(*ops2.FileInfoValue)
 blocks = append(blocks, clitypes.ObjectBlock{
 ObjectID: obj.Object.ObjectID,
 Index: i,
@@ -664,17 +652,14 @@ func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj clitypes.O
 if !ok {
 return nil, nil, fmt.Errorf("userspace %v not found", obj.Blocks[0].UserSpaceID)
 }
-if srcStg.UserSpace.MasterHub == nil {
-return nil, nil, fmt.Errorf("userspace %v has no master hub", obj.Blocks[0].UserSpaceID)
-}
 // If the chosen backup nodes are all the same one, only a single upload is needed
 uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) clitypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
 ft := ioswitch2.NewFromTo()
-ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace.MasterHub, *srcStg.UserSpace, ioswitch2.RawStream()))
+ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace, ioswitch2.RawStream()))
 for i, stg := range uploadStgs {
-ft.AddTo(ioswitch2.NewToShardStore(*stg.UserSpace.MasterHub, *stg.UserSpace, ioswitch2.RawStream(), fmt.Sprintf("%d", i)))
+ft.AddTo(ioswitch2.NewToShardStore(*stg.UserSpace, ioswitch2.RawStream(), fmt.Sprintf("%d", i)))
 }
 plans := exec.NewPlanBuilder()
@@ -694,7 +679,7 @@ func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj clitypes.O
 var blocks []clitypes.ObjectBlock
 var blockChgs []datamap.BlockChange
 for i, stg := range uploadStgs {
-r := ret[fmt.Sprintf("%d", i)].(*ops2.ShardInfoValue)
+r := ret[fmt.Sprintf("%d", i)].(*ops2.FileInfoValue)
 blocks = append(blocks, clitypes.ObjectBlock{
 ObjectID: obj.Object.ObjectID,
 Index: 0,
@@ -746,9 +731,6 @@ func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj clitypes.Ob
 if !ok {
 continue
 }
-if stg.UserSpace.MasterHub == nil {
-continue
-}
 chosenBlocks = append(chosenBlocks, block)
 chosenBlockIndexes = append(chosenBlockIndexes, block.Index)
@@ -772,11 +754,11 @@ func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj clitypes.Ob
 ft.ECParam = srcRed
 for i, block := range chosenBlocks {
-ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i].MasterHub, chosenBlockStg[i], ioswitch2.ECStream(block.Index)))
+ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, chosenBlockStg[i], ioswitch2.ECStream(block.Index)))
 }
 for i := range uploadStgs {
-ft.AddTo(ioswitch2.NewToShardStoreWithRange(*uploadStgs[i].UserSpace.MasterHub, *uploadStgs[i].UserSpace, ioswitch2.RawStream(), fmt.Sprintf("%d", i), math2.NewRange(0, obj.Object.Size)))
+ft.AddTo(ioswitch2.NewToShardStoreWithRange(*uploadStgs[i].UserSpace, ioswitch2.RawStream(), fmt.Sprintf("%d", i), math2.NewRange(0, obj.Object.Size)))
 }
 err := parser.Parse(ft, planBlder)
@@ -795,7 +777,7 @@ func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj clitypes.Ob
 var blocks []clitypes.ObjectBlock
 for i := range uploadStgs {
-r := ioRet[fmt.Sprintf("%d", i)].(*ops2.ShardInfoValue)
+r := ioRet[fmt.Sprintf("%d", i)].(*ops2.FileInfoValue)
 blocks = append(blocks, clitypes.ObjectBlock{
 ObjectID: obj.Object.ObjectID,
 Index: 0,
@@ -874,10 +856,6 @@ func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj clitypes.Obj
 if !ok {
 continue
 }
-if stg.UserSpace.MasterHub == nil {
-continue
-}
 chosenBlocks = append(chosenBlocks, block)
 chosenBlockStg = append(chosenBlockStg, *stg.UserSpace)
 }
@@ -901,7 +879,7 @@ func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj clitypes.Obj
 ft.ECParam = srcRed
 for i, block := range chosenBlocks {
-ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i].MasterHub, chosenBlockStg[i], ioswitch2.ECStream(block.Index)))
+ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, chosenBlockStg[i], ioswitch2.ECStream(block.Index)))
 evtSrcBlocks = append(evtSrcBlocks, datamap.Block{
 BlockType: datamap.BlockTypeEC,
@@ -933,7 +911,7 @@ func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj clitypes.Obj
 // Otherwise, the block this node needs has to be reconstructed
 // The output only needs the block this node itself will keep
-ft.AddTo(ioswitch2.NewToShardStore(*stg.UserSpace.MasterHub, *stg.UserSpace, ioswitch2.ECStream(i), fmt.Sprintf("%d", i)))
+ft.AddTo(ioswitch2.NewToShardStore(*stg.UserSpace, ioswitch2.ECStream(i), fmt.Sprintf("%d", i)))
 evtTargetBlocks = append(evtTargetBlocks, datamap.Block{
 BlockType: datamap.BlockTypeEC,
@@ -967,7 +945,7 @@ func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj clitypes.Obj
 return nil, nil, fmt.Errorf("parsing result key %s as index: %w", k, err)
 }
-r := v.(*ops2.ShardInfoValue)
+r := v.(*ops2.FileInfoValue)
 newBlocks[idx].FileHash = r.Hash
 newBlocks[idx].Size = r.Size
 }
@@ -1132,9 +1110,6 @@ func (t *ChangeRedundancy) reconstructLRC(ctx *changeRedundancyContext, obj clit
 if !ok {
 continue
 }
-if stg.UserSpace.MasterHub == nil {
-continue
-}
 chosenBlocks = append(chosenBlocks, block)
 chosenBlockStg = append(chosenBlockStg, *stg.UserSpace)
@@ -1178,11 +1153,11 @@ func (t *ChangeRedundancy) reconstructLRC(ctx *changeRedundancyContext, obj clit
 // Otherwise, the block this node needs has to be reconstructed
 for i2, block := range chosenBlocks {
-froms = append(froms, ioswitchlrc.NewFromStorage(block.FileHash, *chosenBlockStg[i2].MasterHub, chosenBlockStg[i2], block.Index))
+froms = append(froms, ioswitchlrc.NewFromStorage(block.FileHash, chosenBlockStg[i2], block.Index))
 }
 // The output only needs the block this node itself will keep
-toes = append(toes, ioswitchlrc.NewToStorage(*userspace.UserSpace.MasterHub, *userspace.UserSpace, i, fmt.Sprintf("%d", i)))
+toes = append(toes, ioswitchlrc.NewToStorage(*userspace.UserSpace, i, fmt.Sprintf("%d", i)))
 newBlocks = append(newBlocks, newBlock)
 }
@@ -1212,7 +1187,7 @@ func (t *ChangeRedundancy) reconstructLRC(ctx *changeRedundancyContext, obj clit
 return nil, nil, fmt.Errorf("parsing result key %s as index: %w", k, err)
 }
-r := v.(*ops2.ShardInfoValue)
+r := v.(*ops2.FileInfoValue)
 newBlocks[idx].FileHash = r.Hash
 newBlocks[idx].Size = r.Size
 }
@@ -557,7 +557,7 @@ func (t *ChangeRedundancy) sortNodeByReaderDistance(state *annealingState) {
 UserSpaceID: n,
 Distance: consts.StorageDistanceSameStorage,
 })
-} else if state.ctx.allUserSpaces[r].UserSpace.MasterHub.LocationID == state.ctx.allUserSpaces[n].UserSpace.MasterHub.LocationID {
+} else if state.ctx.allUserSpaces[r].UserSpace.RecommendHub.LocationID == state.ctx.allUserSpaces[n].UserSpace.RecommendHub.LocationID {
 // Within the same region the distance is treated as 1
 nodeDists = append(nodeDists, stgDist{
 UserSpaceID: n,
@@ -680,7 +680,7 @@ func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, s
 ft := ioswitch2.NewFromTo()
 fromStg := ctx.allUserSpaces[obj.Blocks[0].UserSpaceID].UserSpace
-ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *fromStg.MasterHub, *fromStg, ioswitch2.RawStream()))
+ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *fromStg, ioswitch2.RawStream()))
 for i, f := range solu.rmBlocks {
 hasCache := lo.ContainsBy(obj.Blocks, func(b clitypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) ||
@@ -691,7 +691,7 @@ func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, s
 // If a node that should keep a replica after annealing has no replica of the object, create one on that node
 if !hasCache {
 toStg := ctx.allUserSpaces[solu.blockList[i].UserSpaceID].UserSpace
-ft.AddTo(ioswitch2.NewToShardStore(*toStg.MasterHub, *toStg, ioswitch2.RawStream(), fmt.Sprintf("%d.0", obj.Object.ObjectID)))
+ft.AddTo(ioswitch2.NewToShardStore(*toStg, ioswitch2.RawStream(), fmt.Sprintf("%d.0", obj.Object.ObjectID)))
 planningHubIDs[solu.blockList[i].UserSpaceID] = true
 }
@@ -810,10 +810,10 @@ func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, so
 // Generate each node's execution plan separately: if they were generated together in one plan, there is no guarantee that the blocks on each node are built from that node's own replica
 ft := ioswitch2.NewFromTo()
 ft.ECParam = ecRed
-ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *ctx.allUserSpaces[id].UserSpace.MasterHub, *ctx.allUserSpaces[id].UserSpace, ioswitch2.RawStream()))
+ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *ctx.allUserSpaces[id].UserSpace, ioswitch2.RawStream()))
 for _, i := range *idxs {
-ft.AddTo(ioswitch2.NewToShardStore(*ctx.allUserSpaces[id].UserSpace.MasterHub, *ctx.allUserSpaces[id].UserSpace, ioswitch2.ECStream(i), fmt.Sprintf("%d.%d", obj.Object.ObjectID, i)))
+ft.AddTo(ioswitch2.NewToShardStore(*ctx.allUserSpaces[id].UserSpace, ioswitch2.ECStream(i), fmt.Sprintf("%d.%d", obj.Object.ObjectID, i)))
 }
 err := parser.Parse(ft, planBld)
@@ -952,7 +952,7 @@ func (t *ChangeRedundancy) populateECObjectEntry(entry *db.UpdatingObjectRedunda
 key := fmt.Sprintf("%d.%d", obj.Object.ObjectID, entry.Blocks[i].Index)
 // A missing key should never happen here
-r := ioRets[key].(*ops2.ShardInfoValue)
+r := ioRets[key].(*ops2.FileInfoValue)
 entry.Blocks[i].FileHash = r.Hash
 entry.Blocks[i].Size = r.Size
 }
@@ -87,11 +87,11 @@ func (j *ShardStoreGC) gcOne(t *TickTock, space *types.UserSpaceDetail) error {
 }
 // Get the agent client used to communicate with the node
-addr, ok := space.MasterHub.Address.(*cortypes.GRPCAddressInfo)
+addr, ok := space.RecommendHub.Address.(*cortypes.GRPCAddressInfo)
 if !ok {
 return fmt.Errorf("master of user space %v has no grpc address", space.UserSpace)
 }
-agtCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(space.MasterHub, addr))
+agtCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(&space.RecommendHub, addr))
 defer agtCli.Release()
 // Send the garbage-collection request to the agent
@@ -49,8 +49,8 @@ func (u *CreateUploader) Upload(pa string, stream io.Reader, opts ...UploadOptio
 fromExec, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream())
 ft.AddFrom(fromExec)
 for i, space := range u.targetSpaces {
-ft.AddTo(ioswitch2.NewToShardStore(*space.MasterHub, space, ioswitch2.RawStream(), "shardInfo"))
-ft.AddTo(ioswitch2.NewToPublicStore(*space.MasterHub, space, path.Join(u.copyRoots[i], pa)))
+ft.AddTo(ioswitch2.NewToShardStore(space, ioswitch2.RawStream(), "shardInfo"))
+ft.AddTo(ioswitch2.NewToBaseStore(space, path.Join(u.copyRoots[i], pa)))
 spaceIDs = append(spaceIDs, space.UserSpace.UserSpaceID)
 }
@@ -73,7 +73,7 @@ func (u *CreateUploader) Upload(pa string, stream io.Reader, opts ...UploadOptio
 defer u.lock.Unlock()
 // Record the upload result
-shardInfo := ret["fileHash"].(*ops2.ShardInfoValue)
+shardInfo := ret["fileHash"].(*ops2.FileInfoValue)
 u.successes = append(u.successes, db.AddObjectEntry{
 Path: pa,
 Size: shardInfo.Size,
@@ -58,10 +58,10 @@ func (w *UpdateUploader) Upload(pat string, stream io.Reader, opts ...UploadOpti
 ft := ioswitch2.NewFromTo()
 fromExec, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream())
 ft.AddFrom(fromExec).
-AddTo(ioswitch2.NewToShardStore(*w.targetSpace.MasterHub, w.targetSpace, ioswitch2.RawStream(), "shardInfo"))
+AddTo(ioswitch2.NewToShardStore(w.targetSpace, ioswitch2.RawStream(), "shardInfo"))
 for i, space := range w.copyToSpaces {
-ft.AddTo(ioswitch2.NewToPublicStore(*space.MasterHub, space, path.Join(w.copyToPath[i], pat)))
+ft.AddTo(ioswitch2.NewToBaseStore(space, path.Join(w.copyToPath[i], pat)))
 }
 plans := exec.NewPlanBuilder()
@@ -83,7 +83,7 @@ func (w *UpdateUploader) Upload(pat string, stream io.Reader, opts ...UploadOpti
 defer w.lock.Unlock()
 // Record the upload result
-shardInfo := ret["shardInfo"].(*ops2.ShardInfoValue)
+shardInfo := ret["shardInfo"].(*ops2.FileInfoValue)
 w.successes = append(w.successes, db.AddObjectEntry{
 Path: pat,
 Size: shardInfo.Size,
@@ -55,13 +55,9 @@ func (u *Uploader) BeginUpdate(pkgID clitypes.PackageID, affinity clitypes.UserS
 cons := u.connectivity.GetAll()
 var uploadSpaces []UploadSpaceInfo
 for _, space := range spaceDetails {
-if space.MasterHub == nil {
-continue
-}
 latency := time.Duration(math.MaxInt64)
-con, ok := cons[space.MasterHub.HubID]
+con, ok := cons[space.RecommendHub.HubID]
 if ok && con.Latency != nil {
 latency = *con.Latency
 }
@@ -69,7 +65,7 @@ func (u *Uploader) BeginUpdate(pkgID clitypes.PackageID, affinity clitypes.UserS
 uploadSpaces = append(uploadSpaces, UploadSpaceInfo{
 Space: *space,
 Delay: latency,
-IsSameLocation: space.MasterHub.LocationID == stgglb.Local.LocationID,
+IsSameLocation: space.RecommendHub.LocationID == stgglb.Local.LocationID,
 })
 }
@@ -85,9 +81,6 @@ func (u *Uploader) BeginUpdate(pkgID clitypes.PackageID, affinity clitypes.UserS
 if !ok {
 return nil, fmt.Errorf("user space %v not found", spaceID)
 }
-if space.MasterHub == nil {
-return nil, fmt.Errorf("user space %v has no master hub", spaceID)
-}
 copyToSpaces[i] = *space
 }
@@ -207,13 +200,9 @@ func (u *Uploader) UploadPart(objID clitypes.ObjectID, index int, stream io.Read
 cons := u.connectivity.GetAll()
 var userStgs []UploadSpaceInfo
 for _, space := range spaces {
-if space.MasterHub == nil {
-continue
-}
 delay := time.Duration(math.MaxInt64)
-con, ok := cons[space.MasterHub.HubID]
+con, ok := cons[space.RecommendHub.HubID]
 if ok && con.Latency != nil {
 delay = *con.Latency
 }
@@ -221,7 +210,7 @@ func (u *Uploader) UploadPart(objID clitypes.ObjectID, index int, stream io.Read
 userStgs = append(userStgs, UploadSpaceInfo{
 Space: *space,
 Delay: delay,
-IsSameLocation: space.MasterHub.LocationID == stgglb.Local.LocationID,
+IsSameLocation: space.RecommendHub.LocationID == stgglb.Local.LocationID,
 })
 }
@@ -241,7 +230,7 @@ func (u *Uploader) UploadPart(objID clitypes.ObjectID, index int, stream io.Read
 ft := ioswitch2.NewFromTo()
 fromDrv, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream())
 ft.AddFrom(fromDrv).
-AddTo(ioswitch2.NewToShardStore(*space.MasterHub, space, ioswitch2.RawStream(), "shard"))
+AddTo(ioswitch2.NewToShardStore(space, ioswitch2.RawStream(), "shard"))
 plans := exec.NewPlanBuilder()
 err = parser.Parse(ft, plans)
@@ -258,7 +247,7 @@ func (u *Uploader) UploadPart(objID clitypes.ObjectID, index int, stream io.Read
 return fmt.Errorf("executing plan: %w", err)
 }
-shardInfo := ret["shard"].(*ops2.ShardInfoValue)
+shardInfo := ret["shard"].(*ops2.FileInfoValue)
 err = u.db.DoTx(func(tx db.SQLContext) error {
 return u.db.Object().AppendPart(tx, clitypes.ObjectBlock{
 ObjectID: objID,
| @@ -26,9 +26,6 @@ func (u *Uploader) UserSpaceUpload(userSpaceID clitypes.UserSpaceID, rootPath st | |||||
| if srcSpace == nil { | if srcSpace == nil { | ||||
| return nil, fmt.Errorf("user space %d not found", userSpaceID) | return nil, fmt.Errorf("user space %d not found", userSpaceID) | ||||
| } | } | ||||
| if srcSpace.MasterHub == nil { | |||||
| return nil, fmt.Errorf("master hub not found for user space %d", userSpaceID) | |||||
| } | |||||
| pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (clitypes.Package, error) { | pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (clitypes.Package, error) { | ||||
| _, err := u.db.Bucket().GetByID(tx, targetBktID) | _, err := u.db.Bucket().GetByID(tx, targetBktID) | ||||
| @@ -53,13 +50,13 @@ func (u *Uploader) UserSpaceUpload(userSpaceID clitypes.UserSpaceID, rootPath st | |||||
| spaceDetails := u.spaceMeta.GetMany(spaceIDs) | spaceDetails := u.spaceMeta.GetMany(spaceIDs) | ||||
| spaceDetails = lo.Filter(spaceDetails, func(e *clitypes.UserSpaceDetail, i int) bool { | spaceDetails = lo.Filter(spaceDetails, func(e *clitypes.UserSpaceDetail, i int) bool { | ||||
| return e != nil && e.MasterHub != nil && e.UserSpace.ShardStore != nil | |||||
| return e != nil && e.UserSpace.ShardStore != nil | |||||
| }) | }) | ||||
| coorCli := stgglb.CoordinatorRPCPool.Get() | coorCli := stgglb.CoordinatorRPCPool.Get() | ||||
| defer coorCli.Release() | defer coorCli.Release() | ||||
| resp, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]cortypes.HubID{srcSpace.MasterHub.HubID})) | |||||
| resp, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]cortypes.HubID{srcSpace.RecommendHub.HubID})) | |||||
| if cerr != nil { | if cerr != nil { | ||||
| delPkg() | delPkg() | ||||
| return nil, fmt.Errorf("getting hub connectivities: %w", cerr.ToError()) | return nil, fmt.Errorf("getting hub connectivities: %w", cerr.ToError()) | ||||
| @@ -72,13 +69,9 @@ func (u *Uploader) UserSpaceUpload(userSpaceID clitypes.UserSpaceID, rootPath st | |||||
| var uploadSpaces []UploadSpaceInfo | var uploadSpaces []UploadSpaceInfo | ||||
| for _, space := range spaceDetails { | for _, space := range spaceDetails { | ||||
| if space.MasterHub == nil { | |||||
| continue | |||||
| } | |||||
| latency := time.Duration(math.MaxInt64) | latency := time.Duration(math.MaxInt64) | ||||
| con, ok := cons[space.MasterHub.HubID] | |||||
| con, ok := cons[space.RecommendHub.HubID] | |||||
| if ok && con.Latency != nil { | if ok && con.Latency != nil { | ||||
| latency = time.Duration(*con.Latency * float32(time.Millisecond)) | latency = time.Duration(*con.Latency * float32(time.Millisecond)) | ||||
| } | } | ||||
| @@ -86,7 +79,7 @@ func (u *Uploader) UserSpaceUpload(userSpaceID clitypes.UserSpaceID, rootPath st | |||||
| uploadSpaces = append(uploadSpaces, UploadSpaceInfo{ | uploadSpaces = append(uploadSpaces, UploadSpaceInfo{ | ||||
| Space: *space, | Space: *space, | ||||
| Delay: latency, | Delay: latency, | ||||
| IsSameLocation: space.MasterHub.LocationID == srcSpace.MasterHub.LocationID, | |||||
| IsSameLocation: space.RecommendHub.LocationID == srcSpace.RecommendHub.LocationID, | |||||
| }) | }) | ||||
| } | } | ||||
| @@ -97,27 +90,27 @@ func (u *Uploader) UserSpaceUpload(userSpaceID clitypes.UserSpaceID, rootPath st | |||||
| targetSapce := u.chooseUploadStorage(uploadSpaces, uploadAffinity) | targetSapce := u.chooseUploadStorage(uploadSpaces, uploadAffinity) | ||||
| addr, ok := srcSpace.MasterHub.Address.(*cortypes.GRPCAddressInfo) | |||||
| addr, ok := srcSpace.RecommendHub.Address.(*cortypes.GRPCAddressInfo) | |||||
| if !ok { | if !ok { | ||||
| delPkg() | delPkg() | ||||
| return nil, fmt.Errorf("master of user space %v has no grpc address", srcSpace.UserSpace) | return nil, fmt.Errorf("master of user space %v has no grpc address", srcSpace.UserSpace) | ||||
| } | } | ||||
| srcHubCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(srcSpace.MasterHub, addr)) | |||||
| srcHubCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(&srcSpace.RecommendHub, addr)) | |||||
| defer srcHubCli.Release() | defer srcHubCli.Release() | ||||
| listAllResp, cerr := srcHubCli.PublicStoreListAll(context.Background(), &hubrpc.PublicStoreListAll{ | |||||
| listAllResp, cerr := srcHubCli.BaseStoreListAll(context.Background(), &hubrpc.BaseStoreListAll{ | |||||
| UserSpace: *srcSpace, | UserSpace: *srcSpace, | ||||
| Path: rootPath, | Path: rootPath, | ||||
| }) | }) | ||||
| if cerr != nil { | if cerr != nil { | ||||
| delPkg() | delPkg() | ||||
| return nil, fmt.Errorf("listing public store: %w", cerr.ToError()) | |||||
| return nil, fmt.Errorf("listing base store: %w", cerr.ToError()) | |||||
| } | } | ||||
| adds, err := u.uploadFromPublicStore(srcSpace, &targetSapce.Space, listAllResp.Entries, rootPath) | |||||
| adds, err := u.uploadFromBaseStore(srcSpace, &targetSapce.Space, listAllResp.Entries, rootPath) | |||||
| if err != nil { | if err != nil { | ||||
| delPkg() | delPkg() | ||||
| return nil, fmt.Errorf("uploading from public store: %w", err) | |||||
| return nil, fmt.Errorf("uploading from base store: %w", err) | |||||
| } | } | ||||
| _, err = db.DoTx21(u.db, u.db.Object().BatchAdd, pkg.PackageID, adds) | _, err = db.DoTx21(u.db, u.db.Object().BatchAdd, pkg.PackageID, adds) | ||||
| @@ -129,7 +122,7 @@ func (u *Uploader) UserSpaceUpload(userSpaceID clitypes.UserSpaceID, rootPath st | |||||
| return &pkg, nil | return &pkg, nil | ||||
| } | } | ||||
| func (u *Uploader) uploadFromPublicStore(srcSpace *clitypes.UserSpaceDetail, targetSpace *clitypes.UserSpaceDetail, entries []types.PublicStoreEntry, rootPath string) ([]db.AddObjectEntry, error) { | |||||
| func (u *Uploader) uploadFromBaseStore(srcSpace *clitypes.UserSpaceDetail, targetSpace *clitypes.UserSpaceDetail, entries []types.BaseStoreEntry, rootPath string) ([]db.AddObjectEntry, error) { | |||||
| ft := ioswitch2.FromTo{} | ft := ioswitch2.FromTo{} | ||||
| for _, e := range entries { | for _, e := range entries { | ||||
| @@ -138,8 +131,8 @@ func (u *Uploader) uploadFromPublicStore(srcSpace *clitypes.UserSpaceDetail, tar | |||||
| continue | continue | ||||
| } | } | ||||
| ft.AddFrom(ioswitch2.NewFromPublicStore(*srcSpace.MasterHub, *srcSpace, e.Path)) | |||||
| ft.AddTo(ioswitch2.NewToShardStore(*targetSpace.MasterHub, *targetSpace, ioswitch2.RawStream(), e.Path)) | |||||
| ft.AddFrom(ioswitch2.NewFromBaseStore(*srcSpace, e.Path)) | |||||
| ft.AddTo(ioswitch2.NewToShardStore(*targetSpace, ioswitch2.RawStream(), e.Path)) | |||||
| } | } | ||||
| plans := exec.NewPlanBuilder() | plans := exec.NewPlanBuilder() | ||||
| @@ -167,7 +160,7 @@ func (u *Uploader) uploadFromPublicStore(srcSpace *clitypes.UserSpaceDetail, tar | |||||
| pat = clitypes.BaseName(e.Path) | pat = clitypes.BaseName(e.Path) | ||||
| } | } | ||||
| info := ret[e.Path].(*ops2.ShardInfoValue) | |||||
| info := ret[e.Path].(*ops2.FileInfoValue) | |||||
| adds = append(adds, db.AddObjectEntry{ | adds = append(adds, db.AddObjectEntry{ | ||||
| Path: pat, | Path: pat, | ||||
| Size: info.Size, | Size: info.Size, | ||||
| @@ -74,12 +74,14 @@ type UserSpace struct { | |||||
| UserSpaceID UserSpaceID `gorm:"column:UserSpaceID; primaryKey; type:bigint" json:"userSpaceID"` | UserSpaceID UserSpaceID `gorm:"column:UserSpaceID; primaryKey; type:bigint" json:"userSpaceID"` | ||||
| // Name of the user space | // Name of the user space | ||||
| Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"` | Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"` | ||||
| // The storage node where the user space resides | |||||
| StorageID cotypes.StorageID `gorm:"column:StorageID; type:bigint; not null" json:"storageID"` | |||||
| // Configuration of the storage service where the user space resides | |||||
| Storage cotypes.StorageType `gorm:"column:Storage; type:json; not null; serializer:union" json:"storage"` | |||||
| // The user's credential info on the specified storage node, e.g. user account, AK/SK, etc. | // The user's credential info on the specified storage node, e.g. user account, AK/SK, etc. | ||||
| Credential cotypes.StorageCredential `gorm:"column:Credential; type:json; not null; serializer:union" json:"credential"` | Credential cotypes.StorageCredential `gorm:"column:Credential; type:json; not null; serializer:union" json:"credential"` | ||||
| // Shard store configuration of the user space; if empty, shard store is not used | // Shard store configuration of the user space; if empty, shard store is not used | ||||
| ShardStore *cotypes.ShardStoreUserConfig `gorm:"column:ShardStore; type:json; serializer:json" json:"shardStore"` | ShardStore *cotypes.ShardStoreUserConfig `gorm:"column:ShardStore; type:json; serializer:json" json:"shardStore"` | ||||
| // Configuration of the storage service's feature capabilities | |||||
| Features []cotypes.StorageFeature `json:"features" gorm:"column:Features; type:json; serializer:union"` | |||||
| // Revision number of the user space info; it must be updated on every change | // Revision number of the user space info; it must be updated on every change | ||||
| Revision int64 `gorm:"column:Revision; type:bigint; not null" json:"revision"` | Revision int64 `gorm:"column:Revision; type:bigint; not null" json:"revision"` | ||||
| } | } | ||||
| @@ -89,7 +91,7 @@ func (UserSpace) TableName() string { | |||||
| } | } | ||||
| func (s UserSpace) String() string { | func (s UserSpace) String() string { | ||||
| return fmt.Sprintf("%v[id=%v,storageID=%v,rev=%v]", s.Name, s.UserSpaceID, s.StorageID, s.Revision) | |||||
| return fmt.Sprintf("%v[id=%v,storage=%v,rev=%v]", s.Name, s.UserSpaceID, s.Storage, s.Revision) | |||||
| } | } | ||||
| type PackageAccessStat struct { | type PackageAccessStat struct { | ||||
| @@ -214,10 +216,9 @@ func (o *ObjectDetail) GroupBlocks() []GrouppedObjectBlock { | |||||
| } | } | ||||
| type UserSpaceDetail struct { | type UserSpaceDetail struct { | ||||
| UserID cotypes.UserID | |||||
| UserSpace UserSpace | |||||
| Storage cotypes.Storage | |||||
| MasterHub *cotypes.Hub | |||||
| UserID cotypes.UserID | |||||
| UserSpace UserSpace | |||||
| RecommendHub cotypes.Hub | |||||
| } | } | ||||
| func (d UserSpaceDetail) String() string { | func (d UserSpaceDetail) String() string { | ||||
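Since RecommendHub is now a plain value rather than the nullable MasterHub pointer, call sites can drop their nil guards and dereferences. A minimal migration sketch under the new UserSpaceDetail shape (the helper name pickWorker and its error text are illustrative, not part of this change):

    // pickWorker builds a hub worker from a user space detail. Previously this
    // required a nil check plus a *space.MasterHub dereference.
    func pickWorker(space clitypes.UserSpaceDetail) (*ioswitch2.HubWorker, error) {
        addr, ok := space.RecommendHub.Address.(*cortypes.GRPCAddressInfo)
        if !ok {
            return nil, fmt.Errorf("recommend hub of user space %v has no grpc address", space.UserSpace)
        }
        return &ioswitch2.HubWorker{Hub: space.RecommendHub, Address: *addr}, nil
    }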
| @@ -4,7 +4,6 @@ import ( | |||||
| "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" | "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" | ||||
| "gitlink.org.cn/cloudream/common/utils/math2" | "gitlink.org.cn/cloudream/common/utils/math2" | ||||
| clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | ||||
| cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" | |||||
| ) | ) | ||||
| type From interface { | type From interface { | ||||
| @@ -111,15 +110,13 @@ func (f *FromDriver) GetStreamIndex() StreamIndex { | |||||
| type FromShardstore struct { | type FromShardstore struct { | ||||
| FileHash clitypes.FileHash | FileHash clitypes.FileHash | ||||
| Hub cortypes.Hub | |||||
| UserSpace clitypes.UserSpaceDetail | UserSpace clitypes.UserSpaceDetail | ||||
| StreamIndex StreamIndex | StreamIndex StreamIndex | ||||
| } | } | ||||
| func NewFromShardstore(fileHash clitypes.FileHash, hub cortypes.Hub, space clitypes.UserSpaceDetail, strIdx StreamIndex) *FromShardstore { | |||||
| func NewFromShardstore(fileHash clitypes.FileHash, space clitypes.UserSpaceDetail, strIdx StreamIndex) *FromShardstore { | |||||
| return &FromShardstore{ | return &FromShardstore{ | ||||
| FileHash: fileHash, | FileHash: fileHash, | ||||
| Hub: hub, | |||||
| UserSpace: space, | UserSpace: space, | ||||
| StreamIndex: strIdx, | StreamIndex: strIdx, | ||||
| } | } | ||||
| @@ -129,21 +126,19 @@ func (f *FromShardstore) GetStreamIndex() StreamIndex { | |||||
| return f.StreamIndex | return f.StreamIndex | ||||
| } | } | ||||
| type FromPublicStore struct { | |||||
| Hub cortypes.Hub | |||||
| type FromBaseStore struct { | |||||
| UserSpace clitypes.UserSpaceDetail | UserSpace clitypes.UserSpaceDetail | ||||
| Path string | Path string | ||||
| } | } | ||||
| func NewFromPublicStore(hub cortypes.Hub, space clitypes.UserSpaceDetail, path string) *FromPublicStore { | |||||
| return &FromPublicStore{ | |||||
| Hub: hub, | |||||
| func NewFromBaseStore(space clitypes.UserSpaceDetail, path string) *FromBaseStore { | |||||
| return &FromBaseStore{ | |||||
| UserSpace: space, | UserSpace: space, | ||||
| Path: path, | Path: path, | ||||
| } | } | ||||
| } | } | ||||
| func (f *FromPublicStore) GetStreamIndex() StreamIndex { | |||||
| func (f *FromBaseStore) GetStreamIndex() StreamIndex { | |||||
| return StreamIndex{ | return StreamIndex{ | ||||
| Type: StreamIndexRaw, | Type: StreamIndexRaw, | ||||
| } | } | ||||
| @@ -181,25 +176,22 @@ func (t *ToDriver) GetRange() math2.Range { | |||||
| } | } | ||||
| type ToShardStore struct { | type ToShardStore struct { | ||||
| Hub cortypes.Hub | |||||
| Space clitypes.UserSpaceDetail | Space clitypes.UserSpaceDetail | ||||
| StreamIndex StreamIndex | StreamIndex StreamIndex | ||||
| Range math2.Range | Range math2.Range | ||||
| FileHashStoreKey string | FileHashStoreKey string | ||||
| } | } | ||||
| func NewToShardStore(hub cortypes.Hub, space clitypes.UserSpaceDetail, strIdx StreamIndex, fileHashStoreKey string) *ToShardStore { | |||||
| func NewToShardStore(space clitypes.UserSpaceDetail, strIdx StreamIndex, fileHashStoreKey string) *ToShardStore { | |||||
| return &ToShardStore{ | return &ToShardStore{ | ||||
| Hub: hub, | |||||
| Space: space, | Space: space, | ||||
| StreamIndex: strIdx, | StreamIndex: strIdx, | ||||
| FileHashStoreKey: fileHashStoreKey, | FileHashStoreKey: fileHashStoreKey, | ||||
| } | } | ||||
| } | } | ||||
| func NewToShardStoreWithRange(hub cortypes.Hub, space clitypes.UserSpaceDetail, streamIndex StreamIndex, fileHashStoreKey string, rng math2.Range) *ToShardStore { | |||||
| func NewToShardStoreWithRange(space clitypes.UserSpaceDetail, streamIndex StreamIndex, fileHashStoreKey string, rng math2.Range) *ToShardStore { | |||||
| return &ToShardStore{ | return &ToShardStore{ | ||||
| Hub: hub, | |||||
| Space: space, | Space: space, | ||||
| StreamIndex: streamIndex, | StreamIndex: streamIndex, | ||||
| FileHashStoreKey: fileHashStoreKey, | FileHashStoreKey: fileHashStoreKey, | ||||
| @@ -215,26 +207,24 @@ func (t *ToShardStore) GetRange() math2.Range { | |||||
| return t.Range | return t.Range | ||||
| } | } | ||||
| type ToPublicStore struct { | |||||
| Hub cortypes.Hub | |||||
| type ToBaseStore struct { | |||||
| Space clitypes.UserSpaceDetail | Space clitypes.UserSpaceDetail | ||||
| ObjectPath string | ObjectPath string | ||||
| } | } | ||||
| func NewToPublicStore(hub cortypes.Hub, space clitypes.UserSpaceDetail, objectPath string) *ToPublicStore { | |||||
| return &ToPublicStore{ | |||||
| Hub: hub, | |||||
| func NewToBaseStore(space clitypes.UserSpaceDetail, objectPath string) *ToBaseStore { | |||||
| return &ToBaseStore{ | |||||
| Space: space, | Space: space, | ||||
| ObjectPath: objectPath, | ObjectPath: objectPath, | ||||
| } | } | ||||
| } | } | ||||
| func (t *ToPublicStore) GetStreamIndex() StreamIndex { | |||||
| func (t *ToBaseStore) GetStreamIndex() StreamIndex { | |||||
| return StreamIndex{ | return StreamIndex{ | ||||
| Type: StreamIndexRaw, | Type: StreamIndexRaw, | ||||
| } | } | ||||
| } | } | ||||
| func (t *ToPublicStore) GetRange() math2.Range { | |||||
| func (t *ToBaseStore) GetRange() math2.Range { | |||||
| return math2.Range{} | return math2.Range{} | ||||
| } | } | ||||
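The practical effect of dropping the Hub fields is that a FromTo plan is described purely in terms of user spaces; the parser later resolves the executing hub from each space's RecommendHub. A rough before/after sketch of call sites under the new constructor signatures (fileHash, srcSpace, dstSpace and the object path are illustrative):

    ft := ioswitch2.FromTo{}
    // before: ft.AddFrom(ioswitch2.NewFromShardstore(fileHash, *srcSpace.MasterHub, *srcSpace, ioswitch2.RawStream()))
    ft.AddFrom(ioswitch2.NewFromShardstore(fileHash, *srcSpace, ioswitch2.RawStream()))
    // before: ft.AddTo(ioswitch2.NewToPublicStore(*dstSpace.MasterHub, *dstSpace, "backup/object.bin"))
    ft.AddTo(ioswitch2.NewToBaseStore(*dstSpace, "backup/object.bin"))

This keeps hub selection a parser concern (see the buildFromNode/buildToNode changes further down) instead of something every caller has to thread through.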
| @@ -12,13 +12,13 @@ import ( | |||||
| func init() { | func init() { | ||||
| exec.UseOp[*BypassToShardStore]() | exec.UseOp[*BypassToShardStore]() | ||||
| exec.UseOp[*BypassToPublicStore]() | |||||
| exec.UseOp[*BypassToBaseStore]() | |||||
| exec.UseVarValue[*BypassedFileInfoValue]() | exec.UseVarValue[*BypassedFileInfoValue]() | ||||
| exec.UseVarValue[*BypassHandleResultValue]() | exec.UseVarValue[*BypassHandleResultValue]() | ||||
| exec.UseOp[*BypassFromShardStore]() | exec.UseOp[*BypassFromShardStore]() | ||||
| exec.UseOp[*BypassFromPublicStore]() | |||||
| exec.UseOp[*BypassFromBaseStore]() | |||||
| exec.UseVarValue[*BypassFilePathValue]() | exec.UseVarValue[*BypassFilePathValue]() | ||||
| exec.UseOp[*BypassFromShardStoreHTTP]() | exec.UseOp[*BypassFromShardStoreHTTP]() | ||||
| @@ -79,7 +79,7 @@ func (o *BypassToShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor) er | |||||
| } | } | ||||
| e.PutVar(o.BypassCallback, &BypassHandleResultValue{Commited: true}) | e.PutVar(o.BypassCallback, &BypassHandleResultValue{Commited: true}) | ||||
| e.PutVar(o.FileInfo, &ShardInfoValue{Hash: fileInfo.Hash, Size: fileInfo.Size}) | |||||
| e.PutVar(o.FileInfo, &FileInfoValue{Hash: fileInfo.Hash, Size: fileInfo.Size}) | |||||
| return nil | return nil | ||||
| } | } | ||||
| @@ -87,27 +87,27 @@ func (o *BypassToShardStore) String() string { | |||||
| return fmt.Sprintf("BypassToShardStore[UserSpace:%v] Info: %v, Callback: %v", o.UserSpace, o.BypassFileInfo, o.BypassCallback) | return fmt.Sprintf("BypassToShardStore[UserSpace:%v] Info: %v, Callback: %v", o.UserSpace, o.BypassFileInfo, o.BypassCallback) | ||||
| } | } | ||||
| type BypassToPublicStore struct { | |||||
| type BypassToBaseStore struct { | |||||
| UserSpace clitypes.UserSpaceDetail | UserSpace clitypes.UserSpaceDetail | ||||
| BypassFileInfo exec.VarID | BypassFileInfo exec.VarID | ||||
| BypassCallback exec.VarID | BypassCallback exec.VarID | ||||
| DestPath string | DestPath string | ||||
| } | } | ||||
| func (o *BypassToPublicStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| func (o *BypassToBaseStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| stgPool, err := exec.GetValueByType[*pool.Pool](ctx) | stgPool, err := exec.GetValueByType[*pool.Pool](ctx) | ||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| store, err := stgPool.GetPublicStore(&o.UserSpace) | |||||
| store, err := stgPool.GetBaseStore(&o.UserSpace) | |||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| br, ok := store.(types.BypassPublicWrite) | br, ok := store.(types.BypassPublicWrite) | ||||
| if !ok { | if !ok { | ||||
| return fmt.Errorf("public store %v not support bypass write", o.UserSpace) | |||||
| return fmt.Errorf("base store %v not support bypass write", o.UserSpace) | |||||
| } | } | ||||
| fileInfo, err := exec.BindVar[*BypassedFileInfoValue](e, ctx.Context, o.BypassFileInfo) | fileInfo, err := exec.BindVar[*BypassedFileInfoValue](e, ctx.Context, o.BypassFileInfo) | ||||
| @@ -124,8 +124,8 @@ func (o *BypassToPublicStore) Execute(ctx *exec.ExecContext, e *exec.Executor) e | |||||
| return nil | return nil | ||||
| } | } | ||||
| func (o *BypassToPublicStore) String() string { | |||||
| return fmt.Sprintf("BypassToPublicStore[UserSpace:%v] Info: %v, Callback: %v", o.UserSpace, o.BypassFileInfo, o.BypassCallback) | |||||
| func (o *BypassToBaseStore) String() string { | |||||
| return fmt.Sprintf("BypassToBaseStore[UserSpace:%v] Info: %v, Callback: %v", o.UserSpace, o.BypassFileInfo, o.BypassCallback) | |||||
| } | } | ||||
| type BypassFilePathValue struct { | type BypassFilePathValue struct { | ||||
| @@ -173,26 +173,26 @@ func (o *BypassFromShardStore) String() string { | |||||
| return fmt.Sprintf("BypassFromShardStore[UserSpace:%v] FileHash: %v, Output: %v", o.UserSpace, o.FileHash, o.Output) | return fmt.Sprintf("BypassFromShardStore[UserSpace:%v] FileHash: %v, Output: %v", o.UserSpace, o.FileHash, o.Output) | ||||
| } | } | ||||
| type BypassFromPublicStore struct { | |||||
| type BypassFromBaseStore struct { | |||||
| UserSpace clitypes.UserSpaceDetail | UserSpace clitypes.UserSpaceDetail | ||||
| Path string | Path string | ||||
| Output exec.VarID | Output exec.VarID | ||||
| } | } | ||||
| func (o *BypassFromPublicStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| func (o *BypassFromBaseStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| stgPool, err := exec.GetValueByType[*pool.Pool](ctx) | stgPool, err := exec.GetValueByType[*pool.Pool](ctx) | ||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| store, err := stgPool.GetPublicStore(&o.UserSpace) | |||||
| store, err := stgPool.GetBaseStore(&o.UserSpace) | |||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| br, ok := store.(types.BypassPublicRead) | br, ok := store.(types.BypassPublicRead) | ||||
| if !ok { | if !ok { | ||||
| return fmt.Errorf("public store %v not support bypass read", o.UserSpace) | |||||
| return fmt.Errorf("base store %v not support bypass read", o.UserSpace) | |||||
| } | } | ||||
| path, err := br.BypassPublicRead(o.Path) | path, err := br.BypassPublicRead(o.Path) | ||||
| @@ -204,8 +204,8 @@ func (o *BypassFromPublicStore) Execute(ctx *exec.ExecContext, e *exec.Executor) | |||||
| return nil | return nil | ||||
| } | } | ||||
| func (o *BypassFromPublicStore) String() string { | |||||
| return fmt.Sprintf("BypassFromPublicStore[UserSpace:%v] Path: %v, Output: %v", o.UserSpace, o.Path, o.Output) | |||||
| func (o *BypassFromBaseStore) String() string { | |||||
| return fmt.Sprintf("BypassFromBaseStore[UserSpace:%v] Path: %v, Output: %v", o.UserSpace, o.Path, o.Output) | |||||
| } | } | ||||
| // 旁路Http读取 | // 旁路Http读取 | ||||
| @@ -303,14 +303,14 @@ func (t *BypassToShardStoreNode) GenerateOp() (exec.Op, error) { | |||||
| }, nil | }, nil | ||||
| } | } | ||||
| type BypassToPublicStoreNode struct { | |||||
| type BypassToBaseStoreNode struct { | |||||
| dag.NodeBase | dag.NodeBase | ||||
| UserSpace clitypes.UserSpaceDetail | UserSpace clitypes.UserSpaceDetail | ||||
| DestPath string | DestPath string | ||||
| } | } | ||||
| func (b *GraphNodeBuilder) NewBypassToPublicStore(userSpace clitypes.UserSpaceDetail, dstPath string) *BypassToPublicStoreNode { | |||||
| node := &BypassToPublicStoreNode{ | |||||
| func (b *GraphNodeBuilder) NewBypassToBaseStore(userSpace clitypes.UserSpaceDetail, dstPath string) *BypassToBaseStoreNode { | |||||
| node := &BypassToBaseStoreNode{ | |||||
| UserSpace: userSpace, | UserSpace: userSpace, | ||||
| DestPath: dstPath, | DestPath: dstPath, | ||||
| } | } | ||||
| @@ -321,22 +321,22 @@ func (b *GraphNodeBuilder) NewBypassToPublicStore(userSpace clitypes.UserSpaceDe | |||||
| return node | return node | ||||
| } | } | ||||
| func (n *BypassToPublicStoreNode) BypassFileInfoSlot() dag.ValueInputSlot { | |||||
| func (n *BypassToBaseStoreNode) BypassFileInfoSlot() dag.ValueInputSlot { | |||||
| return dag.ValueInputSlot{ | return dag.ValueInputSlot{ | ||||
| Node: n, | Node: n, | ||||
| Index: 0, | Index: 0, | ||||
| } | } | ||||
| } | } | ||||
| func (n *BypassToPublicStoreNode) BypassCallbackVar() dag.ValueOutputSlot { | |||||
| func (n *BypassToBaseStoreNode) BypassCallbackVar() dag.ValueOutputSlot { | |||||
| return dag.ValueOutputSlot{ | return dag.ValueOutputSlot{ | ||||
| Node: n, | Node: n, | ||||
| Index: 0, | Index: 0, | ||||
| } | } | ||||
| } | } | ||||
| func (t *BypassToPublicStoreNode) GenerateOp() (exec.Op, error) { | |||||
| return &BypassToPublicStore{ | |||||
| func (t *BypassToBaseStoreNode) GenerateOp() (exec.Op, error) { | |||||
| return &BypassToBaseStore{ | |||||
| UserSpace: t.UserSpace, | UserSpace: t.UserSpace, | ||||
| BypassFileInfo: t.BypassFileInfoSlot().Var().VarID, | BypassFileInfo: t.BypassFileInfoSlot().Var().VarID, | ||||
| BypassCallback: t.BypassCallbackVar().Var().VarID, | BypassCallback: t.BypassCallbackVar().Var().VarID, | ||||
| @@ -377,14 +377,14 @@ func (n *BypassFromShardStoreNode) GenerateOp() (exec.Op, error) { | |||||
| }, nil | }, nil | ||||
| } | } | ||||
| type BypassFromPublicStoreNode struct { | |||||
| type BypassFromBaseStoreNode struct { | |||||
| dag.NodeBase | dag.NodeBase | ||||
| UserSpace clitypes.UserSpaceDetail | UserSpace clitypes.UserSpaceDetail | ||||
| Path string | Path string | ||||
| } | } | ||||
| func (b *GraphNodeBuilder) NewBypassFromPublicStore(userSpace clitypes.UserSpaceDetail, path string) *BypassFromPublicStoreNode { | |||||
| node := &BypassFromPublicStoreNode{ | |||||
| func (b *GraphNodeBuilder) NewBypassFromBaseStore(userSpace clitypes.UserSpaceDetail, path string) *BypassFromBaseStoreNode { | |||||
| node := &BypassFromBaseStoreNode{ | |||||
| UserSpace: userSpace, | UserSpace: userSpace, | ||||
| Path: path, | Path: path, | ||||
| } | } | ||||
| @@ -394,15 +394,15 @@ func (b *GraphNodeBuilder) NewBypassFromPublicStore(userSpace clitypes.UserSpace | |||||
| return node | return node | ||||
| } | } | ||||
| func (n *BypassFromPublicStoreNode) FilePathVar() dag.ValueOutputSlot { | |||||
| func (n *BypassFromBaseStoreNode) FilePathVar() dag.ValueOutputSlot { | |||||
| return dag.ValueOutputSlot{ | return dag.ValueOutputSlot{ | ||||
| Node: n, | Node: n, | ||||
| Index: 0, | Index: 0, | ||||
| } | } | ||||
| } | } | ||||
| func (n *BypassFromPublicStoreNode) GenerateOp() (exec.Op, error) { | |||||
| return &BypassFromPublicStore{ | |||||
| func (n *BypassFromBaseStoreNode) GenerateOp() (exec.Op, error) { | |||||
| return &BypassFromBaseStore{ | |||||
| UserSpace: n.UserSpace, | UserSpace: n.UserSpace, | ||||
| Path: n.Path, | Path: n.Path, | ||||
| Output: n.FilePathVar().Var().VarID, | Output: n.FilePathVar().Var().VarID, | ||||
| @@ -15,37 +15,47 @@ import ( | |||||
| ) | ) | ||||
| func init() { | func init() { | ||||
| exec.UseOp[*PublicWrite]() | |||||
| exec.UseOp[*PublicRead]() | |||||
| exec.UseOp[*BaseWrite]() | |||||
| exec.UseOp[*BaseRead]() | |||||
| exec.UseVarValue[*FileInfoValue]() | |||||
| } | } | ||||
| type PublicRead struct { | |||||
| Output exec.VarID | |||||
| UserSpace clitypes.UserSpaceDetail | |||||
| ObjectPath string | |||||
| type FileInfoValue struct { | |||||
| Hash clitypes.FileHash `json:"hash"` | |||||
| Size int64 `json:"size"` | |||||
| } | } | ||||
| func (o *PublicRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| func (v *FileInfoValue) Clone() exec.VarValue { | |||||
| return &FileInfoValue{Hash: v.Hash, Size: v.Size} | |||||
| } | |||||
| type BaseRead struct { | |||||
| Output exec.VarID | |||||
| UserSpace clitypes.UserSpaceDetail | |||||
| Path string | |||||
| } | |||||
| func (o *BaseRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| logger. | logger. | ||||
| WithField("Output", o.Output). | WithField("Output", o.Output). | ||||
| WithField("UserSpace", o.UserSpace). | WithField("UserSpace", o.UserSpace). | ||||
| WithField("ObjectPath", o.ObjectPath). | |||||
| Debug("public read") | |||||
| defer logger.Debug("public read end") | |||||
| WithField("Path", o.Path). | |||||
| Debug("base read") | |||||
| defer logger.Debug("base read end") | |||||
| stgPool, err := exec.GetValueByType[*pool.Pool](ctx) | stgPool, err := exec.GetValueByType[*pool.Pool](ctx) | ||||
| if err != nil { | if err != nil { | ||||
| return fmt.Errorf("getting storage pool: %w", err) | return fmt.Errorf("getting storage pool: %w", err) | ||||
| } | } | ||||
| store, err := stgPool.GetPublicStore(&o.UserSpace) | |||||
| store, err := stgPool.GetBaseStore(&o.UserSpace) | |||||
| if err != nil { | if err != nil { | ||||
| return fmt.Errorf("getting public store of storage %v: %w", o.UserSpace, err) | |||||
| return fmt.Errorf("getting base store of storage %v: %w", o.UserSpace, err) | |||||
| } | } | ||||
| stream, err := store.Read(o.ObjectPath) | |||||
| stream, err := store.Read(o.Path) | |||||
| if err != nil { | if err != nil { | ||||
| return fmt.Errorf("reading object %v: %w", o.ObjectPath, err) | |||||
| return fmt.Errorf("reading object %v: %w", o.Path, err) | |||||
| } | } | ||||
| fut := future.NewSetVoid() | fut := future.NewSetVoid() | ||||
| @@ -59,30 +69,31 @@ func (o *PublicRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| return fut.Wait(ctx.Context) | return fut.Wait(ctx.Context) | ||||
| } | } | ||||
| func (o *PublicRead) String() string { | |||||
| return fmt.Sprintf("PublicRead %v:%v -> %v", o.UserSpace, o.ObjectPath, o.Output) | |||||
| func (o *BaseRead) String() string { | |||||
| return fmt.Sprintf("PublicRead %v:%v -> %v", o.UserSpace, o.Path, o.Output) | |||||
| } | } | ||||
| type PublicWrite struct { | |||||
| Input exec.VarID | |||||
| UserSpace clitypes.UserSpaceDetail | |||||
| ObjectPath string | |||||
| type BaseWrite struct { | |||||
| Input exec.VarID | |||||
| UserSpace clitypes.UserSpaceDetail | |||||
| Path string | |||||
| FileInfo exec.VarID | |||||
| } | } | ||||
| func (o *PublicWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| func (o *BaseWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| logger. | logger. | ||||
| WithField("Input", o.Input). | WithField("Input", o.Input). | ||||
| Debugf("write file to public store") | |||||
| defer logger.Debugf("write file to public store finished") | |||||
| Debugf("write file to base store") | |||||
| defer logger.Debugf("write file to base store finished") | |||||
| stgPool, err := exec.GetValueByType[*pool.Pool](ctx) | stgPool, err := exec.GetValueByType[*pool.Pool](ctx) | ||||
| if err != nil { | if err != nil { | ||||
| return fmt.Errorf("getting storage pool: %w", err) | return fmt.Errorf("getting storage pool: %w", err) | ||||
| } | } | ||||
| store, err := stgPool.GetPublicStore(&o.UserSpace) | |||||
| store, err := stgPool.GetBaseStore(&o.UserSpace) | |||||
| if err != nil { | if err != nil { | ||||
| return fmt.Errorf("getting public store of storage %v: %w", o.UserSpace, err) | |||||
| return fmt.Errorf("getting base store of storage %v: %w", o.UserSpace, err) | |||||
| } | } | ||||
| input, err := exec.BindVar[*exec.StreamValue](e, ctx.Context, o.Input) | input, err := exec.BindVar[*exec.StreamValue](e, ctx.Context, o.Input) | ||||
| @@ -91,25 +102,34 @@ func (o *PublicWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| } | } | ||||
| defer input.Stream.Close() | defer input.Stream.Close() | ||||
| return store.Write(o.ObjectPath, input.Stream) | |||||
| info, err := store.Write(o.Path, input.Stream) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| e.PutVar(o.FileInfo, &FileInfoValue{ | |||||
| Hash: info.Hash, | |||||
| Size: info.Size, | |||||
| }) | |||||
| return nil | |||||
| } | } | ||||
| func (o *PublicWrite) String() string { | |||||
| return fmt.Sprintf("PublicWrite %v -> %v:%v", o.Input, o.UserSpace, o.ObjectPath) | |||||
| func (o *BaseWrite) String() string { | |||||
| return fmt.Sprintf("PublicWrite %v -> %v:%v", o.Input, o.UserSpace, o.Path) | |||||
| } | } | ||||
| type PublicReadNode struct { | |||||
| type BaseReadNode struct { | |||||
| dag.NodeBase | dag.NodeBase | ||||
| From ioswitch2.From | |||||
| UserSpace clitypes.UserSpaceDetail | |||||
| ObjectPath string | |||||
| From ioswitch2.From | |||||
| UserSpace clitypes.UserSpaceDetail | |||||
| Path string | |||||
| } | } | ||||
| func (b *GraphNodeBuilder) NewPublicRead(from ioswitch2.From, userSpace clitypes.UserSpaceDetail, objPath string) *PublicReadNode { | |||||
| node := &PublicReadNode{ | |||||
| From: from, | |||||
| UserSpace: userSpace, | |||||
| ObjectPath: objPath, | |||||
| func (b *GraphNodeBuilder) NewPublicRead(from ioswitch2.From, userSpace clitypes.UserSpaceDetail, path string) *BaseReadNode { | |||||
| node := &BaseReadNode{ | |||||
| From: from, | |||||
| UserSpace: userSpace, | |||||
| Path: path, | |||||
| } | } | ||||
| b.AddNode(node) | b.AddNode(node) | ||||
| @@ -117,37 +137,38 @@ func (b *GraphNodeBuilder) NewPublicRead(from ioswitch2.From, userSpace clitypes | |||||
| return node | return node | ||||
| } | } | ||||
| func (t *PublicReadNode) GetFrom() ioswitch2.From { | |||||
| func (t *BaseReadNode) GetFrom() ioswitch2.From { | |||||
| return t.From | return t.From | ||||
| } | } | ||||
| func (t *PublicReadNode) Output() dag.StreamOutputSlot { | |||||
| func (t *BaseReadNode) Output() dag.StreamOutputSlot { | |||||
| return dag.StreamOutputSlot{ | return dag.StreamOutputSlot{ | ||||
| Node: t, | Node: t, | ||||
| Index: 0, | Index: 0, | ||||
| } | } | ||||
| } | } | ||||
| func (t *PublicReadNode) GenerateOp() (exec.Op, error) { | |||||
| return &PublicRead{ | |||||
| Output: t.Output().Var().VarID, | |||||
| UserSpace: t.UserSpace, | |||||
| ObjectPath: t.ObjectPath, | |||||
| func (t *BaseReadNode) GenerateOp() (exec.Op, error) { | |||||
| return &BaseRead{ | |||||
| Output: t.Output().Var().VarID, | |||||
| UserSpace: t.UserSpace, | |||||
| Path: t.Path, | |||||
| }, nil | }, nil | ||||
| } | } | ||||
| type PublicWriteNode struct { | |||||
| type BaseWriteNode struct { | |||||
| dag.NodeBase | dag.NodeBase | ||||
| To ioswitch2.To | |||||
| UserSpace clitypes.UserSpaceDetail | |||||
| ObjectPath string | |||||
| To ioswitch2.To | |||||
| UserSpace clitypes.UserSpaceDetail | |||||
| Path string | |||||
| FileInfoStoreKey string | |||||
| } | } | ||||
| func (b *GraphNodeBuilder) NewPublicWrite(to ioswitch2.To, userSpace clitypes.UserSpaceDetail, objPath string) *PublicWriteNode { | |||||
| node := &PublicWriteNode{ | |||||
| To: to, | |||||
| UserSpace: userSpace, | |||||
| ObjectPath: objPath, | |||||
| func (b *GraphNodeBuilder) NewPublicWrite(to ioswitch2.To, userSpace clitypes.UserSpaceDetail, path string) *BaseWriteNode { | |||||
| node := &BaseWriteNode{ | |||||
| To: to, | |||||
| UserSpace: userSpace, | |||||
| Path: path, | |||||
| } | } | ||||
| b.AddNode(node) | b.AddNode(node) | ||||
| @@ -155,25 +176,30 @@ func (b *GraphNodeBuilder) NewPublicWrite(to ioswitch2.To, userSpace clitypes.Us | |||||
| return node | return node | ||||
| } | } | ||||
| func (t *PublicWriteNode) GetTo() ioswitch2.To { | |||||
| func (t *BaseWriteNode) GetTo() ioswitch2.To { | |||||
| return t.To | return t.To | ||||
| } | } | ||||
| func (t *PublicWriteNode) SetInput(input *dag.StreamVar) { | |||||
| func (t *BaseWriteNode) SetInput(input *dag.StreamVar) { | |||||
| input.To(t, 0) | input.To(t, 0) | ||||
| } | } | ||||
| func (t *PublicWriteNode) Input() dag.StreamInputSlot { | |||||
| func (t *BaseWriteNode) Input() dag.StreamInputSlot { | |||||
| return dag.StreamInputSlot{ | return dag.StreamInputSlot{ | ||||
| Node: t, | Node: t, | ||||
| Index: 0, | Index: 0, | ||||
| } | } | ||||
| } | } | ||||
| func (t *PublicWriteNode) GenerateOp() (exec.Op, error) { | |||||
| return &PublicWrite{ | |||||
| Input: t.InputStreams().Get(0).VarID, | |||||
| UserSpace: t.UserSpace, | |||||
| ObjectPath: t.ObjectPath, | |||||
| func (t *BaseWriteNode) FileInfoVar() *dag.ValueVar { | |||||
| return t.OutputValues().Get(0) | |||||
| } | |||||
| func (t *BaseWriteNode) GenerateOp() (exec.Op, error) { | |||||
| return &BaseWrite{ | |||||
| Input: t.InputStreams().Get(0).VarID, | |||||
| UserSpace: t.UserSpace, | |||||
| Path: t.Path, | |||||
| FileInfo: t.FileInfoVar().VarID, | |||||
| }, nil | }, nil | ||||
| } | } | ||||
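Because BaseWrite now publishes a FileInfoValue rather than returning only an error, base-store writes and shard writes report results through the same value type, and callers such as uploadFromBaseStore can assert *ops2.FileInfoValue without caring which op produced the entry. A small consumption sketch (the ret map stands in for a plan's collected results and is illustrative):

    // Uniform handling of write results keyed by store key / path.
    for key, val := range ret {
        if info, ok := val.(*ops2.FileInfoValue); ok {
            fmt.Printf("%s: hash=%v size=%v\n", key, info.Hash, info.Size)
        }
    }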
| @@ -67,7 +67,7 @@ func (o *S2STransfer) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| } | } | ||||
| func (o *S2STransfer) String() string { | func (o *S2STransfer) String() string { | ||||
| return fmt.Sprintf("S2STransfer %v:%v -> %v:%v, Callback: %v", o.Src.Storage.String(), o.SrcPath, o.Dst.Storage.String(), o.Output, o.BypassCallback) | |||||
| return fmt.Sprintf("S2STransfer %v:%v -> %v:%v, Callback: %v", o.Src.UserSpace.Storage.String(), o.SrcPath, o.Dst.UserSpace.Storage.String(), o.Output, o.BypassCallback) | |||||
| } | } | ||||
| type S2STransferNode struct { | type S2STransferNode struct { | ||||
| @@ -18,16 +18,6 @@ import ( | |||||
| func init() { | func init() { | ||||
| exec.UseOp[*ShardRead]() | exec.UseOp[*ShardRead]() | ||||
| exec.UseOp[*ShardWrite]() | exec.UseOp[*ShardWrite]() | ||||
| exec.UseVarValue[*ShardInfoValue]() | |||||
| } | |||||
| type ShardInfoValue struct { | |||||
| Hash clitypes.FileHash `json:"hash"` | |||||
| Size int64 `json:"size"` | |||||
| } | |||||
| func (v *ShardInfoValue) Clone() exec.VarValue { | |||||
| return &ShardInfoValue{Hash: v.Hash, Size: v.Size} | |||||
| } | } | ||||
| type ShardRead struct { | type ShardRead struct { | ||||
| @@ -105,7 +95,7 @@ func (o *ShardWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error { | |||||
| return fmt.Errorf("writing file to shard store: %w", err) | return fmt.Errorf("writing file to shard store: %w", err) | ||||
| } | } | ||||
| e.PutVar(o.FileHashVar, &ShardInfoValue{ | |||||
| e.PutVar(o.FileHashVar, &FileInfoValue{ | |||||
| Hash: fileInfo.Hash, | Hash: fileInfo.Hash, | ||||
| Size: fileInfo.Size, | Size: fileInfo.Size, | ||||
| }) | }) | ||||
| @@ -286,13 +286,13 @@ func buildFromNode(ctx *state.GenerateState, f ioswitch2.From) (ops2.FromNode, e | |||||
| t.Open.WithNullableLength(openOff, &openLen) | t.Open.WithNullableLength(openOff, &openLen) | ||||
| } | } | ||||
| switch addr := f.Hub.Address.(type) { | |||||
| switch addr := f.UserSpace.RecommendHub.Address.(type) { | |||||
| case *cortypes.HttpAddressInfo: | case *cortypes.HttpAddressInfo: | ||||
| t.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: f.Hub}) | |||||
| t.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: f.UserSpace.RecommendHub}) | |||||
| t.Env().Pinned = true | t.Env().Pinned = true | ||||
| case *cortypes.GRPCAddressInfo: | case *cortypes.GRPCAddressInfo: | ||||
| t.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: f.Hub, Address: *addr}) | |||||
| t.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: f.UserSpace.RecommendHub, Address: *addr}) | |||||
| t.Env().Pinned = true | t.Env().Pinned = true | ||||
| default: | default: | ||||
| @@ -336,16 +336,16 @@ func buildFromNode(ctx *state.GenerateState, f ioswitch2.From) (ops2.FromNode, e | |||||
| return n, nil | return n, nil | ||||
| case *ioswitch2.FromPublicStore: | |||||
| case *ioswitch2.FromBaseStore: | |||||
| // TODO Consider supporting a configurable read range | // TODO Consider supporting a configurable read range | ||||
| n := ctx.DAG.NewPublicRead(f, f.UserSpace, f.Path) | n := ctx.DAG.NewPublicRead(f, f.UserSpace, f.Path) | ||||
| switch addr := f.Hub.Address.(type) { | |||||
| switch addr := f.UserSpace.RecommendHub.Address.(type) { | |||||
| case *cortypes.HttpAddressInfo: | case *cortypes.HttpAddressInfo: | ||||
| n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: f.Hub}) | |||||
| n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: f.UserSpace.RecommendHub}) | |||||
| n.Env().Pinned = true | n.Env().Pinned = true | ||||
| case *cortypes.GRPCAddressInfo: | case *cortypes.GRPCAddressInfo: | ||||
| n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: f.Hub, Address: *addr}) | |||||
| n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: f.UserSpace.RecommendHub, Address: *addr}) | |||||
| n.Env().Pinned = true | n.Env().Pinned = true | ||||
| default: | default: | ||||
| @@ -364,7 +364,7 @@ func buildToNode(ctx *state.GenerateState, t ioswitch2.To) (ops2.ToNode, error) | |||||
| case *ioswitch2.ToShardStore: | case *ioswitch2.ToShardStore: | ||||
| n := ctx.DAG.NewShardWrite(t, t.Space, t.FileHashStoreKey) | n := ctx.DAG.NewShardWrite(t, t.Space, t.FileHashStoreKey) | ||||
| if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil { | |||||
| if err := setEnvByAddress(n, t.Space.RecommendHub, t.Space.RecommendHub.Address); err != nil { | |||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| @@ -379,10 +379,10 @@ func buildToNode(ctx *state.GenerateState, t ioswitch2.To) (ops2.ToNode, error) | |||||
| return n, nil | return n, nil | ||||
| case *ioswitch2.ToPublicStore: | |||||
| case *ioswitch2.ToBaseStore: | |||||
| n := ctx.DAG.NewPublicWrite(t, t.Space, t.ObjectPath) | n := ctx.DAG.NewPublicWrite(t, t.Space, t.ObjectPath) | ||||
| if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil { | |||||
| if err := setEnvByAddress(n, t.Space.RecommendHub, t.Space.RecommendHub.Address); err != nil { | |||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| @@ -69,7 +69,7 @@ func UseECMultiplier(ctx *state.GenerateState) { | |||||
| if to == nil { | if to == nil { | ||||
| to = swNode.To | to = swNode.To | ||||
| } else if to.Space.UserSpace.StorageID != swNode.UserSpace.UserSpace.StorageID { | |||||
| } else if !to.Space.UserSpace.Storage.Equals(swNode.UserSpace.UserSpace.Storage) { | |||||
| return true | return true | ||||
| } | } | ||||
| swNodes = append(swNodes, swNode) | swNodes = append(swNodes, swNode) | ||||
| @@ -97,13 +97,13 @@ func UseECMultiplier(ctx *state.GenerateState) { | |||||
| // After the checks pass, replace the ECMultiply instruction | // After the checks pass, replace the ECMultiply instruction | ||||
| callMul := ctx.DAG.NewCallECMultiplier(to.Space) | callMul := ctx.DAG.NewCallECMultiplier(to.Space) | ||||
| switch addr := to.Hub.Address.(type) { | |||||
| switch addr := to.Space.RecommendHub.Address.(type) { | |||||
| case *cortypes.HttpAddressInfo: | case *cortypes.HttpAddressInfo: | ||||
| callMul.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: to.Hub}) | |||||
| callMul.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: to.Space.RecommendHub}) | |||||
| callMul.Env().Pinned = true | callMul.Env().Pinned = true | ||||
| case *cortypes.GRPCAddressInfo: | case *cortypes.GRPCAddressInfo: | ||||
| callMul.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: to.Hub, Address: *addr}) | |||||
| callMul.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: to.Space.RecommendHub, Address: *addr}) | |||||
| callMul.Env().Pinned = true | callMul.Env().Pinned = true | ||||
| default: | default: | ||||
| @@ -19,8 +19,8 @@ func UseS2STransfer(ctx *state.GenerateState) { | |||||
| switch fr := fr.(type) { | switch fr := fr.(type) { | ||||
| case *ioswitch2.FromShardstore: | case *ioswitch2.FromShardstore: | ||||
| s2sFromShardStore(ctx, fr, frNode) | s2sFromShardStore(ctx, fr, frNode) | ||||
| case *ioswitch2.FromPublicStore: | |||||
| s2sFromPublicStore(ctx, fr, frNode) | |||||
| case *ioswitch2.FromBaseStore: | |||||
| s2sFromBaseStore(ctx, fr, frNode) | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| @@ -44,7 +44,7 @@ func s2sFromShardStore(ctx *state.GenerateState, fromShard *ioswitch2.FromShards | |||||
| failed := false | failed := false | ||||
| var toShards []*ops2.ShardWriteNode | var toShards []*ops2.ShardWriteNode | ||||
| var toPublics []*ops2.PublicWriteNode | |||||
| var toPublics []*ops2.BaseWriteNode | |||||
| loop: | loop: | ||||
| for i := 0; i < outVar.Dst.Len(); i++ { | for i := 0; i < outVar.Dst.Len(); i++ { | ||||
| @@ -65,7 +65,7 @@ loop: | |||||
| toShards = append(toShards, dstNode) | toShards = append(toShards, dstNode) | ||||
| case *ops2.PublicWriteNode: | |||||
| case *ops2.BaseWriteNode: | |||||
| dstStgBld := factory.GetBuilder(&dstNode.UserSpace) | dstStgBld := factory.GetBuilder(&dstNode.UserSpace) | ||||
| if !dstStgBld.FeatureDesc().HasBypassPublicWrite { | if !dstStgBld.FeatureDesc().HasBypassPublicWrite { | ||||
| failed = true | failed = true | ||||
| @@ -112,7 +112,7 @@ loop: | |||||
| for _, toPub := range toPublics { | for _, toPub := range toPublics { | ||||
| s2sNode := ctx.DAG.NewS2STransfer(fromShard.UserSpace, toPub.UserSpace, types.S2SOption{ | s2sNode := ctx.DAG.NewS2STransfer(fromShard.UserSpace, toPub.UserSpace, types.S2SOption{ | ||||
| DestPathHint: toPub.ObjectPath, | |||||
| DestPathHint: toPub.Path, | |||||
| }) | }) | ||||
| // The direct-transfer instruction executes on the destination hub | // The direct-transfer instruction executes on the destination hub | ||||
| s2sNode.Env().CopyFrom(toPub.Env()) | s2sNode.Env().CopyFrom(toPub.Env()) | ||||
| @@ -123,7 +123,7 @@ loop: | |||||
| brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot()) | brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot()) | ||||
| // Notify the destination node of the transfer result | // Notify the destination node of the transfer result | ||||
| bwNode := ctx.DAG.NewBypassToPublicStore(toPub.UserSpace, toPub.ObjectPath) | |||||
| bwNode := ctx.DAG.NewBypassToBaseStore(toPub.UserSpace, toPub.Path) | |||||
| bwNode.Env().CopyFrom(toPub.Env()) | bwNode.Env().CopyFrom(toPub.Env()) | ||||
| s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot()) | s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot()) | ||||
| @@ -139,7 +139,7 @@ loop: | |||||
| delete(ctx.FromNodes, frNode.GetFrom()) | delete(ctx.FromNodes, frNode.GetFrom()) | ||||
| } | } | ||||
| func s2sFromPublicStore(ctx *state.GenerateState, fromPub *ioswitch2.FromPublicStore, frNode ops2.FromNode) { | |||||
| func s2sFromBaseStore(ctx *state.GenerateState, fromPub *ioswitch2.FromBaseStore, frNode ops2.FromNode) { | |||||
| fromStgBld := factory.GetBuilder(&fromPub.UserSpace) | fromStgBld := factory.GetBuilder(&fromPub.UserSpace) | ||||
| if !fromStgBld.FeatureDesc().HasBypassPublicRead { | if !fromStgBld.FeatureDesc().HasBypassPublicRead { | ||||
| return | return | ||||
| @@ -157,14 +157,14 @@ func s2sFromPublicStore(ctx *state.GenerateState, fromPub *ioswitch2.FromPublicS | |||||
| } | } | ||||
| failed := false | failed := false | ||||
| var toPublics []*ops2.PublicWriteNode | |||||
| var toPublics []*ops2.BaseWriteNode | |||||
| loop: | loop: | ||||
| for i := 0; i < outVar.Dst.Len(); i++ { | for i := 0; i < outVar.Dst.Len(); i++ { | ||||
| dstNode := outVar.Dst.Get(i) | dstNode := outVar.Dst.Get(i) | ||||
| switch dstNode := dstNode.(type) { | switch dstNode := dstNode.(type) { | ||||
| case *ops2.PublicWriteNode: | |||||
| case *ops2.BaseWriteNode: | |||||
| dstStgBld := factory.GetBuilder(&dstNode.UserSpace) | dstStgBld := factory.GetBuilder(&dstNode.UserSpace) | ||||
| if !dstStgBld.FeatureDesc().HasBypassPublicWrite { | if !dstStgBld.FeatureDesc().HasBypassPublicWrite { | ||||
| failed = true | failed = true | ||||
| @@ -189,18 +189,18 @@ loop: | |||||
| for _, toPub := range toPublics { | for _, toPub := range toPublics { | ||||
| s2sNode := ctx.DAG.NewS2STransfer(fromPub.UserSpace, toPub.UserSpace, types.S2SOption{ | s2sNode := ctx.DAG.NewS2STransfer(fromPub.UserSpace, toPub.UserSpace, types.S2SOption{ | ||||
| DestPathHint: toPub.ObjectPath, | |||||
| DestPathHint: toPub.Path, | |||||
| }) | }) | ||||
| // The direct-transfer instruction executes on the destination hub | // The direct-transfer instruction executes on the destination hub | ||||
| s2sNode.Env().CopyFrom(toPub.Env()) | s2sNode.Env().CopyFrom(toPub.Env()) | ||||
| // Fetch the file path first and feed it to the S2S node | // Fetch the file path first and feed it to the S2S node | ||||
| brNode := ctx.DAG.NewBypassFromPublicStore(fromPub.UserSpace, fromPub.Path) | |||||
| brNode := ctx.DAG.NewBypassFromBaseStore(fromPub.UserSpace, fromPub.Path) | |||||
| brNode.Env().CopyFrom(toPub.Env()) | brNode.Env().CopyFrom(toPub.Env()) | ||||
| brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot()) | brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot()) | ||||
| // Notify the destination node of the transfer result | // Notify the destination node of the transfer result | ||||
| bwNode := ctx.DAG.NewBypassToPublicStore(toPub.UserSpace, toPub.ObjectPath) | |||||
| bwNode := ctx.DAG.NewBypassToBaseStore(toPub.UserSpace, toPub.Path) | |||||
| bwNode.Env().CopyFrom(toPub.Env()) | bwNode.Env().CopyFrom(toPub.Env()) | ||||
| s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot()) | s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot()) | ||||
| @@ -16,12 +16,12 @@ func CompleteMultipart(blocks []clitypes.ObjectBlock, blockSpaces []clitypes.Use | |||||
| sizes[i] = blk.Size | sizes[i] = blk.Size | ||||
| } | } | ||||
| joinNode := da.NewSegmentJoin(sizes) | joinNode := da.NewSegmentJoin(sizes) | ||||
| joinNode.Env().ToEnvWorker(getWorkerInfo(*targetSpace.MasterHub)) | |||||
| joinNode.Env().ToEnvWorker(getWorkerInfo(targetSpace.RecommendHub)) | |||||
| joinNode.Env().Pinned = true | joinNode.Env().Pinned = true | ||||
| for i, blk := range blocks { | for i, blk := range blocks { | ||||
| rd := da.NewShardRead(nil, blockSpaces[i], types.NewOpen(blk.FileHash)) | rd := da.NewShardRead(nil, blockSpaces[i], types.NewOpen(blk.FileHash)) | ||||
| rd.Env().ToEnvWorker(getWorkerInfo(*blockSpaces[i].MasterHub)) | |||||
| rd.Env().ToEnvWorker(getWorkerInfo(blockSpaces[i].RecommendHub)) | |||||
| rd.Env().Pinned = true | rd.Env().Pinned = true | ||||
| rd.Output().ToSlot(joinNode.InputSlot(i)) | rd.Output().ToSlot(joinNode.InputSlot(i)) | ||||
| @@ -29,7 +29,7 @@ func CompleteMultipart(blocks []clitypes.ObjectBlock, blockSpaces []clitypes.Use | |||||
| // TODO A more reasonable approach is needed to support both the Parser and direct DAG generation | // TODO A more reasonable approach is needed to support both the Parser and direct DAG generation | ||||
| wr := da.NewShardWrite(nil, targetSpace, shardInfoKey) | wr := da.NewShardWrite(nil, targetSpace, shardInfoKey) | ||||
| wr.Env().ToEnvWorker(getWorkerInfo(*targetSpace.MasterHub)) | |||||
| wr.Env().ToEnvWorker(getWorkerInfo(targetSpace.RecommendHub)) | |||||
| wr.Env().Pinned = true | wr.Env().Pinned = true | ||||
| joinNode.Joined().ToSlot(wr.Input()) | joinNode.Joined().ToSlot(wr.Input()) | ||||
| @@ -4,7 +4,6 @@ import ( | |||||
| "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" | "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" | ||||
| "gitlink.org.cn/cloudream/common/utils/math2" | "gitlink.org.cn/cloudream/common/utils/math2" | ||||
| clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | ||||
| cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" | |||||
| ) | ) | ||||
| type From interface { | type From interface { | ||||
| @@ -40,15 +39,13 @@ func (f *FromDriver) GetDataIndex() int { | |||||
| type FromNode struct { | type FromNode struct { | ||||
| FileHash clitypes.FileHash | FileHash clitypes.FileHash | ||||
| Hub cortypes.Hub | |||||
| Space clitypes.UserSpaceDetail | Space clitypes.UserSpaceDetail | ||||
| DataIndex int | DataIndex int | ||||
| } | } | ||||
| func NewFromStorage(fileHash clitypes.FileHash, hub cortypes.Hub, space clitypes.UserSpaceDetail, dataIndex int) *FromNode { | |||||
| func NewFromStorage(fileHash clitypes.FileHash, space clitypes.UserSpaceDetail, dataIndex int) *FromNode { | |||||
| return &FromNode{ | return &FromNode{ | ||||
| FileHash: fileHash, | FileHash: fileHash, | ||||
| Hub: hub, | |||||
| DataIndex: dataIndex, | DataIndex: dataIndex, | ||||
| Space: space, | Space: space, | ||||
| } | } | ||||
| @@ -90,25 +87,22 @@ func (t *ToDriver) GetRange() math2.Range { | |||||
| } | } | ||||
| type ToNode struct { | type ToNode struct { | ||||
| Hub cortypes.Hub | |||||
| Space clitypes.UserSpaceDetail | Space clitypes.UserSpaceDetail | ||||
| DataIndex int | DataIndex int | ||||
| Range math2.Range | Range math2.Range | ||||
| FileHashStoreKey string | FileHashStoreKey string | ||||
| } | } | ||||
| func NewToStorage(hub cortypes.Hub, space clitypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string) *ToNode { | |||||
| func NewToStorage(space clitypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string) *ToNode { | |||||
| return &ToNode{ | return &ToNode{ | ||||
| Hub: hub, | |||||
| Space: space, | Space: space, | ||||
| DataIndex: dataIndex, | DataIndex: dataIndex, | ||||
| FileHashStoreKey: fileHashStoreKey, | FileHashStoreKey: fileHashStoreKey, | ||||
| } | } | ||||
| } | } | ||||
| func NewToStorageWithRange(hub cortypes.Hub, space clitypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string, rng math2.Range) *ToNode { | |||||
| func NewToStorageWithRange(space clitypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string, rng math2.Range) *ToNode { | |||||
| return &ToNode{ | return &ToNode{ | ||||
| Hub: hub, | |||||
| Space: space, | Space: space, | ||||
| DataIndex: dataIndex, | DataIndex: dataIndex, | ||||
| FileHashStoreKey: fileHashStoreKey, | FileHashStoreKey: fileHashStoreKey, | ||||
| @@ -72,7 +72,7 @@ func buildFromNode(ctx *GenerateContext, f ioswitchlrc.From) (ops2.FromNode, err | |||||
| } | } | ||||
| // TODO2 Support the HTTP protocol | // TODO2 Support the HTTP protocol | ||||
| t.Env().ToEnvWorker(&ioswitchlrc.HubWorker{Hub: f.Hub, Address: *f.Hub.Address.(*cortypes.GRPCAddressInfo)}) | |||||
| t.Env().ToEnvWorker(&ioswitchlrc.HubWorker{Hub: f.Space.RecommendHub, Address: *f.Space.RecommendHub.Address.(*cortypes.GRPCAddressInfo)}) | |||||
| t.Env().Pinned = true | t.Env().Pinned = true | ||||
| return t, nil | return t, nil | ||||
| @@ -101,12 +101,12 @@ func buildToNode(ctx *GenerateContext, t ioswitchlrc.To) (ops2.ToNode, error) { | |||||
| switch t := t.(type) { | switch t := t.(type) { | ||||
| case *ioswitchlrc.ToNode: | case *ioswitchlrc.ToNode: | ||||
| n := ctx.DAG.NewShardWrite(t, t.Space, t.FileHashStoreKey) | n := ctx.DAG.NewShardWrite(t, t.Space, t.FileHashStoreKey) | ||||
| switch addr := t.Hub.Address.(type) { | |||||
| switch addr := t.Space.RecommendHub.Address.(type) { | |||||
| // case *cdssdk.HttpAddressInfo: | // case *cdssdk.HttpAddressInfo: | ||||
| // n.Env().ToEnvWorker(&ioswitchlrc.HttpHubWorker{Node: t.Hub}) | // n.Env().ToEnvWorker(&ioswitchlrc.HttpHubWorker{Node: t.Hub}) | ||||
| // TODO2 Support the HTTP protocol | // TODO2 Support the HTTP protocol | ||||
| case *cortypes.GRPCAddressInfo: | case *cortypes.GRPCAddressInfo: | ||||
| n.Env().ToEnvWorker(&ioswitchlrc.HubWorker{Hub: t.Hub, Address: *addr}) | |||||
| n.Env().ToEnvWorker(&ioswitchlrc.HubWorker{Hub: t.Space.RecommendHub, Address: *addr}) | |||||
| default: | default: | ||||
| return nil, fmt.Errorf("unsupported node address type %T", addr) | return nil, fmt.Errorf("unsupported node address type %T", addr) | ||||
| @@ -27,7 +27,7 @@ var file_pkgs_rpc_coordinator_coordinator_proto_rawDesc = []byte{ | |||||
| 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x63, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, | 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x63, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, | ||||
| 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x6f, 0x72, 0x72, 0x70, 0x63, | 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x6f, 0x72, 0x72, 0x70, 0x63, | ||||
| 0x1a, 0x12, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x70, 0x63, 0x2e, 0x70, | 0x1a, 0x12, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x70, 0x63, 0x2e, 0x70, | ||||
| 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xff, 0x01, 0x0a, 0x0b, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, | |||||
| 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xfe, 0x01, 0x0a, 0x0b, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, | |||||
| 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2b, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x48, 0x75, 0x62, 0x43, 0x6f, | 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2b, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x48, 0x75, 0x62, 0x43, 0x6f, | ||||
| 0x6e, 0x66, 0x69, 0x67, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, | 0x6e, 0x66, 0x69, 0x67, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, | ||||
| 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, | 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, | ||||
| @@ -40,14 +40,14 @@ var file_pkgs_rpc_coordinator_coordinator_proto_rawDesc = []byte{ | |||||
| 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x48, 0x75, 0x62, 0x43, 0x6f, 0x6e, 0x6e, 0x65, | 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x48, 0x75, 0x62, 0x43, 0x6f, 0x6e, 0x6e, 0x65, | ||||
| 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, | 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, | ||||
| 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, | 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, | ||||
| 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, | |||||
| 0x67, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, | |||||
| 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, | |||||
| 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x6c, 0x69, 0x6e, | |||||
| 0x6b, 0x2e, 0x6f, 0x72, 0x67, 0x2e, 0x63, 0x6e, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, | |||||
| 0x61, 0x6d, 0x2f, 0x6a, 0x63, 0x73, 0x2d, 0x70, 0x75, 0x62, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, | |||||
| 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x72, 0x72, 0x70, | |||||
| 0x63, 0x3b, 0x63, 0x6f, 0x72, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, | |||||
| 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x10, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x53, 0x74, | |||||
| 0x6f, 0x72, 0x61, 0x67, 0x65, 0x48, 0x75, 0x62, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, | |||||
| 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, | |||||
| 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x6c, 0x69, 0x6e, 0x6b, | |||||
| 0x2e, 0x6f, 0x72, 0x67, 0x2e, 0x63, 0x6e, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x61, | |||||
| 0x6d, 0x2f, 0x6a, 0x63, 0x73, 0x2d, 0x70, 0x75, 0x62, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, | |||||
| 0x2f, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x72, 0x72, 0x70, 0x63, | |||||
| 0x3b, 0x63, 0x6f, 0x72, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, | |||||
| } | } | ||||
| var file_pkgs_rpc_coordinator_coordinator_proto_goTypes = []any{ | var file_pkgs_rpc_coordinator_coordinator_proto_goTypes = []any{ | ||||
| @@ -59,12 +59,12 @@ var file_pkgs_rpc_coordinator_coordinator_proto_depIdxs = []int32{ | |||||
| 0, // 1: corrpc.Coordinator.GetHubs:input_type -> rpc.Request | 0, // 1: corrpc.Coordinator.GetHubs:input_type -> rpc.Request | ||||
| 0, // 2: corrpc.Coordinator.GetHubConnectivities:input_type -> rpc.Request | 0, // 2: corrpc.Coordinator.GetHubConnectivities:input_type -> rpc.Request | ||||
| 0, // 3: corrpc.Coordinator.ReportHubConnectivity:input_type -> rpc.Request | 0, // 3: corrpc.Coordinator.ReportHubConnectivity:input_type -> rpc.Request | ||||
| 0, // 4: corrpc.Coordinator.GetStorageDetails:input_type -> rpc.Request | |||||
| 0, // 4: corrpc.Coordinator.SelectStorageHub:input_type -> rpc.Request | |||||
| 1, // 5: corrpc.Coordinator.GetHubConfig:output_type -> rpc.Response | 1, // 5: corrpc.Coordinator.GetHubConfig:output_type -> rpc.Response | ||||
| 1, // 6: corrpc.Coordinator.GetHubs:output_type -> rpc.Response | 1, // 6: corrpc.Coordinator.GetHubs:output_type -> rpc.Response | ||||
| 1, // 7: corrpc.Coordinator.GetHubConnectivities:output_type -> rpc.Response | 1, // 7: corrpc.Coordinator.GetHubConnectivities:output_type -> rpc.Response | ||||
| 1, // 8: corrpc.Coordinator.ReportHubConnectivity:output_type -> rpc.Response | 1, // 8: corrpc.Coordinator.ReportHubConnectivity:output_type -> rpc.Response | ||||
| 1, // 9: corrpc.Coordinator.GetStorageDetails:output_type -> rpc.Response | |||||
| 1, // 9: corrpc.Coordinator.SelectStorageHub:output_type -> rpc.Response | |||||
| 5, // [5:10] is the sub-list for method output_type | 5, // [5:10] is the sub-list for method output_type | ||||
| 0, // [0:5] is the sub-list for method input_type | 0, // [0:5] is the sub-list for method input_type | ||||
| 0, // [0:0] is the sub-list for extension type_name | 0, // [0:0] is the sub-list for extension type_name | ||||
| @@ -13,5 +13,5 @@ service Coordinator { | |||||
| rpc GetHubConnectivities(rpc.Request) returns(rpc.Response); | rpc GetHubConnectivities(rpc.Request) returns(rpc.Response); | ||||
| rpc ReportHubConnectivity(rpc.Request) returns(rpc.Response); | rpc ReportHubConnectivity(rpc.Request) returns(rpc.Response); | ||||
| rpc GetStorageDetails(rpc.Request) returns(rpc.Response); | |||||
| rpc SelectStorageHub(rpc.Request) returns(rpc.Response); | |||||
| } | } | ||||
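On the wire only the method name changes: the coordinator still exchanges the generic rpc.Request/rpc.Response envelope, so a client call under the renamed RPC looks as before. A hypothetical call site against the regenerated client (request encoding and error handling are illustrative):

    // cli is a corrpc.CoordinatorClient obtained from the RPC pool.
    resp, err := cli.SelectStorageHub(context.Background(), &rpc.Request{ /* encoded request body, built as before */ })
    if err != nil {
        return err
    }
    _ = resp // decode the rpc.Response payload exactly as the old GetStorageDetails caller did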
| @@ -24,7 +24,7 @@ const ( | |||||
| Coordinator_GetHubs_FullMethodName = "/corrpc.Coordinator/GetHubs" | Coordinator_GetHubs_FullMethodName = "/corrpc.Coordinator/GetHubs" | ||||
| Coordinator_GetHubConnectivities_FullMethodName = "/corrpc.Coordinator/GetHubConnectivities" | Coordinator_GetHubConnectivities_FullMethodName = "/corrpc.Coordinator/GetHubConnectivities" | ||||
| Coordinator_ReportHubConnectivity_FullMethodName = "/corrpc.Coordinator/ReportHubConnectivity" | Coordinator_ReportHubConnectivity_FullMethodName = "/corrpc.Coordinator/ReportHubConnectivity" | ||||
| Coordinator_GetStorageDetails_FullMethodName = "/corrpc.Coordinator/GetStorageDetails" | |||||
| Coordinator_SelectStorageHub_FullMethodName = "/corrpc.Coordinator/SelectStorageHub" | |||||
| ) | ) | ||||
| // CoordinatorClient is the client API for Coordinator service. | // CoordinatorClient is the client API for Coordinator service. | ||||
| @@ -35,7 +35,7 @@ type CoordinatorClient interface { | |||||
| GetHubs(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | GetHubs(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | ||||
| GetHubConnectivities(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | GetHubConnectivities(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | ||||
| ReportHubConnectivity(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | ReportHubConnectivity(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | ||||
| GetStorageDetails(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | |||||
| SelectStorageHub(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | |||||
| } | } | ||||
| type coordinatorClient struct { | type coordinatorClient struct { | ||||
| @@ -82,9 +82,9 @@ func (c *coordinatorClient) ReportHubConnectivity(ctx context.Context, in *rpc.R | |||||
| return out, nil | return out, nil | ||||
| } | } | ||||
| func (c *coordinatorClient) GetStorageDetails(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { | |||||
| func (c *coordinatorClient) SelectStorageHub(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { | |||||
| out := new(rpc.Response) | out := new(rpc.Response) | ||||
| err := c.cc.Invoke(ctx, Coordinator_GetStorageDetails_FullMethodName, in, out, opts...) | |||||
| err := c.cc.Invoke(ctx, Coordinator_SelectStorageHub_FullMethodName, in, out, opts...) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| @@ -99,7 +99,7 @@ type CoordinatorServer interface { | |||||
| GetHubs(context.Context, *rpc.Request) (*rpc.Response, error) | GetHubs(context.Context, *rpc.Request) (*rpc.Response, error) | ||||
| GetHubConnectivities(context.Context, *rpc.Request) (*rpc.Response, error) | GetHubConnectivities(context.Context, *rpc.Request) (*rpc.Response, error) | ||||
| ReportHubConnectivity(context.Context, *rpc.Request) (*rpc.Response, error) | ReportHubConnectivity(context.Context, *rpc.Request) (*rpc.Response, error) | ||||
| GetStorageDetails(context.Context, *rpc.Request) (*rpc.Response, error) | |||||
| SelectStorageHub(context.Context, *rpc.Request) (*rpc.Response, error) | |||||
| mustEmbedUnimplementedCoordinatorServer() | mustEmbedUnimplementedCoordinatorServer() | ||||
| } | } | ||||
| @@ -119,8 +119,8 @@ func (UnimplementedCoordinatorServer) GetHubConnectivities(context.Context, *rpc | |||||
| func (UnimplementedCoordinatorServer) ReportHubConnectivity(context.Context, *rpc.Request) (*rpc.Response, error) { | func (UnimplementedCoordinatorServer) ReportHubConnectivity(context.Context, *rpc.Request) (*rpc.Response, error) { | ||||
| return nil, status.Errorf(codes.Unimplemented, "method ReportHubConnectivity not implemented") | return nil, status.Errorf(codes.Unimplemented, "method ReportHubConnectivity not implemented") | ||||
| } | } | ||||
| func (UnimplementedCoordinatorServer) GetStorageDetails(context.Context, *rpc.Request) (*rpc.Response, error) { | |||||
| return nil, status.Errorf(codes.Unimplemented, "method GetStorageDetails not implemented") | |||||
| func (UnimplementedCoordinatorServer) SelectStorageHub(context.Context, *rpc.Request) (*rpc.Response, error) { | |||||
| return nil, status.Errorf(codes.Unimplemented, "method SelectStorageHub not implemented") | |||||
| } | } | ||||
| func (UnimplementedCoordinatorServer) mustEmbedUnimplementedCoordinatorServer() {} | func (UnimplementedCoordinatorServer) mustEmbedUnimplementedCoordinatorServer() {} | ||||
| @@ -207,20 +207,20 @@ func _Coordinator_ReportHubConnectivity_Handler(srv interface{}, ctx context.Con | |||||
| return interceptor(ctx, in, info, handler) | return interceptor(ctx, in, info, handler) | ||||
| } | } | ||||
| func _Coordinator_GetStorageDetails_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||||
| func _Coordinator_SelectStorageHub_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||||
| in := new(rpc.Request) | in := new(rpc.Request) | ||||
| if err := dec(in); err != nil { | if err := dec(in); err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| if interceptor == nil { | if interceptor == nil { | ||||
| return srv.(CoordinatorServer).GetStorageDetails(ctx, in) | |||||
| return srv.(CoordinatorServer).SelectStorageHub(ctx, in) | |||||
| } | } | ||||
| info := &grpc.UnaryServerInfo{ | info := &grpc.UnaryServerInfo{ | ||||
| Server: srv, | Server: srv, | ||||
| FullMethod: Coordinator_GetStorageDetails_FullMethodName, | |||||
| FullMethod: Coordinator_SelectStorageHub_FullMethodName, | |||||
| } | } | ||||
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | ||||
| return srv.(CoordinatorServer).GetStorageDetails(ctx, req.(*rpc.Request)) | |||||
| return srv.(CoordinatorServer).SelectStorageHub(ctx, req.(*rpc.Request)) | |||||
| } | } | ||||
| return interceptor(ctx, in, info, handler) | return interceptor(ctx, in, info, handler) | ||||
| } | } | ||||
| @@ -249,8 +249,8 @@ var Coordinator_ServiceDesc = grpc.ServiceDesc{ | |||||
| Handler: _Coordinator_ReportHubConnectivity_Handler, | Handler: _Coordinator_ReportHubConnectivity_Handler, | ||||
| }, | }, | ||||
| { | { | ||||
| MethodName: "GetStorageDetails", | |||||
| Handler: _Coordinator_GetStorageDetails_Handler, | |||||
| MethodName: "SelectStorageHub", | |||||
| Handler: _Coordinator_SelectStorageHub_Handler, | |||||
| }, | }, | ||||
| }, | }, | ||||
| Streams: []grpc.StreamDesc{}, | Streams: []grpc.StreamDesc{}, | ||||
| @@ -8,34 +8,23 @@ import ( | |||||
| ) | ) | ||||
| type StorageService interface { | type StorageService interface { | ||||
| GetStorageDetails(ctx context.Context, msg *GetStorageDetails) (*GetStorageDetailsResp, *rpc.CodeError) | |||||
| SelectStorageHub(ctx context.Context, msg *SelectStorageHub) (*SelectStorageHubResp, *rpc.CodeError) | |||||
| } | } | ||||
| // Get Storage information | |||||
| type GetStorageDetails struct { | |||||
| StorageIDs []cortypes.StorageID `json:"storageIDs"` | |||||
| // Select a Hub suitable for communicating with each of the specified Storages | |||||
| type SelectStorageHub struct { | |||||
| Storages []cortypes.StorageType | |||||
| } | } | ||||
| type GetStorageDetailsResp struct { | |||||
| Storage []*cortypes.StorageDetail `json:"storages"` | |||||
| type SelectStorageHubResp struct { | |||||
| Hubs []*cortypes.Hub | |||||
| } | } | ||||
| func ReqGetStorageDetails(storageIDs []cortypes.StorageID) *GetStorageDetails { | |||||
| return &GetStorageDetails{ | |||||
| StorageIDs: storageIDs, | |||||
| } | |||||
| } | |||||
| func RespGetStorageDetails(stgs []*cortypes.StorageDetail) *GetStorageDetailsResp { | |||||
| return &GetStorageDetailsResp{ | |||||
| Storage: stgs, | |||||
| } | |||||
| } | |||||
| func (c *Client) GetStorageDetails(ctx context.Context, msg *GetStorageDetails) (*GetStorageDetailsResp, *rpc.CodeError) { | |||||
| func (c *Client) SelectStorageHub(ctx context.Context, msg *SelectStorageHub) (*SelectStorageHubResp, *rpc.CodeError) { | |||||
| if c.fusedErr != nil { | if c.fusedErr != nil { | ||||
| return nil, c.fusedErr | return nil, c.fusedErr | ||||
| } | } | ||||
| return rpc.UnaryClient[*GetStorageDetailsResp](c.cli.GetStorageDetails, ctx, msg) | |||||
| return rpc.UnaryClient[*SelectStorageHubResp](c.cli.SelectStorageHub, ctx, msg) | |||||
| } | } | ||||
| func (s *Server) GetStorageDetails(ctx context.Context, msg *rpc.Request) (*rpc.Response, error) { | |||||
| return rpc.UnaryServer(s.svrImpl.GetStorageDetails, ctx, msg) | |||||
| func (s *Server) SelectStorageHub(ctx context.Context, msg *rpc.Request) (*rpc.Response, error) { | |||||
| return rpc.UnaryServer(s.svrImpl.SelectStorageHub, ctx, msg) | |||||
| } | } | ||||
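For reference, a minimal sketch of how a caller might use the renamed coordinator RPC. Only SelectStorageHub, its request/response types, and the *rpc.CodeError return are taken from the change above; the helper name, the corrpc/cortypes/rpc package aliases, and how the *Client is obtained are assumptions, and imports are omitted because the package paths are not shown in this diff.

    // selectHubs asks the coordinator for a Hub suited to communicate with each given storage.
    // cli is assumed to be an already-constructed coordinator RPC client.
    func selectHubs(ctx context.Context, cli *corrpc.Client, stgs []cortypes.StorageType) ([]*cortypes.Hub, *rpc.CodeError) {
        resp, cerr := cli.SelectStorageHub(ctx, &corrpc.SelectStorageHub{Storages: stgs})
        if cerr != nil {
            return nil, cerr
        }
        return resp.Hubs, nil
    }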
| @@ -76,8 +76,8 @@ var file_pkgs_rpc_hub_hub_proto_depIdxs = []int32{ | |||||
| 0, // 2: hubrpc.Hub.GetIOStream:input_type -> rpc.Request | 0, // 2: hubrpc.Hub.GetIOStream:input_type -> rpc.Request | ||||
| 0, // 3: hubrpc.Hub.SendIOVar:input_type -> rpc.Request | 0, // 3: hubrpc.Hub.SendIOVar:input_type -> rpc.Request | ||||
| 0, // 4: hubrpc.Hub.GetIOVar:input_type -> rpc.Request | 0, // 4: hubrpc.Hub.GetIOVar:input_type -> rpc.Request | ||||
| 0, // 5: hubrpc.Hub.PublicStoreListAll:input_type -> rpc.Request | |||||
| 0, // 6: hubrpc.Hub.PublicStoreMkdirs:input_type -> rpc.Request | |||||
| 0, // 5: hubrpc.Hub.BaseStoreListAll:input_type -> rpc.Request | |||||
| 0, // 6: hubrpc.Hub.BaseStoreMkdirs:input_type -> rpc.Request | |||||
| 0, // 7: hubrpc.Hub.CheckCache:input_type -> rpc.Request | 0, // 7: hubrpc.Hub.CheckCache:input_type -> rpc.Request | ||||
| 0, // 8: hubrpc.Hub.CacheGC:input_type -> rpc.Request | 0, // 8: hubrpc.Hub.CacheGC:input_type -> rpc.Request | ||||
| 0, // 9: hubrpc.Hub.Ping:input_type -> rpc.Request | 0, // 9: hubrpc.Hub.Ping:input_type -> rpc.Request | ||||
| @@ -87,8 +87,8 @@ var file_pkgs_rpc_hub_hub_proto_depIdxs = []int32{ | |||||
| 1, // 13: hubrpc.Hub.GetIOStream:output_type -> rpc.ChunkedData | 1, // 13: hubrpc.Hub.GetIOStream:output_type -> rpc.ChunkedData | ||||
| 2, // 14: hubrpc.Hub.SendIOVar:output_type -> rpc.Response | 2, // 14: hubrpc.Hub.SendIOVar:output_type -> rpc.Response | ||||
| 2, // 15: hubrpc.Hub.GetIOVar:output_type -> rpc.Response | 2, // 15: hubrpc.Hub.GetIOVar:output_type -> rpc.Response | ||||
| 2, // 16: hubrpc.Hub.PublicStoreListAll:output_type -> rpc.Response | |||||
| 2, // 17: hubrpc.Hub.PublicStoreMkdirs:output_type -> rpc.Response | |||||
| 2, // 16: hubrpc.Hub.BaseStoreListAll:output_type -> rpc.Response | |||||
| 2, // 17: hubrpc.Hub.BaseStoreMkdirs:output_type -> rpc.Response | |||||
| 2, // 18: hubrpc.Hub.CheckCache:output_type -> rpc.Response | 2, // 18: hubrpc.Hub.CheckCache:output_type -> rpc.Response | ||||
| 2, // 19: hubrpc.Hub.CacheGC:output_type -> rpc.Response | 2, // 19: hubrpc.Hub.CacheGC:output_type -> rpc.Response | ||||
| 2, // 20: hubrpc.Hub.Ping:output_type -> rpc.Response | 2, // 20: hubrpc.Hub.Ping:output_type -> rpc.Response | ||||
| @@ -14,8 +14,8 @@ service Hub { | |||||
| rpc SendIOVar(rpc.Request)returns(rpc.Response); | rpc SendIOVar(rpc.Request)returns(rpc.Response); | ||||
| rpc GetIOVar(rpc.Request)returns(rpc.Response); | rpc GetIOVar(rpc.Request)returns(rpc.Response); | ||||
| rpc PublicStoreListAll(rpc.Request) returns(rpc.Response); | |||||
| rpc PublicStoreMkdirs(rpc.Request) returns(rpc.Response); | |||||
| rpc BaseStoreListAll(rpc.Request) returns(rpc.Response); | |||||
| rpc BaseStoreMkdirs(rpc.Request) returns(rpc.Response); | |||||
| rpc CheckCache(rpc.Request) returns(rpc.Response); | rpc CheckCache(rpc.Request) returns(rpc.Response); | ||||
| rpc CacheGC(rpc.Request) returns(rpc.Response); | rpc CacheGC(rpc.Request) returns(rpc.Response); | ||||
| @@ -25,8 +25,8 @@ const ( | |||||
| Hub_GetIOStream_FullMethodName = "/hubrpc.Hub/GetIOStream" | Hub_GetIOStream_FullMethodName = "/hubrpc.Hub/GetIOStream" | ||||
| Hub_SendIOVar_FullMethodName = "/hubrpc.Hub/SendIOVar" | Hub_SendIOVar_FullMethodName = "/hubrpc.Hub/SendIOVar" | ||||
| Hub_GetIOVar_FullMethodName = "/hubrpc.Hub/GetIOVar" | Hub_GetIOVar_FullMethodName = "/hubrpc.Hub/GetIOVar" | ||||
| Hub_PublicStoreListAll_FullMethodName = "/hubrpc.Hub/PublicStoreListAll" | |||||
| Hub_PublicStoreMkdirs_FullMethodName = "/hubrpc.Hub/PublicStoreMkdirs" | |||||
| Hub_BaseStoreListAll_FullMethodName = "/hubrpc.Hub/BaseStoreListAll" | |||||
| Hub_BaseStoreMkdirs_FullMethodName = "/hubrpc.Hub/BaseStoreMkdirs" | |||||
| Hub_CheckCache_FullMethodName = "/hubrpc.Hub/CheckCache" | Hub_CheckCache_FullMethodName = "/hubrpc.Hub/CheckCache" | ||||
| Hub_CacheGC_FullMethodName = "/hubrpc.Hub/CacheGC" | Hub_CacheGC_FullMethodName = "/hubrpc.Hub/CacheGC" | ||||
| Hub_Ping_FullMethodName = "/hubrpc.Hub/Ping" | Hub_Ping_FullMethodName = "/hubrpc.Hub/Ping" | ||||
| @@ -42,8 +42,8 @@ type HubClient interface { | |||||
| GetIOStream(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (Hub_GetIOStreamClient, error) | GetIOStream(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (Hub_GetIOStreamClient, error) | ||||
| SendIOVar(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | SendIOVar(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | ||||
| GetIOVar(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | GetIOVar(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | ||||
| PublicStoreListAll(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | |||||
| PublicStoreMkdirs(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | |||||
| BaseStoreListAll(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | |||||
| BaseStoreMkdirs(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | |||||
| CheckCache(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | CheckCache(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | ||||
| CacheGC(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | CacheGC(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | ||||
| Ping(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | Ping(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) | ||||
| @@ -151,18 +151,18 @@ func (c *hubClient) GetIOVar(ctx context.Context, in *rpc.Request, opts ...grpc. | |||||
| return out, nil | return out, nil | ||||
| } | } | ||||
| func (c *hubClient) PublicStoreListAll(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { | |||||
| func (c *hubClient) BaseStoreListAll(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { | |||||
| out := new(rpc.Response) | out := new(rpc.Response) | ||||
| err := c.cc.Invoke(ctx, Hub_PublicStoreListAll_FullMethodName, in, out, opts...) | |||||
| err := c.cc.Invoke(ctx, Hub_BaseStoreListAll_FullMethodName, in, out, opts...) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| return out, nil | return out, nil | ||||
| } | } | ||||
| func (c *hubClient) PublicStoreMkdirs(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { | |||||
| func (c *hubClient) BaseStoreMkdirs(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { | |||||
| out := new(rpc.Response) | out := new(rpc.Response) | ||||
| err := c.cc.Invoke(ctx, Hub_PublicStoreMkdirs_FullMethodName, in, out, opts...) | |||||
| err := c.cc.Invoke(ctx, Hub_BaseStoreMkdirs_FullMethodName, in, out, opts...) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| @@ -214,8 +214,8 @@ type HubServer interface { | |||||
| GetIOStream(*rpc.Request, Hub_GetIOStreamServer) error | GetIOStream(*rpc.Request, Hub_GetIOStreamServer) error | ||||
| SendIOVar(context.Context, *rpc.Request) (*rpc.Response, error) | SendIOVar(context.Context, *rpc.Request) (*rpc.Response, error) | ||||
| GetIOVar(context.Context, *rpc.Request) (*rpc.Response, error) | GetIOVar(context.Context, *rpc.Request) (*rpc.Response, error) | ||||
| PublicStoreListAll(context.Context, *rpc.Request) (*rpc.Response, error) | |||||
| PublicStoreMkdirs(context.Context, *rpc.Request) (*rpc.Response, error) | |||||
| BaseStoreListAll(context.Context, *rpc.Request) (*rpc.Response, error) | |||||
| BaseStoreMkdirs(context.Context, *rpc.Request) (*rpc.Response, error) | |||||
| CheckCache(context.Context, *rpc.Request) (*rpc.Response, error) | CheckCache(context.Context, *rpc.Request) (*rpc.Response, error) | ||||
| CacheGC(context.Context, *rpc.Request) (*rpc.Response, error) | CacheGC(context.Context, *rpc.Request) (*rpc.Response, error) | ||||
| Ping(context.Context, *rpc.Request) (*rpc.Response, error) | Ping(context.Context, *rpc.Request) (*rpc.Response, error) | ||||
| @@ -242,11 +242,11 @@ func (UnimplementedHubServer) SendIOVar(context.Context, *rpc.Request) (*rpc.Res | |||||
| func (UnimplementedHubServer) GetIOVar(context.Context, *rpc.Request) (*rpc.Response, error) { | func (UnimplementedHubServer) GetIOVar(context.Context, *rpc.Request) (*rpc.Response, error) { | ||||
| return nil, status.Errorf(codes.Unimplemented, "method GetIOVar not implemented") | return nil, status.Errorf(codes.Unimplemented, "method GetIOVar not implemented") | ||||
| } | } | ||||
| func (UnimplementedHubServer) PublicStoreListAll(context.Context, *rpc.Request) (*rpc.Response, error) { | |||||
| return nil, status.Errorf(codes.Unimplemented, "method PublicStoreListAll not implemented") | |||||
| func (UnimplementedHubServer) BaseStoreListAll(context.Context, *rpc.Request) (*rpc.Response, error) { | |||||
| return nil, status.Errorf(codes.Unimplemented, "method BaseStoreListAll not implemented") | |||||
| } | } | ||||
| func (UnimplementedHubServer) PublicStoreMkdirs(context.Context, *rpc.Request) (*rpc.Response, error) { | |||||
| return nil, status.Errorf(codes.Unimplemented, "method PublicStoreMkdirs not implemented") | |||||
| func (UnimplementedHubServer) BaseStoreMkdirs(context.Context, *rpc.Request) (*rpc.Response, error) { | |||||
| return nil, status.Errorf(codes.Unimplemented, "method BaseStoreMkdirs not implemented") | |||||
| } | } | ||||
| func (UnimplementedHubServer) CheckCache(context.Context, *rpc.Request) (*rpc.Response, error) { | func (UnimplementedHubServer) CheckCache(context.Context, *rpc.Request) (*rpc.Response, error) { | ||||
| return nil, status.Errorf(codes.Unimplemented, "method CheckCache not implemented") | return nil, status.Errorf(codes.Unimplemented, "method CheckCache not implemented") | ||||
| @@ -374,38 +374,38 @@ func _Hub_GetIOVar_Handler(srv interface{}, ctx context.Context, dec func(interf | |||||
| return interceptor(ctx, in, info, handler) | return interceptor(ctx, in, info, handler) | ||||
| } | } | ||||
| func _Hub_PublicStoreListAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||||
| func _Hub_BaseStoreListAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||||
| in := new(rpc.Request) | in := new(rpc.Request) | ||||
| if err := dec(in); err != nil { | if err := dec(in); err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| if interceptor == nil { | if interceptor == nil { | ||||
| return srv.(HubServer).PublicStoreListAll(ctx, in) | |||||
| return srv.(HubServer).BaseStoreListAll(ctx, in) | |||||
| } | } | ||||
| info := &grpc.UnaryServerInfo{ | info := &grpc.UnaryServerInfo{ | ||||
| Server: srv, | Server: srv, | ||||
| FullMethod: Hub_PublicStoreListAll_FullMethodName, | |||||
| FullMethod: Hub_BaseStoreListAll_FullMethodName, | |||||
| } | } | ||||
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | ||||
| return srv.(HubServer).PublicStoreListAll(ctx, req.(*rpc.Request)) | |||||
| return srv.(HubServer).BaseStoreListAll(ctx, req.(*rpc.Request)) | |||||
| } | } | ||||
| return interceptor(ctx, in, info, handler) | return interceptor(ctx, in, info, handler) | ||||
| } | } | ||||
| func _Hub_PublicStoreMkdirs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||||
| func _Hub_BaseStoreMkdirs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||||
| in := new(rpc.Request) | in := new(rpc.Request) | ||||
| if err := dec(in); err != nil { | if err := dec(in); err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| if interceptor == nil { | if interceptor == nil { | ||||
| return srv.(HubServer).PublicStoreMkdirs(ctx, in) | |||||
| return srv.(HubServer).BaseStoreMkdirs(ctx, in) | |||||
| } | } | ||||
| info := &grpc.UnaryServerInfo{ | info := &grpc.UnaryServerInfo{ | ||||
| Server: srv, | Server: srv, | ||||
| FullMethod: Hub_PublicStoreMkdirs_FullMethodName, | |||||
| FullMethod: Hub_BaseStoreMkdirs_FullMethodName, | |||||
| } | } | ||||
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | ||||
| return srv.(HubServer).PublicStoreMkdirs(ctx, req.(*rpc.Request)) | |||||
| return srv.(HubServer).BaseStoreMkdirs(ctx, req.(*rpc.Request)) | |||||
| } | } | ||||
| return interceptor(ctx, in, info, handler) | return interceptor(ctx, in, info, handler) | ||||
| } | } | ||||
| @@ -502,12 +502,12 @@ var Hub_ServiceDesc = grpc.ServiceDesc{ | |||||
| Handler: _Hub_GetIOVar_Handler, | Handler: _Hub_GetIOVar_Handler, | ||||
| }, | }, | ||||
| { | { | ||||
| MethodName: "PublicStoreListAll", | |||||
| Handler: _Hub_PublicStoreListAll_Handler, | |||||
| MethodName: "BaseStoreListAll", | |||||
| Handler: _Hub_BaseStoreListAll_Handler, | |||||
| }, | }, | ||||
| { | { | ||||
| MethodName: "PublicStoreMkdirs", | |||||
| Handler: _Hub_PublicStoreMkdirs_Handler, | |||||
| MethodName: "BaseStoreMkdirs", | |||||
| Handler: _Hub_BaseStoreMkdirs_Handler, | |||||
| }, | }, | ||||
| { | { | ||||
| MethodName: "CheckCache", | MethodName: "CheckCache", | ||||
| @@ -9,45 +9,45 @@ import ( | |||||
| ) | ) | ||||
| type UserSpaceSvc interface { | type UserSpaceSvc interface { | ||||
| PublicStoreListAll(ctx context.Context, req *PublicStoreListAll) (*PublicStoreListAllResp, *rpc.CodeError) | |||||
| PublicStoreMkdirs(ctx context.Context, req *PublicStoreMkdirs) (*PublicStoreMkdirsResp, *rpc.CodeError) | |||||
| BaseStoreListAll(ctx context.Context, req *BaseStoreListAll) (*BaseStoreListAllResp, *rpc.CodeError) | |||||
| BaseStoreMkdirs(ctx context.Context, req *BaseStoreMkdirs) (*BaseStoreMkdirsResp, *rpc.CodeError) | |||||
| } | } | ||||
| // List all files under the given path in the specified PublicStore | |||||
| type PublicStoreListAll struct { | |||||
| // List all files under the given path in the specified BaseStore | |||||
| type BaseStoreListAll struct { | |||||
| UserSpace clitypes.UserSpaceDetail | UserSpace clitypes.UserSpaceDetail | ||||
| Path string | Path string | ||||
| } | } | ||||
| type PublicStoreListAllResp struct { | |||||
| Entries []stgtypes.PublicStoreEntry | |||||
| type BaseStoreListAllResp struct { | |||||
| Entries []stgtypes.BaseStoreEntry | |||||
| } | } | ||||
| func (c *Client) PublicStoreListAll(ctx context.Context, req *PublicStoreListAll) (*PublicStoreListAllResp, *rpc.CodeError) { | |||||
| func (c *Client) BaseStoreListAll(ctx context.Context, req *BaseStoreListAll) (*BaseStoreListAllResp, *rpc.CodeError) { | |||||
| if c.fusedErr != nil { | if c.fusedErr != nil { | ||||
| return nil, c.fusedErr | return nil, c.fusedErr | ||||
| } | } | ||||
| return rpc.UnaryClient[*PublicStoreListAllResp](c.cli.PublicStoreListAll, ctx, req) | |||||
| return rpc.UnaryClient[*BaseStoreListAllResp](c.cli.BaseStoreListAll, ctx, req) | |||||
| } | } | ||||
| func (s *Server) PublicStoreListAll(ctx context.Context, req *rpc.Request) (*rpc.Response, error) { | |||||
| return rpc.UnaryServer(s.svrImpl.PublicStoreListAll, ctx, req) | |||||
| func (s *Server) BaseStoreListAll(ctx context.Context, req *rpc.Request) (*rpc.Response, error) { | |||||
| return rpc.UnaryServer(s.svrImpl.BaseStoreListAll, ctx, req) | |||||
| } | } | ||||
| // Batch-create directories in the specified PublicStore | |||||
| type PublicStoreMkdirs struct { | |||||
| // Batch-create directories in the specified BaseStore | |||||
| type BaseStoreMkdirs struct { | |||||
| UserSpace clitypes.UserSpaceDetail | UserSpace clitypes.UserSpaceDetail | ||||
| Pathes []string | Pathes []string | ||||
| } | } | ||||
| type PublicStoreMkdirsResp struct { | |||||
| type BaseStoreMkdirsResp struct { | |||||
| Successes []bool | Successes []bool | ||||
| } | } | ||||
| func (c *Client) PublicStoreMkdirs(ctx context.Context, req *PublicStoreMkdirs) (*PublicStoreMkdirsResp, *rpc.CodeError) { | |||||
| func (c *Client) BaseStoreMkdirs(ctx context.Context, req *BaseStoreMkdirs) (*BaseStoreMkdirsResp, *rpc.CodeError) { | |||||
| if c.fusedErr != nil { | if c.fusedErr != nil { | ||||
| return nil, c.fusedErr | return nil, c.fusedErr | ||||
| } | } | ||||
| return rpc.UnaryClient[*PublicStoreMkdirsResp](c.cli.PublicStoreMkdirs, ctx, req) | |||||
| return rpc.UnaryClient[*BaseStoreMkdirsResp](c.cli.BaseStoreMkdirs, ctx, req) | |||||
| } | } | ||||
| func (s *Server) PublicStoreMkdirs(ctx context.Context, req *rpc.Request) (*rpc.Response, error) { | |||||
| return rpc.UnaryServer(s.svrImpl.PublicStoreMkdirs, ctx, req) | |||||
| func (s *Server) BaseStoreMkdirs(ctx context.Context, req *rpc.Request) (*rpc.Response, error) { | |||||
| return rpc.UnaryServer(s.svrImpl.BaseStoreMkdirs, ctx, req) | |||||
| } | } | ||||
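Likewise, a hedged sketch of calling the renamed hub-side RPC. BaseStoreListAll, its UserSpace/Path fields, and BaseStoreListAllResp.Entries come from the change above, while the helper name, the hubrpc package alias, and the way the hub client is obtained are assumptions (imports omitted).

    // listSpace lists every entry under path inside the given user space via the hub.
    func listSpace(ctx context.Context, cli *hubrpc.Client, space clitypes.UserSpaceDetail, path string) ([]stgtypes.BaseStoreEntry, *rpc.CodeError) {
        resp, cerr := cli.BaseStoreListAll(ctx, &hubrpc.BaseStoreListAll{UserSpace: space, Path: path})
        if cerr != nil {
            return nil, cerr
        }
        return resp.Entries, nil
    }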
| @@ -32,6 +32,7 @@ type builder struct { | |||||
| } | } | ||||
| func (b *builder) getToken() (string, error) { | func (b *builder) getToken() (string, error) { | ||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.EFileType) | |||||
| cred := b.detail.UserSpace.Credential.(*cortypes.EFileCred) | cred := b.detail.UserSpace.Credential.(*cortypes.EFileCred) | ||||
| b.tokenLock.Lock() | b.tokenLock.Lock() | ||||
| @@ -80,18 +81,18 @@ func (b *builder) getToken() (string, error) { | |||||
| } | } | ||||
| for _, d := range r.Data { | for _, d := range r.Data { | ||||
| if d.ClusterID == cred.ClusterID { | |||||
| if d.ClusterID == stgType.ClusterID { | |||||
| b.token = d.Token | b.token = d.Token | ||||
| b.getTokenTime = time.Now() | b.getTokenTime = time.Now() | ||||
| return d.Token, nil | return d.Token, nil | ||||
| } | } | ||||
| } | } | ||||
| return "", fmt.Errorf("clusterID %s not found", cred.ClusterID) | |||||
| return "", fmt.Errorf("clusterID %s not found", stgType.ClusterID) | |||||
| } | } | ||||
| func (b *builder) CreateECMultiplier() (types.ECMultiplier, error) { | func (b *builder) CreateECMultiplier() (types.ECMultiplier, error) { | ||||
| feat := utils.FindFeature[*cortypes.ECMultiplierFeature](b.detail.Storage) | |||||
| feat := utils.FindFeature[*cortypes.ECMultiplierFeature](b.detail) | |||||
| if feat == nil { | if feat == nil { | ||||
| return nil, fmt.Errorf("feature ECMultiplier not found") | return nil, fmt.Errorf("feature ECMultiplier not found") | ||||
| } | } | ||||
| @@ -12,7 +12,7 @@ import ( | |||||
| // This function never returns nil. If no matching Builder can be found, it returns an EmptyBuilder, | // This function never returns nil. If no matching Builder can be found, it returns an EmptyBuilder, | ||||
| // all of whose methods return negative values or a wrapped ErrUnsupported error (check with errors.Is) | // all of whose methods return negative values or a wrapped ErrUnsupported error (check with errors.Is) | ||||
| func GetBuilder(detail *clitypes.UserSpaceDetail) types.StorageBuilder { | func GetBuilder(detail *clitypes.UserSpaceDetail) types.StorageBuilder { | ||||
| typ := reflect.TypeOf(detail.Storage.Type) | |||||
| typ := reflect.TypeOf(detail.UserSpace.Storage) | |||||
| ctor, ok := reg.StorageBuilders[typ] | ctor, ok := reg.StorageBuilders[typ] | ||||
| if !ok { | if !ok { | ||||
| @@ -22,7 +22,7 @@ func RegisterBuilder[T cortypes.StorageType](ctor BuilderCtor) { | |||||
| // This function never returns nil. If no matching Builder can be found, it returns an EmptyBuilder, | // This function never returns nil. If no matching Builder can be found, it returns an EmptyBuilder, | ||||
| // all of whose methods return negative values or a wrapped ErrUnsupported error (check with errors.Is) | // all of whose methods return negative values or a wrapped ErrUnsupported error (check with errors.Is) | ||||
| func GetBuilderInternal(detail *clitypes.UserSpaceDetail) types.StorageBuilder { | func GetBuilderInternal(detail *clitypes.UserSpaceDetail) types.StorageBuilder { | ||||
| typ := reflect.TypeOf(detail.Storage.Type) | |||||
| typ := reflect.TypeOf(detail.UserSpace.Storage) | |||||
| ctor, ok := StorageBuilders[typ] | ctor, ok := StorageBuilders[typ] | ||||
| if !ok { | if !ok { | ||||
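To illustrate the new lookup key (reflect.TypeOf(detail.UserSpace.Storage) instead of the old detail.Storage.Type), a minimal sketch of resolving a builder for a local space. The helper name is hypothetical; the factory and types aliases follow their usage elsewhere in this diff, and imports are omitted.

    // openLocalBaseStore resolves the builder registered for the space's storage type
    // and opens its BaseStore. A detail whose UserSpace.Storage is a *cortypes.LocalType
    // picks up the builder registered in the local package's init() shown below.
    func openLocalBaseStore(detail *clitypes.UserSpaceDetail) (types.BaseStore, error) {
        blder := factory.GetBuilder(detail) // never nil; unknown types yield an EmptyBuilder
        return blder.CreateBaseStore()      // EmptyBuilder methods return a wrapped ErrUnsupported
    }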
| @@ -11,7 +11,7 @@ import ( | |||||
| ) | ) | ||||
| func init() { | func init() { | ||||
| reg.RegisterBuilder[*cortypes.LocalStorageType](func(detail *clitypes.UserSpaceDetail) types.StorageBuilder { | |||||
| reg.RegisterBuilder[*cortypes.LocalType](func(detail *clitypes.UserSpaceDetail) types.StorageBuilder { | |||||
| return &builder{ | return &builder{ | ||||
| detail: detail, | detail: detail, | ||||
| } | } | ||||
| @@ -39,17 +39,17 @@ func (b *builder) CreateShardStore() (types.ShardStore, error) { | |||||
| return NewShardStore(cred.RootDir, b.detail) | return NewShardStore(cred.RootDir, b.detail) | ||||
| } | } | ||||
| func (b *builder) CreatePublicStore() (types.PublicStore, error) { | |||||
| func (b *builder) CreateBaseStore() (types.BaseStore, error) { | |||||
| cred, ok := b.detail.UserSpace.Credential.(*cortypes.LocalCred) | cred, ok := b.detail.UserSpace.Credential.(*cortypes.LocalCred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for local storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for local storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| return NewPublicStore(cred.RootDir, b.detail) | |||||
| return NewBaseStore(cred.RootDir, b.detail) | |||||
| } | } | ||||
| func (b *builder) CreateMultiparter() (types.Multiparter, error) { | func (b *builder) CreateMultiparter() (types.Multiparter, error) { | ||||
| feat := utils.FindFeature[*cortypes.MultipartUploadFeature](b.detail.Storage) | |||||
| feat := utils.FindFeature[*cortypes.MultipartUploadFeature](b.detail) | |||||
| if feat == nil { | if feat == nil { | ||||
| return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{}) | return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{}) | ||||
| } | } | ||||
| @@ -60,7 +60,7 @@ func (b *builder) CreateMultiparter() (types.Multiparter, error) { | |||||
| } | } | ||||
| func (b *builder) CreateS2STransfer() (types.S2STransfer, error) { | func (b *builder) CreateS2STransfer() (types.S2STransfer, error) { | ||||
| feat := utils.FindFeature[*cortypes.S2STransferFeature](b.detail.Storage) | |||||
| feat := utils.FindFeature[*cortypes.S2STransferFeature](b.detail) | |||||
| if feat == nil { | if feat == nil { | ||||
| return nil, fmt.Errorf("feature %T not found", cortypes.S2STransferFeature{}) | return nil, fmt.Errorf("feature %T not found", cortypes.S2STransferFeature{}) | ||||
| } | } | ||||
| @@ -1,51 +1,59 @@ | |||||
| package local | package local | ||||
| import ( | import ( | ||||
| "crypto/sha256" | |||||
| "io" | "io" | ||||
| "io/fs" | "io/fs" | ||||
| "os" | "os" | ||||
| "path/filepath" | "path/filepath" | ||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | "gitlink.org.cn/cloudream/common/pkgs/logger" | ||||
| "gitlink.org.cn/cloudream/common/utils/io2" | |||||
| clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | ||||
| "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" | "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" | ||||
| ) | ) | ||||
| type PublicStore struct { | |||||
| type BaseStore struct { | |||||
| root string | root string | ||||
| detail *clitypes.UserSpaceDetail | detail *clitypes.UserSpaceDetail | ||||
| } | } | ||||
| func NewPublicStore(root string, detail *clitypes.UserSpaceDetail) (*PublicStore, error) { | |||||
| return &PublicStore{ | |||||
| func NewBaseStore(root string, detail *clitypes.UserSpaceDetail) (*BaseStore, error) { | |||||
| return &BaseStore{ | |||||
| root: root, | root: root, | ||||
| detail: detail, | detail: detail, | ||||
| }, nil | }, nil | ||||
| } | } | ||||
| func (s *PublicStore) Write(objPath string, stream io.Reader) error { | |||||
| func (s *BaseStore) Write(objPath string, stream io.Reader) (types.FileInfo, error) { | |||||
| absObjPath := filepath.Join(s.root, objPath) | absObjPath := filepath.Join(s.root, objPath) | ||||
| err := os.MkdirAll(filepath.Dir(absObjPath), 0755) | err := os.MkdirAll(filepath.Dir(absObjPath), 0755) | ||||
| if err != nil { | if err != nil { | ||||
| return err | |||||
| return types.FileInfo{}, err | |||||
| } | } | ||||
| f, err := os.Create(absObjPath) | f, err := os.Create(absObjPath) | ||||
| if err != nil { | if err != nil { | ||||
| return err | |||||
| return types.FileInfo{}, err | |||||
| } | } | ||||
| defer f.Close() | defer f.Close() | ||||
| _, err = io.Copy(f, stream) | |||||
| counter := io2.Counter(stream) | |||||
| hasher := io2.NewReadHasher(sha256.New(), counter) | |||||
| _, err = io.Copy(f, hasher) | |||||
| if err != nil { | if err != nil { | ||||
| return err | |||||
| return types.FileInfo{}, err | |||||
| } | } | ||||
| return nil | |||||
| return types.FileInfo{ | |||||
| Hash: clitypes.NewFullHash(hasher.Sum()), | |||||
| Size: counter.Count(), | |||||
| }, nil | |||||
| } | } | ||||
| func (s *PublicStore) Read(objPath string) (io.ReadCloser, error) { | |||||
| func (s *BaseStore) Read(objPath string) (io.ReadCloser, error) { | |||||
| absObjPath := filepath.Join(s.root, objPath) | absObjPath := filepath.Join(s.root, objPath) | ||||
| f, err := os.Open(absObjPath) | f, err := os.Open(absObjPath) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -55,7 +63,7 @@ func (s *PublicStore) Read(objPath string) (io.ReadCloser, error) { | |||||
| return f, nil | return f, nil | ||||
| } | } | ||||
| func (s *PublicStore) Mkdir(path string) error { | |||||
| func (s *BaseStore) Mkdir(path string) error { | |||||
| absObjPath := filepath.Join(s.root, path) | absObjPath := filepath.Join(s.root, path) | ||||
| err := os.MkdirAll(absObjPath, 0755) | err := os.MkdirAll(absObjPath, 0755) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -65,10 +73,10 @@ func (s *PublicStore) Mkdir(path string) error { | |||||
| return nil | return nil | ||||
| } | } | ||||
| func (s *PublicStore) ListAll(path string) ([]types.PublicStoreEntry, error) { | |||||
| func (s *BaseStore) ListAll(path string) ([]types.BaseStoreEntry, error) { | |||||
| absObjPath := filepath.Join(s.root, path) | absObjPath := filepath.Join(s.root, path) | ||||
| var es []types.PublicStoreEntry | |||||
| var es []types.BaseStoreEntry | |||||
| err := filepath.WalkDir(absObjPath, func(path string, d fs.DirEntry, err error) error { | err := filepath.WalkDir(absObjPath, func(path string, d fs.DirEntry, err error) error { | ||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| @@ -80,7 +88,7 @@ func (s *PublicStore) ListAll(path string) ([]types.PublicStoreEntry, error) { | |||||
| } | } | ||||
| if d.IsDir() { | if d.IsDir() { | ||||
| es = append(es, types.PublicStoreEntry{ | |||||
| es = append(es, types.BaseStoreEntry{ | |||||
| Path: filepath.ToSlash(relaPath), | Path: filepath.ToSlash(relaPath), | ||||
| Size: 0, | Size: 0, | ||||
| IsDir: true, | IsDir: true, | ||||
| @@ -92,7 +100,7 @@ func (s *PublicStore) ListAll(path string) ([]types.PublicStoreEntry, error) { | |||||
| return err | return err | ||||
| } | } | ||||
| es = append(es, types.PublicStoreEntry{ | |||||
| es = append(es, types.BaseStoreEntry{ | |||||
| Path: filepath.ToSlash(relaPath), | Path: filepath.ToSlash(relaPath), | ||||
| Size: info.Size(), | Size: info.Size(), | ||||
| IsDir: false, | IsDir: false, | ||||
| @@ -109,6 +117,6 @@ func (s *PublicStore) ListAll(path string) ([]types.PublicStoreEntry, error) { | |||||
| return es, nil | return es, nil | ||||
| } | } | ||||
| func (s *PublicStore) getLogger() logger.Logger { | |||||
| return logger.WithField("PublicStore", "Local").WithField("UserSpace", s.detail.UserSpace) | |||||
| func (s *BaseStore) getLogger() logger.Logger { | |||||
| return logger.WithField("BaseStore", "Local").WithField("UserSpace", s.detail.UserSpace) | |||||
| } | } | ||||
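A minimal sketch of the new Write contract for the local BaseStore: the call now reports the full hash and byte count of the stream it persisted. The helper name, root directory, and object path are made up for illustration, and imports (fmt, strings, the clitypes and local packages) are omitted.

    // writeOne writes a small payload into a local BaseStore and prints what was stored.
    func writeOne(detail *clitypes.UserSpaceDetail) error {
        store, err := local.NewBaseStore("/tmp/basestore", detail) // hypothetical root dir
        if err != nil {
            return err
        }
        info, err := store.Write("dir/hello.txt", strings.NewReader("hello"))
        if err != nil {
            return err
        }
        fmt.Printf("hash=%v size=%d\n", info.Hash, info.Size) // Size would be 5 for this payload
        return nil
    }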
| @@ -21,12 +21,12 @@ type S2STransfer struct { | |||||
| // Direct data transfer is only possible between storages on the same machine | // Direct data transfer is only possible between storages on the same machine | ||||
| func (s *S2STransfer) CanTransfer(src *clitypes.UserSpaceDetail) bool { | func (s *S2STransfer) CanTransfer(src *clitypes.UserSpaceDetail) bool { | ||||
| _, ok := src.Storage.Type.(*cortypes.LocalStorageType) | |||||
| _, ok := src.UserSpace.Storage.(*cortypes.LocalType) | |||||
| if !ok { | if !ok { | ||||
| return false | return false | ||||
| } | } | ||||
| if src.Storage.MasterHub != s.detail.Storage.MasterHub { | |||||
| if src.RecommendHub != s.detail.RecommendHub { | |||||
| return false | return false | ||||
| } | } | ||||
| @@ -14,7 +14,6 @@ import ( | |||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | "gitlink.org.cn/cloudream/common/pkgs/logger" | ||||
| "gitlink.org.cn/cloudream/common/utils/io2" | "gitlink.org.cn/cloudream/common/utils/io2" | ||||
| clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | ||||
| stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" | |||||
| "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" | "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" | ||||
| ) | ) | ||||
| @@ -117,9 +116,10 @@ func (s *ShardStore) Create(stream io.Reader) (types.FileInfo, error) { | |||||
| counter := io2.Counter(stream) | counter := io2.Counter(stream) | ||||
| size, hash, err := s.writeTempFile(file, counter) | size, hash, err := s.writeTempFile(file, counter) | ||||
| if stgglb.Stats.HubStorageTransfer != nil { | |||||
| stgglb.Stats.HubStorageTransfer.RecordUpload(s.detail.Storage.StorageID, counter.Count(), err == nil) | |||||
| } | |||||
| // TODO2 | |||||
| // if stgglb.Stats.HubStorageTransfer != nil { | |||||
| // stgglb.Stats.HubStorageTransfer.RecordUpload(s.detail.Storage.StorageID, counter.Count(), err == nil) | |||||
| // } | |||||
| if err != nil { | if err != nil { | ||||
| // Name is the full path of the file | // Name is the full path of the file | ||||
| s.onCreateFailed(file.Name()) | s.onCreateFailed(file.Name()) | ||||
| @@ -265,9 +265,10 @@ func (s *ShardStore) Open(opt types.OpenOption) (io.ReadCloser, error) { | |||||
| } | } | ||||
| return io2.CounterCloser(ret, func(cnt int64, err error) { | return io2.CounterCloser(ret, func(cnt int64, err error) { | ||||
| if stgglb.Stats.HubStorageTransfer != nil { | |||||
| stgglb.Stats.HubStorageTransfer.RecordDownload(s.detail.Storage.StorageID, cnt, err == nil || err == io.EOF) | |||||
| } | |||||
| // TODO2 | |||||
| // if stgglb.Stats.HubStorageTransfer != nil { | |||||
| // stgglb.Stats.HubStorageTransfer.RecordDownload(s.detail.Storage.StorageID, cnt, err == nil || err == io.EOF) | |||||
| // } | |||||
| }), nil | }), nil | ||||
| } | } | ||||
| @@ -388,7 +389,7 @@ func (s *ShardStore) Stats() types.Stats { | |||||
| } | } | ||||
| func (s *ShardStore) getLogger() logger.Logger { | func (s *ShardStore) getLogger() logger.Logger { | ||||
| return logger.WithField("ShardStore", "Local").WithField("Storage", s.detail.Storage.String()) | |||||
| return logger.WithField("ShardStore", "Local").WithField("Storage", s.detail.UserSpace.Storage.String()) | |||||
| } | } | ||||
| func (s *ShardStore) getFileDirFromHash(hash clitypes.FileHash) string { | func (s *ShardStore) getFileDirFromHash(hash clitypes.FileHash) string { | ||||
| @@ -1,5 +1,6 @@ | |||||
| package mashup | package mashup | ||||
| /* | |||||
| import ( | import ( | ||||
| "fmt" | "fmt" | ||||
| @@ -22,14 +23,14 @@ type builder struct { | |||||
| } | } | ||||
| func (b *builder) FeatureDesc() types.FeatureDesc { | func (b *builder) FeatureDesc() types.FeatureDesc { | ||||
| stgType := b.detail.Storage.Type.(*cortypes.MashupStorageType) | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.MashupStorageType) | |||||
| cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | ||||
| if !ok { | if !ok { | ||||
| return types.FeatureDesc{} | return types.FeatureDesc{} | ||||
| } | } | ||||
| newDetail := *b.detail | newDetail := *b.detail | ||||
| newDetail.Storage.Type = stgType.Store | |||||
| newDetail.UserSpace.Storage = stgType.Store | |||||
| newDetail.UserSpace.Credential = cred.Store | newDetail.UserSpace.Credential = cred.Store | ||||
| blder := reg.GetBuilderInternal(&newDetail) | blder := reg.GetBuilderInternal(&newDetail) | ||||
| @@ -37,44 +38,44 @@ func (b *builder) FeatureDesc() types.FeatureDesc { | |||||
| } | } | ||||
| func (b *builder) CreateShardStore() (types.ShardStore, error) { | func (b *builder) CreateShardStore() (types.ShardStore, error) { | ||||
| stgType := b.detail.Storage.Type.(*cortypes.MashupStorageType) | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.MashupStorageType) | |||||
| cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for mashup storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for mashup storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| newDetail := *b.detail | newDetail := *b.detail | ||||
| newDetail.Storage.Type = stgType.Store | |||||
| newDetail.UserSpace.Storage = stgType.Store | |||||
| newDetail.UserSpace.Credential = cred.Store | newDetail.UserSpace.Credential = cred.Store | ||||
| blder := reg.GetBuilderInternal(&newDetail) | blder := reg.GetBuilderInternal(&newDetail) | ||||
| return blder.CreateShardStore() | return blder.CreateShardStore() | ||||
| } | } | ||||
| func (b *builder) CreatePublicStore() (types.PublicStore, error) { | |||||
| stgType := b.detail.Storage.Type.(*cortypes.MashupStorageType) | |||||
| func (b *builder) CreateBaseStore() (types.BaseStore, error) { | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.MashupStorageType) | |||||
| cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for mashup storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for mashup storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| newDetail := *b.detail | newDetail := *b.detail | ||||
| newDetail.Storage.Type = stgType.Store | |||||
| newDetail.UserSpace.Storage = stgType.Store | |||||
| newDetail.UserSpace.Credential = cred.Store | newDetail.UserSpace.Credential = cred.Store | ||||
| blder := reg.GetBuilderInternal(&newDetail) | blder := reg.GetBuilderInternal(&newDetail) | ||||
| return blder.CreatePublicStore() | |||||
| return blder.CreateBaseStore() | |||||
| } | } | ||||
| func (b *builder) CreateMultiparter() (types.Multiparter, error) { | func (b *builder) CreateMultiparter() (types.Multiparter, error) { | ||||
| stgType := b.detail.Storage.Type.(*cortypes.MashupStorageType) | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.MashupStorageType) | |||||
| cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for mashup storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for mashup storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| newDetail := *b.detail | newDetail := *b.detail | ||||
| newDetail.Storage.Type = stgType.Feature | |||||
| newDetail.UserSpace.Storage = stgType.Feature | |||||
| newDetail.UserSpace.Credential = cred.Feature | newDetail.UserSpace.Credential = cred.Feature | ||||
| blder := reg.GetBuilderInternal(&newDetail) | blder := reg.GetBuilderInternal(&newDetail) | ||||
| @@ -82,14 +83,14 @@ func (b *builder) CreateMultiparter() (types.Multiparter, error) { | |||||
| } | } | ||||
| func (b *builder) CreateS2STransfer() (types.S2STransfer, error) { | func (b *builder) CreateS2STransfer() (types.S2STransfer, error) { | ||||
| stgType := b.detail.Storage.Type.(*cortypes.MashupStorageType) | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.MashupStorageType) | |||||
| cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for mashup storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for mashup storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| newDetail := *b.detail | newDetail := *b.detail | ||||
| newDetail.Storage.Type = stgType.Feature | |||||
| newDetail.UserSpace.Storage = stgType.Feature | |||||
| newDetail.UserSpace.Credential = cred.Feature | newDetail.UserSpace.Credential = cred.Feature | ||||
| blder := reg.GetBuilderInternal(&newDetail) | blder := reg.GetBuilderInternal(&newDetail) | ||||
| @@ -97,16 +98,17 @@ func (b *builder) CreateS2STransfer() (types.S2STransfer, error) { | |||||
| } | } | ||||
| func (b *builder) CreateECMultiplier() (types.ECMultiplier, error) { | func (b *builder) CreateECMultiplier() (types.ECMultiplier, error) { | ||||
| stgType := b.detail.Storage.Type.(*cortypes.MashupStorageType) | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.MashupStorageType) | |||||
| cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | cred, ok := b.detail.UserSpace.Credential.(*cortypes.MashupCred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for mashup storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for mashup storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| newDetail := *b.detail | newDetail := *b.detail | ||||
| newDetail.Storage.Type = stgType.Feature | |||||
| newDetail.UserSpace.Storage = stgType.Feature | |||||
| newDetail.UserSpace.Credential = cred.Feature | newDetail.UserSpace.Credential = cred.Feature | ||||
| blder := reg.GetBuilderInternal(&newDetail) | blder := reg.GetBuilderInternal(&newDetail) | ||||
| return blder.CreateECMultiplier() | return blder.CreateECMultiplier() | ||||
| } | } | ||||
| */ | |||||
| @@ -40,34 +40,36 @@ func (b *builder) FeatureDesc() types.FeatureDesc { | |||||
| } | } | ||||
| func (b *builder) CreateShardStore() (types.ShardStore, error) { | func (b *builder) CreateShardStore() (types.ShardStore, error) { | ||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType) | |||||
| cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred) | cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| cli, bucket, err := createClient(cred) | |||||
| cli, bucket, err := createClient(stgType, cred) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| return NewShardStore(b.detail, cred, cli, bucket) | |||||
| return NewShardStore(b.detail, stgType, cred, cli, bucket) | |||||
| } | } | ||||
| func (b *builder) CreatePublicStore() (types.PublicStore, error) { | |||||
| func (b *builder) CreateBaseStore() (types.BaseStore, error) { | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType) | |||||
| cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred) | cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| cli, bucket, err := createClient(cred) | |||||
| cli, bucket, err := createClient(stgType, cred) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| return s3stg.NewPublicStore(b.detail, cli, bucket) | |||||
| return s3stg.NewBaseStore(b.detail, cli, bucket, s3stg.BaseStoreOption{UseAWSSha256: false}) | |||||
| } | } | ||||
| func createClient(cred *cortypes.OBSCred) (*s3.Client, string, error) { | |||||
| func createClient(stgType *cortypes.OBSType, cred *cortypes.OBSCred) (*s3.Client, string, error) { | |||||
| awsConfig := aws.Config{} | awsConfig := aws.Config{} | ||||
| cre := aws.Credentials{ | cre := aws.Credentials{ | ||||
| @@ -75,19 +77,20 @@ func createClient(cred *cortypes.OBSCred) (*s3.Client, string, error) { | |||||
| SecretAccessKey: cred.SK, | SecretAccessKey: cred.SK, | ||||
| } | } | ||||
| awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: cre} | awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: cre} | ||||
| awsConfig.Region = cred.Region | |||||
| awsConfig.Region = stgType.Region | |||||
| options := []func(*s3.Options){} | options := []func(*s3.Options){} | ||||
| options = append(options, func(s3Opt *s3.Options) { | options = append(options, func(s3Opt *s3.Options) { | ||||
| s3Opt.BaseEndpoint = &cred.Endpoint | |||||
| s3Opt.BaseEndpoint = &stgType.Endpoint | |||||
| }) | }) | ||||
| cli := s3.NewFromConfig(awsConfig, options...) | cli := s3.NewFromConfig(awsConfig, options...) | ||||
| return cli, cred.Bucket, nil | |||||
| return cli, stgType.Bucket, nil | |||||
| } | } | ||||
| func (b *builder) CreateMultiparter() (types.Multiparter, error) { | func (b *builder) CreateMultiparter() (types.Multiparter, error) { | ||||
| feat := utils.FindFeature[*cortypes.MultipartUploadFeature](b.detail.Storage) | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType) | |||||
| feat := utils.FindFeature[*cortypes.MultipartUploadFeature](b.detail) | |||||
| if feat == nil { | if feat == nil { | ||||
| return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{}) | return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{}) | ||||
| } | } | ||||
| @@ -97,7 +100,7 @@ func (b *builder) CreateMultiparter() (types.Multiparter, error) { | |||||
| return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| cli, bucket, err := createClient(cred) | |||||
| cli, bucket, err := createClient(stgType, cred) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| @@ -111,7 +114,8 @@ func (b *builder) CreateMultiparter() (types.Multiparter, error) { | |||||
| } | } | ||||
| func (b *builder) CreateS2STransfer() (types.S2STransfer, error) { | func (b *builder) CreateS2STransfer() (types.S2STransfer, error) { | ||||
| feat := utils.FindFeature[*cortypes.S2STransferFeature](b.detail.Storage) | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType) | |||||
| feat := utils.FindFeature[*cortypes.S2STransferFeature](b.detail) | |||||
| if feat == nil { | if feat == nil { | ||||
| return nil, fmt.Errorf("feature %T not found", cortypes.S2STransferFeature{}) | return nil, fmt.Errorf("feature %T not found", cortypes.S2STransferFeature{}) | ||||
| } | } | ||||
| @@ -121,5 +125,5 @@ func (b *builder) CreateS2STransfer() (types.S2STransfer, error) { | |||||
| return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| return NewS2STransfer(cred, feat), nil | |||||
| return NewS2STransfer(stgType, cred, feat), nil | |||||
| } | } | ||||
| @@ -13,14 +13,16 @@ import ( | |||||
| func Test_S2S(t *testing.T) { | func Test_S2S(t *testing.T) { | ||||
| Convey("OBS", t, func() { | Convey("OBS", t, func() { | ||||
| s2s := S2STransfer{ | s2s := S2STransfer{ | ||||
| cred: &cortypes.OBSCred{ | |||||
| stgType: &cortypes.OBSType{ | |||||
| Region: "cn-north-4", | Region: "cn-north-4", | ||||
| Endpoint: "obs.cn-north-4.myhuaweicloud.com", | Endpoint: "obs.cn-north-4.myhuaweicloud.com", | ||||
| AK: "", | |||||
| SK: "", | |||||
| Bucket: "pcm3-bucket3", | Bucket: "pcm3-bucket3", | ||||
| ProjectID: "", | ProjectID: "", | ||||
| }, | }, | ||||
| cred: &cortypes.OBSCred{ | |||||
| AK: "", | |||||
| SK: "", | |||||
| }, | |||||
| feat: &cortypes.S2STransferFeature{ | feat: &cortypes.S2STransferFeature{ | ||||
| TempDir: "s2s", | TempDir: "s2s", | ||||
| }, | }, | ||||
| @@ -28,17 +30,16 @@ func Test_S2S(t *testing.T) { | |||||
| newPath, err := s2s.Transfer(context.TODO(), &clitypes.UserSpaceDetail{ | newPath, err := s2s.Transfer(context.TODO(), &clitypes.UserSpaceDetail{ | ||||
| UserSpace: clitypes.UserSpace{ | UserSpace: clitypes.UserSpace{ | ||||
| Credential: cortypes.OBSCred{ | |||||
| Storage: &cortypes.OBSType{ | |||||
| Region: "cn-north-4", | Region: "cn-north-4", | ||||
| Endpoint: "obs.cn-north-4.myhuaweicloud.com", | Endpoint: "obs.cn-north-4.myhuaweicloud.com", | ||||
| AK: "", | |||||
| SK: "", | |||||
| Bucket: "pcm2-bucket2", | Bucket: "pcm2-bucket2", | ||||
| ProjectID: "", | ProjectID: "", | ||||
| }, | }, | ||||
| }, | |||||
| Storage: cortypes.Storage{ | |||||
| Type: &cortypes.OBSType{}, | |||||
| Credential: cortypes.OBSCred{ | |||||
| AK: "", | |||||
| SK: "", | |||||
| }, | |||||
| }, | }, | ||||
| }, "test_data/test03.txt", types.S2SOption{}) | }, "test_data/test03.txt", types.S2SOption{}) | ||||
| defer s2s.Abort() | defer s2s.Abort() | ||||
| @@ -18,16 +18,18 @@ import ( | |||||
| ) | ) | ||||
| type S2STransfer struct { | type S2STransfer struct { | ||||
| cred *cortypes.OBSCred | |||||
| feat *cortypes.S2STransferFeature | |||||
| taskID *int64 | |||||
| omsCli *oms.OmsClient | |||||
| stgType *cortypes.OBSType | |||||
| cred *cortypes.OBSCred | |||||
| feat *cortypes.S2STransferFeature | |||||
| taskID *int64 | |||||
| omsCli *oms.OmsClient | |||||
| } | } | ||||
| func NewS2STransfer(cred *cortypes.OBSCred, feat *cortypes.S2STransferFeature) *S2STransfer { | |||||
| func NewS2STransfer(stgType *cortypes.OBSType, cred *cortypes.OBSCred, feat *cortypes.S2STransferFeature) *S2STransfer { | |||||
| return &S2STransfer{ | return &S2STransfer{ | ||||
| cred: cred, | |||||
| feat: feat, | |||||
| stgType: stgType, | |||||
| cred: cred, | |||||
| feat: feat, | |||||
| } | } | ||||
| } | } | ||||
| @@ -41,19 +43,19 @@ func (s *S2STransfer) CanTransfer(src *clitypes.UserSpaceDetail) bool { | |||||
| func (s *S2STransfer) Transfer(ctx context.Context, src *clitypes.UserSpaceDetail, srcPath string, opt types.S2SOption) (string, error) { | func (s *S2STransfer) Transfer(ctx context.Context, src *clitypes.UserSpaceDetail, srcPath string, opt types.S2SOption) (string, error) { | ||||
| req := s.makeRequest(src, srcPath) | req := s.makeRequest(src, srcPath) | ||||
| if req == nil { | if req == nil { | ||||
| return "", fmt.Errorf("unsupported source storage type: %T", src.Storage.Type) | |||||
| return "", fmt.Errorf("unsupported source storage type: %T", src.UserSpace.Storage) | |||||
| } | } | ||||
| auth, err := basic.NewCredentialsBuilder(). | auth, err := basic.NewCredentialsBuilder(). | ||||
| WithAk(s.cred.AK). | WithAk(s.cred.AK). | ||||
| WithSk(s.cred.SK). | WithSk(s.cred.SK). | ||||
| WithProjectId(s.cred.ProjectID). | |||||
| WithProjectId(s.stgType.ProjectID). | |||||
| SafeBuild() | SafeBuild() | ||||
| if err != nil { | if err != nil { | ||||
| return "", err | return "", err | ||||
| } | } | ||||
| region, err := omsregion.SafeValueOf(s.cred.Region) | |||||
| region, err := omsregion.SafeValueOf(s.stgType.Region) | |||||
| if err != nil { | if err != nil { | ||||
| return "", err | return "", err | ||||
| } | } | ||||
| @@ -75,10 +77,10 @@ func (s *S2STransfer) Transfer(ctx context.Context, src *clitypes.UserSpaceDetai | |||||
| TaskType: &taskType, | TaskType: &taskType, | ||||
| SrcNode: req, | SrcNode: req, | ||||
| DstNode: &model.DstNodeReq{ | DstNode: &model.DstNodeReq{ | ||||
| Region: s.cred.Region, | |||||
| Region: s.stgType.Region, | |||||
| Ak: s.cred.AK, | Ak: s.cred.AK, | ||||
| Sk: s.cred.SK, | Sk: s.cred.SK, | ||||
| Bucket: s.cred.Bucket, | |||||
| Bucket: s.stgType.Bucket, | |||||
| SavePrefix: &tempPrefix, | SavePrefix: &tempPrefix, | ||||
| }, | }, | ||||
| }, | }, | ||||
| @@ -98,7 +100,7 @@ func (s *S2STransfer) Transfer(ctx context.Context, src *clitypes.UserSpaceDetai | |||||
| } | } | ||||
| func (s *S2STransfer) makeRequest(srcStg *clitypes.UserSpaceDetail, srcPath string) *model.SrcNodeReq { | func (s *S2STransfer) makeRequest(srcStg *clitypes.UserSpaceDetail, srcPath string) *model.SrcNodeReq { | ||||
| switch srcStg.Storage.Type.(type) { | |||||
| switch srcType := srcStg.UserSpace.Storage.(type) { | |||||
| case *cortypes.OBSType: | case *cortypes.OBSType: | ||||
| cloudType := "HuaweiCloud" | cloudType := "HuaweiCloud" | ||||
| @@ -109,10 +111,10 @@ func (s *S2STransfer) makeRequest(srcStg *clitypes.UserSpaceDetail, srcPath stri | |||||
| return &model.SrcNodeReq{ | return &model.SrcNodeReq{ | ||||
| CloudType: &cloudType, | CloudType: &cloudType, | ||||
| Region: &cred.Region, | |||||
| Region: &srcType.Region, | |||||
| Ak: &cred.AK, | Ak: &cred.AK, | ||||
| Sk: &cred.SK, | Sk: &cred.SK, | ||||
| Bucket: &cred.Bucket, | |||||
| Bucket: &srcType.Bucket, | |||||
| ObjectKey: &[]string{srcPath}, | ObjectKey: &[]string{srcPath}, | ||||
| } | } | ||||
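The hunks above move the OBS connection info (region, endpoint, bucket, project ID) out of `cortypes.OBSCred` and onto `cortypes.OBSType`, so the transferer now needs both. A minimal wiring sketch under that assumption; the values, `ctx`, and `srcSpace` are hypothetical, while the constructor and `Transfer` signatures follow this diff:

```go
// Hypothetical wiring of the refactored S2S transferer: connection details come
// from the typed storage description, credentials keep only the AK/SK pair.
stgType := &cortypes.OBSType{
    Region:    "cn-north-4",                               // assumed value
    Endpoint:  "https://obs.cn-north-4.myhuaweicloud.com", // assumed value
    Bucket:    "dst-bucket",                               // assumed value
    ProjectID: "0123456789abcdef",                         // assumed value
}
cred := &cortypes.OBSCred{AK: "ak", SK: "sk"}

s2s := NewS2STransfer(stgType, cred, &cortypes.S2STransferFeature{})
defer s2s.Abort()

// srcSpace is a hypothetical *clitypes.UserSpaceDetail describing the source space.
dstPath, err := s2s.Transfer(ctx, srcSpace, "test_data/test03.txt", types.S2SOption{})
if err != nil {
    // handle the transfer failure
}
_ = dstPath
```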
| @@ -11,12 +11,14 @@ import ( | |||||
| type ShardStore struct { | type ShardStore struct { | ||||
| *s3.ShardStore | *s3.ShardStore | ||||
| cred *cortypes.OBSCred | |||||
| stgType *cortypes.OBSType | |||||
| cred *cortypes.OBSCred | |||||
| } | } | ||||
| func NewShardStore(detail *clitypes.UserSpaceDetail, cred *cortypes.OBSCred, s3Cli *awss3.Client, bkt string) (*ShardStore, error) { | |||||
| func NewShardStore(detail *clitypes.UserSpaceDetail, stgType *cortypes.OBSType, cred *cortypes.OBSCred, s3Cli *awss3.Client, bkt string) (*ShardStore, error) { | |||||
| sd := ShardStore{ | sd := ShardStore{ | ||||
| cred: cred, | |||||
| stgType: stgType, | |||||
| cred: cred, | |||||
| } | } | ||||
| var err error | var err error | ||||
| @@ -31,7 +33,7 @@ func NewShardStore(detail *clitypes.UserSpaceDetail, cred *cortypes.OBSCred, s3C | |||||
| } | } | ||||
| func (s *ShardStore) HTTPBypassRead(fileHash clitypes.FileHash) (types.HTTPRequest, error) { | func (s *ShardStore) HTTPBypassRead(fileHash clitypes.FileHash) (types.HTTPRequest, error) { | ||||
| cli, err := obs.New(s.cred.AK, s.cred.SK, s.cred.Endpoint) | |||||
| cli, err := obs.New(s.cred.AK, s.cred.SK, s.stgType.Endpoint) | |||||
| if err != nil { | if err != nil { | ||||
| return types.HTTPRequest{}, err | return types.HTTPRequest{}, err | ||||
| } | } | ||||
| @@ -75,8 +75,8 @@ func (p *Pool) GetShardStore(spaceDetail *clitypes.UserSpaceDetail) (types.Shard | |||||
| return space.store, nil | return space.store, nil | ||||
| } | } | ||||
| func (p *Pool) GetPublicStore(spaceDetail *clitypes.UserSpaceDetail) (types.PublicStore, error) { | |||||
| return factory.GetBuilder(spaceDetail).CreatePublicStore() | |||||
| func (p *Pool) GetBaseStore(spaceDetail *clitypes.UserSpaceDetail) (types.BaseStore, error) { | |||||
| return factory.GetBuilder(spaceDetail).CreateBaseStore() | |||||
| } | } | ||||
| func (p *Pool) GetMultiparter(spaceDetail *clitypes.UserSpaceDetail) (types.Multiparter, error) { | func (p *Pool) GetMultiparter(spaceDetail *clitypes.UserSpaceDetail) (types.Multiparter, error) { | ||||
| @@ -3,42 +3,87 @@ package s3 | |||||
| import ( | import ( | ||||
| "bytes" | "bytes" | ||||
| "context" | "context" | ||||
| "crypto/sha256" | |||||
| "errors" | |||||
| "fmt" | |||||
| "io" | "io" | ||||
| "github.com/aws/aws-sdk-go-v2/aws" | "github.com/aws/aws-sdk-go-v2/aws" | ||||
| "github.com/aws/aws-sdk-go-v2/service/s3" | "github.com/aws/aws-sdk-go-v2/service/s3" | ||||
| s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | "gitlink.org.cn/cloudream/common/pkgs/logger" | ||||
| "gitlink.org.cn/cloudream/common/utils/io2" | |||||
| clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | ||||
| "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" | "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" | ||||
| ) | ) | ||||
| type PublicStore struct { | |||||
| type BaseStore struct { | |||||
| Detail *clitypes.UserSpaceDetail | Detail *clitypes.UserSpaceDetail | ||||
| Bucket string | Bucket string | ||||
| cli *s3.Client | cli *s3.Client | ||||
| opt BaseStoreOption | |||||
| } | } | ||||
| func NewPublicStore(detail *clitypes.UserSpaceDetail, cli *s3.Client, bkt string) (*PublicStore, error) { | |||||
| return &PublicStore{ | |||||
| type BaseStoreOption struct { | |||||
| UseAWSSha256 bool // Whether the SHA256 checksum provided by AWS can be used directly; if not, compute it locally. Defaults to local computation. | |||||
| } | |||||
| func NewBaseStore(detail *clitypes.UserSpaceDetail, cli *s3.Client, bkt string, opt BaseStoreOption) (*BaseStore, error) { | |||||
| return &BaseStore{ | |||||
| Detail: detail, | Detail: detail, | ||||
| Bucket: bkt, | Bucket: bkt, | ||||
| cli: cli, | cli: cli, | ||||
| opt: opt, | |||||
| }, nil | }, nil | ||||
| } | } | ||||
| func (s *PublicStore) Write(objPath string, stream io.Reader) error { | |||||
| func (s *BaseStore) Write(objPath string, stream io.Reader) (types.FileInfo, error) { | |||||
| key := objPath | key := objPath | ||||
| counter := io2.Counter(stream) | |||||
| if s.opt.UseAWSSha256 { | |||||
| resp, err := s.cli.PutObject(context.TODO(), &s3.PutObjectInput{ | |||||
| Bucket: aws.String(s.Bucket), | |||||
| Key: aws.String(key), | |||||
| Body: counter, | |||||
| ChecksumAlgorithm: s3types.ChecksumAlgorithmSha256, | |||||
| }) | |||||
| if err != nil { | |||||
| return types.FileInfo{}, err | |||||
| } | |||||
| if resp.ChecksumSHA256 == nil { | |||||
| return types.FileInfo{}, errors.New("SHA256 checksum not found in response") | |||||
| } | |||||
| hash, err := DecodeBase64Hash(*resp.ChecksumSHA256) | |||||
| if err != nil { | |||||
| return types.FileInfo{}, fmt.Errorf("decode SHA256 checksum: %v", err) | |||||
| } | |||||
| return types.FileInfo{ | |||||
| Hash: clitypes.NewFullHash(hash), | |||||
| Size: counter.Count(), | |||||
| }, nil | |||||
| } | |||||
| hashStr := io2.NewReadHasher(sha256.New(), counter) | |||||
| _, err := s.cli.PutObject(context.TODO(), &s3.PutObjectInput{ | _, err := s.cli.PutObject(context.TODO(), &s3.PutObjectInput{ | ||||
| Bucket: aws.String(s.Bucket), | Bucket: aws.String(s.Bucket), | ||||
| Key: aws.String(key), | Key: aws.String(key), | ||||
| Body: stream, | |||||
| Body: counter, | |||||
| }) | }) | ||||
| if err != nil { | |||||
| return types.FileInfo{}, err | |||||
| } | |||||
| return err | |||||
| return types.FileInfo{ | |||||
| Hash: clitypes.NewFullHash(hashStr.Sum()), | |||||
| Size: counter.Count(), | |||||
| }, nil | |||||
| } | } | ||||
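Since `Write` now reports the stored file's hash and size, callers can consume the returned `types.FileInfo` instead of hashing the stream themselves. A hedged usage sketch, assuming hypothetical `detail`, `s3Cli`, and `payload` variables:

```go
// Hypothetical caller of the new Write signature. Whether the SHA256 comes from
// the AWS-provided checksum or the local hasher, the caller only sees types.FileInfo.
store, err := NewBaseStore(detail, s3Cli, "test-bucket", BaseStoreOption{UseAWSSha256: false})
if err != nil {
    return err
}

info, err := store.Write("test_data/test03.txt", bytes.NewReader(payload))
if err != nil {
    return err
}
fmt.Printf("wrote %d bytes, hash %v\n", info.Size, info.Hash)
```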
| func (s *PublicStore) Read(objPath string) (io.ReadCloser, error) { | |||||
| func (s *BaseStore) Read(objPath string) (io.ReadCloser, error) { | |||||
| key := objPath | key := objPath | ||||
| resp, err := s.cli.GetObject(context.TODO(), &s3.GetObjectInput{ | resp, err := s.cli.GetObject(context.TODO(), &s3.GetObjectInput{ | ||||
| @@ -53,7 +98,7 @@ func (s *PublicStore) Read(objPath string) (io.ReadCloser, error) { | |||||
| return resp.Body, nil | return resp.Body, nil | ||||
| } | } | ||||
| func (s *PublicStore) Mkdir(path string) error { | |||||
| func (s *BaseStore) Mkdir(path string) error { | |||||
| _, err := s.cli.PutObject(context.TODO(), &s3.PutObjectInput{ | _, err := s.cli.PutObject(context.TODO(), &s3.PutObjectInput{ | ||||
| Bucket: aws.String(s.Bucket), | Bucket: aws.String(s.Bucket), | ||||
| Key: aws.String(path + "/"), | Key: aws.String(path + "/"), | ||||
| @@ -62,7 +107,7 @@ func (s *PublicStore) Mkdir(path string) error { | |||||
| return err | return err | ||||
| } | } | ||||
| func (s *PublicStore) ListAll(path string) ([]types.PublicStoreEntry, error) { | |||||
| func (s *BaseStore) ListAll(path string) ([]types.BaseStoreEntry, error) { | |||||
| key := path | key := path | ||||
| // TODO: needs testing | // TODO: needs testing | ||||
| @@ -72,7 +117,7 @@ func (s *PublicStore) ListAll(path string) ([]types.PublicStoreEntry, error) { | |||||
| Delimiter: aws.String("/"), | Delimiter: aws.String("/"), | ||||
| } | } | ||||
| var objs []types.PublicStoreEntry | |||||
| var objs []types.BaseStoreEntry | |||||
| var marker *string | var marker *string | ||||
| for { | for { | ||||
| @@ -83,7 +128,7 @@ func (s *PublicStore) ListAll(path string) ([]types.PublicStoreEntry, error) { | |||||
| } | } | ||||
| for _, obj := range resp.Contents { | for _, obj := range resp.Contents { | ||||
| objs = append(objs, types.PublicStoreEntry{ | |||||
| objs = append(objs, types.BaseStoreEntry{ | |||||
| Path: *obj.Key, | Path: *obj.Key, | ||||
| Size: *obj.Size, | Size: *obj.Size, | ||||
| IsDir: false, | IsDir: false, | ||||
| @@ -100,13 +145,13 @@ func (s *PublicStore) ListAll(path string) ([]types.PublicStoreEntry, error) { | |||||
| return objs, nil | return objs, nil | ||||
| } | } | ||||
| func (s *PublicStore) getLogger() logger.Logger { | |||||
| return logger.WithField("PublicStore", "S3").WithField("Storage", s.Detail.Storage.String()) | |||||
| func (s *BaseStore) getLogger() logger.Logger { | |||||
| return logger.WithField("BaseStore", "S3").WithField("Storage", s.Detail.UserSpace.Storage.String()) | |||||
| } | } | ||||
| var _ types.BypassPublicRead = (*PublicStore)(nil) | |||||
| var _ types.BypassPublicRead = (*BaseStore)(nil) | |||||
| func (s *PublicStore) BypassPublicRead(pa string) (types.BypassFilePath, error) { | |||||
| func (s *BaseStore) BypassPublicRead(pa string) (types.BypassFilePath, error) { | |||||
| info, err := s.cli.HeadObject(context.TODO(), &s3.HeadObjectInput{ | info, err := s.cli.HeadObject(context.TODO(), &s3.HeadObjectInput{ | ||||
| Bucket: aws.String(s.Bucket), | Bucket: aws.String(s.Bucket), | ||||
| Key: aws.String(pa), | Key: aws.String(pa), | ||||
| @@ -125,9 +170,9 @@ func (s *PublicStore) BypassPublicRead(pa string) (types.BypassFilePath, error) | |||||
| }, nil | }, nil | ||||
| } | } | ||||
| var _ types.BypassPublicWrite = (*PublicStore)(nil) | |||||
| var _ types.BypassPublicWrite = (*BaseStore)(nil) | |||||
| func (s *PublicStore) BypassedPublic(info types.BypassedFileInfo, dstPath string) error { | |||||
| func (s *BaseStore) BypassedPublic(info types.BypassedFileInfo, dstPath string) error { | |||||
| _, err := s.cli.CopyObject(context.TODO(), &s3.CopyObjectInput{ | _, err := s.cli.CopyObject(context.TODO(), &s3.CopyObjectInput{ | ||||
| Bucket: aws.String(s.Bucket), | Bucket: aws.String(s.Bucket), | ||||
| CopySource: aws.String(s.Bucket + "/" + info.Path), | CopySource: aws.String(s.Bucket + "/" + info.Path), | ||||
| @@ -39,12 +39,13 @@ func (b *builder) FeatureDesc() types.FeatureDesc { | |||||
| } | } | ||||
| func (b *builder) CreateShardStore() (types.ShardStore, error) { | func (b *builder) CreateShardStore() (types.ShardStore, error) { | ||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.S3Type) | |||||
| s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred) | s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for s3 storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for s3 storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| cli, bkt, err := createClient(s3Cred) | |||||
| cli, bkt, err := createClient(stgType, s3Cred) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| @@ -52,21 +53,21 @@ func (b *builder) CreateShardStore() (types.ShardStore, error) { | |||||
| return NewShardStore(b.detail, cli, bkt, ShardStoreOption{UseAWSSha256: true}) | return NewShardStore(b.detail, cli, bkt, ShardStoreOption{UseAWSSha256: true}) | ||||
| } | } | ||||
| func (b *builder) CreatePublicStore() (types.PublicStore, error) { | |||||
| func (b *builder) CreateBaseStore() (types.BaseStore, error) { | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.S3Type) | |||||
| s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred) | s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for s3 storage", b.detail.UserSpace.Credential) | return nil, fmt.Errorf("invalid storage credential type %T for s3 storage", b.detail.UserSpace.Credential) | ||||
| } | } | ||||
| cli, bkt, err := createClient(s3Cred) | |||||
| cli, bkt, err := createClient(stgType, s3Cred) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| return NewPublicStore(b.detail, cli, bkt) | |||||
| return NewBaseStore(b.detail, cli, bkt, BaseStoreOption{UseAWSSha256: false}) | |||||
| } | } | ||||
| func createClient(cred *cortypes.S3Cred) (*s3.Client, string, error) { | |||||
| func createClient(stgType *cortypes.S3Type, cred *cortypes.S3Cred) (*s3.Client, string, error) { | |||||
| awsConfig := aws.Config{} | awsConfig := aws.Config{} | ||||
| if cred.AK != "" && cred.SK != "" { | if cred.AK != "" && cred.SK != "" { | ||||
| @@ -77,29 +78,30 @@ func createClient(cred *cortypes.S3Cred) (*s3.Client, string, error) { | |||||
| awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: cre} | awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: cre} | ||||
| } | } | ||||
| awsConfig.Region = cred.Region | |||||
| awsConfig.Region = stgType.Region | |||||
| options := []func(*s3.Options){} | options := []func(*s3.Options){} | ||||
| options = append(options, func(s3Opt *s3.Options) { | options = append(options, func(s3Opt *s3.Options) { | ||||
| s3Opt.BaseEndpoint = &cred.Endpoint | |||||
| s3Opt.BaseEndpoint = &stgType.Endpoint | |||||
| }) | }) | ||||
| cli := s3.NewFromConfig(awsConfig, options...) | cli := s3.NewFromConfig(awsConfig, options...) | ||||
| return cli, cred.Bucket, nil | |||||
| return cli, stgType.Bucket, nil | |||||
| } | } | ||||
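With the connection info moved onto `cortypes.S3Type`, `createClient` now takes both the typed storage description and the bare credentials. A sketch of the new call shape with made-up values:

```go
// Hypothetical inputs for the refactored createClient: region/endpoint/bucket
// come from the storage type, AK/SK from the credential.
stgType := &cortypes.S3Type{
    Region:   "us-east-1",             // assumed value
    Endpoint: "http://127.0.0.1:9000", // assumed value, e.g. a local S3-compatible service
    Bucket:   "test-bucket",           // assumed value
}
s3Cred := &cortypes.S3Cred{AK: "ak", SK: "sk"}

cli, bucket, err := createClient(stgType, s3Cred)
if err != nil {
    return nil, err
}
_ = cli    // *s3.Client configured with the region and base endpoint above
_ = bucket // "test-bucket"
```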
| func (b *builder) CreateMultiparter() (types.Multiparter, error) { | func (b *builder) CreateMultiparter() (types.Multiparter, error) { | ||||
| feat := utils.FindFeature[*cortypes.MultipartUploadFeature](b.detail.Storage) | |||||
| stgType := b.detail.UserSpace.Storage.(*cortypes.S3Type) | |||||
| feat := utils.FindFeature[*cortypes.MultipartUploadFeature](b.detail) | |||||
| if feat == nil { | if feat == nil { | ||||
| return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{}) | return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{}) | ||||
| } | } | ||||
| s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred) | s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred) | ||||
| if !ok { | if !ok { | ||||
| return nil, fmt.Errorf("invalid storage credential type %T for s3 public store", b.detail.UserSpace.Credential) | |||||
| return nil, fmt.Errorf("invalid storage credential type %T for s3 base store", b.detail.UserSpace.Credential) | |||||
| } | } | ||||
| cli, bucket, err := createClient(s3Cred) | |||||
| cli, bucket, err := createClient(stgType, s3Cred) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| @@ -17,7 +17,6 @@ import ( | |||||
| "gitlink.org.cn/cloudream/common/utils/io2" | "gitlink.org.cn/cloudream/common/utils/io2" | ||||
| "gitlink.org.cn/cloudream/common/utils/os2" | "gitlink.org.cn/cloudream/common/utils/os2" | ||||
| clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | ||||
| stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" | |||||
| "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" | "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" | ||||
| ) | ) | ||||
| @@ -161,9 +160,10 @@ func (s *ShardStore) createWithAwsSha256(stream io.Reader) (types.FileInfo, erro | |||||
| Body: counter, | Body: counter, | ||||
| ChecksumAlgorithm: s3types.ChecksumAlgorithmSha256, | ChecksumAlgorithm: s3types.ChecksumAlgorithmSha256, | ||||
| }) | }) | ||||
| if stgglb.Stats.HubStorageTransfer != nil { | |||||
| stgglb.Stats.HubStorageTransfer.RecordUpload(s.Detail.Storage.StorageID, counter.Count(), err == nil) | |||||
| } | |||||
| // TODO2 | |||||
| // if stgglb.Stats.HubStorageTransfer != nil { | |||||
| // stgglb.Stats.HubStorageTransfer.RecordUpload(s.Detail.Storage.StorageID, counter.Count(), err == nil) | |||||
| // } | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("uploading file %v: %v", key, err) | log.Warnf("uploading file %v: %v", key, err) | ||||
| @@ -203,9 +203,10 @@ func (s *ShardStore) createWithCalcSha256(stream io.Reader) (types.FileInfo, err | |||||
| Key: aws.String(key), | Key: aws.String(key), | ||||
| Body: counter, | Body: counter, | ||||
| }) | }) | ||||
| if stgglb.Stats.HubStorageTransfer != nil { | |||||
| stgglb.Stats.HubStorageTransfer.RecordUpload(s.Detail.Storage.StorageID, counter.Count(), err == nil) | |||||
| } | |||||
| // TODO2 | |||||
| // if stgglb.Stats.HubStorageTransfer != nil { | |||||
| // stgglb.Stats.HubStorageTransfer.RecordUpload(s.Detail.Storage.StorageID, counter.Count(), err == nil) | |||||
| // } | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("uploading file %v: %v", key, err) | log.Warnf("uploading file %v: %v", key, err) | ||||
| @@ -302,9 +303,10 @@ func (s *ShardStore) Open(opt types.OpenOption) (io.ReadCloser, error) { | |||||
| } | } | ||||
| return io2.CounterCloser(resp.Body, func(cnt int64, err error) { | return io2.CounterCloser(resp.Body, func(cnt int64, err error) { | ||||
| if stgglb.Stats.HubStorageTransfer != nil { | |||||
| stgglb.Stats.HubStorageTransfer.RecordDownload(s.Detail.Storage.StorageID, cnt, err == nil || err == io.EOF) | |||||
| } | |||||
| // TODO2 | |||||
| // if stgglb.Stats.HubStorageTransfer != nil { | |||||
| // stgglb.Stats.HubStorageTransfer.RecordDownload(s.Detail.Storage.StorageID, cnt, err == nil || err == io.EOF) | |||||
| // } | |||||
| }), nil | }), nil | ||||
| } | } | ||||
| @@ -16,7 +16,7 @@ type BypassShardWrite interface { | |||||
| BypassedShard(info BypassedFileInfo) error | BypassedShard(info BypassedFileInfo) error | ||||
| } | } | ||||
| // Upload a file without going through the PublicStore. | |||||
| // Upload a file without going through the BaseStore. | |||||
| type BypassPublicWrite interface { | type BypassPublicWrite interface { | ||||
| BypassedPublic(info BypassedFileInfo, dstPath string) error | BypassedPublic(info BypassedFileInfo, dstPath string) error | ||||
| } | } | ||||
| @@ -33,7 +33,7 @@ type BypassShardRead interface { | |||||
| BypassShardRead(fileHash clitypes.FileHash) (BypassFilePath, error) | BypassShardRead(fileHash clitypes.FileHash) (BypassFilePath, error) | ||||
| } | } | ||||
| // Read a file without going through the PublicStore. Although the path alone is enough to read the file, this interface exists to obtain more detailed file information. | |||||
| // Read a file without going through the BaseStore. Although the path alone is enough to read the file, this interface exists to obtain more detailed file information. | |||||
| type BypassPublicRead interface { | type BypassPublicRead interface { | ||||
| BypassPublicRead(path string) (BypassFilePath, error) | BypassPublicRead(path string) (BypassFilePath, error) | ||||
| } | } | ||||
| @@ -14,21 +14,21 @@ func (b *EmptyBuilder) FeatureDesc() FeatureDesc { | |||||
| return FeatureDesc{} | return FeatureDesc{} | ||||
| } | } | ||||
| func (b *EmptyBuilder) CreateShardStore() (ShardStore, error) { | func (b *EmptyBuilder) CreateShardStore() (ShardStore, error) { | ||||
| return nil, fmt.Errorf("create shard store for %T: %w", b.Detail.Storage.Type, ErrUnsupported) | |||||
| return nil, fmt.Errorf("create shard store for %T: %w", b.Detail.UserSpace.Storage, ErrUnsupported) | |||||
| } | } | ||||
| func (b *EmptyBuilder) CreatePublicStore() (PublicStore, error) { | |||||
| return nil, fmt.Errorf("create public store for %T: %w", b.Detail.Storage.Type, ErrUnsupported) | |||||
| func (b *EmptyBuilder) CreateBaseStore() (BaseStore, error) { | |||||
| return nil, fmt.Errorf("create base store for %T: %w", b.Detail.UserSpace.Storage, ErrUnsupported) | |||||
| } | } | ||||
| func (b *EmptyBuilder) CreateMultiparter() (Multiparter, error) { | func (b *EmptyBuilder) CreateMultiparter() (Multiparter, error) { | ||||
| return nil, fmt.Errorf("create multipart initiator for %T: %w", b.Detail.Storage.Type, ErrUnsupported) | |||||
| return nil, fmt.Errorf("create multipart initiator for %T: %w", b.Detail.UserSpace.Storage, ErrUnsupported) | |||||
| } | } | ||||
| func (b *EmptyBuilder) CreateS2STransfer() (S2STransfer, error) { | func (b *EmptyBuilder) CreateS2STransfer() (S2STransfer, error) { | ||||
| return nil, fmt.Errorf("create s2s transfer for %T: %w", b.Detail.Storage.Type, ErrUnsupported) | |||||
| return nil, fmt.Errorf("create s2s transfer for %T: %w", b.Detail.UserSpace.Storage, ErrUnsupported) | |||||
| } | } | ||||
| func (b *EmptyBuilder) CreateECMultiplier() (ECMultiplier, error) { | func (b *EmptyBuilder) CreateECMultiplier() (ECMultiplier, error) { | ||||
| return nil, fmt.Errorf("create ec multiplier for %T: %w", b.Detail.Storage.Type, ErrUnsupported) | |||||
| return nil, fmt.Errorf("create ec multiplier for %T: %w", b.Detail.UserSpace.Storage, ErrUnsupported) | |||||
| } | } | ||||
| @@ -4,19 +4,19 @@ import ( | |||||
| "io" | "io" | ||||
| ) | ) | ||||
| type PublicStoreEntry struct { | |||||
| type BaseStoreEntry struct { | |||||
| Path string | Path string | ||||
| Size int64 | Size int64 | ||||
| IsDir bool | IsDir bool | ||||
| } | } | ||||
| type PublicStore interface { | |||||
| Write(path string, stream io.Reader) error | |||||
| type BaseStore interface { | |||||
| Write(path string, stream io.Reader) (FileInfo, error) | |||||
| Read(path string) (io.ReadCloser, error) | Read(path string) (io.ReadCloser, error) | ||||
| // Create a directory at the given path. For storage systems that do not support empty directories, an object whose key ends with "/" can be created to emulate one. | // Create a directory at the given path. For storage systems that do not support empty directories, an object whose key ends with "/" can be created to emulate one. | ||||
| Mkdir(path string) error | Mkdir(path string) error | ||||
| // Return all files under the given path; each returned path is the full path including the given prefix. The first entry is always the path itself, which may be either a file or a directory. | // Return all files under the given path; each returned path is the full path including the given prefix. The first entry is always the path itself, which may be either a file or a directory. | ||||
| // If the path does not exist, no error is returned; an empty list is returned instead. | // If the path does not exist, no error is returned; an empty list is returned instead. | ||||
| // The result strictly follows the storage system's raw output; for example, when the backend is an object store, directories may be absent, or emulated directories (objects ending with "/") may be included. | // The result strictly follows the storage system's raw output; for example, when the backend is an object store, directories may be absent, or emulated directories (objects ending with "/") may be included. | ||||
| ListAll(path string) ([]PublicStoreEntry, error) | |||||
| ListAll(path string) ([]BaseStoreEntry, error) | |||||
| } | } | ||||
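For reference, a hedged sketch of how a caller might walk a space through the `BaseStore` interface above; the `store` variable and the `"backup/"` prefix are hypothetical:

```go
// Hypothetical consumer of the BaseStore interface defined above.
entries, err := store.ListAll("backup/")
if err != nil {
    return err
}
for _, e := range entries {
    if e.IsDir {
        continue
    }
    rc, err := store.Read(e.Path)
    if err != nil {
        return err
    }
    // ... consume rc, e.g. copy it somewhere ...
    rc.Close()
}
```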
| @@ -20,10 +20,10 @@ type StorageEventChan = async.UnboundChannel[StorageEvent] | |||||
| type StorageBuilder interface { | type StorageBuilder interface { | ||||
| // Description of this storage system's feature capabilities | // Description of this storage system's feature capabilities | ||||
| FeatureDesc() FeatureDesc | FeatureDesc() FeatureDesc | ||||
| // Create a component that provides basic read/write storage capabilities | |||||
| CreateBaseStore() (BaseStore, error) | |||||
| // Create a shard storage component | // Create a shard storage component | ||||
| CreateShardStore() (ShardStore, error) | CreateShardStore() (ShardStore, error) | ||||
| // Create a public storage component | |||||
| CreatePublicStore() (PublicStore, error) | |||||
| // Create a multipart upload component | // Create a multipart upload component | ||||
| CreateMultiparter() (Multiparter, error) | CreateMultiparter() (Multiparter, error) | ||||
| // Create a storage-to-storage direct transfer component | // Create a storage-to-storage direct transfer component | ||||
| @@ -1,11 +1,12 @@ | |||||
| package utils | package utils | ||||
| import ( | import ( | ||||
| clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" | |||||
| cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" | cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" | ||||
| ) | ) | ||||
| func FindFeature[T cortypes.StorageFeature](detail cortypes.Storage) T { | |||||
| for _, f := range detail.Features { | |||||
| func FindFeature[T cortypes.StorageFeature](detail *clitypes.UserSpaceDetail) T { | |||||
| for _, f := range detail.UserSpace.Features { | |||||
| f2, ok := f.(T) | f2, ok := f.(T) | ||||
| if ok { | if ok { | ||||
| return f2 | return f2 | ||||
| @@ -40,8 +40,7 @@ func migrate(configPath string) { | |||||
| migrateOne(db, cortypes.HubConnectivity{}) | migrateOne(db, cortypes.HubConnectivity{}) | ||||
| migrateOne(db, cortypes.Hub{}) | migrateOne(db, cortypes.Hub{}) | ||||
| migrateOne(db, cortypes.Location{}) | |||||
| migrateOne(db, cortypes.Storage{}) | |||||
| migrateOne(db, cortypes.HubLocation{}) | |||||
| migrateOne(db, cortypes.User{}) | migrateOne(db, cortypes.User{}) | ||||
| fmt.Println("migrate success") | fmt.Println("migrate success") | ||||
| @@ -0,0 +1,25 @@ | |||||
| package db | |||||
| import ( | |||||
| cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" | |||||
| ) | |||||
| type HubLocationDB struct { | |||||
| *DB | |||||
| } | |||||
| func (db *DB) HubLocation() *HubLocationDB { | |||||
| return &HubLocationDB{DB: db} | |||||
| } | |||||
| func (*HubLocationDB) GetByHubID(ctx SQLContext, id cortypes.HubID) ([]cortypes.HubLocation, error) { | |||||
| var ret []cortypes.HubLocation | |||||
| err := ctx.Where("HubID = ?", id).Find(&ret).Error | |||||
| return ret, err | |||||
| } | |||||
| func (*HubLocationDB) GetAll(ctx SQLContext) ([]cortypes.HubLocation, error) { | |||||
| var ret []cortypes.HubLocation | |||||
| err := ctx.Find(&ret).Error | |||||
| return ret, err | |||||
| } | |||||
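A hypothetical call site, mirroring how the coordinator's SelectStorageHub handler later in this diff reads this table inside a transaction; `d` and `tx` are assumed to be a `*DB` and a `SQLContext`:

```go
// Hypothetical usage of HubLocationDB.
locs, err := d.HubLocation().GetAll(tx)
if err != nil {
    return nil, err
}
for _, loc := range locs {
    fmt.Printf("hub %v serves %v / %v\n", loc.HubID, loc.StorageName, loc.Location)
}
```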
| @@ -1,36 +0,0 @@ | |||||
| package db | |||||
| import ( | |||||
| "fmt" | |||||
| cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" | |||||
| ) | |||||
| type LocationDB struct { | |||||
| *DB | |||||
| } | |||||
| func (db *DB) Location() *LocationDB { | |||||
| return &LocationDB{DB: db} | |||||
| } | |||||
| func (*LocationDB) GetByID(ctx SQLContext, id int64) (cortypes.Location, error) { | |||||
| var ret cortypes.Location | |||||
| err := ctx.First(&ret, id).Error | |||||
| return ret, err | |||||
| } | |||||
| func (db *LocationDB) FindLocationByExternalIP(ctx SQLContext, ip string) (cortypes.Location, error) { | |||||
| var locID int64 | |||||
| err := ctx.Table("Hub").Select("LocationID").Where("ExternalIP = ?", ip).Scan(&locID).Error | |||||
| if err != nil { | |||||
| return cortypes.Location{}, fmt.Errorf("finding hub by external ip: %w", err) | |||||
| } | |||||
| loc, err := db.GetByID(ctx, locID) | |||||
| if err != nil { | |||||
| return cortypes.Location{}, fmt.Errorf("getting location by id: %w", err) | |||||
| } | |||||
| return loc, nil | |||||
| } | |||||
| @@ -1,43 +0,0 @@ | |||||
| package db | |||||
| import ( | |||||
| cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" | |||||
| ) | |||||
| type StorageDB struct { | |||||
| *DB | |||||
| } | |||||
| func (db *DB) Storage() *StorageDB { | |||||
| return &StorageDB{DB: db} | |||||
| } | |||||
| func (db *StorageDB) GetByID(ctx SQLContext, stgID cortypes.StorageID) (cortypes.Storage, error) { | |||||
| var stg cortypes.Storage | |||||
| err := ctx.Table("Storage").First(&stg, stgID).Error | |||||
| return stg, err | |||||
| } | |||||
| func (StorageDB) GetAllIDs(ctx SQLContext) ([]cortypes.StorageID, error) { | |||||
| var stgs []cortypes.StorageID | |||||
| err := ctx.Table("Storage").Select("StorageID").Find(&stgs).Error | |||||
| return stgs, err | |||||
| } | |||||
| func (db *StorageDB) BatchGetByID(ctx SQLContext, stgIDs []cortypes.StorageID) ([]cortypes.Storage, error) { | |||||
| var stgs []cortypes.Storage | |||||
| err := ctx.Table("Storage").Find(&stgs, "StorageID IN (?)", stgIDs).Error | |||||
| return stgs, err | |||||
| } | |||||
| func (db *StorageDB) BatchGetAllStorageIDs(ctx SQLContext, start int, count int) ([]cortypes.StorageID, error) { | |||||
| var ret []cortypes.StorageID | |||||
| err := ctx.Table("Storage").Select("StorageID").Find(&ret).Limit(count).Offset(start).Error | |||||
| return ret, err | |||||
| } | |||||
| func (db *StorageDB) GetHubStorages(ctx SQLContext, hubID cortypes.HubID) ([]cortypes.Storage, error) { | |||||
| var stgs []cortypes.Storage | |||||
| err := ctx.Table("Storage").Select("Storage.*").Find(&stgs, "MasterHub = ?", hubID).Error | |||||
| return stgs, err | |||||
| } | |||||
| @@ -2,40 +2,43 @@ package rpc | |||||
| import ( | import ( | ||||
| "context" | "context" | ||||
| "fmt" | |||||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | "gitlink.org.cn/cloudream/common/consts/errorcode" | ||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | "gitlink.org.cn/cloudream/common/pkgs/logger" | ||||
| "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" | "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" | ||||
| corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" | corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" | ||||
| "gitlink.org.cn/cloudream/jcs-pub/coordinator/internal/db" | "gitlink.org.cn/cloudream/jcs-pub/coordinator/internal/db" | ||||
| cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" | cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" | ||||
| ) | ) | ||||
| func (svc *Service) GetStorageDetails(ctx context.Context, msg *corrpc.GetStorageDetails) (*corrpc.GetStorageDetailsResp, *rpc.CodeError) { | |||||
| func (svc *Service) SelectStorageHub(ctx context.Context, msg *corrpc.SelectStorageHub) (*corrpc.SelectStorageHubResp, *rpc.CodeError) { | |||||
| d := svc.db | d := svc.db | ||||
| stgs, err := db.DoTx02(d, func(tx db.SQLContext) ([]*cortypes.StorageDetail, error) { | |||||
| stgs, err := d.Storage().BatchGetByID(tx, msg.StorageIDs) | |||||
| resp, err := db.DoTx02(d, func(tx db.SQLContext) ([]*cortypes.Hub, error) { | |||||
| allLoc, err := d.HubLocation().GetAll(tx) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, fmt.Errorf("getting storages: %v", err) | |||||
| } | |||||
| stgMap := make(map[cortypes.StorageID]*cortypes.Storage) | |||||
| for _, stg := range stgs { | |||||
| s := stg | |||||
| stgMap[stg.StorageID] = &s | |||||
| return nil, err | |||||
| } | } | ||||
| hubIDs := make([]cortypes.HubID, 0, len(stgs)) | |||||
| for _, stg := range stgs { | |||||
| if stg.MasterHub != 0 { | |||||
| hubIDs = append(hubIDs, stg.MasterHub) | |||||
| stgHubIDs := make([]cortypes.HubID, 0, len(msg.Storages)) | |||||
| for _, stg := range msg.Storages { | |||||
| stgLoc := stg.GetLocation() | |||||
| var matchedHubID cortypes.HubID | |||||
| var matchedScore int | |||||
| for _, loc := range allLoc { | |||||
| sc := matchLocation(stgLoc, loc) | |||||
| if sc > matchedScore { | |||||
| matchedScore = sc | |||||
| matchedHubID = loc.HubID | |||||
| } | |||||
| } | } | ||||
| stgHubIDs = append(stgHubIDs, matchedHubID) | |||||
| } | } | ||||
| hubs, err := d.Hub().BatchGetByID(tx, hubIDs) | |||||
| hubs, err := d.Hub().BatchGetByID(tx, stgHubIDs) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, fmt.Errorf("getting hubs: %v", err) | |||||
| return nil, err | |||||
| } | } | ||||
| hubMap := make(map[cortypes.HubID]*cortypes.Hub) | hubMap := make(map[cortypes.HubID]*cortypes.Hub) | ||||
| @@ -44,24 +47,43 @@ func (svc *Service) GetStorageDetails(ctx context.Context, msg *corrpc.GetStorag | |||||
| hubMap[hub.HubID] = &h | hubMap[hub.HubID] = &h | ||||
| } | } | ||||
| details := make([]*cortypes.StorageDetail, len(msg.StorageIDs)) | |||||
| for i, stgID := range msg.StorageIDs { | |||||
| stg := stgMap[stgID] | |||||
| if stg == nil { | |||||
| continue | |||||
| } | |||||
| details[i] = &cortypes.StorageDetail{ | |||||
| Storage: *stg, | |||||
| MasterHub: hubMap[stg.MasterHub], | |||||
| } | |||||
| resp := make([]*cortypes.Hub, len(msg.Storages)) | |||||
| for i := range msg.Storages { | |||||
| resp[i] = hubMap[stgHubIDs[i]] | |||||
| } | } | ||||
| return details, nil | |||||
| return resp, nil | |||||
| }) | }) | ||||
| if err != nil { | if err != nil { | ||||
| logger.Warnf("getting storage details: %s", err.Error()) | |||||
| return nil, rpc.Failed(errorcode.OperationFailed, fmt.Sprintf("getting storage details: %v", err)) | |||||
| logger.Warnf("select storage hubs: %s", err.Error()) | |||||
| return nil, rpc.Failed(errorcode.OperationFailed, "%v", err) | |||||
| } | |||||
| return &corrpc.SelectStorageHubResp{ | |||||
| Hubs: resp, | |||||
| }, nil | |||||
| } | |||||
| // Matching rules: | |||||
| // 1. Check the storage Location against the HubLocation by StorageName first, then by Location | |||||
| // 2. "*" is a wildcard matching any value; once a wildcard matches, matching ends immediately | |||||
| // 3. The more precise the match, the higher the score | |||||
| func matchLocation(loc cortypes.Location, hubLoc cortypes.HubLocation) int { | |||||
| if hubLoc.StorageName == "*" { | |||||
| return 1 | |||||
| } | |||||
| if hubLoc.StorageName != loc.StorageName { | |||||
| return 0 | |||||
| } | |||||
| if hubLoc.Location == "*" { | |||||
| return 2 | |||||
| } | |||||
| if hubLoc.Location == loc.Location { | |||||
| return 3 | |||||
| } | } | ||||
| return corrpc.RespGetStorageDetails(stgs), nil | |||||
| return 0 | |||||
| } | } | ||||
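A quick worked example of the scoring above (hypothetical data): an exact StorageName+Location match beats a StorageName match with a location wildcard, which beats a bare wildcard.

```go
// Scores produced by matchLocation for a storage located at OBS / cn-north-4.
stgLoc := cortypes.Location{StorageName: "OBS", Location: "cn-north-4"}

s1 := matchLocation(stgLoc, cortypes.HubLocation{StorageName: "*"})                           // 1: wildcard hub
s2 := matchLocation(stgLoc, cortypes.HubLocation{StorageName: "OBS", Location: "*"})          // 2: right storage, any location
s3 := matchLocation(stgLoc, cortypes.HubLocation{StorageName: "OBS", Location: "cn-north-4"}) // 3: exact match, highest score
s4 := matchLocation(stgLoc, cortypes.HubLocation{StorageName: "OSS", Location: "cn-north-4"}) // 0: different storage name
_, _, _, _ = s1, s2, s3, s4
```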
| @@ -0,0 +1,6 @@ | |||||
| package types | |||||
| type Location struct { | |||||
| StorageName string `json:"storageName"` | |||||
| Location string `json:"location"` | |||||
| } | |||||
| @@ -1,12 +1,11 @@ | |||||
| package types | package types | ||||
| import ( | import ( | ||||
| "fmt" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/types" | "gitlink.org.cn/cloudream/common/pkgs/types" | ||||
| "gitlink.org.cn/cloudream/common/utils/serder" | "gitlink.org.cn/cloudream/common/utils/serder" | ||||
| ) | ) | ||||
| /* | |||||
| type Storage struct { | type Storage struct { | ||||
| StorageID StorageID `json:"storageID" gorm:"column:StorageID; primaryKey; type:bigint; autoIncrement;"` | StorageID StorageID `json:"storageID" gorm:"column:StorageID; primaryKey; type:bigint; autoIncrement;"` | ||||
| Name string `json:"name" gorm:"column:Name; type:varchar(256); not null"` | Name string `json:"name" gorm:"column:Name; type:varchar(256); not null"` | ||||
| @@ -30,17 +29,20 @@ type StorageDetail struct { | |||||
| Storage Storage | Storage Storage | ||||
| MasterHub *Hub | MasterHub *Hub | ||||
| } | } | ||||
| */ | |||||
| // Storage service address | // Storage service address | ||||
| type StorageType interface { | type StorageType interface { | ||||
| GetStorageType() string | GetStorageType() string | ||||
| GetLocation() Location | |||||
| // Determine whether two storage services are the same one. The comparison must be fine-grained: matching types are not enough, they must even refer to the same bucket. | |||||
| Equals(other StorageType) bool | |||||
| // Produce a string for debugging output; it must not contain sensitive information | // Produce a string for debugging output; it must not contain sensitive information | ||||
| String() string | String() string | ||||
| } | } | ||||
| var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[StorageType]( | var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[StorageType]( | ||||
| (*MashupStorageType)(nil), | |||||
| (*LocalStorageType)(nil), | |||||
| // (*MashupStorageType)(nil), | |||||
| (*LocalType)(nil), | |||||
| (*OBSType)(nil), | (*OBSType)(nil), | ||||
| (*OSSType)(nil), | (*OSSType)(nil), | ||||
| (*COSType)(nil), | (*COSType)(nil), | ||||
| @@ -48,44 +50,75 @@ var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[Storage | |||||
| (*S3Type)(nil), | (*S3Type)(nil), | ||||
| )), "type") | )), "type") | ||||
| // A mashup storage service composed of multiple storage services. Choose the combination carefully to avoid bugs | |||||
| type MashupStorageType struct { | |||||
| serder.Metadata `union:"Mashup"` | |||||
| Type string `json:"type"` | |||||
| Store StorageType `json:"store"` // Storage service type used when creating a ShardStore or PublicStore | |||||
| Feature StorageType `json:"feature"` // Storage service type used when creating components based on the Feature | |||||
| } | |||||
| // // A mashup storage service composed of multiple storage services. Choose the combination carefully to avoid bugs | |||||
| // type MashupStorageType struct { | |||||
| // serder.Metadata `union:"Mashup"` | |||||
| // Type string `json:"type"` | |||||
| // Store StorageType `json:"store"` // Storage service type used when creating a ShardStore or BaseStore | |||||
| // Feature StorageType `json:"feature"` // Storage service type used when creating components based on the Feature | |||||
| // } | |||||
| func (a *MashupStorageType) GetStorageType() string { | |||||
| return "Mashup" | |||||
| } | |||||
| // func (a *MashupStorageType) GetStorageType() string { | |||||
| // return "Mashup" | |||||
| // } | |||||
| func (a *MashupStorageType) String() string { | |||||
| return "Mashup" | |||||
| } | |||||
| // func (a *MashupStorageType) String() string { | |||||
| // return "Mashup" | |||||
| // } | |||||
| type LocalStorageType struct { | |||||
| type LocalType struct { | |||||
| serder.Metadata `union:"Local"` | serder.Metadata `union:"Local"` | ||||
| Type string `json:"type"` | |||||
| Type string `json:"type"` | |||||
| Location Location `json:"location"` | |||||
| } | } | ||||
| func (a *LocalStorageType) GetStorageType() string { | |||||
| func (a *LocalType) GetStorageType() string { | |||||
| return "Local" | return "Local" | ||||
| } | } | ||||
| func (a *LocalStorageType) String() string { | |||||
| func (a *LocalType) GetLocation() Location { | |||||
| return a.Location | |||||
| } | |||||
| func (a *LocalType) Equals(other StorageType) bool { | |||||
| o, ok := other.(*LocalType) | |||||
| if !ok { | |||||
| return false | |||||
| } | |||||
| return a.Location == o.Location | |||||
| } | |||||
| func (a *LocalType) String() string { | |||||
| return "Local" | return "Local" | ||||
| } | } | ||||
| type OSSType struct { | type OSSType struct { | ||||
| serder.Metadata `union:"OSS"` | serder.Metadata `union:"OSS"` | ||||
| Type string `json:"type"` | Type string `json:"type"` | ||||
| Region string `json:"region"` | |||||
| Endpoint string `json:"endpoint"` | |||||
| Bucket string `json:"bucket"` | |||||
| } | } | ||||
| func (a *OSSType) GetStorageType() string { | func (a *OSSType) GetStorageType() string { | ||||
| return "OSS" | return "OSS" | ||||
| } | } | ||||
| func (a *OSSType) GetLocation() Location { | |||||
| return Location{ | |||||
| StorageName: a.GetStorageType(), | |||||
| Location: a.Region, | |||||
| } | |||||
| } | |||||
| func (a *OSSType) Equals(other StorageType) bool { | |||||
| o, ok := other.(*OSSType) | |||||
| if !ok { | |||||
| return false | |||||
| } | |||||
| return a.Region == o.Region && a.Endpoint == o.Endpoint && a.Bucket == o.Bucket | |||||
| } | |||||
| func (a *OSSType) String() string { | func (a *OSSType) String() string { | ||||
| return "OSS" | return "OSS" | ||||
| } | } | ||||
| @@ -93,12 +126,31 @@ func (a *OSSType) String() string { | |||||
| type OBSType struct { | type OBSType struct { | ||||
| serder.Metadata `union:"OBS"` | serder.Metadata `union:"OBS"` | ||||
| Type string `json:"type"` | Type string `json:"type"` | ||||
| Region string `json:"region"` | |||||
| Endpoint string `json:"endpoint"` | |||||
| Bucket string `json:"bucket"` | |||||
| ProjectID string `json:"projectID"` | |||||
| } | } | ||||
| func (a *OBSType) GetStorageType() string { | func (a *OBSType) GetStorageType() string { | ||||
| return "OBS" | return "OBS" | ||||
| } | } | ||||
| func (a *OBSType) GetLocation() Location { | |||||
| return Location{ | |||||
| StorageName: a.GetStorageType(), | |||||
| Location: a.Region, | |||||
| } | |||||
| } | |||||
| func (a *OBSType) Equals(other StorageType) bool { | |||||
| o, ok := other.(*OBSType) | |||||
| if !ok { | |||||
| return false | |||||
| } | |||||
| return a.Region == o.Region && a.Endpoint == o.Endpoint && a.Bucket == o.Bucket && a.ProjectID == o.ProjectID | |||||
| } | |||||
| func (a *OBSType) String() string { | func (a *OBSType) String() string { | ||||
| return "OBS" | return "OBS" | ||||
| } | } | ||||
| @@ -106,12 +158,30 @@ func (a *OBSType) String() string { | |||||
| type COSType struct { | type COSType struct { | ||||
| serder.Metadata `union:"COS"` | serder.Metadata `union:"COS"` | ||||
| Type string `json:"type"` | Type string `json:"type"` | ||||
| Region string `json:"region"` | |||||
| Endpoint string `json:"endpoint"` | |||||
| Bucket string `json:"bucket"` | |||||
| } | } | ||||
| func (a *COSType) GetStorageType() string { | func (a *COSType) GetStorageType() string { | ||||
| return "COS" | return "COS" | ||||
| } | } | ||||
| func (a *COSType) GetLocation() Location { | |||||
| return Location{ | |||||
| StorageName: a.GetStorageType(), | |||||
| Location: a.Region, | |||||
| } | |||||
| } | |||||
| func (a *COSType) Equals(other StorageType) bool { | |||||
| o, ok := other.(*COSType) | |||||
| if !ok { | |||||
| return false | |||||
| } | |||||
| return a.Region == o.Region && a.Endpoint == o.Endpoint && a.Bucket == o.Bucket | |||||
| } | |||||
| func (a *COSType) String() string { | func (a *COSType) String() string { | ||||
| return "COS" | return "COS" | ||||
| } | } | ||||
| @@ -119,12 +189,28 @@ func (a *COSType) String() string { | |||||
| type EFileType struct { | type EFileType struct { | ||||
| serder.Metadata `union:"EFile"` | serder.Metadata `union:"EFile"` | ||||
| Type string `json:"type"` | Type string `json:"type"` | ||||
| ClusterID string `json:"clusterID"` | |||||
| } | } | ||||
| func (a *EFileType) GetStorageType() string { | func (a *EFileType) GetStorageType() string { | ||||
| return "EFile" | return "EFile" | ||||
| } | } | ||||
| func (a *EFileType) GetLocation() Location { | |||||
| return Location{ | |||||
| StorageName: a.GetStorageType(), | |||||
| Location: a.ClusterID, | |||||
| } | |||||
| } | |||||
| func (a *EFileType) Equals(other StorageType) bool { | |||||
| o, ok := other.(*EFileType) | |||||
| if !ok { | |||||
| return false | |||||
| } | |||||
| return a.ClusterID == o.ClusterID | |||||
| } | |||||
| func (a *EFileType) String() string { | func (a *EFileType) String() string { | ||||
| return "EFile" | return "EFile" | ||||
| } | } | ||||
| @@ -133,16 +219,34 @@ func (a *EFileType) String() string { | |||||
| type S3Type struct { | type S3Type struct { | ||||
| serder.Metadata `union:"S3"` | serder.Metadata `union:"S3"` | ||||
| Type string `json:"type"` | Type string `json:"type"` | ||||
| Region string `json:"region"` | |||||
| Endpoint string `json:"endpoint"` | |||||
| Bucket string `json:"bucket"` | |||||
| } | } | ||||
| func (a *S3Type) GetStorageType() string { | func (a *S3Type) GetStorageType() string { | ||||
| return "S3" | return "S3" | ||||
| } | } | ||||
| func (a *S3Type) GetLocation() Location { | |||||
| return Location{ | |||||
| StorageName: a.GetStorageType(), | |||||
| Location: a.Region, | |||||
| } | |||||
| } | |||||
| func (a *S3Type) String() string { | func (a *S3Type) String() string { | ||||
| return "S3" | return "S3" | ||||
| } | } | ||||
| func (a *S3Type) Equals(other StorageType) bool { | |||||
| o, ok := other.(*S3Type) | |||||
| if !ok { | |||||
| return false | |||||
| } | |||||
| return a.Region == o.Region && a.Endpoint == o.Endpoint && a.Bucket == o.Bucket | |||||
| } | |||||
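Illustrative only: how the new `GetLocation`/`Equals` methods are meant to be used from a caller's perspective. The two OBS descriptions below are made up; the field names follow this diff:

```go
// Two OBS storage descriptions that differ only in bucket (hypothetical values).
a := &cortypes.OBSType{Region: "cn-north-4", Endpoint: "obs.example.com", Bucket: "bucket-a", ProjectID: "p1"}
b := &cortypes.OBSType{Region: "cn-north-4", Endpoint: "obs.example.com", Bucket: "bucket-b", ProjectID: "p1"}

same := a.Equals(b)    // false: same region and endpoint, but a different bucket
loc := a.GetLocation() // Location{StorageName: "OBS", Location: "cn-north-4"}
_, _ = same, loc
```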
| type ShardStoreUserConfig struct { | type ShardStoreUserConfig struct { | ||||
| BaseDir string `json:"baseDir"` | BaseDir string `json:"baseDir"` | ||||
| MaxSize int64 `json:"maxSize"` | MaxSize int64 `json:"maxSize"` | ||||
| @@ -11,7 +11,7 @@ type StorageCredential interface { | |||||
| var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[StorageCredential]( | var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[StorageCredential]( | ||||
| (*LocalCred)(nil), | (*LocalCred)(nil), | ||||
| (*MashupCred)(nil), | |||||
| // (*MashupCred)(nil), | |||||
| (*OBSCred)(nil), | (*OBSCred)(nil), | ||||
| (*OSSCred)(nil), | (*OSSCred)(nil), | ||||
| (*COSCred)(nil), | (*COSCred)(nil), | ||||
| @@ -26,45 +26,35 @@ type LocalCred struct { | |||||
| RootDir string `json:"rootDir"` | RootDir string `json:"rootDir"` | ||||
| } | } | ||||
| type MashupCred struct { | |||||
| StorageCredential `json:"-"` | |||||
| serder.Metadata `union:"Mashup"` | |||||
| Store StorageCredential `json:"store"` | |||||
| Feature StorageCredential `json:"feature"` | |||||
| } | |||||
| // type MashupCred struct { | |||||
| // StorageCredential `json:"-"` | |||||
| // serder.Metadata `union:"Mashup"` | |||||
| // Store StorageCredential `json:"store"` | |||||
| // Feature StorageCredential `json:"feature"` | |||||
| // } | |||||
| type OSSCred struct { | type OSSCred struct { | ||||
| StorageCredential `json:"-"` | StorageCredential `json:"-"` | ||||
| serder.Metadata `union:"OSS"` | serder.Metadata `union:"OSS"` | ||||
| Type string `json:"type"` | Type string `json:"type"` | ||||
| Region string `json:"region"` | |||||
| AK string `json:"accessKeyId"` | AK string `json:"accessKeyId"` | ||||
| SK string `json:"secretAccessKey"` | SK string `json:"secretAccessKey"` | ||||
| Endpoint string `json:"endpoint"` | |||||
| Bucket string `json:"bucket"` | |||||
| } | } | ||||
| type OBSCred struct { | type OBSCred struct { | ||||
| StorageCredential `json:"-"` | StorageCredential `json:"-"` | ||||
| serder.Metadata `union:"OBS"` | serder.Metadata `union:"OBS"` | ||||
| Type string `json:"type"` | Type string `json:"type"` | ||||
| Region string `json:"region"` | |||||
| AK string `json:"accessKeyId"` | AK string `json:"accessKeyId"` | ||||
| SK string `json:"secretAccessKey"` | SK string `json:"secretAccessKey"` | ||||
| Endpoint string `json:"endpoint"` | |||||
| Bucket string `json:"bucket"` | |||||
| ProjectID string `json:"projectID"` | |||||
| } | } | ||||
| type COSCred struct { | type COSCred struct { | ||||
| StorageCredential `json:"-"` | StorageCredential `json:"-"` | ||||
| serder.Metadata `union:"COS"` | serder.Metadata `union:"COS"` | ||||
| Type string `json:"type"` | Type string `json:"type"` | ||||
| Region string `json:"region"` | |||||
| AK string `json:"accessKeyId"` | AK string `json:"accessKeyId"` | ||||
| SK string `json:"secretAccessKey"` | SK string `json:"secretAccessKey"` | ||||
| Endpoint string `json:"endpoint"` | |||||
| Bucket string `json:"bucket"` | |||||
| } | } | ||||
| type EFileCred struct { | type EFileCred struct { | ||||
| @@ -77,7 +67,6 @@ type EFileCred struct { | |||||
| User string `json:"user"` | User string `json:"user"` | ||||
| Password string `json:"password"` | Password string `json:"password"` | ||||
| OrgID string `json:"orgID"` | OrgID string `json:"orgID"` | ||||
| ClusterID string `json:"clusterID"` | |||||
| } | } | ||||
| // Generic storage service using the S3 protocol | // Generic storage service using the S3 protocol | ||||
| @@ -85,9 +74,6 @@ type S3Cred struct { | |||||
| StorageCredential `json:"-"` | StorageCredential `json:"-"` | ||||
| serder.Metadata `union:"S3"` | serder.Metadata `union:"S3"` | ||||
| Type string `json:"type"` | Type string `json:"type"` | ||||
| Region string `json:"region"` | |||||
| AK string `json:"accessKeyId"` | AK string `json:"accessKeyId"` | ||||
| SK string `json:"secretAccessKey"` | SK string `json:"secretAccessKey"` | ||||
| Endpoint string `json:"endpoint"` | |||||
| Bucket string `json:"bucket"` | |||||
| } | } | ||||
| @@ -71,15 +71,6 @@ func (HubConnectivity) TableName() string { | |||||
| return "HubConnectivity" | return "HubConnectivity" | ||||
| } | } | ||||
| type Location struct { | |||||
| LocationID LocationID `gorm:"column:LocationID; primaryKey; type:bigint; autoIncrement" json:"locationID"` | |||||
| Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"` | |||||
| } | |||||
| func (Location) TableName() string { | |||||
| return "Location" | |||||
| } | |||||
| type User struct { | type User struct { | ||||
| UserID UserID `gorm:"column:UserID; primaryKey; type:bigint; autoIncrement" json:"userID"` | UserID UserID `gorm:"column:UserID; primaryKey; type:bigint; autoIncrement" json:"userID"` | ||||
| Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"` | Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"` | ||||
| @@ -88,3 +79,13 @@ type User struct { | |||||
| func (User) TableName() string { | func (User) TableName() string { | ||||
| return "User" | return "User" | ||||
| } | } | ||||
| type HubLocation struct { | |||||
| HubID HubID `gorm:"column:HubID; primaryKey; type:bigint" json:"hubID"` | |||||
| StorageName string `gorm:"column:StorageName; type:varchar(255); not null" json:"storageName"` | |||||
| Location string `gorm:"column:Location; type:varchar(255); not null" json:"location"` | |||||
| } | |||||
| func (HubLocation) TableName() string { | |||||
| return "HubLocation" | |||||
| } | |||||
| @@ -9,8 +9,8 @@ import ( | |||||
| hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" | hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" | ||||
| ) | ) | ||||
| func (svc *Service) PublicStoreListAll(context context.Context, msg *hubrpc.PublicStoreListAll) (*hubrpc.PublicStoreListAllResp, *rpc.CodeError) { | |||||
| pub, err := svc.stgPool.GetPublicStore(&msg.UserSpace) | |||||
| func (svc *Service) BaseStoreListAll(context context.Context, msg *hubrpc.BaseStoreListAll) (*hubrpc.BaseStoreListAllResp, *rpc.CodeError) { | |||||
| pub, err := svc.stgPool.GetBaseStore(&msg.UserSpace) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, rpc.Failed(errorcode.OperationFailed, err.Error()) | return nil, rpc.Failed(errorcode.OperationFailed, err.Error()) | ||||
| } | } | ||||
| @@ -20,13 +20,13 @@ func (svc *Service) PublicStoreListAll(context context.Context, msg *hubrpc.Publ | |||||
| return nil, rpc.Failed(errorcode.OperationFailed, err.Error()) | return nil, rpc.Failed(errorcode.OperationFailed, err.Error()) | ||||
| } | } | ||||
| return &hubrpc.PublicStoreListAllResp{ | |||||
| return &hubrpc.BaseStoreListAllResp{ | |||||
| Entries: es, | Entries: es, | ||||
| }, nil | }, nil | ||||
| } | } | ||||
| func (svc *Service) PublicStoreMkdirs(context context.Context, msg *hubrpc.PublicStoreMkdirs) (*hubrpc.PublicStoreMkdirsResp, *rpc.CodeError) { | |||||
| pub, err := svc.stgPool.GetPublicStore(&msg.UserSpace) | |||||
| func (svc *Service) BaseStoreMkdirs(context context.Context, msg *hubrpc.BaseStoreMkdirs) (*hubrpc.BaseStoreMkdirsResp, *rpc.CodeError) { | |||||
| pub, err := svc.stgPool.GetBaseStore(&msg.UserSpace) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, rpc.Failed(errorcode.OperationFailed, err.Error()) | return nil, rpc.Failed(errorcode.OperationFailed, err.Error()) | ||||
| } | } | ||||
| @@ -41,7 +41,7 @@ func (svc *Service) PublicStoreMkdirs(context context.Context, msg *hubrpc.Publi | |||||
| } | } | ||||
| } | } | ||||
| return &hubrpc.PublicStoreMkdirsResp{ | |||||
| return &hubrpc.BaseStoreMkdirsResp{ | |||||
| Successes: suc, | Successes: suc, | ||||
| }, nil | }, nil | ||||
| } | } | ||||