| @@ -83,7 +83,7 @@ func serve(configPath string) { | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| cons := collector.GetAll() | |||
| nodeCons := make([]cdssdk.NodeConnectivity, 0, len(cons)) | |||
| hubCons := make([]cdssdk.HubConnectivity, 0, len(cons)) | |||
| for _, con := range cons { | |||
| var delay *float32 | |||
| if con.Delay != nil { | |||
| @@ -91,17 +91,17 @@ func serve(configPath string) { | |||
| delay = &v | |||
| } | |||
| nodeCons = append(nodeCons, cdssdk.NodeConnectivity{ | |||
| FromNodeID: *stgglb.Local.NodeID, | |||
| ToNodeID: con.ToNodeID, | |||
| Delay: delay, | |||
| TestTime: con.TestTime, | |||
| hubCons = append(hubCons, cdssdk.HubConnectivity{ | |||
| FromHubID: *stgglb.Local.HubID, | |||
| ToHubID: con.ToHubID, | |||
| Delay: delay, | |||
| TestTime: con.TestTime, | |||
| }) | |||
| } | |||
| _, err = coorCli.UpdateNodeConnectivities(coormq.ReqUpdateNodeConnectivities(nodeCons)) | |||
| _, err = coorCli.UpdateHubConnectivities(coormq.ReqUpdateHubConnectivities(hubCons)) | |||
| if err != nil { | |||
| log.Warnf("update node connectivities: %v", err) | |||
| log.Warnf("update hub connectivities: %v", err) | |||
| } | |||
| }) | |||
| conCol.CollectInPlace() | |||
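Worth noting in this hunk: the wire format for delays stays a `*float32` in milliseconds; only the `Node`→`Hub` field names change. A minimal stand-alone sketch of the round-trip, assuming the agent derives the float32 from a measured `time.Duration` as the inverse of the client-side `consMap` conversion shown further down in this diff:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Agent side: a measured RTT becomes a float32 in milliseconds.
	measured := 3500 * time.Microsecond
	ms := float32(measured.Seconds() * 1000) // 3.5

	// Client side (as in the consMap code later in this diff):
	// the float32 milliseconds are turned back into a time.Duration.
	back := time.Duration(ms * float32(time.Millisecond))

	fmt.Println(ms, back) // 3.5 3.5ms
}
```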
| @@ -160,7 +160,7 @@ func downloadHubConfig() coormq.GetHubConfigResp { | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| cfgResp, err := coorCli.GetHubConfig(coormq.ReqGetHubConfig(cdssdk.NodeID(config.Cfg().ID))) | |||
| cfgResp, err := coorCli.GetHubConfig(coormq.ReqGetHubConfig(cdssdk.HubID(config.Cfg().ID))) | |||
| if err != nil { | |||
| logger.Errorf("getting hub config: %v", err) | |||
| os.Exit(1) | |||
| @@ -13,7 +13,7 @@ import ( | |||
| ) | |||
| type Config struct { | |||
| ID cdssdk.NodeID `json:"id"` | |||
| ID cdssdk.HubID `json:"id"` | |||
| ListenAddr string `json:"listenAddr"` | |||
| Local stgmodels.LocalMachineInfo `json:"local"` | |||
| GRPC *grpc.Config `json:"grpc"` | |||
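The JSON key is unchanged here; only the Go type of `ID` moves from `cdssdk.NodeID` to `cdssdk.HubID`. A minimal sketch showing that existing `"id"` entries keep unmarshalling (stand-in types; the underlying integer type of `HubID` is an assumption):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins for the cdssdk types; int64 as the underlying type is assumed.
type HubID int64

type Config struct {
	ID         HubID  `json:"id"`
	ListenAddr string `json:"listenAddr"`
}

func main() {
	raw := []byte(`{"id": 1, "listenAddr": "0.0.0.0:5010"}`)
	var cfg Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// The JSON key is unchanged, so existing config files keep working.
	fmt.Printf("%+v\n", cfg) // {ID:1 ListenAddr:0.0.0.0:5010}
}
```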
| @@ -254,7 +254,7 @@ func (svc *Service) StartStorageCreatePackage(msg *agtmq.StartStorageCreatePacka | |||
| } | |||
| objIter := iterator.NewUploadingObjectIterator(fullPath, uploadFilePathes) | |||
| tsk := svc.taskManager.StartNew(mytask.NewCreatePackage(msg.UserID, msg.BucketID, msg.Name, objIter, msg.NodeAffinity)) | |||
| tsk := svc.taskManager.StartNew(mytask.NewCreatePackage(msg.UserID, msg.BucketID, msg.Name, objIter, msg.StorageAffinity)) | |||
| return mq.ReplyOK(agtmq.NewStartStorageCreatePackageResp(tsk.ID())) | |||
| } | |||
| @@ -23,12 +23,12 @@ type CreatePackageResult struct { | |||
| // CreatePackage defines the task structure for creating a package. | |||
| // It holds the user ID, bucket ID, package name, an iterator over the objects to upload, the storage affinity, and the task result. | |||
| type CreatePackage struct { | |||
| userID cdssdk.UserID | |||
| bucketID cdssdk.BucketID | |||
| name string | |||
| objIter iterator.UploadingObjectIterator | |||
| nodeAffinity *cdssdk.NodeID | |||
| Result CreatePackageResult | |||
| userID cdssdk.UserID | |||
| bucketID cdssdk.BucketID | |||
| name string | |||
| objIter iterator.UploadingObjectIterator | |||
| stgAffinity cdssdk.StorageID | |||
| Result CreatePackageResult | |||
| } | |||
| // NewCreatePackage creates a new CreatePackage instance. | |||
| @@ -36,15 +36,15 @@ type CreatePackage struct { | |||
| // bucketID: the bucket ID | |||
| // name: the package name | |||
| // objIter: iterator over the objects to upload | |||
| // nodeAffinity: node affinity, the preferred node on which to create the package (optional) | |||
| // stgAffinity: storage affinity, the preferred storage on which to create the package (0 means none) | |||
| // Returns a pointer to the CreatePackage instance. | |||
| func NewCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *CreatePackage { | |||
| func NewCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, stgAffinity cdssdk.StorageID) *CreatePackage { | |||
| return &CreatePackage{ | |||
| userID: userID, | |||
| bucketID: bucketID, | |||
| name: name, | |||
| objIter: objIter, | |||
| nodeAffinity: nodeAffinity, | |||
| userID: userID, | |||
| bucketID: bucketID, | |||
| name: name, | |||
| objIter: objIter, | |||
| stgAffinity: stgAffinity, | |||
| } | |||
| } | |||
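The affinity parameter changes from `*cdssdk.NodeID` (nil for none) to a plain `cdssdk.StorageID`, with the zero value meaning no affinity; the `if stgID != 0` and `if stgAffinity > 0` checks elsewhere in this diff rely on that convention, and callers such as `StorageCreatePackage` pass `0`. A small stand-alone sketch of it (stand-in ID types):

```go
package main

import "fmt"

// Stand-ins mirroring the cdssdk ID types used by NewCreatePackage.
type StorageID int64

// describeAffinity illustrates the new convention: a plain StorageID value
// instead of a *NodeID, where 0 means "no affinity".
func describeAffinity(stgAffinity StorageID) string {
	if stgAffinity > 0 {
		return fmt.Sprintf("pin upload to storage %d", stgAffinity)
	}
	return "no affinity; let chooseUploadStorage decide"
}

func main() {
	fmt.Println(describeAffinity(0)) // callers with no preference pass 0
	fmt.Println(describeAffinity(7)) // an explicit preferred storage
}
```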
| @@ -84,7 +84,7 @@ func (t *CreatePackage) Execute(task *task.Task[TaskContext], ctx TaskContext, c | |||
| return | |||
| } | |||
| uploadRet, err := cmd.NewUploadObjects(t.userID, createResp.Package.PackageID, t.objIter, t.nodeAffinity).Execute(&cmd.UploadObjectsContext{ | |||
| uploadRet, err := cmd.NewUploadObjects(t.userID, createResp.Package.PackageID, t.objIter, t.stgAffinity).Execute(&cmd.UploadObjectsContext{ | |||
| Distlock: ctx.distlock, | |||
| Connectivity: ctx.connectivity, | |||
| StgMgr: ctx.stgMgr, | |||
| @@ -182,7 +182,7 @@ func (t *StorageLoadPackage) downloadOne(coorCli *coormq.Client, shardStore type | |||
| func (t *StorageLoadPackage) downloadNoneOrRepObject(shardStore types.ShardStore, obj stgmod.ObjectDetail) (io.ReadCloser, error) { | |||
| if len(obj.Blocks) == 0 && len(obj.PinnedAt) == 0 { | |||
| return nil, fmt.Errorf("no node has this object") | |||
| return nil, fmt.Errorf("no storage has this object") | |||
| } | |||
| file, err := shardStore.Open(types.NewOpen(obj.Object.FileHash)) | |||
| @@ -194,12 +194,12 @@ func (t *StorageLoadPackage) downloadNoneOrRepObject(shardStore types.ShardStore | |||
| } | |||
| func (t *StorageLoadPackage) downloadECObject(coorCli *coormq.Client, shardStore types.ShardStore, obj stgmod.ObjectDetail, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, []stgmod.ObjectBlock, error) { | |||
| allNodes, err := t.sortDownloadNodes(coorCli, obj) | |||
| allStorages, err := t.sortDownloadStorages(coorCli, obj) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| bsc, blocks := t.getMinReadingBlockSolution(allNodes, ecRed.K) | |||
| osc, _ := t.getMinReadingObjectSolution(allNodes, ecRed.K) | |||
| bsc, blocks := t.getMinReadingBlockSolution(allStorages, ecRed.K) | |||
| osc, _ := t.getMinReadingObjectSolution(allStorages, ecRed.K) | |||
| if bsc < osc { | |||
| var fileStrs []io.ReadCloser | |||
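The `bsc < osc` comparison chooses between reassembling the object from blocks and fetching a pinned full copy. A worked example using the distance constants renamed later in this diff (0.1 same storage, 1 same location, 5 otherwise), under the assumption that the block solution sums one distance per block read while the object solution costs k times the distance of the pinned copy:

```go
package main

import "fmt"

func main() {
	const k = 2 // ECRedundancy.K: blocks needed to reconstruct the object

	// Distance tiers as defined in the consts hunk later in this diff.
	blockCost := 0.1 + 1.0       // read two blocks: one local, one in the same location
	objectCost := float64(k) * 5 // read the whole object from a distant pinned copy

	// downloadECObject compares the two; here it would reassemble from blocks.
	fmt.Println(blockCost, objectCost, blockCost < objectCost) // 1.1 10 true
}
```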
| @@ -251,7 +251,7 @@ type downloadStorageInfo struct { | |||
| Distance float64 | |||
| } | |||
| func (t *StorageLoadPackage) sortDownloadNodes(coorCli *coormq.Client, obj stgmod.ObjectDetail) ([]*downloadStorageInfo, error) { | |||
| func (t *StorageLoadPackage) sortDownloadStorages(coorCli *coormq.Client, obj stgmod.ObjectDetail) ([]*downloadStorageInfo, error) { | |||
| var stgIDs []cdssdk.StorageID | |||
| for _, id := range obj.PinnedAt { | |||
| if !lo.Contains(stgIDs, id) { | |||
| @@ -273,37 +273,37 @@ func (t *StorageLoadPackage) sortDownloadNodes(coorCli *coormq.Client, obj stgmo | |||
| allStgs[stg.Storage.StorageID] = *stg | |||
| } | |||
| downloadNodeMap := make(map[cdssdk.StorageID]*downloadStorageInfo) | |||
| downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo) | |||
| for _, id := range obj.PinnedAt { | |||
| node, ok := downloadNodeMap[id] | |||
| storage, ok := downloadStorageMap[id] | |||
| if !ok { | |||
| mod := allStgs[id] | |||
| node = &downloadStorageInfo{ | |||
| storage = &downloadStorageInfo{ | |||
| Storage: mod, | |||
| ObjectPinned: true, | |||
| Distance: t.getNodeDistance(mod), | |||
| Distance: t.getStorageDistance(mod), | |||
| } | |||
| downloadNodeMap[id] = node | |||
| downloadStorageMap[id] = storage | |||
| } | |||
| node.ObjectPinned = true | |||
| storage.ObjectPinned = true | |||
| } | |||
| for _, b := range obj.Blocks { | |||
| node, ok := downloadNodeMap[b.StorageID] | |||
| storage, ok := downloadStorageMap[b.StorageID] | |||
| if !ok { | |||
| mod := allStgs[b.StorageID] | |||
| node = &downloadStorageInfo{ | |||
| storage = &downloadStorageInfo{ | |||
| Storage: mod, | |||
| Distance: t.getNodeDistance(mod), | |||
| Distance: t.getStorageDistance(mod), | |||
| } | |||
| downloadNodeMap[b.StorageID] = node | |||
| downloadStorageMap[b.StorageID] = storage | |||
| } | |||
| node.Blocks = append(node.Blocks, b) | |||
| storage.Blocks = append(storage.Blocks, b) | |||
| } | |||
| return sort2.Sort(lo.Values(downloadNodeMap), func(left, right *downloadStorageInfo) int { | |||
| return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int { | |||
| return sort2.Cmp(left.Distance, right.Distance) | |||
| }), nil | |||
| } | |||
| @@ -313,11 +313,11 @@ type downloadBlock struct { | |||
| Block stgmod.ObjectBlock | |||
| } | |||
| func (t *StorageLoadPackage) getMinReadingBlockSolution(sortedNodes []*downloadStorageInfo, k int) (float64, []downloadBlock) { | |||
| func (t *StorageLoadPackage) getMinReadingBlockSolution(sortedStorages []*downloadStorageInfo, k int) (float64, []downloadBlock) { | |||
| gotBlocksMap := bitmap.Bitmap64(0) | |||
| var gotBlocks []downloadBlock | |||
| dist := float64(0.0) | |||
| for _, n := range sortedNodes { | |||
| for _, n := range sortedStorages { | |||
| for _, b := range n.Blocks { | |||
| if !gotBlocksMap.Get(b.Index) { | |||
| gotBlocks = append(gotBlocks, downloadBlock{ | |||
| @@ -337,10 +337,10 @@ func (t *StorageLoadPackage) getMinReadingBlockSolution(sortedNodes []*downloadS | |||
| return math.MaxFloat64, gotBlocks | |||
| } | |||
| func (t *StorageLoadPackage) getMinReadingObjectSolution(sortedNodes []*downloadStorageInfo, k int) (float64, *stgmod.StorageDetail) { | |||
| func (t *StorageLoadPackage) getMinReadingObjectSolution(sortedStorages []*downloadStorageInfo, k int) (float64, *stgmod.StorageDetail) { | |||
| dist := math.MaxFloat64 | |||
| var downloadStg *stgmod.StorageDetail | |||
| for _, n := range sortedNodes { | |||
| for _, n := range sortedStorages { | |||
| if n.ObjectPinned && float64(k)*n.Distance < dist { | |||
| dist = float64(k) * n.Distance | |||
| stg := n.Storage | |||
| @@ -351,16 +351,16 @@ func (t *StorageLoadPackage) getMinReadingObjectSolution(sortedNodes []*download | |||
| return dist, downloadStg | |||
| } | |||
| func (t *StorageLoadPackage) getNodeDistance(stg stgmod.StorageDetail) float64 { | |||
| if stgglb.Local.NodeID != nil { | |||
| if stg.MasterHub.NodeID == *stgglb.Local.NodeID { | |||
| return consts.NodeDistanceSameNode | |||
| func (t *StorageLoadPackage) getStorageDistance(stg stgmod.StorageDetail) float64 { | |||
| if stgglb.Local.HubID != nil { | |||
| if stg.MasterHub.HubID == *stgglb.Local.HubID { | |||
| return consts.StorageDistanceSameStorage | |||
| } | |||
| } | |||
| if stg.MasterHub.LocationID == stgglb.Local.LocationID { | |||
| return consts.NodeDistanceSameLocation | |||
| return consts.StorageDistanceSameLocation | |||
| } | |||
| return consts.NodeDistanceOther | |||
| return consts.StorageDistanceOther | |||
| } | |||
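A stand-alone sketch of the renamed distance tiers, mirroring `getStorageDistance` with plain values in place of `stgglb.Local` and `MasterHub` (the constant values match the `consts` hunk later in this diff):

```go
package main

import "fmt"

// Constants as renamed later in this diff.
const (
	StorageDistanceSameStorage  = 0.1
	StorageDistanceSameLocation = 1
	StorageDistanceOther        = 5
)

// distance mirrors getStorageDistance; the local hub ID and location ID
// stand in for stgglb.Local here.
func distance(masterHubID, localHubID, masterLoc, localLoc int64) float64 {
	if masterHubID == localHubID {
		return StorageDistanceSameStorage
	}
	if masterLoc == localLoc {
		return StorageDistanceSameLocation
	}
	return StorageDistanceOther
}

func main() {
	fmt.Println(distance(1, 1, 1, 1)) // 0.1: storage managed by this very hub
	fmt.Println(distance(2, 1, 1, 1)) // 1:   different hub, same location
	fmt.Println(distance(3, 1, 1, 2)) // 5:   different location
}
```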
| @@ -68,14 +68,14 @@ func loadByID(cmdCtx *CommandContext, pkgID cdssdk.PackageID, stgID cdssdk.Stora | |||
| userID := cdssdk.UserID(1) | |||
| startTime := time.Now() | |||
| nodeID, taskID, err := cmdCtx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(userID, pkgID, stgID) | |||
| hubID, taskID, err := cmdCtx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(userID, pkgID, stgID) | |||
| if err != nil { | |||
| fmt.Println(err) | |||
| return | |||
| } | |||
| for { | |||
| complete, fullPath, err := cmdCtx.Cmdline.Svc.StorageSvc().WaitStorageLoadPackage(nodeID, taskID, time.Second*10) | |||
| complete, fullPath, err := cmdCtx.Cmdline.Svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10) | |||
| if err != nil { | |||
| fmt.Println(err) | |||
| return | |||
| @@ -15,9 +15,9 @@ import ( | |||
| // ctx: the command context, providing the required services and environment configuration. | |||
| // packageID: the unique identifier of the package being uploaded. | |||
| // rootPath: the root directory of the files to upload on the local file system. | |||
| // nodeAffinity: a list of preferred node IDs; the upload task may be assigned to one of them. | |||
| // storageAffinity: a list of preferred storage IDs; the upload task may be assigned to one of them. | |||
| // Returns: any error encountered during execution. | |||
| var _ = MustAddCmd(func(ctx CommandContext, packageID cdssdk.PackageID, rootPath string, nodeAffinity []cdssdk.NodeID) error { | |||
| var _ = MustAddCmd(func(ctx CommandContext, packageID cdssdk.PackageID, rootPath string, storageAffinity []cdssdk.StorageID) error { | |||
| // Record the start time to measure execution time. | |||
| startTime := time.Now() | |||
| defer func() { | |||
| @@ -48,16 +48,15 @@ var _ = MustAddCmd(func(ctx CommandContext, packageID cdssdk.PackageID, rootPath | |||
| } | |||
| // Set the preferred upload storage based on the affinity list. | |||
| var nodeAff *cdssdk.NodeID | |||
| if len(nodeAffinity) > 0 { | |||
| n := cdssdk.NodeID(nodeAffinity[0]) | |||
| nodeAff = &n | |||
| var storageAff cdssdk.StorageID | |||
| if len(storageAffinity) > 0 { | |||
| storageAff = storageAffinity[0] | |||
| } | |||
| // Create the uploading-object iterator. | |||
| objIter := iterator.NewUploadingObjectIterator(rootPath, uploadFilePathes) | |||
| // Start the upload task. | |||
| taskID, err := ctx.Cmdline.Svc.ObjectSvc().StartUploading(userID, packageID, objIter, nodeAff) | |||
| taskID, err := ctx.Cmdline.Svc.ObjectSvc().StartUploading(userID, packageID, objIter, storageAff) | |||
| if err != nil { | |||
| // Handle failure to start the upload task. | |||
| return fmt.Errorf("update objects to package %d failed, err: %w", packageID, err) | |||
| @@ -176,7 +176,7 @@ func PackageGetCachedStorages(ctx CommandContext, packageID cdssdk.PackageID) er | |||
| resp, err := ctx.Cmdline.Svc.PackageSvc().GetCachedStorages(userID, packageID) | |||
| fmt.Printf("resp: %v\n", resp) | |||
| if err != nil { | |||
| return fmt.Errorf("get package %d cached nodes failed, err: %w", packageID, err) | |||
| return fmt.Errorf("get package %d cached storages failed, err: %w", packageID, err) | |||
| } | |||
| return nil | |||
| } | |||
| @@ -193,10 +193,10 @@ func PackageGetCachedStorages(ctx CommandContext, packageID cdssdk.PackageID) er | |||
| // error - any error that occurred during the operation. | |||
| func PackageGetLoadedStorages(ctx CommandContext, packageID cdssdk.PackageID) error { | |||
| userID := cdssdk.UserID(1) | |||
| nodeIDs, err := ctx.Cmdline.Svc.PackageSvc().GetLoadedStorages(userID, packageID) | |||
| fmt.Printf("nodeIDs: %v\n", nodeIDs) | |||
| hubIDs, err := ctx.Cmdline.Svc.PackageSvc().GetLoadedStorages(userID, packageID) | |||
| fmt.Printf("hubIDs: %v\n", hubIDs) | |||
| if err != nil { | |||
| return fmt.Errorf("get package %d loaded nodes failed, err: %w", packageID, err) | |||
| return fmt.Errorf("get package %d loaded storages failed, err: %w", packageID, err) | |||
| } | |||
| return nil | |||
| } | |||
| @@ -16,7 +16,7 @@ import ( | |||
| ) | |||
| func init() { | |||
| var nodeID int64 | |||
| var stgID int64 | |||
| cmd := &cobra.Command{ | |||
| Use: "put [local] [remote]", | |||
| Short: "Upload files to CDS", | |||
| @@ -90,14 +90,13 @@ func init() { | |||
| return | |||
| } | |||
| var nodeAff *cdssdk.NodeID | |||
| if nodeID != 0 { | |||
| id := cdssdk.NodeID(nodeID) | |||
| nodeAff = &id | |||
| var storageAff cdssdk.StorageID | |||
| if stgID != 0 { | |||
| storageAff = cdssdk.StorageID(stgID) | |||
| } | |||
| objIter := iterator.NewUploadingObjectIterator(local, uploadFilePathes) | |||
| taskID, err := cmdCtx.Cmdline.Svc.ObjectSvc().StartUploading(userID, pkg.PackageID, objIter, nodeAff) | |||
| taskID, err := cmdCtx.Cmdline.Svc.ObjectSvc().StartUploading(userID, pkg.PackageID, objIter, storageAff) | |||
| if err != nil { | |||
| fmt.Printf("start uploading objects: %v\n", err) | |||
| return | |||
| @@ -118,7 +117,7 @@ func init() { | |||
| fmt.Printf("Put %v files (%v) to %s in %v.\n", fileCount, bytesize.ByteSize(totalSize), remote, time.Since(startTime)) | |||
| }, | |||
| } | |||
| cmd.Flags().Int64VarP(&nodeID, "node", "n", 0, "node affinity") | |||
| cmd.Flags().Int64VarP(&stgID, "storage", "s", 0, "storage affinity") | |||
| rootCmd.AddCommand(cmd) | |||
| } | |||
| @@ -20,14 +20,14 @@ func StorageLoadPackage(ctx CommandContext, packageID cdssdk.PackageID, storageI | |||
| }() | |||
| // Start loading the package into the storage system | |||
| nodeID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(1, packageID, storageID) | |||
| hubID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(1, packageID, storageID) | |||
| if err != nil { | |||
| return fmt.Errorf("start loading package to storage: %w", err) | |||
| } | |||
| // Poll until loading completes | |||
| for { | |||
| complete, fullPath, err := ctx.Cmdline.Svc.StorageSvc().WaitStorageLoadPackage(nodeID, taskID, time.Second*10) | |||
| complete, fullPath, err := ctx.Cmdline.Svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10) | |||
| if complete { | |||
| if err != nil { | |||
| return fmt.Errorf("moving complete with: %w", err) | |||
| @@ -58,14 +58,14 @@ func StorageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name str | |||
| }() | |||
| // Start creating and uploading the package to the storage system | |||
| nodeID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageCreatePackage(1, bucketID, name, storageID, path, nil) | |||
| hubID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageCreatePackage(1, bucketID, name, storageID, path, 0) | |||
| if err != nil { | |||
| return fmt.Errorf("start storage uploading package: %w", err) | |||
| } | |||
| // Poll until the upload completes | |||
| for { | |||
| complete, packageID, err := ctx.Cmdline.Svc.StorageSvc().WaitStorageCreatePackage(nodeID, taskID, time.Second*10) | |||
| complete, packageID, err := ctx.Cmdline.Svc.StorageSvc().WaitStorageCreatePackage(hubID, taskID, time.Second*10) | |||
| if complete { | |||
| if err != nil { | |||
| return fmt.Errorf("uploading complete with: %w", err) | |||
| @@ -31,7 +31,7 @@ func init() { | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| nodes, err := coorCli.GetNodes(coormq.NewGetNodes([]cdssdk.NodeID{1, 2})) | |||
| nodes, err := coorCli.GetNodes(coormq.NewGetNodes([]cdssdk.HubID{1, 2})) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| @@ -131,7 +131,7 @@ func init() { | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| nodes, err := coorCli.GetNodes(coormq.NewGetNodes([]cdssdk.NodeID{1, 2})) | |||
| nodes, err := coorCli.GetNodes(coormq.NewGetNodes([]cdssdk.HubID{1, 2})) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| @@ -173,7 +173,7 @@ func init() { | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| nodes, err := coorCli.GetNodes(coormq.NewGetNodes([]cdssdk.NodeID{1, 2})) | |||
| nodes, err := coorCli.GetNodes(coormq.NewGetNodes([]cdssdk.HubID{1, 2})) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| @@ -215,7 +215,7 @@ func init() { | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| nodes, err := coorCli.GetNodes(coormq.NewGetNodes([]cdssdk.NodeID{1, 2})) | |||
| nodes, err := coorCli.GetNodes(coormq.NewGetNodes([]cdssdk.HubID{1, 2})) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| @@ -10,37 +10,37 @@ import ( | |||
| "gitlink.org.cn/cloudream/common/sdks/storage/cdsapi" | |||
| ) | |||
| type NodeService struct { | |||
| type HubService struct { | |||
| *Server | |||
| } | |||
| func (s *Server) NodeSvc() *NodeService { | |||
| return &NodeService{ | |||
| func (s *Server) HubSvc() *HubService { | |||
| return &HubService{ | |||
| Server: s, | |||
| } | |||
| } | |||
| type GetNodesReq struct { | |||
| NodeIDs *[]cdssdk.NodeID `form:"nodeIDs" binding:"required"` | |||
| type GetHubsReq struct { | |||
| HubIDs *[]cdssdk.HubID `form:"hubIDs" binding:"required"` | |||
| } | |||
| type GetNodesResp = cdsapi.NodeGetNodesResp | |||
| type GetHubsResp = cdsapi.HubGetHubsResp | |||
| func (s *ObjectService) GetNodes(ctx *gin.Context) { | |||
| log := logger.WithField("HTTP", "Node.GetNodes") | |||
| func (s *ObjectService) GetHubs(ctx *gin.Context) { | |||
| log := logger.WithField("HTTP", "Hub.GetHubs") | |||
| var req GetNodesReq | |||
| var req GetHubsReq | |||
| if err := ctx.ShouldBindQuery(&req); err != nil { | |||
| log.Warnf("binding body: %s", err.Error()) | |||
| ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | |||
| return | |||
| } | |||
| nodes, err := s.svc.NodeSvc().GetNodes(*req.NodeIDs) | |||
| hubs, err := s.svc.HubSvc().GetHubs(*req.HubIDs) | |||
| if err != nil { | |||
| log.Warnf("getting nodes: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get nodes failed")) | |||
| log.Warnf("getting hubs: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get hubs failed")) | |||
| return | |||
| } | |||
| ctx.JSON(http.StatusOK, OK(GetNodesResp{Nodes: nodes})) | |||
| ctx.JSON(http.StatusOK, OK(GetHubsResp{Hubs: hubs})) | |||
| } | |||
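A hedged sketch of calling this handler: the route path, port, and response envelope below are assumptions (only the handler appears in this diff), but the query binding is as shown, so repeated `hubIDs` parameters should populate the `[]cdssdk.HubID` slice:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical route and port; gin's ShouldBindQuery fills GetHubsReq
	// from repeated hubIDs query parameters.
	resp, err := http.Get("http://localhost:7890/hub/getHubs?hubIDs=1&hubIDs=2")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// Assumed shape of the OK(...) envelope: {"code":...,"data":{"hubs":[...]}}
	fmt.Println(string(body))
}
```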
| @@ -45,7 +45,7 @@ func (s *ObjectService) Upload(ctx *gin.Context) { | |||
| objIter := mapMultiPartFileToUploadingObject(req.Files) | |||
| taskID, err := s.svc.ObjectSvc().StartUploading(req.Info.UserID, req.Info.PackageID, objIter, req.Info.NodeAffinity) | |||
| taskID, err := s.svc.ObjectSvc().StartUploading(req.Info.UserID, req.Info.PackageID, objIter, req.Info.StorageAffinity) | |||
| if err != nil { | |||
| log.Warnf("start uploading object task: %s", err.Error()) | |||
| @@ -143,8 +143,8 @@ func (s *PackageService) GetCachedStorages(ctx *gin.Context) { | |||
| resp, err := s.svc.PackageSvc().GetCachedStorages(req.UserID, req.PackageID) | |||
| if err != nil { | |||
| log.Warnf("get package cached nodes failed: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package cached nodes failed")) | |||
| log.Warnf("get package cached storages failed: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package cached storages failed")) | |||
| return | |||
| } | |||
| @@ -164,8 +164,8 @@ func (s *PackageService) GetLoadedStorages(ctx *gin.Context) { | |||
| stgIDs, err := s.svc.PackageSvc().GetLoadedStorages(req.UserID, req.PackageID) | |||
| if err != nil { | |||
| log.Warnf("get package loaded nodes failed: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package loaded nodes failed")) | |||
| log.Warnf("get package loaded storages failed: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package loaded storages failed")) | |||
| return | |||
| } | |||
| @@ -32,7 +32,7 @@ func (s *StorageService) LoadPackage(ctx *gin.Context) { | |||
| return | |||
| } | |||
| nodeID, taskID, err := s.svc.StorageSvc().StartStorageLoadPackage(req.UserID, req.PackageID, req.StorageID) | |||
| hubID, taskID, err := s.svc.StorageSvc().StartStorageLoadPackage(req.UserID, req.PackageID, req.StorageID) | |||
| if err != nil { | |||
| log.Warnf("start storage load package: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("start loading: %v", err))) | |||
| @@ -40,7 +40,7 @@ func (s *StorageService) LoadPackage(ctx *gin.Context) { | |||
| } | |||
| for { | |||
| complete, ret, err := s.svc.StorageSvc().WaitStorageLoadPackage(nodeID, taskID, time.Second*10) | |||
| complete, ret, err := s.svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10) | |||
| if complete { | |||
| if err != nil { | |||
| log.Warnf("loading complete with: %s", err.Error()) | |||
| @@ -75,8 +75,8 @@ func (s *StorageService) CreatePackage(ctx *gin.Context) { | |||
| return | |||
| } | |||
| nodeID, taskID, err := s.svc.StorageSvc().StartStorageCreatePackage( | |||
| req.UserID, req.BucketID, req.Name, req.StorageID, req.Path, req.NodeAffinity) | |||
| hubID, taskID, err := s.svc.StorageSvc().StartStorageCreatePackage( | |||
| req.UserID, req.BucketID, req.Name, req.StorageID, req.Path, req.StorageAffinity) | |||
| if err != nil { | |||
| log.Warnf("start storage create package: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "storage create package failed")) | |||
| @@ -84,7 +84,7 @@ func (s *StorageService) CreatePackage(ctx *gin.Context) { | |||
| } | |||
| for { | |||
| complete, packageID, err := s.svc.StorageSvc().WaitStorageCreatePackage(nodeID, taskID, time.Second*10) | |||
| complete, packageID, err := s.svc.StorageSvc().WaitStorageCreatePackage(hubID, taskID, time.Second*10) | |||
| if complete { | |||
| if err != nil { | |||
| log.Warnf("creating complete with: %s", err.Error()) | |||
| @@ -123,43 +123,43 @@ func (s *TempService) GetObjectDetail(ctx *gin.Context) { | |||
| return | |||
| } | |||
| loadedNodeIDs, err := s.svc.PackageSvc().GetLoadedNodes(1, details.Object.PackageID) | |||
| loadedHubIDs, err := s.svc.PackageSvc().GetLoadedNodes(1, details.Object.PackageID) | |||
| if err != nil { | |||
| log.Warnf("getting loaded nodes: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get loaded nodes failed")) | |||
| return | |||
| } | |||
| var allNodeIDs []cdssdk.NodeID | |||
| allNodeIDs = append(allNodeIDs, details.PinnedAt...) | |||
| var allHubIDs []cdssdk.HubID | |||
| allHubIDs = append(allHubIDs, details.PinnedAt...) | |||
| for _, b := range details.Blocks { | |||
| allNodeIDs = append(allNodeIDs, b.StorageID) | |||
| allHubIDs = append(allHubIDs, b.StorageID) | |||
| } | |||
| allNodeIDs = append(allNodeIDs, loadedNodeIDs...) | |||
| allHubIDs = append(allHubIDs, loadedHubIDs...) | |||
| allNodeIDs = lo.Uniq(allNodeIDs) | |||
| allHubIDs = lo.Uniq(allHubIDs) | |||
| getNodes, err := s.svc.NodeSvc().GetNodes(allNodeIDs) | |||
| getNodes, err := s.svc.NodeSvc().GetNodes(allHubIDs) | |||
| if err != nil { | |||
| log.Warnf("getting nodes: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get nodes failed")) | |||
| return | |||
| } | |||
| allNodes := make(map[cdssdk.NodeID]*cdssdk.Node) | |||
| allNodes := make(map[cdssdk.HubID]*cdssdk.Node) | |||
| for _, n := range getNodes { | |||
| n2 := n | |||
| allNodes[n.NodeID] = &n2 | |||
| allNodes[n.HubID] = &n2 | |||
| } | |||
| var blocks []ObjectBlockDetail | |||
| for _, nodeID := range details.PinnedAt { | |||
| for _, hubID := range details.PinnedAt { | |||
| blocks = append(blocks, ObjectBlockDetail{ | |||
| Type: "Rep", | |||
| FileHash: details.Object.FileHash, | |||
| LocationType: "Agent", | |||
| LocationName: allNodes[nodeID].Name, | |||
| LocationName: allNodes[hubID].Name, | |||
| }) | |||
| } | |||
| @@ -198,12 +198,12 @@ func (s *TempService) GetObjectDetail(ctx *gin.Context) { | |||
| } | |||
| } | |||
| for _, nodeID := range loadedNodeIDs { | |||
| for _, hubID := range loadedHubIDs { | |||
| blocks = append(blocks, ObjectBlockDetail{ | |||
| Type: "Rep", | |||
| FileHash: details.Object.FileHash, | |||
| LocationType: "Storage", | |||
| LocationName: allNodes[nodeID].Name, | |||
| LocationName: allNodes[hubID].Name, | |||
| }) | |||
| } | |||
| @@ -258,9 +258,9 @@ func (s *TempService) GetDatabaseAll(ctx *gin.Context) { | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get nodes failed")) | |||
| return | |||
| } | |||
| allNodes := make(map[cdssdk.NodeID]cdssdk.Node) | |||
| allNodes := make(map[cdssdk.HubID]cdssdk.Node) | |||
| for _, n := range nodes { | |||
| allNodes[n.NodeID] = n | |||
| allNodes[n.HubID] = n | |||
| } | |||
| bkts := make(map[cdssdk.BucketID]*BucketDetail) | |||
| @@ -290,8 +290,8 @@ func (s *TempService) GetDatabaseAll(ctx *gin.Context) { | |||
| return | |||
| } | |||
| for _, nodeID := range loaded { | |||
| p.Loaded = append(p.Loaded, allNodes[nodeID]) | |||
| for _, hubID := range loaded { | |||
| p.Loaded = append(p.Loaded, allNodes[hubID]) | |||
| } | |||
| pkgs[pkg.PackageID] = &p | |||
| @@ -310,13 +310,13 @@ func (s *TempService) GetDatabaseAll(ctx *gin.Context) { | |||
| for _, obj := range db.Objects { | |||
| bkts[pkgs[obj.Object.PackageID].Package.BucketID].ObjectCount++ | |||
| for _, nodeID := range obj.PinnedAt { | |||
| for _, hubID := range obj.PinnedAt { | |||
| blocks = append(blocks, ObjectBlockDetail{ | |||
| ObjectID: obj.Object.ObjectID, | |||
| Type: "Rep", | |||
| FileHash: obj.Object.FileHash, | |||
| LocationType: "Agent", | |||
| LocationName: allNodes[nodeID].Name, | |||
| LocationName: allNodes[hubID].Name, | |||
| }) | |||
| } | |||
| @@ -364,7 +364,7 @@ func (s *TempService) GetDatabaseAll(ctx *gin.Context) { | |||
| Type: "Rep", | |||
| FileHash: obj.Object.FileHash, | |||
| LocationType: "Storage", | |||
| LocationName: allNodes[node.NodeID].Name, | |||
| LocationName: allNodes[node.HubID].Name, | |||
| }) | |||
| } | |||
| @@ -19,7 +19,7 @@ func (svc *Service) CacheSvc() *CacheService { | |||
| return &CacheService{Service: svc} | |||
| } | |||
| func (svc *CacheService) StartCacheMovePackage(userID cdssdk.UserID, packageID cdssdk.PackageID, stgID cdssdk.StorageID) (cdssdk.NodeID, string, error) { | |||
| func (svc *CacheService) StartCacheMovePackage(userID cdssdk.UserID, packageID cdssdk.PackageID, stgID cdssdk.StorageID) (cdssdk.HubID, string, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return 0, "", fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -35,7 +35,7 @@ func (svc *CacheService) StartCacheMovePackage(userID cdssdk.UserID, packageID c | |||
| return 0, "", fmt.Errorf("shard storage is not enabled") | |||
| } | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(getStg.Storages[0].MasterHub.NodeID) | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(getStg.Storages[0].MasterHub.HubID) | |||
| if err != nil { | |||
| return 0, "", fmt.Errorf("new agent client: %w", err) | |||
| } | |||
| @@ -46,10 +46,10 @@ func (svc *CacheService) StartCacheMovePackage(userID cdssdk.UserID, packageID c | |||
| return 0, "", fmt.Errorf("start cache move package: %w", err) | |||
| } | |||
| return getStg.Storages[0].MasterHub.NodeID, startResp.TaskID, nil | |||
| return getStg.Storages[0].MasterHub.HubID, startResp.TaskID, nil | |||
| } | |||
| func (svc *CacheService) WaitCacheMovePackage(hubID cdssdk.NodeID, taskID string, waitTimeout time.Duration) (bool, error) { | |||
| func (svc *CacheService) WaitCacheMovePackage(hubID cdssdk.HubID, taskID string, waitTimeout time.Duration) (bool, error) { | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(hubID) | |||
| if err != nil { | |||
| return true, fmt.Errorf("new agent client: %w", err) | |||
| @@ -8,26 +8,26 @@ import ( | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| // NodeService is the service struct for node operations | |||
| type NodeService struct { | |||
| // HubService is the service struct for hub operations | |||
| type HubService struct { | |||
| *Service | |||
| } | |||
| // NodeSvc creates and returns a NodeService instance | |||
| func (svc *Service) NodeSvc() *NodeService { | |||
| return &NodeService{Service: svc} | |||
| // HubSvc creates and returns a HubService instance | |||
| func (svc *Service) HubSvc() *HubService { | |||
| return &HubService{Service: svc} | |||
| } | |||
| // GetNodes fetches node information for the given list of node IDs | |||
| // GetHubs fetches hub information for the given list of hub IDs | |||
| // Parameters: | |||
| // | |||
| // nodeIDs []cdssdk.NodeID - the list of node IDs to query | |||
| // hubIDs []cdssdk.HubID - the list of hub IDs to query | |||
| // | |||
| // Returns: | |||
| // | |||
| // []cdssdk.Node - the list of node information retrieved | |||
| // []cdssdk.Hub - the list of hub information retrieved | |||
| // error - the error, if any occurred during the process | |||
| func (svc *NodeService) GetNodes(nodeIDs []cdssdk.NodeID) ([]cdssdk.Node, error) { | |||
| func (svc *HubService) GetHubs(hubIDs []cdssdk.HubID) ([]cdssdk.Hub, error) { | |||
| // Acquire a client instance from the coordinator MQ pool | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| @@ -37,11 +37,11 @@ func (svc *NodeService) GetNodes(nodeIDs []cdssdk.NodeID) ([]cdssdk.Node, error) | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| // Send a request to the coordinator for hub information | |||
| getResp, err := coorCli.GetNodes(coormq.NewGetNodes(nodeIDs)) | |||
| getResp, err := coorCli.GetHubs(coormq.NewGetHubs(hubIDs)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("requesting to coordinator: %w", err) | |||
| } | |||
| // Return the hub information retrieved | |||
| return getResp.Nodes, nil | |||
| return getResp.Hubs, nil | |||
| } | |||
| @@ -29,10 +29,10 @@ func (svc *Service) ObjectSvc() *ObjectService { | |||
| // userID: the user ID. | |||
| // packageID: the package ID. | |||
| // objIter: iterator over the objects being uploaded. | |||
| // nodeAffinity: node affinity, the preferred node for the upload. | |||
| // storageAffinity: storage affinity, the preferred storage for the upload. | |||
| // Returns: the task ID and an error, if any. | |||
| func (svc *ObjectService) StartUploading(userID cdssdk.UserID, packageID cdssdk.PackageID, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) (string, error) { | |||
| tsk := svc.TaskMgr.StartNew(mytask.NewUploadObjects(userID, packageID, objIter, nodeAffinity)) | |||
| func (svc *ObjectService) StartUploading(userID cdssdk.UserID, packageID cdssdk.PackageID, objIter iterator.UploadingObjectIterator, storageAffinity cdssdk.StorageID) (string, error) { | |||
| tsk := svc.TaskMgr.StartNew(mytask.NewUploadObjects(userID, packageID, objIter, storageAffinity)) | |||
| return tsk.ID(), nil | |||
| } | |||
| @@ -118,7 +118,7 @@ func (svc *PackageService) GetCachedStorages(userID cdssdk.UserID, packageID cds | |||
| // Ask the coordinator for the package's cached storage information | |||
| resp, err := coorCli.GetPackageCachedStorages(coormq.ReqGetPackageCachedStorages(userID, packageID)) | |||
| if err != nil { | |||
| return cdssdk.PackageCachingInfo{}, fmt.Errorf("get package cached nodes: %w", err) | |||
| return cdssdk.PackageCachingInfo{}, fmt.Errorf("get package cached storages: %w", err) | |||
| } | |||
| // Build and return the caching info | |||
| @@ -141,7 +141,7 @@ func (svc *PackageService) GetLoadedStorages(userID cdssdk.UserID, packageID cds | |||
| // Ask the coordinator for the IDs of the storages onto which the package has been loaded | |||
| resp, err := coorCli.GetPackageLoadedStorages(coormq.ReqGetPackageLoadedStorages(userID, packageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("get package loaded nodes: %w", err) | |||
| return nil, fmt.Errorf("get package loaded storages: %w", err) | |||
| } | |||
| return resp.StorageIDs, nil | |||
| } | |||
| @@ -50,7 +50,7 @@ func (svc *StorageService) GetByName(userID cdssdk.UserID, name string) (*model. | |||
| return &getResp.Storage, nil | |||
| } | |||
| func (svc *StorageService) StartStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) (cdssdk.NodeID, string, error) { | |||
| func (svc *StorageService) StartStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) (cdssdk.HubID, string, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return 0, "", fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -66,7 +66,7 @@ func (svc *StorageService) StartStorageLoadPackage(userID cdssdk.UserID, package | |||
| return 0, "", fmt.Errorf("shard storage is not enabled") | |||
| } | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storages[0].MasterHub.NodeID) | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storages[0].MasterHub.HubID) | |||
| if err != nil { | |||
| return 0, "", fmt.Errorf("new agent client: %w", err) | |||
| } | |||
| @@ -77,7 +77,7 @@ func (svc *StorageService) StartStorageLoadPackage(userID cdssdk.UserID, package | |||
| return 0, "", fmt.Errorf("start storage load package: %w", err) | |||
| } | |||
| return stgResp.Storages[0].MasterHub.NodeID, startResp.TaskID, nil | |||
| return stgResp.Storages[0].MasterHub.HubID, startResp.TaskID, nil | |||
| } | |||
| type StorageLoadPackageResult struct { | |||
| @@ -86,8 +86,8 @@ type StorageLoadPackageResult struct { | |||
| RemoteBase string | |||
| } | |||
| func (svc *StorageService) WaitStorageLoadPackage(nodeID cdssdk.NodeID, taskID string, waitTimeout time.Duration) (bool, *StorageLoadPackageResult, error) { | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(nodeID) | |||
| func (svc *StorageService) WaitStorageLoadPackage(hubID cdssdk.HubID, taskID string, waitTimeout time.Duration) (bool, *StorageLoadPackageResult, error) { | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(hubID) | |||
| if err != nil { | |||
| // TODO should a failure here be treated as the task having finished? | |||
| return true, nil, fmt.Errorf("new agent client: %w", err) | |||
| @@ -121,7 +121,7 @@ func (svc *StorageService) DeleteStoragePackage(userID int64, packageID int64, s | |||
| } | |||
| // Ask the hub to start a task that uploads files from the Storage. Returns the hub ID and the task ID | |||
| func (svc *StorageService) StartStorageCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string, nodeAffinity *cdssdk.NodeID) (cdssdk.NodeID, string, error) { | |||
| func (svc *StorageService) StartStorageCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string, storageAffinity cdssdk.StorageID) (cdssdk.HubID, string, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return 0, "", fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -137,22 +137,22 @@ func (svc *StorageService) StartStorageCreatePackage(userID cdssdk.UserID, bucke | |||
| return 0, "", fmt.Errorf("shard storage is not enabled") | |||
| } | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storages[0].MasterHub.NodeID) | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storages[0].MasterHub.HubID) | |||
| if err != nil { | |||
| return 0, "", fmt.Errorf("new agent client: %w", err) | |||
| } | |||
| defer stgglb.AgentMQPool.Release(agentCli) | |||
| startResp, err := agentCli.StartStorageCreatePackage(agtmq.NewStartStorageCreatePackage(userID, bucketID, name, storageID, path, nodeAffinity)) | |||
| startResp, err := agentCli.StartStorageCreatePackage(agtmq.NewStartStorageCreatePackage(userID, bucketID, name, storageID, path, storageAffinity)) | |||
| if err != nil { | |||
| return 0, "", fmt.Errorf("start storage upload package: %w", err) | |||
| } | |||
| return stgResp.Storages[0].MasterHub.NodeID, startResp.TaskID, nil | |||
| return stgResp.Storages[0].MasterHub.HubID, startResp.TaskID, nil | |||
| } | |||
| func (svc *StorageService) WaitStorageCreatePackage(nodeID cdssdk.NodeID, taskID string, waitTimeout time.Duration) (bool, cdssdk.PackageID, error) { | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(nodeID) | |||
| func (svc *StorageService) WaitStorageCreatePackage(hubID cdssdk.HubID, taskID string, waitTimeout time.Duration) (bool, cdssdk.PackageID, error) { | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(hubID) | |||
| if err != nil { | |||
| // TODO should a failure here be treated as the task having finished? | |||
| return true, 0, fmt.Errorf("new agent client: %w", err) | |||
| @@ -24,11 +24,11 @@ type UploadObjects struct { | |||
| // userID: the ID of the user who initiated the upload request. | |||
| // packageID: the ID of the package the uploaded objects belong to. | |||
| // objectIter: iterator used to walk through and upload multiple objects. | |||
| // nodeAffinity: node affinity, the preferred node for executing the upload task. | |||
| // storageAffinity: storage affinity, the preferred storage for executing the upload task. | |||
| // Returns a pointer to the initialized UploadObjects. | |||
| func NewUploadObjects(userID cdssdk.UserID, packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *UploadObjects { | |||
| func NewUploadObjects(userID cdssdk.UserID, packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator, storageAffinity cdssdk.StorageID) *UploadObjects { | |||
| return &UploadObjects{ | |||
| cmd: *cmd.NewUploadObjects(userID, packageID, objectIter, nodeAffinity), | |||
| cmd: *cmd.NewUploadObjects(userID, packageID, objectIter, storageAffinity), | |||
| } | |||
| } | |||
| @@ -40,32 +40,32 @@ func main() { | |||
| stgglb.InitAgentRPCPool(&config.Cfg().AgentGRPC) | |||
| var conCol connectivity.Collector | |||
| if config.Cfg().Local.NodeID != nil { | |||
| //If the client is on the same machine as some node, use that node's connectivity data | |||
| if config.Cfg().Local.HubID != nil { | |||
| //If the client is on the same machine as some hub, use that hub's connectivity data | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| logger.Warnf("acquire coordinator mq failed, err: %s", err.Error()) | |||
| os.Exit(1) | |||
| } | |||
| getCons, err := coorCli.GetNodeConnectivities(coormq.ReqGetNodeConnectivities([]cdssdk.NodeID{*config.Cfg().Local.NodeID})) | |||
| getCons, err := coorCli.GetHubConnectivities(coormq.ReqGetHubConnectivities([]cdssdk.HubID{*config.Cfg().Local.HubID})) | |||
| if err != nil { | |||
| logger.Warnf("get node connectivities failed, err: %s", err.Error()) | |||
| logger.Warnf("get hub connectivities failed, err: %s", err.Error()) | |||
| os.Exit(1) | |||
| } | |||
| consMap := make(map[cdssdk.NodeID]connectivity.Connectivity) | |||
| consMap := make(map[cdssdk.HubID]connectivity.Connectivity) | |||
| for _, con := range getCons.Connectivities { | |||
| var delay *time.Duration | |||
| if con.Delay != nil { | |||
| d := time.Duration(*con.Delay * float32(time.Millisecond)) | |||
| delay = &d | |||
| } | |||
| consMap[con.FromNodeID] = connectivity.Connectivity{ | |||
| ToNodeID: con.ToNodeID, | |||
| Delay: delay, | |||
| consMap[con.FromHubID] = connectivity.Connectivity{ | |||
| ToHubID: con.ToHubID, | |||
| Delay: delay, | |||
| } | |||
| } | |||
| conCol = connectivity.NewCollectorWithInitData(&config.Cfg().Connectivity, nil, consMap) | |||
| logger.Info("use local node connectivities") | |||
| logger.Info("use local hub connectivities") | |||
| } else { | |||
| // Otherwise, collect connectivity data in place | |||
| @@ -1,7 +1,7 @@ | |||
| { | |||
| "id": 1, | |||
| "local": { | |||
| "nodeID": 1, | |||
| "hubID": 1, | |||
| "localIP": "127.0.0.1", | |||
| "externalIP": "127.0.0.1", | |||
| "locationID": 1 | |||
| @@ -42,7 +42,7 @@ | |||
| }, | |||
| "downloader": { | |||
| "maxStripCacheCount": 100, | |||
| "highLatencyNode": 35, | |||
| "highLatencyHub": 35, | |||
| "ecStripPrefetchCount": 1 | |||
| } | |||
| } | |||
| @@ -34,7 +34,7 @@ | |||
| }, | |||
| "downloader": { | |||
| "maxStripCacheCount": 100, | |||
| "highLatencyNode": 35, | |||
| "highLatencyHub": 35, | |||
| "ecStripPrefetchCount": 1 | |||
| }, | |||
| "storageID": 0 | |||
| @@ -1,7 +1,7 @@ | |||
| { | |||
| "accessStatHistoryAmount": 0.8, | |||
| "ecFileSizeThreshold": 104857600, | |||
| "nodeUnavailableSeconds": 300, | |||
| "hubUnavailableSeconds": 300, | |||
| "logger": { | |||
| "output": "file", | |||
| "outputFileName": "scanner", | |||
| @@ -1,187 +0,0 @@ | |||
| drop database if exists cloudream; | |||
| create database cloudream; | |||
| use cloudream; | |||
| create table Node ( | |||
| NodeID int not null auto_increment primary key comment 'node ID', | |||
| Name varchar(128) not null comment 'node name', | |||
| LocalIP varchar(128) not null comment 'internal IP of the node', | |||
| ExternalIP varchar(128) not null comment 'external IP of the node', | |||
| LocalGRPCPort int not null comment 'internal gRPC port of the node', | |||
| ExternalGRPCPort int not null comment 'external gRPC port of the node', | |||
| LocationID int not null comment 'location of the node', | |||
| State varchar(128) comment 'state of the node', | |||
| LastReportTime timestamp comment 'time of the last report from the node' | |||
| ) comment = 'node table'; | |||
| insert into | |||
| Node ( | |||
| NodeID, | |||
| Name, | |||
| LocalIP, | |||
| ExternalIP, | |||
| LocalGRPCPort, | |||
| ExternalGRPCPort, | |||
| LocationID, | |||
| State | |||
| ) | |||
| values | |||
| ( | |||
| 1, | |||
| "localhost", | |||
| "localhost", | |||
| "localhost", | |||
| 5010, | |||
| 5010, | |||
| 1, | |||
| "alive" | |||
| ); | |||
| create table Storage ( | |||
| StorageID int not null auto_increment primary key comment 'storage service ID', | |||
| Name varchar(100) not null comment 'storage service name', | |||
| NodeID int not null comment 'ID of the node hosting the storage service', | |||
| Directory varchar(4096) not null comment 'directory on the node hosting the storage service', | |||
| Remote varchar(4096) not null, | |||
| State varchar(100) comment 'state' | |||
| ) comment = "storage service table"; | |||
| insert into | |||
| Storage (StorageID, Name, NodeID, Directory, Remote, State) | |||
| values | |||
| (1, "HuaWei-Cloud", 1, "/", "", "Online"); | |||
| create table NodeConnectivity ( | |||
| FromNodeID int not null comment 'ID of the node initiating the test', | |||
| ToNodeID int not null comment 'ID of the node being tested', | |||
| Delay float comment 'latency between the two nodes in milliseconds; null means the target is unreachable', | |||
| TestTime timestamp comment 'time the connectivity test was run', | |||
| primary key(FromNodeID, ToNodeID) | |||
| ) comment = 'node connectivity table'; | |||
| create table User ( | |||
| UserID int not null primary key comment 'user ID', | |||
| Password varchar(100) not null comment 'user password' | |||
| ) comment = 'user password table'; | |||
| create table UserBucket ( | |||
| UserID int not null comment 'user ID', | |||
| BucketID int not null comment 'ID of a bucket the user can access', | |||
| primary key(UserID, BucketID) | |||
| ) comment = 'user-bucket permission table'; | |||
| insert into | |||
| UserBucket (UserID, BucketID) | |||
| values | |||
| (0, 1); | |||
| create table UserNode ( | |||
| UserID int not null comment 'user ID', | |||
| NodeID int not null comment 'ID of a node the user can use', | |||
| primary key(UserID, NodeID) | |||
| ) comment = 'user-node permission table'; | |||
| insert into | |||
| UserNode (UserID, NodeID) | |||
| values | |||
| (0, 1); | |||
| create table UserStorage ( | |||
| UserID int not null comment "用户ID", | |||
| StorageID int not null comment "存储服务ID", | |||
| primary key(UserID, StorageID) | |||
| ); | |||
| insert into | |||
| UserStorage (UserID, StorageID) | |||
| values | |||
| (0, 1); | |||
| create table Bucket ( | |||
| BucketID int not null auto_increment primary key comment 'bucket ID', | |||
| Name varchar(100) not null comment 'bucket name', | |||
| CreatorID int not null comment 'creator ID' | |||
| ) comment = 'bucket table'; | |||
| insert into | |||
| Bucket (BucketID, Name, CreatorID) | |||
| values | |||
| (0, "bucket01", 0); | |||
| create table Package ( | |||
| PackageID int not null auto_increment primary key comment 'package ID', | |||
| Name varchar(100) not null comment 'package name', | |||
| BucketID int not null comment 'bucket ID', | |||
| State varchar(100) not null comment 'state' | |||
| ); | |||
| create table Object ( | |||
| ObjectID int not null auto_increment primary key comment 'object ID', | |||
| PackageID int not null comment 'package ID', | |||
| Path varchar(500) not null comment 'object path', | |||
| Size bigint not null comment 'object size in bytes', | |||
| FileHash varchar(100) not null comment 'FileHash of the complete object', | |||
| Redundancy JSON not null comment 'redundancy policy', | |||
| CreateTime timestamp not null comment 'creation time', | |||
| UpdateTime timestamp not null comment 'update time', | |||
| UNIQUE KEY PackagePath (PackageID, Path) | |||
| ) comment = 'object table'; | |||
| create table ObjectBlock ( | |||
| ObjectID int not null comment 'object ID', | |||
| `Index` int not null comment 'position of the coded block within the stripe', | |||
| NodeID int not null comment 'node on which this coded block should reside', | |||
| FileHash varchar(100) not null comment 'hash of the coded block', | |||
| primary key(ObjectID, `Index`, NodeID) | |||
| ) comment = 'object coded block table'; | |||
| create table Cache ( | |||
| FileHash varchar(100) not null comment 'ID of the coded block', | |||
| NodeID int not null comment 'node ID', | |||
| CreateTime timestamp not null comment 'time cached', | |||
| Priority int not null comment 'priority of the coded block', | |||
| primary key(FileHash, NodeID) | |||
| ) comment = 'cache table'; | |||
| create table PinnedObject ( | |||
| NodeID int not null comment 'node ID', | |||
| ObjectID int not null comment 'object ID', | |||
| CreateTime timestamp not null comment 'time cached', | |||
| primary key(NodeID, ObjectID) | |||
| ) comment = 'temporary object table'; | |||
| create table StoragePackage ( | |||
| StorageID int not null comment 'storage service ID', | |||
| PackageID int not null comment 'package ID', | |||
| UserID int not null comment 'ID of the user who scheduled this file', | |||
| State varchar(100) not null comment 'package state', | |||
| primary key(StorageID, PackageID, UserID) | |||
| ); | |||
| create table PackageAccessStat ( | |||
| PackageID int not null comment 'package ID', | |||
| NodeID int not null comment 'node ID', | |||
| Amount float not null comment 'sliding average of traffic from the previous day', | |||
| Counter float not null comment 'traffic for the current day', | |||
| primary key(PackageID, NodeID) | |||
| ); | |||
| create table Location ( | |||
| LocationID int not null auto_increment primary key comment 'ID', | |||
| Name varchar(128) not null comment 'name' | |||
| ) comment = 'location table'; | |||
| insert into | |||
| Location (LocationID, Name) | |||
| values | |||
| (1, "Local"); | |||
| create table ObjectAccessStat ( | |||
| ObjectID int not null comment 'object ID', | |||
| NodeID int not null comment 'node ID', | |||
| Amount float not null comment 'sliding average of traffic from the previous day', | |||
| Counter float not null comment 'traffic for the current day', | |||
| primary key(ObjectID, NodeID) | |||
| ); | |||
| @@ -3,13 +3,13 @@ package consts | |||
| const ( | |||
| StorageDirectoryStateOK = "OK" | |||
| NodeStateNormal = "Normal" | |||
| NodeStateUnavailable = "Unavailable" | |||
| HubStateNormal = "Normal" | |||
| HubStateUnavailable = "Unavailable" | |||
| ) | |||
| const ( | |||
| NodeDistanceSameNode = 0.1 | |||
| NodeDistanceSameLocation = 1 | |||
| NodeDistanceOther = 5 | |||
| NodeDistanceHighLatencyNode = 10 | |||
| StorageDistanceSameStorage = 0.1 | |||
| StorageDistanceSameLocation = 1 | |||
| StorageDistanceOther = 5 | |||
| HubDistanceHighLatencyHub = 10 | |||
| ) | |||
| @@ -3,8 +3,8 @@ package stgglb | |||
| import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| // Choose the appropriate address based on the distance between the local node and the target hub | |||
| func SelectGRPCAddress(node cdssdk.Node, addr cdssdk.GRPCAddressInfo) (string, int) { | |||
| if Local != nil && Local.LocationID == node.LocationID { | |||
| func SelectGRPCAddress(hub cdssdk.Hub, addr cdssdk.GRPCAddressInfo) (string, int) { | |||
| if Local != nil && Local.LocationID == hub.LocationID { | |||
| return addr.LocalIP, addr.LocalGRPCPort | |||
| } | |||
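A stand-alone sketch of the selection logic, with plain values in place of `stgglb.Local` and `cdssdk.Hub`; the external-address branch is an assumption, since the hunk cuts off before it:

```go
package main

import "fmt"

// Simplified stand-ins for cdssdk.GRPCAddressInfo's fields.
type GRPCAddressInfo struct {
	LocalIP, ExternalIP             string
	LocalGRPCPort, ExternalGRPCPort int
}

// selectGRPCAddress mirrors the logic: a hub in the same location as the
// local machine gets the internal address, everything else the external one.
func selectGRPCAddress(hubLocationID, localLocationID int64, addr GRPCAddressInfo) (string, int) {
	if hubLocationID == localLocationID {
		return addr.LocalIP, addr.LocalGRPCPort
	}
	return addr.ExternalIP, addr.ExternalGRPCPort
}

func main() {
	addr := GRPCAddressInfo{"192.168.1.5", "203.0.113.5", 5010, 5011}
	fmt.Println(selectGRPCAddress(1, 1, addr)) // 192.168.1.5 5010
	fmt.Println(selectGRPCAddress(2, 1, addr)) // 203.0.113.5 5011
}
```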
| @@ -99,7 +99,7 @@ func (o *ObjectDetail) GroupBlocks() []GrouppedObjectBlock { | |||
| } | |||
| type LocalMachineInfo struct { | |||
| NodeID *cdssdk.NodeID `json:"nodeID"` | |||
| HubID *cdssdk.HubID `json:"hubID"` | |||
| ExternalIP string `json:"externalIP"` | |||
| LocalIP string `json:"localIP"` | |||
| LocationID cdssdk.LocationID `json:"locationID"` | |||
| @@ -129,7 +129,7 @@ func (ObjectAccessStat) TableName() string { | |||
| type StorageDetail struct { | |||
| Storage cdssdk.Storage `json:"storage"` | |||
| MasterHub *cdssdk.Node `json:"masterHub"` | |||
| MasterHub *cdssdk.Hub `json:"masterHub"` | |||
| Shard *cdssdk.ShardStorage `json:"shard"` | |||
| Shared *cdssdk.SharedStorage `json:"shared"` | |||
| } | |||
| @@ -28,10 +28,10 @@ import ( | |||
| ) | |||
| type UploadObjects struct { | |||
| userID cdssdk.UserID | |||
| packageID cdssdk.PackageID | |||
| objectIter iterator.UploadingObjectIterator | |||
| nodeAffinity *cdssdk.NodeID | |||
| userID cdssdk.UserID | |||
| packageID cdssdk.PackageID | |||
| objectIter iterator.UploadingObjectIterator | |||
| stgAffinity cdssdk.StorageID | |||
| } | |||
| type UploadObjectsResult struct { | |||
| @@ -56,12 +56,12 @@ type UploadObjectsContext struct { | |||
| StgMgr *mgr.Manager | |||
| } | |||
| func NewUploadObjects(userID cdssdk.UserID, packageID cdssdk.PackageID, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *UploadObjects { | |||
| func NewUploadObjects(userID cdssdk.UserID, packageID cdssdk.PackageID, objIter iterator.UploadingObjectIterator, stgAffinity cdssdk.StorageID) *UploadObjects { | |||
| return &UploadObjects{ | |||
| userID: userID, | |||
| packageID: packageID, | |||
| objectIter: objIter, | |||
| nodeAffinity: nodeAffinity, | |||
| userID: userID, | |||
| packageID: packageID, | |||
| objectIter: objIter, | |||
| stgAffinity: stgAffinity, | |||
| } | |||
| } | |||
| @@ -75,7 +75,7 @@ func (t *UploadObjects) Execute(ctx *UploadObjectsContext) (*UploadObjectsResult | |||
| getUserStgsResp, err := coorCli.GetUserStorageDetails(coormq.ReqGetUserStorageDetails(t.userID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting user nodes: %w", err) | |||
| return nil, fmt.Errorf("getting user storages: %w", err) | |||
| } | |||
| cons := ctx.Connectivity.GetAll() | |||
| @@ -87,7 +87,7 @@ func (t *UploadObjects) Execute(ctx *UploadObjectsContext) (*UploadObjectsResult | |||
| delay := time.Duration(math.MaxInt64) | |||
| con, ok := cons[stg.MasterHub.NodeID] | |||
| con, ok := cons[stg.MasterHub.HubID] | |||
| if ok && con.Delay != nil { | |||
| delay = *con.Delay | |||
| } | |||
| @@ -116,7 +116,7 @@ func (t *UploadObjects) Execute(ctx *UploadObjectsContext) (*UploadObjectsResult | |||
| } | |||
| defer ipfsMutex.Unlock() | |||
| rets, err := uploadAndUpdatePackage(ctx, t.packageID, t.objectIter, userStgs, t.nodeAffinity) | |||
| rets, err := uploadAndUpdatePackage(ctx, t.packageID, t.objectIter, userStgs, t.stgAffinity) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| @@ -126,30 +126,30 @@ func (t *UploadObjects) Execute(ctx *UploadObjectsContext) (*UploadObjectsResult | |||
| }, nil | |||
| } | |||
| // chooseUploadNode picks a node to upload files to | |||
| // chooseUploadStorage picks a storage to upload files to | |||
| // 1. Prefer the storage with affinity set | |||
| // 2. Otherwise pick one at random from the storages in the same location as the client | |||
| // 3. Failing that, pick the storage with the lowest latency among all of them | |||
| func chooseUploadNode(nodes []UploadStorageInfo, nodeAffinity *cdssdk.NodeID) UploadStorageInfo { | |||
| if nodeAffinity != nil { | |||
| aff, ok := lo.Find(nodes, func(node UploadStorageInfo) bool { return node.Storage.MasterHub.NodeID == *nodeAffinity }) | |||
| func chooseUploadStorage(storages []UploadStorageInfo, stgAffinity cdssdk.StorageID) UploadStorageInfo { | |||
| if stgAffinity > 0 { | |||
| aff, ok := lo.Find(storages, func(storage UploadStorageInfo) bool { return storage.Storage.Storage.StorageID == stgAffinity }) | |||
| if ok { | |||
| return aff | |||
| } | |||
| } | |||
| sameLocationNodes := lo.Filter(nodes, func(e UploadStorageInfo, i int) bool { return e.IsSameLocation }) | |||
| if len(sameLocationNodes) > 0 { | |||
| return sameLocationNodes[rand.Intn(len(sameLocationNodes))] | |||
| sameLocationStorages := lo.Filter(storages, func(e UploadStorageInfo, i int) bool { return e.IsSameLocation }) | |||
| if len(sameLocationStorages) > 0 { | |||
| return sameLocationStorages[rand.Intn(len(sameLocationStorages))] | |||
| } | |||
| // Pick the storage with the lowest latency | |||
| nodes = sort2.Sort(nodes, func(e1, e2 UploadStorageInfo) int { return sort2.Cmp(e1.Delay, e2.Delay) }) | |||
| storages = sort2.Sort(storages, func(e1, e2 UploadStorageInfo) int { return sort2.Cmp(e1.Delay, e2.Delay) }) | |||
| return nodes[0] | |||
| return storages[0] | |||
| } | |||
| func uploadAndUpdatePackage(ctx *UploadObjectsContext, packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator, userNodes []UploadStorageInfo, nodeAffinity *cdssdk.NodeID) ([]ObjectUploadResult, error) { | |||
| func uploadAndUpdatePackage(ctx *UploadObjectsContext, packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator, userStorages []UploadStorageInfo, stgAffinity cdssdk.StorageID) ([]ObjectUploadResult, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -157,7 +157,7 @@ func uploadAndUpdatePackage(ctx *UploadObjectsContext, packageID cdssdk.PackageI | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| // Use the same upload storage for all files | |||
| uploadNode := chooseUploadNode(userNodes, nodeAffinity) | |||
| uploadStorage := chooseUploadStorage(userStorages, stgAffinity) | |||
| var uploadRets []ObjectUploadResult | |||
| // Upload the directory | |||
| @@ -174,7 +174,7 @@ func uploadAndUpdatePackage(ctx *UploadObjectsContext, packageID cdssdk.PackageI | |||
| defer objInfo.File.Close() | |||
| uploadTime := time.Now() | |||
| fileHash, err := uploadFile(ctx, objInfo.File, uploadNode) | |||
| fileHash, err := uploadFile(ctx, objInfo.File, uploadStorage) | |||
| if err != nil { | |||
| return fmt.Errorf("uploading file: %w", err) | |||
| } | |||
| @@ -184,7 +184,7 @@ func uploadAndUpdatePackage(ctx *UploadObjectsContext, packageID cdssdk.PackageI | |||
| Error: err, | |||
| }) | |||
| adds = append(adds, coormq.NewAddObjectEntry(objInfo.Path, objInfo.Size, fileHash, uploadTime, uploadNode.Storage.Storage.StorageID)) | |||
| adds = append(adds, coormq.NewAddObjectEntry(objInfo.Path, objInfo.Size, fileHash, uploadTime, uploadStorage.Storage.Storage.StorageID)) | |||
| return nil | |||
| }() | |||
| if err != nil { | |||
| @@ -12,7 +12,7 @@ import ( | |||
| ) | |||
| type Connectivity struct { | |||
| ToNodeID cdssdk.NodeID | |||
| ToHubID cdssdk.HubID | |||
| Delay *time.Duration | |||
| TestTime time.Time | |||
| } | |||
| @@ -22,7 +22,7 @@ type Collector struct { | |||
| onCollected func(collector *Collector) | |||
| collectNow chan any | |||
| close chan any | |||
| connectivities map[cdssdk.NodeID]Connectivity | |||
| connectivities map[cdssdk.HubID]Connectivity | |||
| lock *sync.RWMutex | |||
| } | |||
| @@ -31,7 +31,7 @@ func NewCollector(cfg *Config, onCollected func(collector *Collector)) Collector | |||
| cfg: cfg, | |||
| collectNow: make(chan any), | |||
| close: make(chan any), | |||
| connectivities: make(map[cdssdk.NodeID]Connectivity), | |||
| connectivities: make(map[cdssdk.HubID]Connectivity), | |||
| lock: &sync.RWMutex{}, | |||
| onCollected: onCollected, | |||
| } | |||
| @@ -39,7 +39,7 @@ func NewCollector(cfg *Config, onCollected func(collector *Collector)) Collector | |||
| return rpt | |||
| } | |||
| func NewCollectorWithInitData(cfg *Config, onCollected func(collector *Collector), initData map[cdssdk.NodeID]Connectivity) Collector { | |||
| func NewCollectorWithInitData(cfg *Config, onCollected func(collector *Collector), initData map[cdssdk.HubID]Connectivity) Collector { | |||
| rpt := Collector{ | |||
| cfg: cfg, | |||
| collectNow: make(chan any), | |||
| @@ -52,22 +52,22 @@ func NewCollectorWithInitData(cfg *Config, onCollected func(collector *Collector | |||
| return rpt | |||
| } | |||
| func (r *Collector) Get(nodeID cdssdk.NodeID) *Connectivity { | |||
| func (r *Collector) Get(hubID cdssdk.HubID) *Connectivity { | |||
| r.lock.RLock() | |||
| defer r.lock.RUnlock() | |||
| con, ok := r.connectivities[nodeID] | |||
| con, ok := r.connectivities[hubID] | |||
| if ok { | |||
| return &con | |||
| } | |||
| return nil | |||
| } | |||
| func (r *Collector) GetAll() map[cdssdk.NodeID]Connectivity { | |||
| func (r *Collector) GetAll() map[cdssdk.HubID]Connectivity { | |||
| r.lock.RLock() | |||
| defer r.lock.RUnlock() | |||
| ret := make(map[cdssdk.NodeID]Connectivity) | |||
| ret := make(map[cdssdk.HubID]Connectivity) | |||
| for k, v := range r.connectivities { | |||
| ret[k] = v | |||
| } | |||
| @@ -136,31 +136,31 @@ func (r *Collector) testing() { | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| getNodeResp, err := coorCli.GetNodes(coormq.NewGetNodes(nil)) | |||
| getHubResp, err := coorCli.GetHubs(coormq.NewGetHubs(nil)) | |||
| if err != nil { | |||
| return | |||
| } | |||
| wg := sync.WaitGroup{} | |||
| cons := make([]Connectivity, len(getNodeResp.Nodes)) | |||
| for i, node := range getNodeResp.Nodes { | |||
| cons := make([]Connectivity, len(getHubResp.Hubs)) | |||
| for i, hub := range getHubResp.Hubs { | |||
| tmpIdx := i | |||
| tmpNode := node | |||
| tmpHub := hub | |||
| wg.Add(1) | |||
| go func() { | |||
| defer wg.Done() | |||
| cons[tmpIdx] = r.ping(tmpNode) | |||
| cons[tmpIdx] = r.ping(tmpHub) | |||
| }() | |||
| } | |||
| wg.Wait() | |||
| r.lock.Lock() | |||
| // Delete all node records and rebuild them, so that stale entries are not left behind when the node count changes | |||
| r.connectivities = make(map[cdssdk.NodeID]Connectivity) | |||
| // Delete all hub records and rebuild them, so that stale entries are not left behind when the hub count changes | |||
| r.connectivities = make(map[cdssdk.HubID]Connectivity) | |||
| for _, con := range cons { | |||
| r.connectivities[con.ToNodeID] = con | |||
| r.connectivities[con.ToHubID] = con | |||
| } | |||
| r.lock.Unlock() | |||
| @@ -169,14 +169,14 @@ func (r *Collector) testing() { | |||
| } | |||
| } | |||
| func (r *Collector) ping(node cdssdk.Node) Connectivity { | |||
| log := logger.WithType[Collector]("").WithField("NodeID", node.NodeID) | |||
| func (r *Collector) ping(hub cdssdk.Hub) Connectivity { | |||
| log := logger.WithType[Collector]("").WithField("HubID", hub.HubID) | |||
| var ip string | |||
| var port int | |||
| switch addr := node.Address.(type) { | |||
| switch addr := hub.Address.(type) { | |||
| case *cdssdk.GRPCAddressInfo: | |||
| if node.LocationID == stgglb.Local.LocationID { | |||
| if hub.LocationID == stgglb.Local.LocationID { | |||
| ip = addr.LocalIP | |||
| port = addr.LocalGRPCPort | |||
| } else { | |||
| @@ -189,7 +189,7 @@ func (r *Collector) ping(node cdssdk.Node) Connectivity { | |||
| log.Warnf("unsupported address type: %v", addr) | |||
| return Connectivity{ | |||
| ToNodeID: node.NodeID, | |||
| ToHubID: hub.HubID, | |||
| Delay: nil, | |||
| TestTime: time.Now(), | |||
| } | |||
| @@ -199,7 +199,7 @@ func (r *Collector) ping(node cdssdk.Node) Connectivity { | |||
| if err != nil { | |||
| log.Warnf("new agent %v:%v rpc client: %w", ip, port, err) | |||
| return Connectivity{ | |||
| ToNodeID: node.NodeID, | |||
| ToHubID: hub.HubID, | |||
| Delay: nil, | |||
| TestTime: time.Now(), | |||
| } | |||
| @@ -211,7 +211,7 @@ func (r *Collector) ping(node cdssdk.Node) Connectivity { | |||
| if err != nil { | |||
| log.Warnf("pre ping: %v", err) | |||
| return Connectivity{ | |||
| ToNodeID: node.NodeID, | |||
| ToHubID: hub.HubID, | |||
| Delay: nil, | |||
| TestTime: time.Now(), | |||
| } | |||
| @@ -225,7 +225,7 @@ func (r *Collector) ping(node cdssdk.Node) Connectivity { | |||
| if err != nil { | |||
| log.Warnf("ping: %v", err) | |||
| return Connectivity{ | |||
| ToNodeID: node.NodeID, | |||
| ToHubID: hub.HubID, | |||
| Delay: nil, | |||
| TestTime: time.Now(), | |||
| } | |||
| @@ -240,7 +240,7 @@ func (r *Collector) ping(node cdssdk.Node) Connectivity { | |||
| delay := avgDelay / 3 | |||
| return Connectivity{ | |||
| ToNodeID: node.NodeID, | |||
| ToHubID: hub.HubID, | |||
| Delay: &delay, | |||
| TestTime: time.Now(), | |||
| } | |||
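Taken together, `testing` and `ping` implement a periodic fan-out: one goroutine per hub writes its result into a pre-sized slice (so no lock is needed while measuring), and the connectivity map is then rebuilt from scratch so entries for removed hubs disappear. A compressed sketch of that pattern with stand-in types — the real `ping` dials the agent over gRPC and averages three round trips:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// Connectivity matches the shape used in the diff: a nil Delay means the
// hub was unreachable at TestTime.
type Connectivity struct {
	ToHubID  int64
	Delay    *time.Duration
	TestTime time.Time
}

// pingAll fans out one goroutine per hub. Each goroutine owns one slot of
// the results slice, so no synchronization is needed until wg.Wait returns;
// the map is then rebuilt in one pass, dropping stale hubs.
func pingAll(hubIDs []int64, ping func(int64) Connectivity) map[int64]Connectivity {
	cons := make([]Connectivity, len(hubIDs))
	var wg sync.WaitGroup
	for i, id := range hubIDs {
		wg.Add(1)
		go func(idx int, hubID int64) {
			defer wg.Done()
			cons[idx] = ping(hubID)
		}(i, id)
	}
	wg.Wait()

	m := make(map[int64]Connectivity, len(cons))
	for _, c := range cons {
		m[c.ToHubID] = c
	}
	return m
}

func main() {
	m := pingAll([]int64{1, 2}, func(id int64) Connectivity {
		d := 10 * time.Millisecond
		return Connectivity{ToHubID: id, Delay: &d, TestTime: time.Now()}
	})
	fmt.Println(len(m)) // prints 2
}
```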
| @@ -17,9 +17,9 @@ func (db *DB) Cache() *CacheDB { | |||
| return &CacheDB{DB: db} | |||
| } | |||
| func (*CacheDB) Get(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) (model.Cache, error) { | |||
| func (*CacheDB) Get(ctx SQLContext, fileHash string, hubID cdssdk.HubID) (model.Cache, error) { | |||
| var ret model.Cache | |||
| err := sqlx.Get(ctx, &ret, "select * from Cache where FileHash = ? and NodeID = ?", fileHash, nodeID) | |||
| err := sqlx.Get(ctx, &ret, "select * from Cache where FileHash = ? and HubID = ?", fileHash, hubID) | |||
| return ret, err | |||
| } | |||
| @@ -29,15 +29,15 @@ func (*CacheDB) BatchGetAllFileHashes(ctx SQLContext, start int, count int) ([]s | |||
| return ret, err | |||
| } | |||
| func (*CacheDB) GetByNodeID(ctx SQLContext, nodeID cdssdk.NodeID) ([]model.Cache, error) { | |||
| func (*CacheDB) GetByHubID(ctx SQLContext, hubID cdssdk.HubID) ([]model.Cache, error) { | |||
| var ret []model.Cache | |||
| err := sqlx.Select(ctx, &ret, "select * from Cache where NodeID = ?", nodeID) | |||
| err := sqlx.Select(ctx, &ret, "select * from Cache where HubID = ?", hubID) | |||
| return ret, err | |||
| } | |||
| // Create inserts a single cache record; if one already exists, it does nothing | |||
| func (*CacheDB) Create(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID, priority int) error { | |||
| _, err := ctx.Exec("insert ignore into Cache values(?,?,?,?)", fileHash, nodeID, time.Now(), priority) | |||
| func (*CacheDB) Create(ctx SQLContext, fileHash string, hubID cdssdk.HubID, priority int) error { | |||
| _, err := ctx.Exec("insert ignore into Cache values(?,?,?,?)", fileHash, hubID, time.Now(), priority) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| @@ -52,7 +52,7 @@ func (*CacheDB) BatchCreate(ctx SQLContext, caches []model.Cache) error { | |||
| } | |||
| return BatchNamedExec( | |||
| ctx, | |||
| "insert into Cache(FileHash,NodeID,CreateTime,Priority) values(:FileHash,:NodeID,:CreateTime,:Priority)"+ | |||
| "insert into Cache(FileHash,HubID,CreateTime,Priority) values(:FileHash,:HubID,:CreateTime,:Priority)"+ | |||
| " on duplicate key update CreateTime=values(CreateTime), Priority=values(Priority)", | |||
| 4, | |||
| caches, | |||
| @@ -60,7 +60,7 @@ func (*CacheDB) BatchCreate(ctx SQLContext, caches []model.Cache) error { | |||
| ) | |||
| } | |||
| func (*CacheDB) BatchCreateOnSameNode(ctx SQLContext, fileHashes []string, nodeID cdssdk.NodeID, priority int) error { | |||
| func (*CacheDB) BatchCreateOnSameNode(ctx SQLContext, fileHashes []string, hubID cdssdk.HubID, priority int) error { | |||
| if len(fileHashes) == 0 { | |||
| return nil | |||
| } | |||
| @@ -70,14 +70,14 @@ func (*CacheDB) BatchCreateOnSameNode(ctx SQLContext, fileHashes []string, nodeI | |||
| for _, hash := range fileHashes { | |||
| caches = append(caches, model.Cache{ | |||
| FileHash: hash, | |||
| NodeID: nodeID, | |||
| HubID: hubID, | |||
| CreateTime: nowTime, | |||
| Priority: priority, | |||
| }) | |||
| } | |||
| return BatchNamedExec(ctx, | |||
| "insert into Cache(FileHash,NodeID,CreateTime,Priority) values(:FileHash,:NodeID,:CreateTime,:Priority)"+ | |||
| "insert into Cache(FileHash,HubID,CreateTime,Priority) values(:FileHash,:HubID,:CreateTime,:Priority)"+ | |||
| " on duplicate key update CreateTime=values(CreateTime), Priority=values(Priority)", | |||
| 4, | |||
| caches, | |||
| @@ -85,13 +85,13 @@ func (*CacheDB) BatchCreateOnSameNode(ctx SQLContext, fileHashes []string, nodeI | |||
| ) | |||
| } | |||
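Both `BatchCreate` and `BatchCreateOnSameNode` rely on MySQL's `INSERT ... ON DUPLICATE KEY UPDATE` so that re-caching a file refreshes `CreateTime` and `Priority` instead of failing on the primary key. A hedged sketch of the same upsert with plain sqlx — `BatchNamedExec` is a project helper (presumably chunking the slice), while recent sqlx versions can expand a `[]struct` argument into a multi-row `VALUES` list directly:

```go
package cachedb

import (
	"time"

	"github.com/jmoiron/sqlx"
)

// Cache mirrors the model used in the diff.
type Cache struct {
	FileHash   string    `db:"FileHash"`
	HubID      int64     `db:"HubID"`
	CreateTime time.Time `db:"CreateTime"`
	Priority   int       `db:"Priority"`
}

// upsertCaches inserts the records, refreshing CreateTime and Priority
// when a (FileHash, HubID) row already exists.
func upsertCaches(db *sqlx.DB, caches []Cache) error {
	if len(caches) == 0 {
		return nil
	}
	_, err := db.NamedExec(
		"insert into Cache(FileHash,HubID,CreateTime,Priority)"+
			" values(:FileHash,:HubID,:CreateTime,:Priority)"+
			" on duplicate key update CreateTime=values(CreateTime), Priority=values(Priority)",
		caches)
	return err
}
```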
| func (*CacheDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error { | |||
| func (*CacheDB) NodeBatchDelete(ctx SQLContext, hubID cdssdk.HubID, fileHashes []string) error { | |||
| if len(fileHashes) == 0 { | |||
| return nil | |||
| } | |||
| // TODO: the IN clause has a length limit | |||
| query, args, err := sqlx.In("delete from Cache where NodeID = ? and FileHash in (?)", nodeID, fileHashes) | |||
| query, args, err := sqlx.In("delete from Cache where HubID = ? and FileHash in (?)", hubID, fileHashes) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| @@ -103,23 +103,23 @@ func (*CacheDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes | |||
| func (*CacheDB) GetCachingFileNodes(ctx SQLContext, fileHash string) ([]cdssdk.Node, error) { | |||
| var x []cdssdk.Node | |||
| err := sqlx.Select(ctx, &x, | |||
| "select Node.* from Cache, Node where Cache.FileHash=? and Cache.NodeID = Node.NodeID", fileHash) | |||
| "select Node.* from Cache, Node where Cache.FileHash=? and Cache.HubID = Node.HubID", fileHash) | |||
| return x, err | |||
| } | |||
| // DeleteNodeAll deletes all records belonging to a node | |||
| func (*CacheDB) DeleteNodeAll(ctx SQLContext, nodeID cdssdk.NodeID) error { | |||
| _, err := ctx.Exec("delete from Cache where NodeID = ?", nodeID) | |||
| func (*CacheDB) DeleteNodeAll(ctx SQLContext, hubID cdssdk.HubID) error { | |||
| _, err := ctx.Exec("delete from Cache where HubID = ?", hubID) | |||
| return err | |||
| } | |||
| // FindCachingFileUserNodes queries the Cache table for the nodes that hold the given data | |||
| func (*CacheDB) FindCachingFileUserNodes(ctx SQLContext, userID cdssdk.NodeID, fileHash string) ([]cdssdk.Node, error) { | |||
| func (*CacheDB) FindCachingFileUserNodes(ctx SQLContext, userID cdssdk.HubID, fileHash string) ([]cdssdk.Node, error) { | |||
| var x []cdssdk.Node | |||
| err := sqlx.Select(ctx, &x, | |||
| "select Node.* from Cache, UserNode, Node where"+ | |||
| " Cache.FileHash=? and Cache.NodeID = UserNode.NodeID and"+ | |||
| " UserNode.UserID = ? and UserNode.NodeID = Node.NodeID", fileHash, userID) | |||
| " Cache.FileHash=? and Cache.HubID = UserNode.HubID and"+ | |||
| " UserNode.UserID = ? and UserNode.HubID = Node.HubID", fileHash, userID) | |||
| return x, err | |||
| } | |||
| */ | |||
| @@ -16,9 +16,9 @@ func (db *DB) Node() *NodeDB { | |||
| return &NodeDB{DB: db} | |||
| } | |||
| func (db *NodeDB) GetByID(ctx SQLContext, nodeID cdssdk.NodeID) (cdssdk.Node, error) { | |||
| func (db *NodeDB) GetByID(ctx SQLContext, hubID cdssdk.HubID) (cdssdk.Node, error) { | |||
| var ret cdssdk.Node | |||
| err := sqlx.Get(ctx, &ret, "select * from Node where NodeID = ?", nodeID) | |||
| err := sqlx.Get(ctx, &ret, "select * from Node where HubID = ?", hubID) | |||
| return ret, err | |||
| } | |||
| @@ -31,13 +31,13 @@ func (db *NodeDB) GetAllNodes(ctx SQLContext) ([]cdssdk.Node, error) { | |||
| // GetUserNodes queries the nodes available to the given user ID | |||
| func (db *NodeDB) GetUserNodes(ctx SQLContext, userID cdssdk.UserID) ([]cdssdk.Node, error) { | |||
| var nodes []cdssdk.Node | |||
| err := sqlx.Select(ctx, &nodes, "select Node.* from UserNode, Node where UserNode.NodeID = Node.NodeID and UserNode.UserID=?", userID) | |||
| err := sqlx.Select(ctx, &nodes, "select Node.* from UserNode, Node where UserNode.HubID = Node.HubID and UserNode.UserID=?", userID) | |||
| return nodes, err | |||
| } | |||
| // UpdateState updates the state and sets the last report time to now | |||
| func (db *NodeDB) UpdateState(ctx SQLContext, nodeID cdssdk.NodeID, state string) error { | |||
| _, err := ctx.Exec("update Node set State = ?, LastReportTime = ? where NodeID = ?", state, time.Now(), nodeID) | |||
| func (db *NodeDB) UpdateState(ctx SQLContext, hubID cdssdk.HubID, state string) error { | |||
| _, err := ctx.Exec("update Node set State = ?, LastReportTime = ? where HubID = ?", state, time.Now(), hubID) | |||
| return err | |||
| } | |||
| */ | |||
| @@ -15,14 +15,14 @@ func (db *DB) NodeConnectivity() *NodeConnectivityDB { | |||
| return &NodeConnectivityDB{DB: db} | |||
| } | |||
| func (db *NodeConnectivityDB) BatchGetByFromNode(ctx SQLContext, fromNodeIDs []cdssdk.NodeID) ([]model.NodeConnectivity, error) { | |||
| if len(fromNodeIDs) == 0 { | |||
| func (db *NodeConnectivityDB) BatchGetByFromNode(ctx SQLContext, fromHubIDs []cdssdk.HubID) ([]model.NodeConnectivity, error) { | |||
| if len(fromHubIDs) == 0 { | |||
| return nil, nil | |||
| } | |||
| var ret []model.NodeConnectivity | |||
| sql, args, err := sqlx.In("select * from NodeConnectivity where FromNodeID in (?)", fromNodeIDs) | |||
| sql, args, err := sqlx.In("select * from NodeConnectivity where FromHubID in (?)", fromHubIDs) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| @@ -36,7 +36,7 @@ func (db *NodeConnectivityDB) BatchUpdateOrCreate(ctx SQLContext, cons []model.N | |||
| } | |||
| return BatchNamedExec(ctx, | |||
| "insert into NodeConnectivity(FromNodeID, ToNodeID, Delay, TestTime) values(:FromNodeID, :ToNodeID, :Delay, :TestTime) as new"+ | |||
| "insert into NodeConnectivity(FromHubID, ToHubID, Delay, TestTime) values(:FromHubID, :ToHubID, :Delay, :TestTime) as new"+ | |||
| " on duplicate key update Delay = new.Delay, TestTime = new.TestTime", 4, cons, nil) | |||
| } | |||
| */ | |||
| @@ -175,9 +175,9 @@ func (db *ObjectDB) GetPackageObjectDetails(ctx SQLContext, packageID cdssdk.Pac | |||
| return details, nil | |||
| } | |||
| func (*ObjectDB) GetObjectsIfAnyBlockOnNode(ctx SQLContext, nodeID cdssdk.NodeID) ([]cdssdk.Object, error) { | |||
| func (*ObjectDB) GetObjectsIfAnyBlockOnNode(ctx SQLContext, hubID cdssdk.HubID) ([]cdssdk.Object, error) { | |||
| var temps []model.TempObject | |||
| err := sqlx.Select(ctx, &temps, "select * from Object where ObjectID in (select ObjectID from ObjectBlock where NodeID = ?) order by ObjectID asc", nodeID) | |||
| err := sqlx.Select(ctx, &temps, "select * from Object where ObjectID in (select ObjectID from ObjectBlock where HubID = ?) order by ObjectID asc", hubID) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting objects: %w", err) | |||
| } | |||
| @@ -247,7 +247,7 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds [] | |||
| objBlocks = append(objBlocks, stgmod.ObjectBlock{ | |||
| ObjectID: addedObjIDs[i], | |||
| Index: 0, | |||
| NodeID: add.NodeID, | |||
| HubID: add.HubID, | |||
| FileHash: add.FileHash, | |||
| }) | |||
| } | |||
| @@ -260,7 +260,7 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds [] | |||
| for _, add := range adds { | |||
| caches = append(caches, model.Cache{ | |||
| FileHash: add.FileHash, | |||
| NodeID: add.NodeID, | |||
| HubID: add.HubID, | |||
| CreateTime: time.Now(), | |||
| Priority: 0, | |||
| }) | |||
| @@ -326,7 +326,7 @@ func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, objs []coormq.Updating | |||
| for _, blk := range obj.Blocks { | |||
| caches = append(caches, model.Cache{ | |||
| FileHash: blk.FileHash, | |||
| NodeID: blk.NodeID, | |||
| HubID: blk.HubID, | |||
| CreateTime: time.Now(), | |||
| Priority: 0, | |||
| }) | |||
| @@ -16,9 +16,9 @@ func (db *DB) ObjectAccessStat() *ObjectAccessStatDB { | |||
| return &ObjectAccessStatDB{db} | |||
| } | |||
| func (*ObjectAccessStatDB) Get(ctx SQLContext, objID cdssdk.ObjectID, nodeID cdssdk.NodeID) (stgmod.ObjectAccessStat, error) { | |||
| func (*ObjectAccessStatDB) Get(ctx SQLContext, objID cdssdk.ObjectID, hubID cdssdk.HubID) (stgmod.ObjectAccessStat, error) { | |||
| var ret stgmod.ObjectAccessStat | |||
| err := sqlx.Get(ctx, &ret, "select * from ObjectAccessStat where ObjectID=? and NodeID=?", objID, nodeID) | |||
| err := sqlx.Get(ctx, &ret, "select * from ObjectAccessStat where ObjectID=? and HubID=?", objID, hubID) | |||
| return ret, err | |||
| } | |||
| @@ -43,13 +43,13 @@ func (*ObjectAccessStatDB) BatchGetByObjectID(ctx SQLContext, objIDs []cdssdk.Ob | |||
| return ret, err | |||
| } | |||
| func (*ObjectAccessStatDB) BatchGetByObjectIDOnNode(ctx SQLContext, objIDs []cdssdk.ObjectID, nodeID cdssdk.NodeID) ([]stgmod.ObjectAccessStat, error) { | |||
| func (*ObjectAccessStatDB) BatchGetByObjectIDOnNode(ctx SQLContext, objIDs []cdssdk.ObjectID, hubID cdssdk.HubID) ([]stgmod.ObjectAccessStat, error) { | |||
| if len(objIDs) == 0 { | |||
| return nil, nil | |||
| } | |||
| var ret []stgmod.ObjectAccessStat | |||
| stmt, args, err := sqlx.In("select * from ObjectAccessStat where ObjectID in (?) and NodeID=?", objIDs, nodeID) | |||
| stmt, args, err := sqlx.In("select * from ObjectAccessStat where ObjectID in (?) and HubID=?", objIDs, hubID) | |||
| if err != nil { | |||
| return ret, err | |||
| } | |||
| @@ -63,8 +63,8 @@ func (*ObjectAccessStatDB) BatchAddCounter(ctx SQLContext, entries []coormq.AddA | |||
| return nil | |||
| } | |||
| sql := "insert into ObjectAccessStat(ObjectID, NodeID, Counter, Amount) " + | |||
| " values(:ObjectID, :NodeID, :Counter, 0) as new" + | |||
| sql := "insert into ObjectAccessStat(ObjectID, HubID, Counter, Amount) " + | |||
| " values(:ObjectID, :HubID, :Counter, 0) as new" + | |||
| " on duplicate key update ObjectAccessStat.Counter=ObjectAccessStat.Counter+new.Counter" | |||
| err := BatchNamedExec(ctx, sql, 4, entries, nil) | |||
| return err | |||
| @@ -19,9 +19,9 @@ func (db *DB) ObjectBlock() *ObjectBlockDB { | |||
| return &ObjectBlockDB{DB: db} | |||
| } | |||
| func (db *ObjectBlockDB) GetByNodeID(ctx SQLContext, nodeID cdssdk.NodeID) ([]stgmod.ObjectBlock, error) { | |||
| func (db *ObjectBlockDB) GetByHubID(ctx SQLContext, hubID cdssdk.HubID) ([]stgmod.ObjectBlock, error) { | |||
| var rets []stgmod.ObjectBlock | |||
| err := sqlx.Select(ctx, &rets, "select * from ObjectBlock where NodeID = ?", nodeID) | |||
| err := sqlx.Select(ctx, &rets, "select * from ObjectBlock where HubID = ?", hubID) | |||
| return rets, err | |||
| } | |||
| @@ -45,8 +45,8 @@ func (db *ObjectBlockDB) BatchGetByObjectID(ctx SQLContext, objectIDs []cdssdk.O | |||
| return blocks, nil | |||
| } | |||
| func (db *ObjectBlockDB) Create(ctx SQLContext, objectID cdssdk.ObjectID, index int, nodeID cdssdk.NodeID, fileHash string) error { | |||
| _, err := ctx.Exec("insert into ObjectBlock values(?,?,?,?)", objectID, index, nodeID, fileHash) | |||
| func (db *ObjectBlockDB) Create(ctx SQLContext, objectID cdssdk.ObjectID, index int, hubID cdssdk.HubID, fileHash string) error { | |||
| _, err := ctx.Exec("insert into ObjectBlock values(?,?,?,?)", objectID, index, hubID, fileHash) | |||
| return err | |||
| } | |||
| @@ -56,7 +56,7 @@ func (db *ObjectBlockDB) BatchCreate(ctx SQLContext, blocks []stgmod.ObjectBlock | |||
| } | |||
| return BatchNamedExec(ctx, | |||
| "insert ignore into ObjectBlock(ObjectID, `Index`, NodeID, FileHash) values(:ObjectID, :Index, :NodeID, :FileHash)", | |||
| "insert ignore into ObjectBlock(ObjectID, `Index`, HubID, FileHash) values(:ObjectID, :Index, :HubID, :FileHash)", | |||
| 4, | |||
| blocks, | |||
| nil, | |||
| @@ -87,12 +87,12 @@ func (db *ObjectBlockDB) DeleteInPackage(ctx SQLContext, packageID cdssdk.Packag | |||
| return err | |||
| } | |||
| func (db *ObjectBlockDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error { | |||
| func (db *ObjectBlockDB) NodeBatchDelete(ctx SQLContext, hubID cdssdk.HubID, fileHashes []string) error { | |||
| if len(fileHashes) == 0 { | |||
| return nil | |||
| } | |||
| query, args, err := sqlx.In("delete from ObjectBlock where NodeID = ? and FileHash in (?)", nodeID, fileHashes) | |||
| query, args, err := sqlx.In("delete from ObjectBlock where HubID = ? and FileHash in (?)", hubID, fileHashes) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| @@ -117,14 +117,14 @@ func (db *ObjectBlockDB) CountBlockWithHash(ctx SQLContext, fileHash string) (in | |||
| // Splits the string by commas and parses each part as an int64 ID. | |||
| // Note: the caller must guarantee that every part is a valid base-10 number. | |||
| func splitConcatedNodeID(idStr string) []cdssdk.NodeID { | |||
| func splitConcatedHubID(idStr string) []cdssdk.HubID { | |||
| idStrs := strings.Split(idStr, ",") | |||
| ids := make([]cdssdk.NodeID, 0, len(idStrs)) | |||
| ids := make([]cdssdk.HubID, 0, len(idStrs)) | |||
| for _, str := range idStrs { | |||
| // Assumes the incoming ID is in valid numeric format | |||
| id, _ := strconv.ParseInt(str, 10, 64) | |||
| ids = append(ids, cdssdk.NodeID(id)) | |||
| ids = append(ids, cdssdk.HubID(id)) | |||
| } | |||
| return ids | |||
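`splitConcatedHubID` deliberately ignores parse errors, as its comment warns. Where the input is not already validated, a defensive variant is straightforward — this sketch keeps the same splitting logic but surfaces bad segments (plain `int64` is used here in place of `cdssdk.HubID`):

```go
package db

import (
	"fmt"
	"strconv"
	"strings"
)

// splitConcatedHubIDChecked splits a comma-separated ID list and returns an
// error on the first segment that is not a valid base-10 integer.
func splitConcatedHubIDChecked(idStr string) ([]int64, error) {
	parts := strings.Split(idStr, ",")
	ids := make([]int64, 0, len(parts))
	for _, p := range parts {
		id, err := strconv.ParseInt(strings.TrimSpace(p), 10, 64)
		if err != nil {
			return nil, fmt.Errorf("parsing hub id %q: %w", p, err)
		}
		ids = append(ids, id)
	}
	return ids, nil
}
```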
| @@ -16,9 +16,9 @@ func (db *DB) PackageAccessStat() *PackageAccessStatDB { | |||
| return &PackageAccessStatDB{db} | |||
| } | |||
| func (*PackageAccessStatDB) Get(ctx SQLContext, pkgID cdssdk.PackageID, nodeID cdssdk.NodeID) (stgmod.PackageAccessStat, error) { | |||
| func (*PackageAccessStatDB) Get(ctx SQLContext, pkgID cdssdk.PackageID, hubID cdssdk.HubID) (stgmod.PackageAccessStat, error) { | |||
| var ret stgmod.PackageAccessStat | |||
| err := sqlx.Get(ctx, &ret, "select * from PackageAccessStat where PackageID=? and NodeID=?", pkgID, nodeID) | |||
| err := sqlx.Get(ctx, &ret, "select * from PackageAccessStat where PackageID=? and HubID=?", pkgID, hubID) | |||
| return ret, err | |||
| } | |||
| @@ -48,8 +48,8 @@ func (*PackageAccessStatDB) BatchAddCounter(ctx SQLContext, entries []coormq.Add | |||
| return nil | |||
| } | |||
| sql := "insert into PackageAccessStat(PackageID, NodeID, Counter, Amount)" + | |||
| " values(:PackageID, :NodeID, :Counter, 0) as new" + | |||
| sql := "insert into PackageAccessStat(PackageID, HubID, Counter, Amount)" + | |||
| " values(:PackageID, :HubID, :Counter, 0) as new" + | |||
| " on duplicate key update Counter=Counter+new.Counter" | |||
| err := BatchNamedExec(ctx, sql, 4, entries, nil) | |||
| return err | |||
| @@ -22,9 +22,9 @@ func (*LocationDB) GetByID(ctx SQLContext, id int64) (model.Location, error) { | |||
| func (db *LocationDB) FindLocationByExternalIP(ctx SQLContext, ip string) (model.Location, error) { | |||
| var locID int64 | |||
| err := ctx.Table("Node").Select("LocationID").Where("ExternalIP = ?", ip).Scan(&locID).Error | |||
| err := ctx.Table("Hub").Select("LocationID").Where("ExternalIP = ?", ip).Scan(&locID).Error | |||
| if err != nil { | |||
| return model.Location{}, fmt.Errorf("finding node by external ip: %w", err) | |||
| return model.Location{}, fmt.Errorf("finding hub by external ip: %w", err) | |||
| } | |||
| loc, err := db.GetByID(ctx, locID) | |||
| @@ -28,13 +28,13 @@ func (UserBucket) TableName() string { | |||
| return "UserBucket" | |||
| } | |||
| type UserNode struct { | |||
| type UserHub struct { | |||
| UserID cdssdk.UserID `gorm:"column:UserID; primaryKey; type:bigint" json:"userID"` | |||
| NodeID cdssdk.NodeID `gorm:"column:NodeID; primaryKey; type:bigint" json:"nodeID"` | |||
| HubID cdssdk.HubID `gorm:"column:HubID; primaryKey; type:bigint" json:"hubID"` | |||
| } | |||
| func (UserNode) TableName() string { | |||
| return "UserNode" | |||
| func (UserHub) TableName() string { | |||
| return "UserHub" | |||
| } | |||
| type UserStorage struct { | |||
| @@ -52,7 +52,7 @@ type Package = cdssdk.Package | |||
| type Object = cdssdk.Object | |||
| type NodeConnectivity = cdssdk.NodeConnectivity | |||
| type HubConnectivity = cdssdk.HubConnectivity | |||
| type ObjectBlock = stgmod.ObjectBlock | |||
| @@ -6,52 +6,52 @@ import ( | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| ) | |||
| type NodeDB struct { | |||
| type HubDB struct { | |||
| *DB | |||
| } | |||
| func (db *DB) Node() *NodeDB { | |||
| return &NodeDB{DB: db} | |||
| func (db *DB) Hub() *HubDB { | |||
| return &HubDB{DB: db} | |||
| } | |||
| func (*NodeDB) GetAllNodes(ctx SQLContext) ([]cdssdk.Node, error) { | |||
| var ret []cdssdk.Node | |||
| func (*HubDB) GetAllHubs(ctx SQLContext) ([]cdssdk.Hub, error) { | |||
| var ret []cdssdk.Hub | |||
| err := ctx.Table("Node").Find(&ret).Error | |||
| err := ctx.Table("Hub").Find(&ret).Error | |||
| return ret, err | |||
| } | |||
| func (*NodeDB) GetByID(ctx SQLContext, nodeID cdssdk.NodeID) (cdssdk.Node, error) { | |||
| var ret cdssdk.Node | |||
| err := ctx.Table("Node").Where("NodeID = ?", nodeID).Find(&ret).Error | |||
| func (*HubDB) GetByID(ctx SQLContext, hubID cdssdk.HubID) (cdssdk.Hub, error) { | |||
| var ret cdssdk.Hub | |||
| err := ctx.Table("Hub").Where("HubID = ?", hubID).Find(&ret).Error | |||
| return ret, err | |||
| } | |||
| func (*NodeDB) BatchGetByID(ctx SQLContext, nodeIDs []cdssdk.NodeID) ([]cdssdk.Node, error) { | |||
| var ret []cdssdk.Node | |||
| err := ctx.Table("Node").Where("NodeID IN (?)", nodeIDs).Find(&ret).Error | |||
| func (*HubDB) BatchGetByID(ctx SQLContext, hubIDs []cdssdk.HubID) ([]cdssdk.Hub, error) { | |||
| var ret []cdssdk.Hub | |||
| err := ctx.Table("Hub").Where("HubID IN (?)", hubIDs).Find(&ret).Error | |||
| return ret, err | |||
| } | |||
| // GetUserNodes queries the nodes available to the given user ID | |||
| func (*NodeDB) GetUserNodes(ctx SQLContext, userID cdssdk.UserID) ([]cdssdk.Node, error) { | |||
| var nodes []cdssdk.Node | |||
| // GetUserHubs queries the hubs available to the given user ID | |||
| func (*HubDB) GetUserHubs(ctx SQLContext, userID cdssdk.UserID) ([]cdssdk.Hub, error) { | |||
| var hubs []cdssdk.Hub | |||
| err := ctx. | |||
| Table("Node"). | |||
| Select("Node.*"). | |||
| Joins("JOIN UserNode ON UserNode.NodeID = Node.NodeID"). | |||
| Where("UserNode.UserID = ?", userID). | |||
| Find(&nodes).Error | |||
| return nodes, err | |||
| Table("Hub"). | |||
| Select("Hub.*"). | |||
| Joins("JOIN UserHub ON UserHub.HubID = Hub.HubID"). | |||
| Where("UserHub.UserID = ?", userID). | |||
| Find(&hubs).Error | |||
| return hubs, err | |||
| } | |||
| // UpdateState updates the state and sets the last report time to now | |||
| func (*NodeDB) UpdateState(ctx SQLContext, nodeID cdssdk.NodeID, state string) error { | |||
| func (*HubDB) UpdateState(ctx SQLContext, hubID cdssdk.HubID, state string) error { | |||
| err := ctx. | |||
| Model(&cdssdk.Node{}). | |||
| Where("NodeID = ?", nodeID). | |||
| Model(&cdssdk.Hub{}). | |||
| Where("HubID = ?", hubID). | |||
| Updates(map[string]interface{}{ | |||
| "State": state, | |||
| "LastReportTime": time.Now(), | |||
| @@ -6,32 +6,32 @@ import ( | |||
| "gorm.io/gorm/clause" | |||
| ) | |||
| type NodeConnectivityDB struct { | |||
| type HubConnectivityDB struct { | |||
| *DB | |||
| } | |||
| func (db *DB) NodeConnectivity() *NodeConnectivityDB { | |||
| return &NodeConnectivityDB{DB: db} | |||
| func (db *DB) HubConnectivity() *HubConnectivityDB { | |||
| return &HubConnectivityDB{DB: db} | |||
| } | |||
| func (db *NodeConnectivityDB) BatchGetByFromNode(ctx SQLContext, fromNodeIDs []cdssdk.NodeID) ([]model.NodeConnectivity, error) { | |||
| if len(fromNodeIDs) == 0 { | |||
| func (db *HubConnectivityDB) BatchGetByFromHub(ctx SQLContext, fromHubIDs []cdssdk.HubID) ([]model.HubConnectivity, error) { | |||
| if len(fromHubIDs) == 0 { | |||
| return nil, nil | |||
| } | |||
| var ret []model.NodeConnectivity | |||
| var ret []model.HubConnectivity | |||
| err := ctx.Table("NodeConnectivity").Where("FromNodeID IN (?)", fromNodeIDs).Find(&ret).Error | |||
| err := ctx.Table("HubConnectivity").Where("FromHubID IN (?)", fromHubIDs).Find(&ret).Error | |||
| return ret, err | |||
| } | |||
| func (db *NodeConnectivityDB) BatchUpdateOrCreate(ctx SQLContext, cons []model.NodeConnectivity) error { | |||
| func (db *HubConnectivityDB) BatchUpdateOrCreate(ctx SQLContext, cons []model.HubConnectivity) error { | |||
| if len(cons) == 0 { | |||
| return nil | |||
| } | |||
| // Batch insert-or-update via GORM | |||
| return ctx.Table("NodeConnectivity").Clauses(clause.OnConflict{ | |||
| return ctx.Table("HubConnectivity").Clauses(clause.OnConflict{ | |||
| UpdateAll: true, | |||
| }).Create(&cons).Error | |||
| } | |||
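This is GORM's portable spelling of the same upsert the sqlx layer writes by hand: `clause.OnConflict{UpdateAll: true}` turns the batch `Create` into insert-or-update on any key conflict. A minimal sketch under assumed types — the fields follow the diff, but the gorm tags are guesses:

```go
package db

import (
	"time"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

// HubConnectivity mirrors model.HubConnectivity from the diff.
type HubConnectivity struct {
	FromHubID int64 `gorm:"column:FromHubID;primaryKey"`
	ToHubID   int64 `gorm:"column:ToHubID;primaryKey"`
	Delay     *float32
	TestTime  time.Time
}

// batchUpsert inserts the rows, overwriting all non-key columns when the
// (FromHubID, ToHubID) pair already exists.
func batchUpsert(db *gorm.DB, cons []HubConnectivity) error {
	if len(cons) == 0 {
		return nil
	}
	return db.Table("HubConnectivity").
		Clauses(clause.OnConflict{UpdateAll: true}).
		Create(&cons).Error
}
```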
| @@ -88,14 +88,14 @@ func (db *ObjectBlockDB) CountBlockWithHash(ctx SQLContext, fileHash string) (in | |||
| // Splits the string by commas and parses each part as an int64 ID. | |||
| // Note: the caller must guarantee that every part is a valid base-10 number. | |||
| func splitConcatedNodeID(idStr string) []cdssdk.NodeID { | |||
| func splitConcatedHubID(idStr string) []cdssdk.HubID { | |||
| idStrs := strings.Split(idStr, ",") | |||
| ids := make([]cdssdk.NodeID, 0, len(idStrs)) | |||
| ids := make([]cdssdk.HubID, 0, len(idStrs)) | |||
| for _, str := range idStrs { | |||
| // Assumes the incoming ID is in valid numeric format | |||
| id, _ := strconv.ParseInt(str, 10, 64) | |||
| ids = append(ids, cdssdk.NodeID(id)) | |||
| ids = append(ids, cdssdk.HubID(id)) | |||
| } | |||
| return ids | |||
| @@ -80,7 +80,7 @@ func (db *StorageDB) GetUserStorageByName(ctx SQLContext, userID cdssdk.UserID, | |||
| return stg, err | |||
| } | |||
| func (db *StorageDB) GetHubStorages(ctx SQLContext, hubID cdssdk.NodeID) ([]model.Storage, error) { | |||
| func (db *StorageDB) GetHubStorages(ctx SQLContext, hubID cdssdk.HubID) ([]model.Storage, error) { | |||
| var stgs []model.Storage | |||
| err := ctx.Table("Storage").Select("Storage.*").Find(&stgs, "MasterHub = ?", hubID).Error | |||
| return stgs, err | |||
| @@ -89,7 +89,7 @@ func (db *StorageDB) GetHubStorages(ctx SQLContext, hubID cdssdk.NodeID) ([]mode | |||
| func (db *StorageDB) FillDetails(ctx SQLContext, details []stgmod.StorageDetail) error { | |||
| stgsMp := make(map[cdssdk.StorageID]*stgmod.StorageDetail) | |||
| stgIDs := make([]cdssdk.StorageID, 0, len(details)) | |||
| var masterHubIDs []cdssdk.NodeID | |||
| var masterHubIDs []cdssdk.HubID | |||
| for i := range details { | |||
| stgsMp[details[i].Storage.StorageID] = &details[i] | |||
| stgIDs = append(stgIDs, details[i].Storage.StorageID) | |||
| @@ -97,13 +97,13 @@ func (db *StorageDB) FillDetails(ctx SQLContext, details []stgmod.StorageDetail) | |||
| } | |||
| // Fetch the master hub info | |||
| masterHubs, err := db.Node().BatchGetByID(ctx, masterHubIDs) | |||
| masterHubs, err := db.Hub().BatchGetByID(ctx, masterHubIDs) | |||
| if err != nil && err != gorm.ErrRecordNotFound { | |||
| return fmt.Errorf("getting master hub: %w", err) | |||
| } | |||
| masterHubMap := make(map[cdssdk.NodeID]cdssdk.Node) | |||
| masterHubMap := make(map[cdssdk.HubID]cdssdk.Hub) | |||
| for _, hub := range masterHubs { | |||
| masterHubMap[hub.NodeID] = hub | |||
| masterHubMap[hub.HubID] = hub | |||
| } | |||
| for _, stg := range stgsMp { | |||
| if stg.Storage.MasterHub != 0 { | |||
| @@ -8,10 +8,10 @@ import ( | |||
| ) | |||
| const ( | |||
| IPFSLockPathPrefix = "IPFS" | |||
| IPFSNodeIDPathIndex = 1 | |||
| IPFSBuzyLock = "Buzy" | |||
| IPFSGCLock = "GC" | |||
| IPFSLockPathPrefix = "IPFS" | |||
| IPFSHubIDPathIndex = 1 | |||
| IPFSBuzyLock = "Buzy" | |||
| IPFSGCLock = "GC" | |||
| ) | |||
| type IPFSLock struct { | |||
| @@ -28,7 +28,7 @@ func NewIPFSLock() *IPFSLock { | |||
| // CanLock reports whether this lock can be acquired | |||
| func (l *IPFSLock) CanLock(lock distlock.Lock) error { | |||
| nodeLock, ok := l.nodeLocks[lock.Path[IPFSNodeIDPathIndex]] | |||
| nodeLock, ok := l.nodeLocks[lock.Path[IPFSHubIDPathIndex]] | |||
| if !ok { | |||
| // Cannot simply return nil here: if the lock data is malformed, the lock must not be granted either. | |||
| // An empty provider is used to perform that check. | |||
| @@ -40,12 +40,12 @@ func (l *IPFSLock) CanLock(lock distlock.Lock) error { | |||
| // Lock acquires the lock. It does not re-check lockability; the caller must have called CanLock beforehand. | |||
| func (l *IPFSLock) Lock(reqID string, lock distlock.Lock) error { | |||
| nodeID := lock.Path[IPFSNodeIDPathIndex] | |||
| hubID := lock.Path[IPFSHubIDPathIndex] | |||
| nodeLock, ok := l.nodeLocks[nodeID] | |||
| nodeLock, ok := l.nodeLocks[hubID] | |||
| if !ok { | |||
| nodeLock = NewIPFSNodeLock() | |||
| l.nodeLocks[nodeID] = nodeLock | |||
| l.nodeLocks[hubID] = nodeLock | |||
| } | |||
| return nodeLock.Lock(reqID, lock) | |||
| @@ -53,9 +53,9 @@ func (l *IPFSLock) Lock(reqID string, lock distlock.Lock) error { | |||
| // Unlock releases the lock | |||
| func (l *IPFSLock) Unlock(reqID string, lock distlock.Lock) error { | |||
| nodeID := lock.Path[IPFSNodeIDPathIndex] | |||
| hubID := lock.Path[IPFSHubIDPathIndex] | |||
| nodeLock, ok := l.nodeLocks[nodeID] | |||
| nodeLock, ok := l.nodeLocks[hubID] | |||
| if !ok { | |||
| return nil | |||
| } | |||
| @@ -18,12 +18,12 @@ func Test_IPFSLock(t *testing.T) { | |||
| title: "同节点,同一个Buzy锁", | |||
| initLocks: []distlock.Lock{ | |||
| { | |||
| Path: []string{IPFSLockPathPrefix, "node1"}, | |||
| Path: []string{IPFSLockPathPrefix, "hub1"}, | |||
| Name: IPFSBuzyLock, | |||
| }, | |||
| }, | |||
| doLock: distlock.Lock{ | |||
| Path: []string{IPFSLockPathPrefix, "node1"}, | |||
| Path: []string{IPFSLockPathPrefix, "hub1"}, | |||
| Name: IPFSBuzyLock, | |||
| }, | |||
| wantOK: true, | |||
| @@ -32,12 +32,12 @@ func Test_IPFSLock(t *testing.T) { | |||
| title: "同节点,同一个GC锁", | |||
| initLocks: []distlock.Lock{ | |||
| { | |||
| Path: []string{IPFSLockPathPrefix, "node1"}, | |||
| Path: []string{IPFSLockPathPrefix, "hub1"}, | |||
| Name: IPFSGCLock, | |||
| }, | |||
| }, | |||
| doLock: distlock.Lock{ | |||
| Path: []string{IPFSLockPathPrefix, "node1"}, | |||
| Path: []string{IPFSLockPathPrefix, "hub1"}, | |||
| Name: IPFSGCLock, | |||
| }, | |||
| wantOK: true, | |||
| @@ -46,13 +46,13 @@ func Test_IPFSLock(t *testing.T) { | |||
| title: "同时设置Buzy和GC", | |||
| initLocks: []distlock.Lock{ | |||
| { | |||
| Path: []string{IPFSLockPathPrefix, "node1"}, | |||
| Path: []string{IPFSLockPathPrefix, "hub1"}, | |||
| Name: IPFSBuzyLock, | |||
| Target: *NewStringLockTarget(), | |||
| }, | |||
| }, | |||
| doLock: distlock.Lock{ | |||
| Path: []string{IPFSLockPathPrefix, "node1"}, | |||
| Path: []string{IPFSLockPathPrefix, "hub1"}, | |||
| Name: IPFSGCLock, | |||
| Target: *NewStringLockTarget(), | |||
| }, | |||
| @@ -81,7 +81,7 @@ func Test_IPFSLock(t *testing.T) { | |||
| ipfsLock := NewIPFSLock() | |||
| lock := distlock.Lock{ | |||
| Path: []string{IPFSLockPathPrefix, "node1"}, | |||
| Path: []string{IPFSLockPathPrefix, "hub1"}, | |||
| Name: IPFSBuzyLock, | |||
| } | |||
| @@ -93,7 +93,7 @@ func Test_IPFSLock(t *testing.T) { | |||
| ipfsLock.Unlock("req1", lock) | |||
| lock = distlock.Lock{ | |||
| Path: []string{IPFSLockPathPrefix, "node1"}, | |||
| Path: []string{IPFSLockPathPrefix, "hub1"}, | |||
| Name: IPFSGCLock, | |||
| } | |||
| err = ipfsLock.CanLock(lock) | |||
| @@ -8,10 +8,10 @@ import ( | |||
| ) | |||
| const ( | |||
| StorageLockPathPrefix = "Storage" | |||
| StorageNodeIDPathIndex = 1 | |||
| StorageBuzyLock = "Buzy" | |||
| StorageGCLock = "GC" | |||
| StorageLockPathPrefix = "Storage" | |||
| StorageHubIDPathIndex = 1 | |||
| StorageBuzyLock = "Buzy" | |||
| StorageGCLock = "GC" | |||
| ) | |||
| type StorageLock struct { | |||
| @@ -28,7 +28,7 @@ func NewStorageLock() *StorageLock { | |||
| // CanLock reports whether this lock can be acquired | |||
| func (l *StorageLock) CanLock(lock distlock.Lock) error { | |||
| nodeLock, ok := l.nodeLocks[lock.Path[StorageNodeIDPathIndex]] | |||
| nodeLock, ok := l.nodeLocks[lock.Path[StorageHubIDPathIndex]] | |||
| if !ok { | |||
| // Cannot simply return nil here: if the lock data is malformed, the lock must not be granted either. | |||
| // An empty provider is used to perform that check. | |||
| @@ -40,12 +40,12 @@ func (l *StorageLock) CanLock(lock distlock.Lock) error { | |||
| // Lock acquires the lock. It does not re-check lockability; the caller must have called CanLock beforehand. | |||
| func (l *StorageLock) Lock(reqID string, lock distlock.Lock) error { | |||
| nodeID := lock.Path[StorageNodeIDPathIndex] | |||
| hubID := lock.Path[StorageHubIDPathIndex] | |||
| nodeLock, ok := l.nodeLocks[nodeID] | |||
| nodeLock, ok := l.nodeLocks[hubID] | |||
| if !ok { | |||
| nodeLock = NewStorageNodeLock() | |||
| l.nodeLocks[nodeID] = nodeLock | |||
| l.nodeLocks[hubID] = nodeLock | |||
| } | |||
| return nodeLock.Lock(reqID, lock) | |||
| @@ -53,9 +53,9 @@ func (l *StorageLock) Lock(reqID string, lock distlock.Lock) error { | |||
| // Unlock releases the lock | |||
| func (l *StorageLock) Unlock(reqID string, lock distlock.Lock) error { | |||
| nodeID := lock.Path[StorageNodeIDPathIndex] | |||
| hubID := lock.Path[StorageHubIDPathIndex] | |||
| nodeLock, ok := l.nodeLocks[nodeID] | |||
| nodeLock, ok := l.nodeLocks[hubID] | |||
| if !ok { | |||
| return nil | |||
| } | |||
| @@ -33,6 +33,6 @@ func (b *ShardStoreLockReqBuilder) GC(stgID cdssdk.StorageID) *ShardStoreLockReq | |||
| return b | |||
| } | |||
| func (b *ShardStoreLockReqBuilder) makePath(nodeID cdssdk.StorageID) []string { | |||
| return []string{lockprovider.IPFSLockPathPrefix, strconv.FormatInt(int64(nodeID), 10)} | |||
| func (b *ShardStoreLockReqBuilder) makePath(stgID cdssdk.StorageID) []string { | |||
| return []string{lockprovider.IPFSLockPathPrefix, strconv.FormatInt(int64(stgID), 10)} | |||
| } | |||
| @@ -33,11 +33,11 @@ func initProviders() []distlock.PathProvider { | |||
| func initMetadataLockProviders() []distlock.PathProvider { | |||
| return []distlock.PathProvider{ | |||
| distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Node"), | |||
| distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Hub"), | |||
| distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Storage"), | |||
| distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "User"), | |||
| distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "UserBucket"), | |||
| distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "UserNode"), | |||
| distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "UserHub"), | |||
| distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "UserStorage"), | |||
| distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Bucket"), | |||
| distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Object"), | |||
| @@ -4,7 +4,7 @@ type Config struct { | |||
| // Number of cached strips per EC-mode object | |||
| MaxStripCacheCount int `json:"maxStripCacheCount"` | |||
| // When the latency to a download node exceeds this value, the node receives a heavier score penalty during evaluation; unit: ms | |||
| HighLatencyNodeMs float64 `json:"highLatencyNodeMs"` | |||
| HighLatencyHubMs float64 `json:"highLatencyHubMs"` | |||
| // In EC mode, the number of strips to prefetch per object; at least 1 | |||
| ECStripPrefetchCount int `json:"ecStripPrefetchCount"` | |||
| } | |||
| @@ -210,13 +210,13 @@ func (iter *DownloadObjectIterator) downloadNoneOrRepObject(obj downloadReqeust2 | |||
| } | |||
| func (iter *DownloadObjectIterator) downloadECObject(req downloadReqeust2, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, error) { | |||
| allNodes, err := iter.sortDownloadStorages(req) | |||
| allStorages, err := iter.sortDownloadStorages(req) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| bsc, blocks := iter.getMinReadingBlockSolution(allNodes, ecRed.K) | |||
| osc, stg := iter.getMinReadingObjectSolution(allNodes, ecRed.K) | |||
| bsc, blocks := iter.getMinReadingBlockSolution(allStorages, ecRed.K) | |||
| osc, stg := iter.getMinReadingObjectSolution(allStorages, ecRed.K) | |||
| if bsc < osc { | |||
| var logStrs []any = []any{fmt.Sprintf("downloading ec object %v from blocks: ", req.Raw.ObjectID)} | |||
| @@ -291,37 +291,37 @@ func (iter *DownloadObjectIterator) sortDownloadStorages(req downloadReqeust2) ( | |||
| } | |||
| } | |||
| downloadNodeMap := make(map[cdssdk.StorageID]*downloadStorageInfo) | |||
| downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo) | |||
| for _, id := range req.Detail.PinnedAt { | |||
| node, ok := downloadNodeMap[id] | |||
| storage, ok := downloadStorageMap[id] | |||
| if !ok { | |||
| mod := iter.allStorages[id] | |||
| node = &downloadStorageInfo{ | |||
| storage = &downloadStorageInfo{ | |||
| Storage: mod, | |||
| ObjectPinned: true, | |||
| Distance: iter.getNodeDistance(mod), | |||
| Distance: iter.getStorageDistance(mod), | |||
| } | |||
| downloadNodeMap[id] = node | |||
| downloadStorageMap[id] = storage | |||
| } | |||
| node.ObjectPinned = true | |||
| storage.ObjectPinned = true | |||
| } | |||
| for _, b := range req.Detail.Blocks { | |||
| node, ok := downloadNodeMap[b.StorageID] | |||
| storage, ok := downloadStorageMap[b.StorageID] | |||
| if !ok { | |||
| mod := iter.allStorages[b.StorageID] | |||
| node = &downloadStorageInfo{ | |||
| storage = &downloadStorageInfo{ | |||
| Storage: mod, | |||
| Distance: iter.getNodeDistance(mod), | |||
| Distance: iter.getStorageDistance(mod), | |||
| } | |||
| downloadNodeMap[b.StorageID] = node | |||
| downloadStorageMap[b.StorageID] = storage | |||
| } | |||
| node.Blocks = append(node.Blocks, b) | |||
| storage.Blocks = append(storage.Blocks, b) | |||
| } | |||
| return sort2.Sort(lo.Values(downloadNodeMap), func(left, right *downloadStorageInfo) int { | |||
| return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int { | |||
| return sort2.Cmp(left.Distance, right.Distance) | |||
| }), nil | |||
| } | |||
| @@ -364,23 +364,23 @@ func (iter *DownloadObjectIterator) getMinReadingObjectSolution(sortedStgs []*do | |||
| return dist, downloadStg | |||
| } | |||
| func (iter *DownloadObjectIterator) getNodeDistance(stg stgmod.StorageDetail) float64 { | |||
| if stgglb.Local.NodeID != nil { | |||
| if stg.MasterHub.NodeID == *stgglb.Local.NodeID { | |||
| return consts.NodeDistanceSameNode | |||
| func (iter *DownloadObjectIterator) getStorageDistance(stg stgmod.StorageDetail) float64 { | |||
| if stgglb.Local.HubID != nil { | |||
| if stg.MasterHub.HubID == *stgglb.Local.HubID { | |||
| return consts.StorageDistanceSameStorage | |||
| } | |||
| } | |||
| if stg.MasterHub.LocationID == stgglb.Local.LocationID { | |||
| return consts.NodeDistanceSameLocation | |||
| return consts.StorageDistanceSameLocation | |||
| } | |||
| c := iter.downloader.conn.Get(stg.MasterHub.NodeID) | |||
| if c == nil || c.Delay == nil || *c.Delay > time.Duration(float64(time.Millisecond)*iter.downloader.cfg.HighLatencyNodeMs) { | |||
| return consts.NodeDistanceHighLatencyNode | |||
| c := iter.downloader.conn.Get(stg.MasterHub.HubID) | |||
| if c == nil || c.Delay == nil || *c.Delay > time.Duration(float64(time.Millisecond)*iter.downloader.cfg.HighLatencyHubMs) { | |||
| return consts.HubDistanceHighLatencyHub | |||
| } | |||
| return consts.NodeDistanceOther | |||
| return consts.StorageDistanceOther | |||
| } | |||
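`getStorageDistance` collapses the placement decision into four tiers: same hub, same location, measured-latency OK, and high-latency (or unmeasured) hubs, which get the heaviest penalty. A sketch of just the tier logic — the constant names mirror `consts` from the diff, but the numeric values here are invented for illustration:

```go
package download

import "time"

// Hypothetical tier values; the real ones live in the project's consts package.
const (
	StorageDistanceSameStorage  = 0.0
	StorageDistanceSameLocation = 1.0
	StorageDistanceOther        = 5.0
	HubDistanceHighLatencyHub   = 10.0
)

// storageDistance ranks a candidate. A nil delay means connectivity was
// never measured, which is treated the same as exceeding the threshold.
func storageDistance(sameHub, sameLocation bool, delay *time.Duration, highLatency time.Duration) float64 {
	if sameHub {
		return StorageDistanceSameStorage
	}
	if sameLocation {
		return StorageDistanceSameLocation
	}
	if delay == nil || *delay > highLatency {
		return HubDistanceHighLatencyHub
	}
	return StorageDistanceOther
}
```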
| func (iter *DownloadObjectIterator) downloadFromStorage(stg *stgmod.StorageDetail, req downloadReqeust2) (io.ReadCloser, error) { | |||
| @@ -98,7 +98,7 @@ func (s *LRCStripIterator) downloading() { | |||
| var froms []ioswitchlrc.From | |||
| for _, b := range s.blocks { | |||
| stg := b.Storage | |||
| froms = append(froms, ioswitchlrc.NewFromNode(b.Block.FileHash, *stg.MasterHub, stg.Storage, b.Block.Index)) | |||
| froms = append(froms, ioswitchlrc.NewFromStorage(b.Block.FileHash, *stg.MasterHub, stg.Storage, b.Block.Index)) | |||
| } | |||
| toExec, hd := ioswitchlrc.NewToDriverWithRange(-1, exec.Range{ | |||
| @@ -18,12 +18,12 @@ var _ = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[exec.Wo | |||
| ))) | |||
| type AgentWorker struct { | |||
| Node cdssdk.Node | |||
| Hub cdssdk.Hub | |||
| Address cdssdk.GRPCAddressInfo | |||
| } | |||
| func (w *AgentWorker) NewClient() (exec.WorkerClient, error) { | |||
| cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(w.Node, w.Address)) | |||
| cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(w.Hub, w.Address)) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| @@ -32,7 +32,7 @@ func (w *AgentWorker) NewClient() (exec.WorkerClient, error) { | |||
| } | |||
| func (w *AgentWorker) String() string { | |||
| return w.Node.String() | |||
| return w.Hub.String() | |||
| } | |||
| func (w *AgentWorker) Equals(worker exec.WorkerInfo) bool { | |||
| @@ -41,7 +41,7 @@ func (w *AgentWorker) Equals(worker exec.WorkerInfo) bool { | |||
| return false | |||
| } | |||
| return w.Node.NodeID == aw.Node.NodeID | |||
| return w.Hub.HubID == aw.Hub.HubID | |||
| } | |||
| type AgentWorkerClient struct { | |||
| @@ -59,12 +59,12 @@ func (f *FromDriver) GetDataIndex() int { | |||
| type FromShardstore struct { | |||
| FileHash cdssdk.FileHash | |||
| Hub cdssdk.Node | |||
| Hub cdssdk.Hub | |||
| Storage cdssdk.Storage | |||
| DataIndex int | |||
| } | |||
| func NewFromShardstore(fileHash cdssdk.FileHash, hub cdssdk.Node, storage cdssdk.Storage, dataIndex int) *FromShardstore { | |||
| func NewFromShardstore(fileHash cdssdk.FileHash, hub cdssdk.Hub, storage cdssdk.Storage, dataIndex int) *FromShardstore { | |||
| return &FromShardstore{ | |||
| FileHash: fileHash, | |||
| Hub: hub, | |||
| @@ -109,14 +109,14 @@ func (t *ToDriver) GetRange() exec.Range { | |||
| } | |||
| type ToShardStore struct { | |||
| Hub cdssdk.Node | |||
| Hub cdssdk.Hub | |||
| Storage cdssdk.Storage | |||
| DataIndex int | |||
| Range exec.Range | |||
| FileHashStoreKey string | |||
| } | |||
| func NewToShardStore(hub cdssdk.Node, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string) *ToShardStore { | |||
| func NewToShardStore(hub cdssdk.Hub, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string) *ToShardStore { | |||
| return &ToShardStore{ | |||
| Hub: hub, | |||
| Storage: stg, | |||
| @@ -125,7 +125,7 @@ func NewToShardStore(hub cdssdk.Node, stg cdssdk.Storage, dataIndex int, fileHas | |||
| } | |||
| } | |||
| func NewToShardStoreWithRange(hub cdssdk.Node, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string, rng exec.Range) *ToShardStore { | |||
| func NewToShardStoreWithRange(hub cdssdk.Hub, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string, rng exec.Range) *ToShardStore { | |||
| return &ToShardStore{ | |||
| Hub: hub, | |||
| Storage: stg, | |||
| @@ -11,11 +11,11 @@ import ( | |||
| ) | |||
| type HttpHubWorker struct { | |||
| Node cdssdk.Node | |||
| Hub cdssdk.Hub | |||
| } | |||
| func (w *HttpHubWorker) NewClient() (exec.WorkerClient, error) { | |||
| addressInfo := w.Node.Address.(*cdssdk.HttpAddressInfo) | |||
| addressInfo := w.Hub.Address.(*cdssdk.HttpAddressInfo) | |||
| baseUrl := "http://" + addressInfo.ExternalIP + ":" + strconv.Itoa(addressInfo.Port) | |||
| config := cdsapi.Config{ | |||
| URL: baseUrl, | |||
| @@ -31,7 +31,7 @@ func (w *HttpHubWorker) NewClient() (exec.WorkerClient, error) { | |||
| } | |||
| func (w *HttpHubWorker) String() string { | |||
| return w.Node.String() | |||
| return w.Hub.String() | |||
| } | |||
| func (w *HttpHubWorker) Equals(worker exec.WorkerInfo) bool { | |||
| @@ -40,7 +40,7 @@ func (w *HttpHubWorker) Equals(worker exec.WorkerInfo) bool { | |||
| return false | |||
| } | |||
| return w.Node.NodeID == aw.Node.NodeID | |||
| return w.Hub.HubID == aw.Hub.HubID | |||
| } | |||
| type HttpHubWorkerClient struct { | |||
| @@ -245,11 +245,11 @@ func (p *DefaultParser) buildFromNode(ctx *ParseContext, f ioswitch2.From) (ops2 | |||
| switch addr := f.Hub.Address.(type) { | |||
| case *cdssdk.HttpAddressInfo: | |||
| t.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Node: f.Hub}) | |||
| t.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: f.Hub}) | |||
| t.Env().Pinned = true | |||
| case *cdssdk.GRPCAddressInfo: | |||
| t.Env().ToEnvWorker(&ioswitch2.AgentWorker{Node: f.Hub, Address: *addr}) | |||
| t.Env().ToEnvWorker(&ioswitch2.AgentWorker{Hub: f.Hub, Address: *addr}) | |||
| t.Env().Pinned = true | |||
| default: | |||
| @@ -285,10 +285,10 @@ func (p *DefaultParser) buildToNode(ctx *ParseContext, t ioswitch2.To) (ops2.ToN | |||
| switch addr := t.Hub.Address.(type) { | |||
| case *cdssdk.HttpAddressInfo: | |||
| n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Node: t.Hub}) | |||
| n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: t.Hub}) | |||
| case *cdssdk.GRPCAddressInfo: | |||
| n.Env().ToEnvWorker(&ioswitch2.AgentWorker{Node: t.Hub, Address: *addr}) | |||
| n.Env().ToEnvWorker(&ioswitch2.AgentWorker{Hub: t.Hub, Address: *addr}) | |||
| default: | |||
| return nil, fmt.Errorf("unsupported node address type %T", addr) | |||
| @@ -15,12 +15,12 @@ import ( | |||
| // ))) | |||
| type AgentWorker struct { | |||
| Node cdssdk.Node | |||
| Hub cdssdk.Hub | |||
| Address cdssdk.GRPCAddressInfo | |||
| } | |||
| func (w *AgentWorker) NewClient() (exec.WorkerClient, error) { | |||
| cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(w.Node, w.Address)) | |||
| cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(w.Hub, w.Address)) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| @@ -29,7 +29,7 @@ func (w *AgentWorker) NewClient() (exec.WorkerClient, error) { | |||
| } | |||
| func (w *AgentWorker) String() string { | |||
| return w.Node.String() | |||
| return w.Hub.String() | |||
| } | |||
| func (w *AgentWorker) Equals(worker exec.WorkerInfo) bool { | |||
| @@ -38,7 +38,7 @@ func (w *AgentWorker) Equals(worker exec.WorkerInfo) bool { | |||
| return false | |||
| } | |||
| return w.Node.NodeID == aw.Node.NodeID | |||
| return w.Hub.HubID == aw.Hub.HubID | |||
| } | |||
| type AgentWorkerClient struct { | |||
| @@ -38,15 +38,15 @@ func (f *FromDriver) GetDataIndex() int { | |||
| type FromNode struct { | |||
| FileHash cdssdk.FileHash | |||
| Node cdssdk.Node | |||
| Hub cdssdk.Hub | |||
| Storage cdssdk.Storage | |||
| DataIndex int | |||
| } | |||
| func NewFromNode(fileHash cdssdk.FileHash, node cdssdk.Node, storage cdssdk.Storage, dataIndex int) *FromNode { | |||
| func NewFromStorage(fileHash cdssdk.FileHash, hub cdssdk.Hub, storage cdssdk.Storage, dataIndex int) *FromNode { | |||
| return &FromNode{ | |||
| FileHash: fileHash, | |||
| Node: node, | |||
| Hub: hub, | |||
| Storage: storage, | |||
| DataIndex: dataIndex, | |||
| } | |||
| } | |||
| @@ -87,14 +87,14 @@ func (t *ToDriver) GetRange() exec.Range { | |||
| } | |||
| type ToNode struct { | |||
| Hub cdssdk.Node | |||
| Hub cdssdk.Hub | |||
| Storage cdssdk.Storage | |||
| DataIndex int | |||
| Range exec.Range | |||
| FileHashStoreKey string | |||
| } | |||
| func NewToStorage(hub cdssdk.Node, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string) *ToNode { | |||
| func NewToStorage(hub cdssdk.Hub, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string) *ToNode { | |||
| return &ToNode{ | |||
| Hub: hub, | |||
| Storage: stg, | |||
| @@ -103,7 +103,7 @@ func NewToStorage(hub cdssdk.Node, stg cdssdk.Storage, dataIndex int, fileHashSt | |||
| } | |||
| } | |||
| func NewToStorageWithRange(hub cdssdk.Node, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string, rng exec.Range) *ToNode { | |||
| func NewToStorageWithRange(hub cdssdk.Hub, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string, rng exec.Range) *ToNode { | |||
| return &ToNode{ | |||
| Hub: hub, | |||
| Storage: stg, | |||
| @@ -73,7 +73,7 @@ func buildFromNode(ctx *GenerateContext, f ioswitchlrc.From) (ops2.FromNode, err | |||
| } | |||
| // TODO2: support the HTTP protocol | |||
| t.Env().ToEnvWorker(&ioswitchlrc.AgentWorker{Node: f.Node, Address: *f.Node.Address.(*cdssdk.GRPCAddressInfo)}) | |||
| t.Env().ToEnvWorker(&ioswitchlrc.AgentWorker{Hub: f.Hub, Address: *f.Hub.Address.(*cdssdk.GRPCAddressInfo)}) | |||
| t.Env().Pinned = true | |||
| return t, nil | |||
| @@ -107,7 +107,7 @@ func buildToNode(ctx *GenerateContext, t ioswitchlrc.To) (ops2.ToNode, error) { | |||
| // n.Env().ToEnvWorker(&ioswitchlrc.HttpHubWorker{Node: t.Hub}) | |||
| // TODO2: support the HTTP protocol | |||
| case *cdssdk.GRPCAddressInfo: | |||
| n.Env().ToEnvWorker(&ioswitchlrc.AgentWorker{Node: t.Hub, Address: *addr}) | |||
| n.Env().ToEnvWorker(&ioswitchlrc.AgentWorker{Hub: t.Hub, Address: *addr}) | |||
| default: | |||
| return nil, fmt.Errorf("unsupported node address type %T", addr) | |||
| @@ -10,10 +10,10 @@ import ( | |||
| type Client struct { | |||
| rabbitCli *mq.RabbitMQTransport | |||
| id cdssdk.NodeID | |||
| id cdssdk.HubID | |||
| } | |||
| func NewClient(id cdssdk.NodeID, cfg *stgmq.Config) (*Client, error) { | |||
| func NewClient(id cdssdk.HubID, cfg *stgmq.Config) (*Client, error) { | |||
| rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), stgmq.MakeAgentQueueName(int64(id)), "") | |||
| if err != nil { | |||
| return nil, err | |||
| @@ -30,23 +30,23 @@ func (c *Client) Close() { | |||
| } | |||
| type Pool interface { | |||
| Acquire(id cdssdk.NodeID) (*Client, error) | |||
| Acquire(id cdssdk.HubID) (*Client, error) | |||
| Release(cli *Client) | |||
| } | |||
| type pool struct { | |||
| mqcfg *stgmq.Config | |||
| shareds map[cdssdk.NodeID]*Client | |||
| shareds map[cdssdk.HubID]*Client | |||
| lock sync.Mutex | |||
| } | |||
| func NewPool(mqcfg *stgmq.Config) Pool { | |||
| return &pool{ | |||
| mqcfg: mqcfg, | |||
| shareds: make(map[cdssdk.NodeID]*Client), | |||
| shareds: make(map[cdssdk.HubID]*Client), | |||
| } | |||
| } | |||
| func (p *pool) Acquire(id cdssdk.NodeID) (*Client, error) { | |||
| func (p *pool) Acquire(id cdssdk.HubID) (*Client, error) { | |||
| p.lock.Lock() | |||
| defer p.lock.Unlock() | |||
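The pool keeps one shared MQ client per hub ID, created lazily under a mutex; the hunk above is cut off mid-`Acquire`, so here is a self-contained sketch of the whole pattern (the `Client` stub and constructor signature are stand-ins; the real `Release` side is not shown in the diff):

```go
package agtmq

import "sync"

// Client stands in for the RabbitMQ client in the diff.
type Client struct{ id int64 }

func newClient(id int64) (*Client, error) { return &Client{id: id}, nil }

// pool mirrors the diff's shared-client pool: one lazily created client per
// hub ID. The mutex covers both the lookup and the creation, so concurrent
// Acquire calls for the same hub cannot race to create two clients.
type pool struct {
	shareds map[int64]*Client
	lock    sync.Mutex
}

func (p *pool) Acquire(id int64) (*Client, error) {
	p.lock.Lock()
	defer p.lock.Unlock()
	if cli, ok := p.shareds[id]; ok {
		return cli, nil
	}
	cli, err := newClient(id)
	if err != nil {
		return nil, err
	}
	p.shareds[id] = cli
	return cli, nil
}
```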
| @@ -20,7 +20,7 @@ type Server struct { | |||
| rabbitSvr mq.RabbitMQServer | |||
| } | |||
| func NewServer(svc Service, id cdssdk.NodeID, cfg *mymq.Config) (*Server, error) { | |||
| func NewServer(svc Service, id cdssdk.HubID, cfg *mymq.Config) (*Server, error) { | |||
| srv := &Server{ | |||
| service: svc, | |||
| } | |||
| @@ -145,26 +145,26 @@ var _ = Register(Service.StartStorageCreatePackage) | |||
| type StartStorageCreatePackage struct { | |||
| mq.MessageBodyBase | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| BucketID cdssdk.BucketID `json:"bucketID"` | |||
| Name string `json:"name"` | |||
| StorageID cdssdk.StorageID `json:"storageID"` | |||
| Path string `json:"path"` | |||
| NodeAffinity *cdssdk.NodeID `json:"nodeAffinity"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| BucketID cdssdk.BucketID `json:"bucketID"` | |||
| Name string `json:"name"` | |||
| StorageID cdssdk.StorageID `json:"storageID"` | |||
| Path string `json:"path"` | |||
| StorageAffinity cdssdk.StorageID `json:"storageAffinity"` | |||
| } | |||
| type StartStorageCreatePackageResp struct { | |||
| mq.MessageBodyBase | |||
| TaskID string `json:"taskID"` | |||
| } | |||
| func NewStartStorageCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string, nodeAffinity *cdssdk.NodeID) *StartStorageCreatePackage { | |||
| func NewStartStorageCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string, stgAffinity cdssdk.StorageID) *StartStorageCreatePackage { | |||
| return &StartStorageCreatePackage{ | |||
| UserID: userID, | |||
| BucketID: bucketID, | |||
| Name: name, | |||
| StorageID: storageID, | |||
| Path: path, | |||
| NodeAffinity: nodeAffinity, | |||
| UserID: userID, | |||
| BucketID: bucketID, | |||
| Name: name, | |||
| StorageID: storageID, | |||
| Path: path, | |||
| StorageAffinity: stgAffinity, | |||
| } | |||
| } | |||
| func NewStartStorageCreatePackageResp(taskID string) *StartStorageCreatePackageResp { | |||
| @@ -1,51 +1,4 @@ | |||
| package coordinator | |||
| import "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| type AgentService interface { | |||
| TempCacheReport(msg *TempCacheReport) | |||
| AgentStatusReport(msg *AgentStatusReport) | |||
| } | |||
| // Sent by the agent to the coordinator to report temporarily cached data | |||
| var _ = RegisterNoReply(Service.TempCacheReport) | |||
| type TempCacheReport struct { | |||
| mq.MessageBodyBase | |||
| NodeID int64 `json:"nodeID"` | |||
| Hashes []string `json:"hashes"` | |||
| } | |||
| func NewTempCacheReportBody(nodeID int64, hashes []string) *TempCacheReport { | |||
| return &TempCacheReport{ | |||
| NodeID: nodeID, | |||
| Hashes: hashes, | |||
| } | |||
| } | |||
| func (client *Client) TempCacheReport(msg *TempCacheReport) error { | |||
| return mq.Send(AgentService.TempCacheReport, client.rabbitCli, msg) | |||
| } | |||
| // Sent by the agent to the coordinator to report latency and the reachability of IPFS and the resource directory | |||
| var _ = RegisterNoReply(Service.AgentStatusReport) | |||
| type AgentStatusReport struct { | |||
| mq.MessageBodyBase | |||
| NodeID int64 `json:"nodeID"` | |||
| NodeDelayIDs []int64 `json:"nodeDelayIDs"` | |||
| NodeDelays []int `json:"nodeDelays"` | |||
| LocalDirStatus string `json:"localDirStatus"` | |||
| } | |||
| func NewAgentStatusReportBody(nodeID int64, nodeDelayIDs []int64, nodeDelays []int, localDirStatus string) *AgentStatusReport { | |||
| return &AgentStatusReport{ | |||
| NodeID: nodeID, | |||
| NodeDelayIDs: nodeDelayIDs, | |||
| NodeDelays: nodeDelays, | |||
| LocalDirStatus: localDirStatus, | |||
| } | |||
| } | |||
| func (client *Client) AgentStatusReport(msg *AgentStatusReport) error { | |||
| return mq.Send(AgentService.AgentStatusReport, client.rabbitCli, msg) | |||
| } | |||
| @@ -6,36 +6,36 @@ import ( | |||
| stgmod "gitlink.org.cn/cloudream/storage/common/models" | |||
| ) | |||
| type NodeService interface { | |||
| type HubService interface { | |||
| GetHubConfig(msg *GetHubConfig) (*GetHubConfigResp, *mq.CodeMessage) | |||
| GetUserNodes(msg *GetUserNodes) (*GetUserNodesResp, *mq.CodeMessage) | |||
| GetUserHubs(msg *GetUserHubs) (*GetUserHubsResp, *mq.CodeMessage) | |||
| GetNodes(msg *GetNodes) (*GetNodesResp, *mq.CodeMessage) | |||
| GetHubs(msg *GetHubs) (*GetHubsResp, *mq.CodeMessage) | |||
| GetNodeConnectivities(msg *GetNodeConnectivities) (*GetNodeConnectivitiesResp, *mq.CodeMessage) | |||
| GetHubConnectivities(msg *GetHubConnectivities) (*GetHubConnectivitiesResp, *mq.CodeMessage) | |||
| UpdateNodeConnectivities(msg *UpdateNodeConnectivities) (*UpdateNodeConnectivitiesResp, *mq.CodeMessage) | |||
| UpdateHubConnectivities(msg *UpdateHubConnectivities) (*UpdateHubConnectivitiesResp, *mq.CodeMessage) | |||
| } | |||
| var _ = Register(Service.GetHubConfig) | |||
| type GetHubConfig struct { | |||
| mq.MessageBodyBase | |||
| HubID cdssdk.NodeID `json:"hubID"` | |||
| HubID cdssdk.HubID `json:"hubID"` | |||
| } | |||
| type GetHubConfigResp struct { | |||
| mq.MessageBodyBase | |||
| Hub cdssdk.Node `json:"hub"` | |||
| Hub cdssdk.Hub `json:"hub"` | |||
| Storages []stgmod.StorageDetail `json:"storages"` | |||
| } | |||
| func ReqGetHubConfig(hubID cdssdk.NodeID) *GetHubConfig { | |||
| func ReqGetHubConfig(hubID cdssdk.HubID) *GetHubConfig { | |||
| return &GetHubConfig{ | |||
| HubID: hubID, | |||
| } | |||
| } | |||
| func RespGetHubConfig(hub cdssdk.Node, storages []stgmod.StorageDetail) *GetHubConfigResp { | |||
| func RespGetHubConfig(hub cdssdk.Hub, storages []stgmod.StorageDetail) *GetHubConfigResp { | |||
| return &GetHubConfigResp{ | |||
| Hub: hub, | |||
| Storages: storages, | |||
| @@ -46,111 +46,111 @@ func (client *Client) GetHubConfig(msg *GetHubConfig) (*GetHubConfigResp, error) | |||
| } | |||
| // Query the hubs available to the user | |||
| var _ = Register(Service.GetUserNodes) | |||
| var _ = Register(Service.GetUserHubs) | |||
| type GetUserNodes struct { | |||
| type GetUserHubs struct { | |||
| mq.MessageBodyBase | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| } | |||
| type GetUserNodesResp struct { | |||
| type GetUserHubsResp struct { | |||
| mq.MessageBodyBase | |||
| Nodes []cdssdk.Node `json:"nodes"` | |||
| Hubs []cdssdk.Hub `json:"hubs"` | |||
| } | |||
| func NewGetUserNodes(userID cdssdk.UserID) *GetUserNodes { | |||
| return &GetUserNodes{ | |||
| func NewGetUserHubs(userID cdssdk.UserID) *GetUserHubs { | |||
| return &GetUserHubs{ | |||
| UserID: userID, | |||
| } | |||
| } | |||
| func NewGetUserNodesResp(nodes []cdssdk.Node) *GetUserNodesResp { | |||
| return &GetUserNodesResp{ | |||
| Nodes: nodes, | |||
| func NewGetUserHubsResp(hubs []cdssdk.Hub) *GetUserHubsResp { | |||
| return &GetUserHubsResp{ | |||
| Hubs: hubs, | |||
| } | |||
| } | |||
| func (client *Client) GetUserNodes(msg *GetUserNodes) (*GetUserNodesResp, error) { | |||
| return mq.Request(Service.GetUserNodes, client.rabbitCli, msg) | |||
| func (client *Client) GetUserHubs(msg *GetUserHubs) (*GetUserHubsResp, error) { | |||
| return mq.Request(Service.GetUserHubs, client.rabbitCli, msg) | |||
| } | |||
| // Get the info of the specified nodes. If NodeIDs is nil, all Nodes are returned | |||
| var _ = Register(Service.GetNodes) | |||
| // Get the info of the specified hubs. If HubIDs is nil, all Hubs are returned | |||
| var _ = Register(Service.GetHubs) | |||
| type GetNodes struct { | |||
| type GetHubs struct { | |||
| mq.MessageBodyBase | |||
| NodeIDs []cdssdk.NodeID `json:"nodeIDs"` | |||
| HubIDs []cdssdk.HubID `json:"hubIDs"` | |||
| } | |||
| type GetNodesResp struct { | |||
| type GetHubsResp struct { | |||
| mq.MessageBodyBase | |||
| Nodes []cdssdk.Node `json:"nodes"` | |||
| Hubs []cdssdk.Hub `json:"hubs"` | |||
| } | |||
| func NewGetNodes(nodeIDs []cdssdk.NodeID) *GetNodes { | |||
| return &GetNodes{ | |||
| NodeIDs: nodeIDs, | |||
| func NewGetHubs(hubIDs []cdssdk.HubID) *GetHubs { | |||
| return &GetHubs{ | |||
| HubIDs: hubIDs, | |||
| } | |||
| } | |||
| func NewGetNodesResp(nodes []cdssdk.Node) *GetNodesResp { | |||
| return &GetNodesResp{ | |||
| Nodes: nodes, | |||
| func NewGetHubsResp(hubs []cdssdk.Hub) *GetHubsResp { | |||
| return &GetHubsResp{ | |||
| Hubs: hubs, | |||
| } | |||
| } | |||
| func (r *GetNodesResp) GetNode(id cdssdk.NodeID) *cdssdk.Node { | |||
| for _, n := range r.Nodes { | |||
| if n.NodeID == id { | |||
| func (r *GetHubsResp) GetHub(id cdssdk.HubID) *cdssdk.Hub { | |||
| for _, n := range r.Hubs { | |||
| if n.HubID == id { | |||
| return &n | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (client *Client) GetNodes(msg *GetNodes) (*GetNodesResp, error) { | |||
| return mq.Request(Service.GetNodes, client.rabbitCli, msg) | |||
| func (client *Client) GetHubs(msg *GetHubs) (*GetHubsResp, error) { | |||
| return mq.Request(Service.GetHubs, client.rabbitCli, msg) | |||
| } | |||
| // Get hub connectivity info | |||
| var _ = Register(Service.GetNodeConnectivities) | |||
| var _ = Register(Service.GetHubConnectivities) | |||
| type GetNodeConnectivities struct { | |||
| type GetHubConnectivities struct { | |||
| mq.MessageBodyBase | |||
| NodeIDs []cdssdk.NodeID `json:"nodeIDs"` | |||
| HubIDs []cdssdk.HubID `json:"hubIDs"` | |||
| } | |||
| type GetNodeConnectivitiesResp struct { | |||
| type GetHubConnectivitiesResp struct { | |||
| mq.MessageBodyBase | |||
| Connectivities []cdssdk.NodeConnectivity `json:"nodes"` | |||
| Connectivities []cdssdk.HubConnectivity `json:"hubs"` | |||
| } | |||
| func ReqGetNodeConnectivities(nodeIDs []cdssdk.NodeID) *GetNodeConnectivities { | |||
| return &GetNodeConnectivities{ | |||
| NodeIDs: nodeIDs, | |||
| func ReqGetHubConnectivities(hubIDs []cdssdk.HubID) *GetHubConnectivities { | |||
| return &GetHubConnectivities{ | |||
| HubIDs: hubIDs, | |||
| } | |||
| } | |||
| func RespGetNodeConnectivities(cons []cdssdk.NodeConnectivity) *GetNodeConnectivitiesResp { | |||
| return &GetNodeConnectivitiesResp{ | |||
| func RespGetHubConnectivities(cons []cdssdk.HubConnectivity) *GetHubConnectivitiesResp { | |||
| return &GetHubConnectivitiesResp{ | |||
| Connectivities: cons, | |||
| } | |||
| } | |||
| func (client *Client) GetNodeConnectivities(msg *GetNodeConnectivities) (*GetNodeConnectivitiesResp, error) { | |||
| return mq.Request(Service.GetNodeConnectivities, client.rabbitCli, msg) | |||
| func (client *Client) GetHubConnectivities(msg *GetHubConnectivities) (*GetHubConnectivitiesResp, error) { | |||
| return mq.Request(Service.GetHubConnectivities, client.rabbitCli, msg) | |||
| } | |||
| // Batch-update hub connectivity info | |||
| var _ = Register(Service.UpdateNodeConnectivities) | |||
| var _ = Register(Service.UpdateHubConnectivities) | |||
| type UpdateNodeConnectivities struct { | |||
| type UpdateHubConnectivities struct { | |||
| mq.MessageBodyBase | |||
| Connectivities []cdssdk.NodeConnectivity `json:"connectivities"` | |||
| Connectivities []cdssdk.HubConnectivity `json:"connectivities"` | |||
| } | |||
| type UpdateNodeConnectivitiesResp struct { | |||
| type UpdateHubConnectivitiesResp struct { | |||
| mq.MessageBodyBase | |||
| } | |||
| func ReqUpdateNodeConnectivities(cons []cdssdk.NodeConnectivity) *UpdateNodeConnectivities { | |||
| return &UpdateNodeConnectivities{ | |||
| func ReqUpdateHubConnectivities(cons []cdssdk.HubConnectivity) *UpdateHubConnectivities { | |||
| return &UpdateHubConnectivities{ | |||
| Connectivities: cons, | |||
| } | |||
| } | |||
| func RespUpdateNodeConnectivities() *UpdateNodeConnectivitiesResp { | |||
| return &UpdateNodeConnectivitiesResp{} | |||
| func RespUpdateHubConnectivities() *UpdateHubConnectivitiesResp { | |||
| return &UpdateHubConnectivitiesResp{} | |||
| } | |||
| func (client *Client) UpdateNodeConnectivities(msg *UpdateNodeConnectivities) (*UpdateNodeConnectivitiesResp, error) { | |||
| return mq.Request(Service.UpdateNodeConnectivities, client.rabbitCli, msg) | |||
| func (client *Client) UpdateHubConnectivities(msg *UpdateHubConnectivities) (*UpdateHubConnectivitiesResp, error) { | |||
| return mq.Request(Service.UpdateHubConnectivities, client.rabbitCli, msg) | |||
| } | |||
| @@ -213,10 +213,10 @@ func ReqGetPackageCachedStorages(userID cdssdk.UserID, packageID cdssdk.PackageI | |||
| } | |||
| } | |||
| func ReqGetPackageCachedStoragesResp(nodeInfos []cdssdk.StoragePackageCachingInfo, packageSize int64) *GetPackageCachedStoragesResp { | |||
| func ReqGetPackageCachedStoragesResp(stgInfos []cdssdk.StoragePackageCachingInfo, packageSize int64) *GetPackageCachedStoragesResp { | |||
| return &GetPackageCachedStoragesResp{ | |||
| PackageCachingInfo: cdssdk.PackageCachingInfo{ | |||
| StorageInfos: nodeInfos, | |||
| StorageInfos: stgInfos, | |||
| PackageSize: packageSize, | |||
| }, | |||
| } | |||
| @@ -14,7 +14,7 @@ type Service interface { | |||
| CacheService | |||
| NodeService | |||
| HubService | |||
| ObjectService | |||
| @@ -4,12 +4,12 @@ import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| type AgentCheckState struct { | |||
| EventBase | |||
| NodeID cdssdk.NodeID `json:"nodeID"` | |||
| HubID cdssdk.HubID `json:"hubID"` | |||
| } | |||
| func NewAgentCheckState(nodeID cdssdk.NodeID) *AgentCheckState { | |||
| func NewAgentCheckState(hubID cdssdk.HubID) *AgentCheckState { | |||
| return &AgentCheckState{ | |||
| NodeID: nodeID, | |||
| HubID: hubID, | |||
| } | |||
| } | |||
| @@ -4,7 +4,7 @@ import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| type CleanPinned struct { | |||
| EventBase | |||
| PackageID cdssdk.PackageID `json:"nodeID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| func NewCleanPinned(packageID cdssdk.PackageID) *CleanPinned { | |||
| @@ -44,8 +44,8 @@ func migrate(configPath string) { | |||
| migrateOne(db, cdssdk.Bucket{}) | |||
| migrateOne(db, model.Cache{}) | |||
| migrateOne(db, model.Location{}) | |||
| migrateOne(db, model.NodeConnectivity{}) | |||
| migrateOne(db, cdssdk.Node{}) | |||
| migrateOne(db, model.HubConnectivity{}) | |||
| migrateOne(db, cdssdk.Hub{}) | |||
| migrateOne(db, stgmod.ObjectAccessStat{}) | |||
| migrateOne(db, stgmod.ObjectBlock{}) | |||
| migrateOne(db, cdssdk.Object{}) | |||
| @@ -59,7 +59,7 @@ func migrate(configPath string) { | |||
| migrateOne(db, model.UserStorage{}) | |||
| migrateOne(db, model.UserBucket{}) | |||
| migrateOne(db, model.User{}) | |||
| migrateOne(db, model.UserNode{}) | |||
| migrateOne(db, model.UserHub{}) | |||
| fmt.Println("migrate success") | |||
| } | |||
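`migrateOne` itself is not shown in this diff; it most likely wraps GORM's `AutoMigrate`. A hedged sketch of such a helper, under that assumption:

```go
package main

import (
	"fmt"
	"os"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// Hub is a stand-in model; the real migration passes cdssdk.Hub,
// model.HubConnectivity, model.UserHub, and the rest.
type Hub struct{ HubID int64 }

// migrateOneSketch mirrors the assumed shape of migrateOne: migrate a
// single model and abort the whole migration on failure.
func migrateOneSketch(db *gorm.DB, model any) {
	if err := db.AutoMigrate(model); err != nil {
		fmt.Printf("migrating %T: %v\n", model, err)
		os.Exit(1)
	}
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	migrateOneSketch(db, &Hub{})
	fmt.Println("migrate success")
}
```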
| @@ -1,24 +1 @@ | |||
| package mq | |||
| import ( | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| func (service *Service) TempCacheReport(msg *coormq.TempCacheReport) { | |||
| //service.db.BatchInsertOrUpdateCache(msg.Hashes, msg.NodeID) | |||
| } | |||
| func (service *Service) AgentStatusReport(msg *coormq.AgentStatusReport) { | |||
| //jh: based on the IP in the command, insert into the node delay table and the NodeStatus of the node table | |||
| // based on the IP in the command, insert into the node delay table | |||
| // TODO | |||
| /* | |||
| ips := utils.GetAgentIps() | |||
| Insert_NodeDelay(msg.Body.IP, ips, msg.Body.AgentDelay) | |||
| // read the node region NodeLocation from the config table | |||
| // insert the NodeStatus of the node table | |||
| Insert_Node(msg.Body.IP, msg.Body.IP, msg.Body.IPFSStatus, msg.Body.LocalDirStatus) | |||
| */ | |||
| } | |||
| @@ -31,7 +31,7 @@ func (svc *Service) CachePackageMoved(msg *coormq.CachePackageMoved) (*coormq.Ca | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID).WithField("NodeID", msg.StorageID).Warn(err.Error()) | |||
| logger.WithField("PackageID", msg.PackageID).WithField("HubID", msg.StorageID).Warn(err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "create package pinned objects failed") | |||
| } | |||
| @@ -52,13 +52,13 @@ func (svc *Service) CacheRemovePackage(msg *coormq.CacheRemovePackage) (*coormq. | |||
| err = svc.db2.PinnedObject().DeleteInPackageAtStorage(tx, msg.PackageID, msg.StorageID) | |||
| if err != nil { | |||
| return fmt.Errorf("delete pinned objects in package at node: %w", err) | |||
| return fmt.Errorf("delete pinned objects in package at storage: %w", err) | |||
| } | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID).WithField("NodeID", msg.StorageID).Warn(err.Error()) | |||
| logger.WithField("PackageID", msg.PackageID).WithField("HubID", msg.StorageID).Warn(err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "remove pinned package failed") | |||
| } | |||
| @@ -16,7 +16,7 @@ import ( | |||
| func (svc *Service) GetHubConfig(msg *coormq.GetHubConfig) (*coormq.GetHubConfigResp, *mq.CodeMessage) { | |||
| log := logger.WithField("HubID", msg.HubID) | |||
| hub, err := svc.db2.Node().GetByID(svc.db2.DefCtx(), msg.HubID) | |||
| hub, err := svc.db2.Hub().GetByID(svc.db2.DefCtx(), msg.HubID) | |||
| if err != nil { | |||
| log.Warnf("getting hub: %v", err) | |||
| return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("getting hub: %v", err)) | |||
| @@ -67,78 +67,78 @@ func (svc *Service) GetHubConfig(msg *coormq.GetHubConfig) (*coormq.GetHubConfig | |||
| return mq.ReplyOK(coormq.RespGetHubConfig(hub, details)) | |||
| } | |||
| func (svc *Service) GetUserNodes(msg *coormq.GetUserNodes) (*coormq.GetUserNodesResp, *mq.CodeMessage) { | |||
| nodes, err := svc.db2.Node().GetUserNodes(svc.db2.DefCtx(), msg.UserID) | |||
| func (svc *Service) GetUserHubs(msg *coormq.GetUserHubs) (*coormq.GetUserHubsResp, *mq.CodeMessage) { | |||
| hubs, err := svc.db2.Hub().GetUserHubs(svc.db2.DefCtx(), msg.UserID) | |||
| if err != nil { | |||
| logger.WithField("UserID", msg.UserID). | |||
| Warnf("query user nodes failed, err: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "query user nodes failed") | |||
| Warnf("query user hubs failed, err: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "query user hubs failed") | |||
| } | |||
| return mq.ReplyOK(coormq.NewGetUserNodesResp(nodes)) | |||
| return mq.ReplyOK(coormq.NewGetUserHubsResp(hubs)) | |||
| } | |||
| func (svc *Service) GetNodes(msg *coormq.GetNodes) (*coormq.GetNodesResp, *mq.CodeMessage) { | |||
| var nodes []cdssdk.Node | |||
| func (svc *Service) GetHubs(msg *coormq.GetHubs) (*coormq.GetHubsResp, *mq.CodeMessage) { | |||
| var hubs []cdssdk.Hub | |||
| if msg.NodeIDs == nil { | |||
| if msg.HubIDs == nil { | |||
| var err error | |||
| nodes, err = svc.db2.Node().GetAllNodes(svc.db2.DefCtx()) | |||
| hubs, err = svc.db2.Hub().GetAllHubs(svc.db2.DefCtx()) | |||
| if err != nil { | |||
| logger.Warnf("getting all nodes: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get all node failed") | |||
| logger.Warnf("getting all hubs: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get all hub failed") | |||
| } | |||
| } else { | |||
| // A transaction is not needed here | |||
| for _, id := range msg.NodeIDs { | |||
| node, err := svc.db2.Node().GetByID(svc.db2.DefCtx(), id) | |||
| for _, id := range msg.HubIDs { | |||
| hub, err := svc.db2.Hub().GetByID(svc.db2.DefCtx(), id) | |||
| if err != nil { | |||
| logger.WithField("NodeID", id). | |||
| Warnf("query node failed, err: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "query node failed") | |||
| logger.WithField("HubID", id). | |||
| Warnf("query hub failed, err: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "query hub failed") | |||
| } | |||
| nodes = append(nodes, node) | |||
| hubs = append(hubs, hub) | |||
| } | |||
| } | |||
| return mq.ReplyOK(coormq.NewGetNodesResp(nodes)) | |||
| return mq.ReplyOK(coormq.NewGetHubsResp(hubs)) | |||
| } | |||
| func (svc *Service) GetNodeConnectivities(msg *coormq.GetNodeConnectivities) (*coormq.GetNodeConnectivitiesResp, *mq.CodeMessage) { | |||
| cons, err := svc.db2.NodeConnectivity().BatchGetByFromNode(svc.db2.DefCtx(), msg.NodeIDs) | |||
| func (svc *Service) GetHubConnectivities(msg *coormq.GetHubConnectivities) (*coormq.GetHubConnectivitiesResp, *mq.CodeMessage) { | |||
| cons, err := svc.db2.HubConnectivity().BatchGetByFromHub(svc.db2.DefCtx(), msg.HubIDs) | |||
| if err != nil { | |||
| logger.Warnf("batch get node connectivities by from node: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "batch get node connectivities by from node failed") | |||
| logger.Warnf("batch get hub connectivities by from hub: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "batch get hub connectivities by from hub failed") | |||
| } | |||
| return mq.ReplyOK(coormq.RespGetNodeConnectivities(cons)) | |||
| return mq.ReplyOK(coormq.RespGetHubConnectivities(cons)) | |||
| } | |||
| func (svc *Service) UpdateNodeConnectivities(msg *coormq.UpdateNodeConnectivities) (*coormq.UpdateNodeConnectivitiesResp, *mq.CodeMessage) { | |||
| func (svc *Service) UpdateHubConnectivities(msg *coormq.UpdateHubConnectivities) (*coormq.UpdateHubConnectivitiesResp, *mq.CodeMessage) { | |||
| err := svc.db2.DoTx(func(tx db2.SQLContext) error { | |||
| // A record can be inserted into the database only if both its source hub and destination hub exist | |||
| allNodes, err := svc.db2.Node().GetAllNodes(tx) | |||
| allHubs, err := svc.db2.Hub().GetAllHubs(tx) | |||
| if err != nil { | |||
| return fmt.Errorf("getting all nodes: %w", err) | |||
| return fmt.Errorf("getting all hubs: %w", err) | |||
| } | |||
| allNodeID := make(map[cdssdk.NodeID]bool) | |||
| for _, node := range allNodes { | |||
| allNodeID[node.NodeID] = true | |||
| allHubID := make(map[cdssdk.HubID]bool) | |||
| for _, hub := range allHubs { | |||
| allHubID[hub.HubID] = true | |||
| } | |||
| var avaiCons []cdssdk.NodeConnectivity | |||
| var avaiCons []cdssdk.HubConnectivity | |||
| for _, con := range msg.Connectivities { | |||
| if allNodeID[con.FromNodeID] && allNodeID[con.ToNodeID] { | |||
| if allHubID[con.FromHubID] && allHubID[con.ToHubID] { | |||
| avaiCons = append(avaiCons, con) | |||
| } | |||
| } | |||
| err = svc.db2.NodeConnectivity().BatchUpdateOrCreate(tx, avaiCons) | |||
| err = svc.db2.HubConnectivity().BatchUpdateOrCreate(tx, avaiCons) | |||
| if err != nil { | |||
| return fmt.Errorf("batch update or create node connectivities: %s", err) | |||
| return fmt.Errorf("batch update or create hub connectivities: %s", err) | |||
| } | |||
| return nil | |||
| @@ -148,5 +148,5 @@ func (svc *Service) UpdateNodeConnectivities(msg *coormq.UpdateNodeConnectivitie | |||
| return nil, mq.Failed(errorcode.OperationFailed, err.Error()) | |||
| } | |||
| return mq.ReplyOK(coormq.RespUpdateNodeConnectivities()) | |||
| return mq.ReplyOK(coormq.RespUpdateHubConnectivities()) | |||
| } | |||
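The transaction above persists a connectivity record only when both endpoints are known hubs. A self-contained model of that filtering step (local stand-in types; the real code loads the hub list from the database inside the transaction):

```go
package main

import "fmt"

type HubID int64

type HubConnectivity struct{ FromHubID, ToHubID HubID }

// filterKnown drops records whose source or destination hub is unknown,
// mirroring the allHubID check in UpdateHubConnectivities.
func filterKnown(cons []HubConnectivity, hubs []HubID) []HubConnectivity {
	known := make(map[HubID]bool, len(hubs))
	for _, id := range hubs {
		known[id] = true
	}
	var kept []HubConnectivity
	for _, c := range cons {
		if known[c.FromHubID] && known[c.ToHubID] {
			kept = append(kept, c)
		}
	}
	return kept
}

func main() {
	cons := []HubConnectivity{{1, 2}, {1, 99}}
	fmt.Println(filterKnown(cons, []HubID{1, 2, 3})) // [{1 2}]; 1->99 is dropped
}
```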
| @@ -191,15 +191,15 @@ func (svc *Service) GetPackageCachedStorages(msg *coormq.GetPackageCachedStorage | |||
| } | |||
| } | |||
| var nodeInfos []cdssdk.StoragePackageCachingInfo | |||
| for _, nodeInfo := range stgInfoMap { | |||
| nodeInfos = append(nodeInfos, *nodeInfo) | |||
| var stgInfos []cdssdk.StoragePackageCachingInfo | |||
| for _, stgInfo := range stgInfoMap { | |||
| stgInfos = append(stgInfos, *stgInfo) | |||
| } | |||
| sort.Slice(nodeInfos, func(i, j int) bool { | |||
| return nodeInfos[i].StorageID < nodeInfos[j].StorageID | |||
| sort.Slice(stgInfos, func(i, j int) bool { | |||
| return stgInfos[i].StorageID < stgInfos[j].StorageID | |||
| }) | |||
| return mq.ReplyOK(coormq.ReqGetPackageCachedStoragesResp(nodeInfos, packageSize)) | |||
| return mq.ReplyOK(coormq.ReqGetPackageCachedStoragesResp(stgInfos, packageSize)) | |||
| } | |||
| func (svc *Service) GetPackageLoadedStorages(msg *coormq.GetPackageLoadedStorages) (*coormq.GetPackageLoadedStoragesResp, *mq.CodeMessage) { | |||
| @@ -1,7 +1,7 @@ | |||
| { | |||
| "id": 1, | |||
| "local": { | |||
| "nodeID": 1, | |||
| "hubID": 1, | |||
| "localIP": "127.0.0.1", | |||
| "externalIP": "127.0.0.1", | |||
| "locationID": 1 | |||
| @@ -1,17 +0,0 @@ | |||
| 2024-04-10 12:34:16 [INFO] start serving command server | |||
| 2024-04-10 13:04:14 [WARN] coordinator server err: deserialize error: channel is closed | |||
| 2024-04-10 13:04:14 [ERRO] command server stopped with error: receive message error: channel is closed | |||
| 2024-04-10 13:04:14 [INFO] command server stopped | |||
| 2024-04-10 14:41:25 [INFO] start serving command server | |||
| 2024-04-10 16:59:00 [WARN] coordinator server err: deserialize error: channel is closed | |||
| 2024-04-10 16:59:00 [ERRO] command server stopped with error: receive message error: channel is closed | |||
| 2024-04-10 16:59:00 [INFO] command server stopped | |||
| 2024-04-10 17:06:56 [INFO] start serving command server | |||
| 2024-04-10 17:07:36 [INFO] start serving command server | |||
| 2024-04-10 20:05:49 [WARN] coordinator server err: deserialize error: channel is closed | |||
| 2024-04-10 20:05:49 [ERRO] command server stopped with error: receive message error: channel is closed | |||
| 2024-04-10 20:05:49 [INFO] command server stopped | |||
| 2024-04-11 09:22:23 [INFO] start serving command server | |||
| 2024-04-11 12:19:52 [WARN] coordinator server err: deserialize error: channel is closed | |||
| 2024-04-11 12:19:52 [ERRO] command server stopped with error: receive message error: channel is closed | |||
| 2024-04-11 12:19:52 [INFO] command server stopped | |||
| @@ -11,7 +11,7 @@ import ( | |||
| type Config struct { | |||
| AccessStatHistoryAmount float64 `json:"accessStatHistoryAmount"` | |||
| ECFileSizeThreshold int64 `json:"ecFileSizeThreshold"` | |||
| NodeUnavailableSeconds int `json:"nodeUnavailableSeconds"` // if a node's last report is older than this, the node is considered unavailable | |||
| HubUnavailableSeconds int `json:"hubUnavailableSeconds"` // if a hub's last report is older than this, the hub is considered unavailable | |||
| Logger log.Config `json:"logger"` | |||
| DB db.Config `json:"db"` | |||
| RabbitMQ stgmq.Config `json:"rabbitMQ"` | |||
| @@ -67,21 +67,21 @@ func (t *AgentCacheGC) Execute(execCtx ExecuteContext) { | |||
| // Collect the file hashes that need garbage collection | |||
| var allFileHashes []cdssdk.FileHash | |||
| var masterHub cdssdk.Node | |||
| var masterHub cdssdk.Hub | |||
| err = execCtx.Args.DB.DoTx(func(tx db2.SQLContext) error { | |||
| stg, err := execCtx.Args.DB.Storage().GetByID(tx, t.StorageID) | |||
| if err != nil { | |||
| return fmt.Errorf("getting storage by id: %w", err) | |||
| } | |||
| masterHub, err = execCtx.Args.DB.Node().GetByID(tx, stg.MasterHub) | |||
| masterHub, err = execCtx.Args.DB.Hub().GetByID(tx, stg.MasterHub) | |||
| if err != nil { | |||
| return fmt.Errorf("getting master hub by id: %w", err) | |||
| } | |||
| blocks, err := execCtx.Args.DB.ObjectBlock().GetByStorageID(tx, t.StorageID) | |||
| if err != nil { | |||
| return fmt.Errorf("getting object blocks by node id: %w", err) | |||
| return fmt.Errorf("getting object blocks by hub id: %w", err) | |||
| } | |||
| for _, c := range blocks { | |||
| allFileHashes = append(allFileHashes, c.FileHash) | |||
| @@ -89,7 +89,7 @@ func (t *AgentCacheGC) Execute(execCtx ExecuteContext) { | |||
| objs, err := execCtx.Args.DB.PinnedObject().GetObjectsByStorageID(tx, t.StorageID) | |||
| if err != nil { | |||
| return fmt.Errorf("getting pinned objects by node id: %w", err) | |||
| return fmt.Errorf("getting pinned objects by hub id: %w", err) | |||
| } | |||
| for _, o := range objs { | |||
| allFileHashes = append(allFileHashes, o.FileHash) | |||
| @@ -98,14 +98,14 @@ func (t *AgentCacheGC) Execute(execCtx ExecuteContext) { | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.StorageID).Warn(err.Error()) | |||
| log.WithField("HubID", t.StorageID).Warn(err.Error()) | |||
| return | |||
| } | |||
| // Acquire an agent client for communicating with the hub | |||
| agtCli, err := stgglb.AgentMQPool.Acquire(masterHub.NodeID) | |||
| agtCli, err := stgglb.AgentMQPool.Acquire(masterHub.HubID) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.StorageID).Warnf("create agent client failed, err: %s", err.Error()) | |||
| log.WithField("HubID", t.StorageID).Warnf("create agent client failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| defer stgglb.AgentMQPool.Release(agtCli) | |||
| @@ -113,7 +113,7 @@ func (t *AgentCacheGC) Execute(execCtx ExecuteContext) { | |||
| // Send a garbage-collection request to the agent | |||
| _, err = agtCli.CacheGC(agtmq.ReqCacheGC(t.StorageID, allFileHashes), mq.RequestOption{Timeout: time.Minute}) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.StorageID).Warnf("ipfs gc: %s", err.Error()) | |||
| log.WithField("HubID", t.StorageID).Warnf("ipfs gc: %s", err.Error()) | |||
| return | |||
| } | |||
| } | |||
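The GC pass above gathers every file hash still referenced at the storage — object blocks plus pinned objects — and sends that set to the agent, which may then drop any cached file outside it. A local model of the keep-set, with stand-in types:

```go
package main

import "fmt"

// keepSet unions the hashes referenced by object blocks and by pinned
// objects; anything cached but absent from the set is GC-eligible.
func keepSet(blockHashes, pinnedHashes []string) map[string]bool {
	keep := make(map[string]bool, len(blockHashes)+len(pinnedHashes))
	for _, h := range blockHashes {
		keep[h] = true
	}
	for _, h := range pinnedHashes {
		keep[h] = true
	}
	return keep
}

func main() {
	keep := keepSet([]string{"h1", "h2"}, []string{"h2", "h3"})
	for _, cached := range []string{"h1", "h4"} {
		fmt.Println(cached, "keep:", keep[cached]) // h4 is eligible for GC
	}
}
```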
| @@ -29,7 +29,7 @@ func (t *AgentCheckState) TryMerge(other Event) bool { | |||
| return false | |||
| } | |||
| return t.NodeID == event.NodeID | |||
| return t.HubID == event.HubID | |||
| } | |||
| func (t *AgentCheckState) Execute(execCtx ExecuteContext) { | |||
| @@ -37,42 +37,42 @@ func (t *AgentCheckState) Execute(execCtx ExecuteContext) { | |||
| log.Debugf("begin with %v", logger.FormatStruct(t.AgentCheckState)) | |||
| defer log.Debugf("end") | |||
| node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.DefCtx(), t.NodeID) | |||
| hub, err := execCtx.Args.DB.Hub().GetByID(execCtx.Args.DB.DefCtx(), t.HubID) | |||
| if err == sql.ErrNoRows { | |||
| return | |||
| } | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("get node by id failed, err: %s", err.Error()) | |||
| log.WithField("HubID", t.HubID).Warnf("get hub by id failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| agtCli, err := stgglb.AgentMQPool.Acquire(t.NodeID) | |||
| agtCli, err := stgglb.AgentMQPool.Acquire(t.HubID) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("create agent client failed, err: %s", err.Error()) | |||
| log.WithField("HubID", t.HubID).Warnf("create agent client failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| defer stgglb.AgentMQPool.Release(agtCli) | |||
| _, err = agtCli.GetState(agtmq.NewGetState(), mq.RequestOption{Timeout: time.Second * 30}) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("getting state: %s", err.Error()) | |||
| log.WithField("HubID", t.HubID).Warnf("getting state: %s", err.Error()) | |||
| // Check the last report time and mark the hub unavailable if it has timed out | |||
| // TODO Should hubs that have never reported be handled specially? | |||
| if node.LastReportTime != nil && time.Since(*node.LastReportTime) > time.Duration(config.Cfg().NodeUnavailableSeconds)*time.Second { | |||
| err := execCtx.Args.DB.Node().UpdateState(execCtx.Args.DB.DefCtx(), t.NodeID, consts.NodeStateUnavailable) | |||
| if hub.LastReportTime != nil && time.Since(*hub.LastReportTime) > time.Duration(config.Cfg().HubUnavailableSeconds)*time.Second { | |||
| err := execCtx.Args.DB.Hub().UpdateState(execCtx.Args.DB.DefCtx(), t.HubID, consts.HubStateUnavailable) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("set node state failed, err: %s", err.Error()) | |||
| log.WithField("HubID", t.HubID).Warnf("set hub state failed, err: %s", err.Error()) | |||
| } | |||
| } | |||
| return | |||
| } | |||
| // TODO If more states are added later, decide from which states Normal may be set | |||
| err = execCtx.Args.DB.Node().UpdateState(execCtx.Args.DB.DefCtx(), t.NodeID, consts.NodeStateNormal) | |||
| err = execCtx.Args.DB.Hub().UpdateState(execCtx.Args.DB.DefCtx(), t.HubID, consts.HubStateNormal) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("change node state failed, err: %s", err.Error()) | |||
| log.WithField("HubID", t.HubID).Warnf("change hub state failed, err: %s", err.Error()) | |||
| } | |||
| } | |||
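A simplified, runnable model of the timeout branch above, under the renamed `HubUnavailableSeconds` setting (stand-in names; the never-reported case is still an open TODO in the diff):

```go
package main

import (
	"fmt"
	"time"
)

// nextState mirrors the check in AgentCheckState.Execute: a hub whose
// last report is older than the threshold is marked Unavailable.
func nextState(lastReport *time.Time, unavailableAfter time.Duration) string {
	if lastReport == nil {
		return "unknown" // never reported; special handling is an open TODO
	}
	if time.Since(*lastReport) > unavailableAfter {
		return "Unavailable"
	}
	return "Normal"
}

func main() {
	old := time.Now().Add(-10 * time.Minute)
	fmt.Println(nextState(&old, 5*time.Minute)) // Unavailable
	fmt.Println(nextState(nil, 5*time.Minute))  // unknown
}
```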
| @@ -53,15 +53,15 @@ func (t *AgentCheckStorage) Execute(execCtx ExecuteContext) { | |||
| return | |||
| } | |||
| node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.DefCtx(), stg.MasterHub) | |||
| hub, err := execCtx.Args.DB.Hub().GetByID(execCtx.Args.DB.DefCtx(), stg.MasterHub) | |||
| if err != nil { | |||
| if err != sql.ErrNoRows { | |||
| log.WithField("StorageID", t.StorageID).Warnf("get storage node failed, err: %s", err.Error()) | |||
| log.WithField("StorageID", t.StorageID).Warnf("get storage hub failed, err: %s", err.Error()) | |||
| } | |||
| return | |||
| } | |||
| if node.State != consts.NodeStateNormal { | |||
| if hub.State != consts.HubStateNormal { | |||
| return | |||
| } | |||
| @@ -80,12 +80,12 @@ func (t *CheckPackageRedundancy) Execute(execCtx ExecuteContext) { | |||
| // TODO UserID | |||
| getStgs, err := coorCli.GetUserStorageDetails(coormq.ReqGetUserStorageDetails(1)) | |||
| if err != nil { | |||
| log.Warnf("getting all nodes: %s", err.Error()) | |||
| log.Warnf("getting all storages: %s", err.Error()) | |||
| return | |||
| } | |||
| if len(getStgs.Storages) == 0 { | |||
| log.Warnf("no available nodes") | |||
| log.Warnf("no available storages") | |||
| return | |||
| } | |||
| @@ -111,18 +111,18 @@ func (t *CheckPackageRedundancy) Execute(execCtx ExecuteContext) { | |||
| // TODO The rep replica count is currently fixed at 2, so two storages are picked directly here | |||
| // TODO Move this into the chooseRedundancy function | |||
| mostBlockStgIDs := t.summaryRepObjectBlockNodes(getObjs.Objects, 2) | |||
| newRepStgs := t.chooseNewNodesForRep(&defRep, userAllStorages) | |||
| rechoosedRepStgs := t.rechooseNodesForRep(mostBlockStgIDs, &defRep, userAllStorages) | |||
| newECStgs := t.chooseNewNodesForEC(&defEC, userAllStorages) | |||
| mostBlockStgIDs := t.summaryRepObjectBlockStorages(getObjs.Objects, 2) | |||
| newRepStgs := t.chooseNewStoragesForRep(&defRep, userAllStorages) | |||
| rechoosedRepStgs := t.rechooseStoragesForRep(mostBlockStgIDs, &defRep, userAllStorages) | |||
| newECStgs := t.chooseNewStoragesForEC(&defEC, userAllStorages) | |||
| // Acquire locks | |||
| builder := reqbuilder.NewBuilder() | |||
| for _, node := range newRepStgs { | |||
| builder.Shard().Buzy(node.Storage.Storage.StorageID) | |||
| for _, storage := range newRepStgs { | |||
| builder.Shard().Buzy(storage.Storage.Storage.StorageID) | |||
| } | |||
| for _, node := range newECStgs { | |||
| builder.Shard().Buzy(node.Storage.Storage.StorageID) | |||
| for _, storage := range newECStgs { | |||
| builder.Shard().Buzy(storage.Storage.Storage.StorageID) | |||
| } | |||
| mutex, err := builder.MutexLock(execCtx.Args.DistLock) | |||
| if err != nil { | |||
| @@ -135,7 +135,7 @@ func (t *CheckPackageRedundancy) Execute(execCtx ExecuteContext) { | |||
| var updating *coormq.UpdatingObjectRedundancy | |||
| var err error | |||
| newRed, selectedNodes := t.chooseRedundancy(obj, userAllStorages) | |||
| newRed, selectedStorages := t.chooseRedundancy(obj, userAllStorages) | |||
| switch srcRed := obj.Object.Redundancy.(type) { | |||
| case *cdssdk.NoneRedundancy: | |||
| @@ -150,7 +150,7 @@ func (t *CheckPackageRedundancy) Execute(execCtx ExecuteContext) { | |||
| case *cdssdk.LRCRedundancy: | |||
| log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> lrc") | |||
| updating, err = t.noneToLRC(execCtx, obj, newRed, selectedNodes) | |||
| updating, err = t.noneToLRC(execCtx, obj, newRed, selectedStorages) | |||
| } | |||
| case *cdssdk.RepRedundancy: | |||
| @@ -170,15 +170,15 @@ func (t *CheckPackageRedundancy) Execute(execCtx ExecuteContext) { | |||
| updating, err = t.ecToRep(execCtx, obj, srcRed, newRed, newRepStgs) | |||
| case *cdssdk.ECRedundancy: | |||
| uploadNodes := t.rechooseNodesForEC(obj, srcRed, userAllStorages) | |||
| updating, err = t.ecToEC(execCtx, obj, srcRed, newRed, uploadNodes) | |||
| uploadStorages := t.rechooseStoragesForEC(obj, srcRed, userAllStorages) | |||
| updating, err = t.ecToEC(execCtx, obj, srcRed, newRed, uploadStorages) | |||
| } | |||
| case *cdssdk.LRCRedundancy: | |||
| switch newRed := newRed.(type) { | |||
| case *cdssdk.LRCRedundancy: | |||
| uploadNodes := t.rechooseNodesForLRC(obj, srcRed, userAllStorages) | |||
| updating, err = t.lrcToLRC(execCtx, obj, srcRed, newRed, uploadNodes) | |||
| uploadStorages := t.rechooseStoragesForLRC(obj, srcRed, userAllStorages) | |||
| updating, err = t.lrcToLRC(execCtx, obj, srcRed, newRed, uploadStorages) | |||
| } | |||
| } | |||
| @@ -205,22 +205,22 @@ func (t *CheckPackageRedundancy) Execute(execCtx ExecuteContext) { | |||
| func (t *CheckPackageRedundancy) chooseRedundancy(obj stgmod.ObjectDetail, userAllStgs map[cdssdk.StorageID]*StorageLoadInfo) (cdssdk.Redundancy, []*StorageLoadInfo) { | |||
| switch obj.Object.Redundancy.(type) { | |||
| case *cdssdk.NoneRedundancy: | |||
| newStgs := t.chooseNewNodesForEC(&cdssdk.DefaultECRedundancy, userAllStgs) | |||
| newStgs := t.chooseNewStoragesForEC(&cdssdk.DefaultECRedundancy, userAllStgs) | |||
| return &cdssdk.DefaultECRedundancy, newStgs | |||
| // newLRCNodes := t.chooseNewNodesForLRC(&cdssdk.DefaultLRCRedundancy, userAllNodes) | |||
| // return &cdssdk.DefaultLRCRedundancy, newLRCNodes | |||
| // newLRCStorages := t.chooseNewStoragesForLRC(&cdssdk.DefaultLRCRedundancy, userAllStorages) | |||
| // return &cdssdk.DefaultLRCRedundancy, newLRCStorages | |||
| case *cdssdk.LRCRedundancy: | |||
| newLRCStgs := t.rechooseNodesForLRC(obj, &cdssdk.DefaultLRCRedundancy, userAllStgs) | |||
| newLRCStgs := t.rechooseStoragesForLRC(obj, &cdssdk.DefaultLRCRedundancy, userAllStgs) | |||
| return &cdssdk.DefaultLRCRedundancy, newLRCStgs | |||
| } | |||
| return nil, nil | |||
| } | |||
| // Count the nodes holding each object block and pick at most nodeCnt nodes with the most blocks | |||
| func (t *CheckPackageRedundancy) summaryRepObjectBlockNodes(objs []stgmod.ObjectDetail, nodeCnt int) []cdssdk.StorageID { | |||
| // Count the storages holding each object block and pick at most storageCnt storages with the most blocks | |||
| func (t *CheckPackageRedundancy) summaryRepObjectBlockStorages(objs []stgmod.ObjectDetail, storageCnt int) []cdssdk.StorageID { | |||
| type stgBlocks struct { | |||
| StorageID cdssdk.StorageID | |||
| Count int | |||
| @@ -242,49 +242,49 @@ func (t *CheckPackageRedundancy) summaryRepObjectBlockNodes(objs []stgmod.Object | |||
| } | |||
| } | |||
| nodes := lo.Values(stgBlocksMap) | |||
| sort2.Sort(nodes, func(left *stgBlocks, right *stgBlocks) int { | |||
| storages := lo.Values(stgBlocksMap) | |||
| sort2.Sort(storages, func(left *stgBlocks, right *stgBlocks) int { | |||
| return right.Count - left.Count | |||
| }) | |||
| ids := lo.Map(nodes, func(item *stgBlocks, idx int) cdssdk.StorageID { return item.StorageID }) | |||
| if len(ids) > nodeCnt { | |||
| ids = ids[:nodeCnt] | |||
| ids := lo.Map(storages, func(item *stgBlocks, idx int) cdssdk.StorageID { return item.StorageID }) | |||
| if len(ids) > storageCnt { | |||
| ids = ids[:storageCnt] | |||
| } | |||
| return ids | |||
| } | |||
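The renamed `summaryRepObjectBlockStorages` reduces to count, sort descending, truncate to the limit. A runnable model with local types:

```go
package main

import (
	"fmt"
	"sort"
)

type StorageID int64

// topNStorages counts blocks per storage, sorts storages by descending
// block count, and keeps at most n — the same shape as the method above.
func topNStorages(blockAt []StorageID, n int) []StorageID {
	counts := map[StorageID]int{}
	for _, id := range blockAt {
		counts[id]++
	}
	ids := make([]StorageID, 0, len(counts))
	for id := range counts {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return counts[ids[i]] > counts[ids[j]] })
	if len(ids) > n {
		ids = ids[:n]
	}
	return ids
}

func main() {
	fmt.Println(topNStorages([]StorageID{1, 2, 2, 3, 2, 3}, 2)) // [2 3]
}
```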
| func (t *CheckPackageRedundancy) chooseNewNodesForRep(red *cdssdk.RepRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| sortedNodes := sort2.Sort(lo.Values(allStgs), func(left *StorageLoadInfo, right *StorageLoadInfo) int { | |||
| func (t *CheckPackageRedundancy) chooseNewStoragesForRep(red *cdssdk.RepRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| sortedStorages := sort2.Sort(lo.Values(allStgs), func(left *StorageLoadInfo, right *StorageLoadInfo) int { | |||
| return sort2.Cmp(right.AccessAmount, left.AccessAmount) | |||
| }) | |||
| return t.chooseSoManyNodes(red.RepCount, sortedNodes) | |||
| return t.chooseSoManyStorages(red.RepCount, sortedStorages) | |||
| } | |||
| func (t *CheckPackageRedundancy) chooseNewNodesForEC(red *cdssdk.ECRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| sortedNodes := sort2.Sort(lo.Values(allStgs), func(left *StorageLoadInfo, right *StorageLoadInfo) int { | |||
| func (t *CheckPackageRedundancy) chooseNewStoragesForEC(red *cdssdk.ECRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| sortedStorages := sort2.Sort(lo.Values(allStgs), func(left *StorageLoadInfo, right *StorageLoadInfo) int { | |||
| return sort2.Cmp(right.AccessAmount, left.AccessAmount) | |||
| }) | |||
| return t.chooseSoManyNodes(red.N, sortedNodes) | |||
| return t.chooseSoManyStorages(red.N, sortedStorages) | |||
| } | |||
| func (t *CheckPackageRedundancy) chooseNewNodesForLRC(red *cdssdk.LRCRedundancy, allNodes map[cdssdk.NodeID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| sortedNodes := sort2.Sort(lo.Values(allNodes), func(left *StorageLoadInfo, right *StorageLoadInfo) int { | |||
| func (t *CheckPackageRedundancy) chooseNewStoragesForLRC(red *cdssdk.LRCRedundancy, allStorages map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| sortedStorages := sort2.Sort(lo.Values(allStorages), func(left *StorageLoadInfo, right *StorageLoadInfo) int { | |||
| return sort2.Cmp(right.AccessAmount, left.AccessAmount) | |||
| }) | |||
| return t.chooseSoManyNodes(red.N, sortedNodes) | |||
| return t.chooseSoManyStorages(red.N, sortedStorages) | |||
| } | |||
| func (t *CheckPackageRedundancy) rechooseNodesForRep(mostBlockStgIDs []cdssdk.StorageID, red *cdssdk.RepRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| type rechooseNode struct { | |||
| func (t *CheckPackageRedundancy) rechooseStoragesForRep(mostBlockStgIDs []cdssdk.StorageID, red *cdssdk.RepRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| type rechooseStorage struct { | |||
| *StorageLoadInfo | |||
| HasBlock bool | |||
| } | |||
| var rechooseStgs []*rechooseNode | |||
| var rechooseStgs []*rechooseStorage | |||
| for _, stg := range allStgs { | |||
| hasBlock := false | |||
| for _, id := range mostBlockStgIDs { | |||
| @@ -294,13 +294,13 @@ func (t *CheckPackageRedundancy) rechooseNodesForRep(mostBlockStgIDs []cdssdk.St | |||
| } | |||
| } | |||
| rechooseStgs = append(rechooseStgs, &rechooseNode{ | |||
| rechooseStgs = append(rechooseStgs, &rechooseStorage{ | |||
| StorageLoadInfo: stg, | |||
| HasBlock: hasBlock, | |||
| }) | |||
| } | |||
| sortedStgs := sort2.Sort(rechooseStgs, func(left *rechooseNode, right *rechooseNode) int { | |||
| sortedStgs := sort2.Sort(rechooseStgs, func(left *rechooseStorage, right *rechooseStorage) int { | |||
| // Storages that already cache blocks of the file are preferred | |||
| v := sort2.CmpBool(right.HasBlock, left.HasBlock) | |||
| if v != 0 { | |||
| @@ -310,10 +310,10 @@ func (t *CheckPackageRedundancy) rechooseNodesForRep(mostBlockStgIDs []cdssdk.St | |||
| return sort2.Cmp(right.AccessAmount, left.AccessAmount) | |||
| }) | |||
| return t.chooseSoManyNodes(red.RepCount, lo.Map(sortedStgs, func(node *rechooseNode, idx int) *StorageLoadInfo { return node.StorageLoadInfo })) | |||
| return t.chooseSoManyStorages(red.RepCount, lo.Map(sortedStgs, func(storage *rechooseStorage, idx int) *StorageLoadInfo { return storage.StorageLoadInfo })) | |||
| } | |||
| func (t *CheckPackageRedundancy) rechooseNodesForEC(obj stgmod.ObjectDetail, red *cdssdk.ECRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| func (t *CheckPackageRedundancy) rechooseStoragesForEC(obj stgmod.ObjectDetail, red *cdssdk.ECRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| type rechooseStg struct { | |||
| *StorageLoadInfo | |||
| CachedBlockIndex int | |||
| @@ -346,10 +346,10 @@ func (t *CheckPackageRedundancy) rechooseNodesForEC(obj stgmod.ObjectDetail, red | |||
| }) | |||
| // TODO Consider keeping the Index order when picking storages that already hold blocks | |||
| return t.chooseSoManyNodes(red.N, lo.Map(sortedStgs, func(node *rechooseStg, idx int) *StorageLoadInfo { return node.StorageLoadInfo })) | |||
| return t.chooseSoManyStorages(red.N, lo.Map(sortedStgs, func(storage *rechooseStg, idx int) *StorageLoadInfo { return storage.StorageLoadInfo })) | |||
| } | |||
| func (t *CheckPackageRedundancy) rechooseNodesForLRC(obj stgmod.ObjectDetail, red *cdssdk.LRCRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| func (t *CheckPackageRedundancy) rechooseStoragesForLRC(obj stgmod.ObjectDetail, red *cdssdk.LRCRedundancy, allStgs map[cdssdk.StorageID]*StorageLoadInfo) []*StorageLoadInfo { | |||
| type rechooseStg struct { | |||
| *StorageLoadInfo | |||
| CachedBlockIndex int | |||
| @@ -382,19 +382,19 @@ func (t *CheckPackageRedundancy) rechooseNodesForLRC(obj stgmod.ObjectDetail, re | |||
| }) | |||
| // TODO Consider keeping the Index order when picking storages that already hold blocks | |||
| return t.chooseSoManyNodes(red.N, lo.Map(sortedStgs, func(node *rechooseStg, idx int) *StorageLoadInfo { return node.StorageLoadInfo })) | |||
| return t.chooseSoManyStorages(red.N, lo.Map(sortedStgs, func(storage *rechooseStg, idx int) *StorageLoadInfo { return storage.StorageLoadInfo })) | |||
| } | |||
| func (t *CheckPackageRedundancy) chooseSoManyNodes(count int, stgs []*StorageLoadInfo) []*StorageLoadInfo { | |||
| func (t *CheckPackageRedundancy) chooseSoManyStorages(count int, stgs []*StorageLoadInfo) []*StorageLoadInfo { | |||
| repeateCount := (count + len(stgs) - 1) / len(stgs) | |||
| extendStgs := make([]*StorageLoadInfo, repeateCount*len(stgs)) | |||
| // Duplicate entries to extend the storage list to the required count | |||
| // Layout after duplication: ABCD -> AAABBBCCCDDD | |||
| for p := 0; p < repeateCount; p++ { | |||
| for i, node := range stgs { | |||
| for i, storage := range stgs { | |||
| putIdx := i*repeateCount + p | |||
| extendStgs[putIdx] = node | |||
| extendStgs[putIdx] = storage | |||
| } | |||
| } | |||
| extendStgs = extendStgs[:count] | |||
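The duplication step above turns ABCD into AAABBBCCCDDD and then truncates; a runnable model of that index arithmetic:

```go
package main

import "fmt"

// expand repeats each storage ceil(count/len) times, grouped together
// (ABCD -> AAABBBCCCDDD), then truncates to exactly count entries.
func expand(stgs []string, count int) []string {
	repeat := (count + len(stgs) - 1) / len(stgs)
	out := make([]string, repeat*len(stgs))
	for p := 0; p < repeat; p++ {
		for i, s := range stgs {
			out[i*repeat+p] = s
		}
	}
	return out[:count]
}

func main() {
	fmt.Println(expand([]string{"A", "B", "C", "D"}, 9)) // [A A A B B B C C C]
}
```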
| @@ -423,7 +423,7 @@ func (t *CheckPackageRedundancy) chooseSoManyNodes(count int, stgs []*StorageLoa | |||
| func (t *CheckPackageRedundancy) noneToRep(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.RepRedundancy, uploadStgs []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| if len(obj.Blocks) == 0 { | |||
| return nil, fmt.Errorf("object is not cached on any nodes, cannot change its redundancy to rep") | |||
| return nil, fmt.Errorf("object is not cached on any storages, cannot change its redundancy to rep") | |||
| } | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| @@ -492,7 +492,7 @@ func (t *CheckPackageRedundancy) noneToEC(ctx ExecuteContext, obj stgmod.ObjectD | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| if len(obj.Blocks) == 0 { | |||
| return nil, fmt.Errorf("object is not cached on any nodes, cannot change its redundancy to ec") | |||
| return nil, fmt.Errorf("object is not cached on any storages, cannot change its redundancy to ec") | |||
| } | |||
| getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{obj.Blocks[0].StorageID})) | |||
| @@ -542,7 +542,7 @@ func (t *CheckPackageRedundancy) noneToEC(ctx ExecuteContext, obj stgmod.ObjectD | |||
| }, nil | |||
| } | |||
| func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.LRCRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.LRCRedundancy, uploadStorages []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -550,7 +550,7 @@ func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.Object | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| if len(obj.Blocks) == 0 { | |||
| return nil, fmt.Errorf("object is not cached on any nodes, cannot change its redundancy to ec") | |||
| return nil, fmt.Errorf("object is not cached on any storages, cannot change its redundancy to ec") | |||
| } | |||
| getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{obj.Blocks[0].StorageID})) | |||
| @@ -566,11 +566,11 @@ func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.Object | |||
| var toes []ioswitchlrc.To | |||
| for i := 0; i < red.N; i++ { | |||
| toes = append(toes, ioswitchlrc.NewToStorage(*uploadNodes[i].Storage.MasterHub, uploadNodes[i].Storage.Storage, i, fmt.Sprintf("%d", i))) | |||
| toes = append(toes, ioswitchlrc.NewToStorage(*uploadStorages[i].Storage.MasterHub, uploadStorages[i].Storage.Storage, i, fmt.Sprintf("%d", i))) | |||
| } | |||
| plans := exec.NewPlanBuilder() | |||
| err = lrcparser.Encode(ioswitchlrc.NewFromNode(obj.Object.FileHash, *getStgs.Storages[0].MasterHub, getStgs.Storages[0].Storage, -1), toes, plans) | |||
| err = lrcparser.Encode(ioswitchlrc.NewFromStorage(obj.Object.FileHash, *getStgs.Storages[0].MasterHub, getStgs.Storages[0].Storage, -1), toes, plans) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("parsing plan: %w", err) | |||
| } | |||
| @@ -587,7 +587,7 @@ func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.Object | |||
| blocks = append(blocks, stgmod.ObjectBlock{ | |||
| ObjectID: obj.Object.ObjectID, | |||
| Index: i, | |||
| StorageID: uploadNodes[i].Storage.Storage.StorageID, | |||
| StorageID: uploadStorages[i].Storage.Storage.StorageID, | |||
| FileHash: ioRet[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash, | |||
| }) | |||
| } | |||
| @@ -601,7 +601,7 @@ func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.Object | |||
| func (t *CheckPackageRedundancy) repToRep(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.RepRedundancy, uploadStgs []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| if len(obj.Blocks) == 0 { | |||
| return nil, fmt.Errorf("object is not cached on any nodes, cannot change its redundancy to rep") | |||
| return nil, fmt.Errorf("object is not cached on any storages, cannot change its redundancy to rep") | |||
| } | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| @@ -662,8 +662,8 @@ func (t *CheckPackageRedundancy) repToRep(ctx ExecuteContext, obj stgmod.ObjectD | |||
| }, nil | |||
| } | |||
| func (t *CheckPackageRedundancy) repToEC(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.ECRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| return t.noneToEC(ctx, obj, red, uploadNodes) | |||
| func (t *CheckPackageRedundancy) repToEC(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.ECRedundancy, uploadStorages []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| return t.noneToEC(ctx, obj, red, uploadStorages) | |||
| } | |||
| func (t *CheckPackageRedundancy) ecToRep(ctx ExecuteContext, obj stgmod.ObjectDetail, srcRed *cdssdk.ECRedundancy, tarRed *cdssdk.RepRedundancy, uploadStgs []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| @@ -740,7 +740,7 @@ func (t *CheckPackageRedundancy) ecToRep(ctx ExecuteContext, obj stgmod.ObjectDe | |||
| }, nil | |||
| } | |||
| func (t *CheckPackageRedundancy) ecToEC(ctx ExecuteContext, obj stgmod.ObjectDetail, srcRed *cdssdk.ECRedundancy, tarRed *cdssdk.ECRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| func (t *CheckPackageRedundancy) ecToEC(ctx ExecuteContext, obj stgmod.ObjectDetail, srcRed *cdssdk.ECRedundancy, tarRed *cdssdk.ECRedundancy, uploadStorages []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -770,7 +770,7 @@ func (t *CheckPackageRedundancy) ecToEC(ctx ExecuteContext, obj stgmod.ObjectDet | |||
| var newBlocks []stgmod.ObjectBlock | |||
| shouldUpdateBlocks := false | |||
| for i, stg := range uploadNodes { | |||
| for i, stg := range uploadStorages { | |||
| newBlock := stgmod.ObjectBlock{ | |||
| ObjectID: obj.Object.ObjectID, | |||
| Index: i, | |||
| @@ -835,7 +835,7 @@ func (t *CheckPackageRedundancy) ecToEC(ctx ExecuteContext, obj stgmod.ObjectDet | |||
| }, nil | |||
| } | |||
| func (t *CheckPackageRedundancy) lrcToLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, srcRed *cdssdk.LRCRedundancy, tarRed *cdssdk.LRCRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| func (t *CheckPackageRedundancy) lrcToLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, srcRed *cdssdk.LRCRedundancy, tarRed *cdssdk.LRCRedundancy, uploadStorages []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -872,16 +872,16 @@ func (t *CheckPackageRedundancy) lrcToLRC(ctx ExecuteContext, obj stgmod.ObjectD | |||
| } | |||
| if canGroupReconstruct { | |||
| // return t.groupReconstructLRC(obj, lostBlocks, lostBlockGrps, blocksGrpByIndex, srcRed, uploadNodes) | |||
| // return t.groupReconstructLRC(obj, lostBlocks, lostBlockGrps, blocksGrpByIndex, srcRed, uploadStorages) | |||
| } | |||
| return t.reconstructLRC(ctx, obj, blocksGrpByIndex, srcRed, uploadNodes) | |||
| return t.reconstructLRC(ctx, obj, blocksGrpByIndex, srcRed, uploadStorages) | |||
| } | |||
| /* | |||
| TODO2 Fix this block of code | |||
| func (t *CheckPackageRedundancy) groupReconstructLRC(obj stgmod.ObjectDetail, lostBlocks []int, lostBlockGrps []int, grpedBlocks []stgmod.GrouppedObjectBlock, red *cdssdk.LRCRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| func (t *CheckPackageRedundancy) groupReconstructLRC(obj stgmod.ObjectDetail, lostBlocks []int, lostBlockGrps []int, grpedBlocks []stgmod.GrouppedObjectBlock, red *cdssdk.LRCRedundancy, uploadStorages []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| grped := make(map[int]stgmod.GrouppedObjectBlock) | |||
| for _, b := range grpedBlocks { | |||
| grped[b.Index] = b | |||
| @@ -897,11 +897,11 @@ TODO2 修复这一块的代码 | |||
| continue | |||
| } | |||
| froms = append(froms, ioswitchlrc.NewFromNode(grped[ele].FileHash, nil, ele)) | |||
| froms = append(froms, ioswitchlrc.NewFromStorage(grped[ele].FileHash, nil, ele)) | |||
| } | |||
| err := lrcparser.ReconstructGroup(froms, []ioswitchlrc.To{ | |||
| ioswitchlrc.NewToNode(uploadNodes[i].Storage, lostBlocks[i], fmt.Sprintf("%d", lostBlocks[i])), | |||
| ioswitchlrc.NewToStorage(uploadStorages[i].Storage, lostBlocks[i], fmt.Sprintf("%d", lostBlocks[i])), | |||
| }, plans) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("parsing plan: %w", err) | |||
| @@ -922,16 +922,16 @@ TODO2 修复这一块的代码 | |||
| newBlocks = append(newBlocks, stgmod.ObjectBlock{ | |||
| ObjectID: obj.Object.ObjectID, | |||
| Index: i, | |||
| StorageID: uploadNodes[i].Storage.Storage.StorageID, | |||
| StorageID: uploadStorages[i].Storage.Storage.StorageID, | |||
| FileHash: ret[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash, | |||
| }) | |||
| } | |||
| for _, b := range grpedBlocks { | |||
| for _, nodeID := range b.StorageIDs { | |||
| for _, stgID := range b.StorageIDs { | |||
| newBlocks = append(newBlocks, stgmod.ObjectBlock{ | |||
| ObjectID: obj.Object.ObjectID, | |||
| Index: b.Index, | |||
| StorageID: nodeID, | |||
| StorageID: stgID, | |||
| FileHash: b.FileHash, | |||
| }) | |||
| } | |||
| @@ -944,7 +944,7 @@ TODO2 修复这一块的代码 | |||
| }, nil | |||
| } | |||
| */ | |||
| func (t *CheckPackageRedundancy) reconstructLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, grpBlocks []stgmod.GrouppedObjectBlock, red *cdssdk.LRCRedundancy, uploadNodes []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| func (t *CheckPackageRedundancy) reconstructLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, grpBlocks []stgmod.GrouppedObjectBlock, red *cdssdk.LRCRedundancy, uploadStorages []*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) { | |||
| var chosenBlocks []stgmod.GrouppedObjectBlock | |||
| for _, block := range grpBlocks { | |||
| if len(block.StorageIDs) > 0 && block.Index < red.M() { | |||
| @@ -967,17 +967,17 @@ func (t *CheckPackageRedundancy) reconstructLRC(ctx ExecuteContext, obj stgmod.O | |||
| var toes []ioswitchlrc.To | |||
| var newBlocks []stgmod.ObjectBlock | |||
| shouldUpdateBlocks := false | |||
| for i, node := range uploadNodes { | |||
| for i, storage := range uploadStorages { | |||
| newBlock := stgmod.ObjectBlock{ | |||
| ObjectID: obj.Object.ObjectID, | |||
| Index: i, | |||
| StorageID: node.Storage.Storage.StorageID, | |||
| StorageID: storage.Storage.Storage.StorageID, | |||
| } | |||
| grp, ok := lo.Find(grpBlocks, func(grp stgmod.GrouppedObjectBlock) bool { return grp.Index == i }) | |||
| // If the newly chosen storage is already recorded in the Block table, no change is needed | |||
| if ok && lo.Contains(grp.StorageIDs, node.Storage.Storage.StorageID) { | |||
| if ok && lo.Contains(grp.StorageIDs, storage.Storage.Storage.StorageID) { | |||
| newBlock.FileHash = grp.FileHash | |||
| newBlocks = append(newBlocks, newBlock) | |||
| continue | |||
| @@ -989,12 +989,12 @@ func (t *CheckPackageRedundancy) reconstructLRC(ctx ExecuteContext, obj stgmod.O | |||
| for _, block := range chosenBlocks { | |||
| fmt.Printf("b: %v\n", block.Index) | |||
| stg := node.Storage | |||
| froms = append(froms, ioswitchlrc.NewFromNode(block.FileHash, *stg.MasterHub, stg.Storage, block.Index)) | |||
| stg := storage.Storage | |||
| froms = append(froms, ioswitchlrc.NewFromStorage(block.FileHash, *stg.MasterHub, stg.Storage, block.Index)) | |||
| } | |||
| // The output only needs the single block this storage keeps | |||
| toes = append(toes, ioswitchlrc.NewToStorage(*node.Storage.MasterHub, node.Storage.Storage, i, fmt.Sprintf("%d", i))) | |||
| toes = append(toes, ioswitchlrc.NewToStorage(*storage.Storage.MasterHub, storage.Storage.Storage, i, fmt.Sprintf("%d", i))) | |||
| newBlocks = append(newBlocks, newBlock) | |||
| } | |||
| @@ -1034,8 +1034,8 @@ func (t *CheckPackageRedundancy) reconstructLRC(ctx ExecuteContext, obj stgmod.O | |||
| }, nil | |||
| } | |||
| // func (t *CheckPackageRedundancy) pinObject(nodeID cdssdk.NodeID, fileHash string) error { | |||
| // agtCli, err := stgglb.AgentMQPool.Acquire(nodeID) | |||
| // func (t *CheckPackageRedundancy) pinObject(hubID cdssdk.HubID, fileHash string) error { | |||
| // agtCli, err := stgglb.AgentMQPool.Acquire(hubID) | |||
| // if err != nil { | |||
| // return fmt.Errorf("new agent client: %w", err) | |||
| // } | |||
| @@ -119,11 +119,11 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) { | |||
| // For rep objects, find the two storages holding the most object blocks and let them represent the distribution of all rep blocks in the annealing run | |||
| var repObjectsUpdating []coormq.UpdatingObjectRedundancy | |||
| repMostNodeIDs := t.summaryRepObjectBlockNodes(repObjects) | |||
| repMostStgIDs := t.summaryRepObjectBlockNodes(repObjects) | |||
| solu := t.startAnnealing(allStgInfos, readerStgIDs, annealingObject{ | |||
| totalBlockCount: 1, | |||
| minBlockCnt: 1, | |||
| pinnedAt: repMostNodeIDs, | |||
| pinnedAt: repMostStgIDs, | |||
| blocks: nil, | |||
| }) | |||
| for _, obj := range repObjects { | |||
| @@ -184,18 +184,18 @@ func (t *CleanPinned) summaryRepObjectBlockNodes(objs []stgmod.ObjectDetail) []c | |||
| cacheBlockStgs[block.StorageID] = true | |||
| } | |||
| for _, nodeID := range obj.PinnedAt { | |||
| if cacheBlockStgs[nodeID] { | |||
| for _, stgID := range obj.PinnedAt { | |||
| if cacheBlockStgs[stgID] { | |||
| continue | |||
| } | |||
| if _, ok := stgBlocksMap[nodeID]; !ok { | |||
| stgBlocksMap[nodeID] = &stgBlocks{ | |||
| StorageID: nodeID, | |||
| if _, ok := stgBlocksMap[stgID]; !ok { | |||
| stgBlocksMap[stgID] = &stgBlocks{ | |||
| StorageID: stgID, | |||
| Count: 0, | |||
| } | |||
| } | |||
| stgBlocksMap[nodeID].Count++ | |||
| stgBlocksMap[stgID].Count++ | |||
| } | |||
| } | |||
| @@ -278,22 +278,22 @@ func newCombinatorialTree(stgBlocksMaps map[cdssdk.StorageID]*bitmap.Bitmap64) c | |||
| tree.localStgIDToStgID = append(tree.localStgIDToStgID, id) | |||
| } | |||
| tree.nodes[0].localNodeID = -1 | |||
| tree.nodes[0].localStgID = -1 | |||
| index := 1 | |||
| tree.initNode(0, &tree.nodes[0], &index) | |||
| return tree | |||
| } | |||
| func (t *combinatorialTree) initNode(minAvaiLocalNodeID int, parent *combinatorialTreeNode, index *int) { | |||
| for i := minAvaiLocalNodeID; i < len(t.stgIDToLocalStgID); i++ { | |||
| func (t *combinatorialTree) initNode(minAvaiLocalStgID int, parent *combinatorialTreeNode, index *int) { | |||
| for i := minAvaiLocalStgID; i < len(t.stgIDToLocalStgID); i++ { | |||
| curIndex := *index | |||
| *index++ | |||
| bitMp := t.blocksMaps[i] | |||
| bitMp.Or(&parent.blocksBitmap) | |||
| t.nodes[curIndex] = combinatorialTreeNode{ | |||
| localNodeID: i, | |||
| localStgID: i, | |||
| parent: parent, | |||
| blocksBitmap: bitMp, | |||
| } | |||
| @@ -339,7 +339,7 @@ func (t *combinatorialTree) UpdateBitmap(stgID cdssdk.StorageID, mp bitmap.Bitma | |||
| index := d + i | |||
| node := &t.nodes[index] | |||
| newMp := t.blocksMaps[node.localNodeID] | |||
| newMp := t.blocksMaps[node.localStgID] | |||
| newMp.Or(&node.parent.blocksBitmap) | |||
| node.blocksBitmap = newMp | |||
| if newMp.Weight() >= k { | |||
| @@ -350,7 +350,7 @@ func (t *combinatorialTree) UpdateBitmap(stgID cdssdk.StorageID, mp bitmap.Bitma | |||
| curNode := &t.nodes[index] | |||
| parentNode := t.nodes[parentIndex] | |||
| newMp := t.blocksMaps[curNode.localNodeID] | |||
| newMp := t.blocksMaps[curNode.localStgID] | |||
| newMp.Or(&parentNode.blocksBitmap) | |||
| curNode.blocksBitmap = newMp | |||
| if newMp.Weight() >= k { | |||
| @@ -377,7 +377,7 @@ func (t *combinatorialTree) FindKBlocksMaxDepth(k int) int { | |||
| // Since the traversal is depth-first, by the time this leaf is reached, every combination of this leaf plus one more node has already been searched, | |||
| // so depth+1 of the current leaf can serve as the result for this branch. Even if adding any single node is still not enough for K blocks, | |||
| // the same reasoning recurses to the case of adding two more blocks to the current leaf. | |||
| if t.nodes[index].localNodeID == len(t.stgIDToLocalStgID)-1 { | |||
| if t.nodes[index].localStgID == len(t.stgIDToLocalStgID)-1 { | |||
| if maxDepth < depth+1 { | |||
| maxDepth = depth + 1 | |||
| } | |||
| @@ -409,7 +409,7 @@ func (t *combinatorialTree) iterChildren(index int, do func(index int, parentInd | |||
| childIndex := index + 1 | |||
| curDepth := t.GetDepth(index) | |||
| childCounts := len(t.stgIDToLocalStgID) - 1 - curNode.localNodeID | |||
| childCounts := len(t.stgIDToLocalStgID) - 1 - curNode.localStgID | |||
| if childCounts == 0 { | |||
| return | |||
| } | |||
| @@ -438,7 +438,7 @@ func (t *combinatorialTree) itering(index int, parentIndex int, depth int, do fu | |||
| curNode := &t.nodes[index] | |||
| childIndex := index + 1 | |||
| childCounts := len(t.stgIDToLocalStgID) - 1 - curNode.localNodeID | |||
| childCounts := len(t.stgIDToLocalStgID) - 1 - curNode.localHubID | |||
| if childCounts == 0 { | |||
| return iterActionNone | |||
| } | |||
| @@ -458,7 +458,7 @@ func (t *combinatorialTree) itering(index int, parentIndex int, depth int, do fu | |||
| } | |||
| type combinatorialTreeNode struct { | |||
| localNodeID int | |||
| localHubID int | |||
| parent *combinatorialTreeNode | |||
| blocksBitmap bitmap.Bitmap64 // how many distinct blocks all centers hold in total once this center is chosen | |||
| } | |||
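
Since the tree holds one combinatorialTreeNode per subset of storages (the root being the empty subset), n storages yield 1<<n nodes — consistent with the 16-entry expected bitmaps in the 4-storage test cases below:

```go
package main

import "fmt"

func main() {
	// One tree node per subset of storages, root included.
	for _, n := range []int{2, 3, 4} {
		fmt.Printf("%d storages -> %d tree nodes\n", n, 1<<n)
	}
}
```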
| @@ -614,19 +614,19 @@ func (t *CleanPinned) sortNodeByReaderDistance(state *annealingState) { | |||
| // treat the distance as 0.1 when on the same node | |||
| nodeDists = append(nodeDists, stgDist{ | |||
| StorageID: n, | |||
| Distance: consts.NodeDistanceSameNode, | |||
| Distance: consts.StorageDistanceSameStorage, | |||
| }) | |||
| } else if state.allStgInfos[r].MasterHub.LocationID == state.allStgInfos[n].MasterHub.LocationID { | |||
| // treat the distance as 1 within the same location | |||
| nodeDists = append(nodeDists, stgDist{ | |||
| StorageID: n, | |||
| Distance: consts.NodeDistanceSameLocation, | |||
| Distance: consts.StorageDistanceSameLocation, | |||
| }) | |||
| } else { | |||
| // treat the distance as 5 across different locations | |||
| nodeDists = append(nodeDists, stgDist{ | |||
| StorageID: n, | |||
| Distance: consts.NodeDistanceOther, | |||
| Distance: consts.StorageDistanceOther, | |||
| }) | |||
| } | |||
| } | |||
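
The three renamed constants line up with the translated comments (0.1 for the same storage, 1 for the same location, 5 otherwise). A hedged sketch of the decision ladder — the values come from the comments in this hunk, while the real definitions live in the consts package:

```go
package main

import "fmt"

// Assumed shape of the renamed constants, matching the comments above.
const (
	StorageDistanceSameStorage  = 0.1
	StorageDistanceSameLocation = 1
	StorageDistanceOther        = 5
)

// distance mirrors the branch structure above for two storages,
// identified by storage ID and the LocationID of their master hub.
func distance(aStg, bStg, aLoc, bLoc int64) float64 {
	switch {
	case aStg == bStg:
		return StorageDistanceSameStorage
	case aLoc == bLoc:
		return StorageDistanceSameLocation
	default:
		return StorageDistanceOther
	}
}

func main() {
	fmt.Println(distance(1, 1, 10, 10)) // 0.1
	fmt.Println(distance(1, 2, 10, 10)) // 1
	fmt.Println(distance(1, 2, 10, 20)) // 5
}
```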
| @@ -724,7 +724,7 @@ func (t *CleanPinned) alwaysAccept(curTemp float64, dScore float64, coolingRate | |||
| return v > rand.Float64() | |||
| } | |||
| func (t *CleanPinned) makePlansForRepObject(allStgInfos map[cdssdk.StorageID]*stgmod.StorageDetail, solu annealingSolution, obj stgmod.ObjectDetail, planBld *exec.PlanBuilder, planningNodeIDs map[cdssdk.StorageID]bool) coormq.UpdatingObjectRedundancy { | |||
| func (t *CleanPinned) makePlansForRepObject(allStgInfos map[cdssdk.StorageID]*stgmod.StorageDetail, solu annealingSolution, obj stgmod.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[cdssdk.StorageID]bool) coormq.UpdatingObjectRedundancy { | |||
| entry := coormq.UpdatingObjectRedundancy{ | |||
| ObjectID: obj.Object.ObjectID, | |||
| Redundancy: obj.Object.Redundancy, | |||
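
alwaysAccept ends in the classic simulated-annealing coin flip, `v > rand.Float64()`. The diff does not show how v is derived from dScore and the temperature, so the exp() form below is an assumption illustrating the usual Metropolis criterion, not the repo's exact formula:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
)

// accept sketches a Metropolis-style acceptance rule: improving moves
// always pass, worsening moves pass with a probability that decays as
// the temperature drops. The exp() expression is an assumption.
func accept(curTemp, dScore, coolingRate float64) bool {
	if dScore >= 0 {
		return true // improving moves are always taken
	}
	v := math.Exp(dScore / (curTemp * coolingRate))
	return v > rand.Float64()
}

func main() {
	fmt.Println(accept(100, 5, 0.95))  // true: score improved
	fmt.Println(accept(100, -5, 0.95)) // sometimes true while temperature is high
}
```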
| @@ -751,7 +751,7 @@ func (t *CleanPinned) makePlansForRepObject(allStgInfos map[cdssdk.StorageID]*st | |||
| // TODO: error handling | |||
| continue | |||
| } | |||
| planningNodeIDs[solu.blockList[i].StorageID] = true | |||
| planningHubIDs[solu.blockList[i].StorageID] = true | |||
| } | |||
| entry.Blocks = append(entry.Blocks, stgmod.ObjectBlock{ | |||
| ObjectID: obj.Object.ObjectID, | |||
| @@ -765,7 +765,7 @@ func (t *CleanPinned) makePlansForRepObject(allStgInfos map[cdssdk.StorageID]*st | |||
| return entry | |||
| } | |||
| func (t *CleanPinned) makePlansForECObject(allStgInfos map[cdssdk.StorageID]*stgmod.StorageDetail, solu annealingSolution, obj stgmod.ObjectDetail, planBld *exec.PlanBuilder, planningNodeIDs map[cdssdk.StorageID]bool) coormq.UpdatingObjectRedundancy { | |||
| func (t *CleanPinned) makePlansForECObject(allStgInfos map[cdssdk.StorageID]*stgmod.StorageDetail, solu annealingSolution, obj stgmod.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[cdssdk.StorageID]bool) coormq.UpdatingObjectRedundancy { | |||
| entry := coormq.UpdatingObjectRedundancy{ | |||
| ObjectID: obj.Object.ObjectID, | |||
| Redundancy: obj.Object.Redundancy, | |||
| @@ -812,7 +812,7 @@ func (t *CleanPinned) makePlansForECObject(allStgInfos map[cdssdk.StorageID]*stg | |||
| continue | |||
| } | |||
| planningNodeIDs[id] = true | |||
| planningHubIDs[id] = true | |||
| } | |||
| return entry | |||
| } | |||
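
Both plan builders take the renamed planningHubIDs parameter, which — note — is still keyed by cdssdk.StorageID: every storage that receives a block in the new layout is marked in this set. A minimal sketch of that marking pattern; the consumer of the set is not shown in this diff:

```go
package main

import "fmt"

type StorageID int64

// markPlanned captures the shared pattern of both plan builders above:
// every storage receiving a block in the new layout is recorded in the
// set (keyed by StorageID despite the planningHubIDs name).
func markPlanned(planned map[StorageID]bool, targets []StorageID) {
	for _, id := range targets {
		planned[id] = true
	}
}

func main() {
	planned := map[StorageID]bool{}
	markPlanned(planned, []StorageID{2, 5})
	fmt.Println(planned) // map[2:true 5:true]
}
```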
| @@ -21,7 +21,7 @@ func newTreeTest(nodeBlocksMap []bitmap.Bitmap64) combinatorialTree { | |||
| tree.localStgIDToStgID = append(tree.localStgIDToStgID, cdssdk.StorageID(id)) | |||
| } | |||
| tree.nodes[0].localNodeID = -1 | |||
| tree.nodes[0].localHubID = -1 | |||
| index := 1 | |||
| tree.initNode(0, &tree.nodes[0], &index) | |||
| @@ -111,7 +111,7 @@ func Test_newCombinatorialTree(t *testing.T) { | |||
| var localIDs []int | |||
| var bitmaps []int | |||
| for _, n := range t.nodes { | |||
| localIDs = append(localIDs, n.localNodeID) | |||
| localIDs = append(localIDs, n.localHubID) | |||
| bitmaps = append(bitmaps, int(n.blocksBitmap)) | |||
| } | |||
| @@ -125,7 +125,7 @@ func Test_UpdateBitmap(t *testing.T) { | |||
| testcases := []struct { | |||
| title string | |||
| nodeBlocks []bitmap.Bitmap64 | |||
| updatedNodeID cdssdk.StorageID | |||
| updatedHubID cdssdk.StorageID | |||
| updatedBitmap bitmap.Bitmap64 | |||
| k int | |||
| expectedTreeNodeBitmaps []int | |||
| @@ -134,7 +134,7 @@ func Test_UpdateBitmap(t *testing.T) { | |||
| { | |||
| title: "4个节点,更新但值不变", | |||
| nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, | |||
| updatedNodeID: cdssdk.StorageID(0), | |||
| updatedHubID: cdssdk.StorageID(0), | |||
| updatedBitmap: bitmap.Bitmap64(1), | |||
| k: 4, | |||
| expectedTreeNodeBitmaps: []int{0, 1, 3, 7, 15, 11, 5, 13, 9, 2, 6, 14, 10, 4, 12, 8}, | |||
| @@ -143,7 +143,7 @@ func Test_UpdateBitmap(t *testing.T) { | |||
| { | |||
| title: "4个节点,更新0", | |||
| nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, | |||
| updatedNodeID: cdssdk.StorageID(0), | |||
| updatedHubID: cdssdk.StorageID(0), | |||
| updatedBitmap: bitmap.Bitmap64(2), | |||
| k: 4, | |||
| expectedTreeNodeBitmaps: []int{0, 2, 2, 6, 14, 10, 6, 14, 10, 2, 6, 14, 10, 4, 12, 8}, | |||
| @@ -152,7 +152,7 @@ func Test_UpdateBitmap(t *testing.T) { | |||
| { | |||
| title: "4个节点,更新1", | |||
| nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, | |||
| updatedNodeID: cdssdk.StorageID(1), | |||
| updatedHubID: cdssdk.StorageID(1), | |||
| updatedBitmap: bitmap.Bitmap64(1), | |||
| k: 4, | |||
| expectedTreeNodeBitmaps: []int{0, 1, 1, 5, 13, 9, 5, 13, 9, 1, 5, 13, 9, 4, 12, 8}, | |||
| @@ -161,7 +161,7 @@ func Test_UpdateBitmap(t *testing.T) { | |||
| { | |||
| title: "4个节点,更新2", | |||
| nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, | |||
| updatedNodeID: cdssdk.StorageID(2), | |||
| updatedHubID: cdssdk.StorageID(2), | |||
| updatedBitmap: bitmap.Bitmap64(1), | |||
| k: 4, | |||
| expectedTreeNodeBitmaps: []int{0, 1, 3, 3, 11, 11, 1, 9, 9, 2, 3, 11, 10, 1, 9, 8}, | |||
| @@ -170,7 +170,7 @@ func Test_UpdateBitmap(t *testing.T) { | |||
| { | |||
| title: "4个节点,更新3", | |||
| nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, | |||
| updatedNodeID: cdssdk.StorageID(3), | |||
| updatedHubID: cdssdk.StorageID(3), | |||
| updatedBitmap: bitmap.Bitmap64(1), | |||
| k: 4, | |||
| expectedTreeNodeBitmaps: []int{0, 1, 3, 7, 7, 3, 5, 5, 1, 2, 6, 7, 3, 4, 5, 1}, | |||
| @@ -179,7 +179,7 @@ func Test_UpdateBitmap(t *testing.T) { | |||
| { | |||
| title: "4个节点,k<4,更新0,0之前没有k个块,现在拥有", | |||
| nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, | |||
| updatedNodeID: cdssdk.StorageID(0), | |||
| updatedHubID: cdssdk.StorageID(0), | |||
| updatedBitmap: bitmap.Bitmap64(3), | |||
| k: 2, | |||
| expectedTreeNodeBitmaps: []int{0, 3, 3, 7, 15, 11, 5, 13, 9, 2, 6, 14, 10, 4, 12, 8}, | |||
| @@ -187,7 +187,7 @@ func Test_UpdateBitmap(t *testing.T) { | |||
| { | |||
| title: "4个节点,k<4,更新0,0之前有k个块,现在没有", | |||
| nodeBlocks: []bitmap.Bitmap64{3, 4, 0, 0}, | |||
| updatedNodeID: cdssdk.StorageID(0), | |||
| updatedHubID: cdssdk.StorageID(0), | |||
| updatedBitmap: bitmap.Bitmap64(0), | |||
| k: 2, | |||
| expectedTreeNodeBitmaps: []int{0, 0, 4, 4, 4, 4, 0, 0, 0, 4, 4, 4, 4, 0, 0, 0}, | |||
| @@ -197,7 +197,7 @@ func Test_UpdateBitmap(t *testing.T) { | |||
| for _, test := range testcases { | |||
| Convey(test.title, t, func() { | |||
| t := newTreeTest(test.nodeBlocks) | |||
| t.UpdateBitmap(test.updatedNodeID, test.updatedBitmap, test.k) | |||
| t.UpdateBitmap(test.updatedHubID, test.updatedBitmap, test.k) | |||
| var bitmaps []int | |||
| for _, n := range t.nodes { | |||
| @@ -14,52 +14,52 @@ func Test_chooseSoManyNodes(t *testing.T) { | |||
| title string | |||
| allNodes []*StorageLoadInfo | |||
| count int | |||
| expectedNodeIDs []cdssdk.NodeID | |||
| expectedHubIDs []cdssdk.HubID | |||
| }{ | |||
| { | |||
| title: "节点数量充足", | |||
| allNodes: []*StorageLoadInfo{ | |||
| {Storage: cdssdk.Node{NodeID: cdssdk.NodeID(1)}}, | |||
| {Storage: cdssdk.Node{NodeID: cdssdk.NodeID(2)}}, | |||
| {Storage: cdssdk.Node{HubID: cdssdk.HubID(1)}}, | |||
| {Storage: cdssdk.Node{HubID: cdssdk.HubID(2)}}, | |||
| }, | |||
| count: 2, | |||
| expectedNodeIDs: []cdssdk.NodeID{1, 2}, | |||
| expectedHubIDs: []cdssdk.HubID{1, 2}, | |||
| }, | |||
| { | |||
| title: "节点数量超过", | |||
| allNodes: []*StorageLoadInfo{ | |||
| {Storage: cdssdk.Node{NodeID: cdssdk.NodeID(1)}}, | |||
| {Storage: cdssdk.Node{NodeID: cdssdk.NodeID(2)}}, | |||
| {Storage: cdssdk.Node{NodeID: cdssdk.NodeID(3)}}, | |||
| {Storage: cdssdk.Node{HubID: cdssdk.HubID(1)}}, | |||
| {Storage: cdssdk.Node{HubID: cdssdk.HubID(2)}}, | |||
| {Storage: cdssdk.Node{HubID: cdssdk.HubID(3)}}, | |||
| }, | |||
| count: 2, | |||
| expectedNodeIDs: []cdssdk.NodeID{1, 2}, | |||
| expectedHubIDs: []cdssdk.HubID{1, 2}, | |||
| }, | |||
| { | |||
| title: "只有一个节点,节点数量不够", | |||
| allNodes: []*StorageLoadInfo{ | |||
| {Storage: cdssdk.Node{NodeID: cdssdk.NodeID(1)}}, | |||
| {Storage: cdssdk.Node{HubID: cdssdk.HubID(1)}}, | |||
| }, | |||
| count: 3, | |||
| expectedNodeIDs: []cdssdk.NodeID{1, 1, 1}, | |||
| expectedHubIDs: []cdssdk.HubID{1, 1, 1}, | |||
| }, | |||
| { | |||
| title: "多个同地区节点,节点数量不够", | |||
| allNodes: []*StorageLoadInfo{ | |||
| {Storage: cdssdk.Node{NodeID: cdssdk.NodeID(1)}}, | |||
| {Storage: cdssdk.Node{NodeID: cdssdk.NodeID(2)}}, | |||
| {Storage: cdssdk.Node{HubID: cdssdk.HubID(1)}}, | |||
| {Storage: cdssdk.Node{HubID: cdssdk.HubID(2)}}, | |||
| }, | |||
| count: 5, | |||
| expectedNodeIDs: []cdssdk.NodeID{1, 1, 1, 2, 2}, | |||
| expectedHubIDs: []cdssdk.HubID{1, 1, 1, 2, 2}, | |||
| }, | |||
| { | |||
| title: "节点数量不够,且在不同地区", | |||
| allNodes: []*StorageLoadInfo{ | |||
| {Storage: cdssdk.Node{NodeID: cdssdk.NodeID(1), LocationID: cdssdk.LocationID(1)}}, | |||
| {Storage: cdssdk.Node{NodeID: cdssdk.NodeID(2), LocationID: cdssdk.LocationID(2)}}, | |||
| {Storage: cdssdk.Node{HubID: cdssdk.HubID(1), LocationID: cdssdk.LocationID(1)}}, | |||
| {Storage: cdssdk.Node{HubID: cdssdk.HubID(2), LocationID: cdssdk.LocationID(2)}}, | |||
| }, | |||
| count: 5, | |||
| expectedNodeIDs: []cdssdk.NodeID{1, 2, 1, 2, 1}, | |||
| expectedHubIDs: []cdssdk.HubID{1, 2, 1, 2, 1}, | |||
| }, | |||
| } | |||
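
The expected IDs in the cases above imply two fallback behaviors when fewer storages exist than requested: interleaving across different locations ([1, 2, 1, 2, 1]) versus repeating within one location ([1, 1, 1, 2, 2]). A sketch of only the cross-location round-robin half; the per-location distribution logic is an assumption left out here:

```go
package main

import "fmt"

// roundRobin reproduces the cross-location expectation above: when the
// requested count exceeds the available storages, cycle through them.
func roundRobin(ids []int, count int) []int {
	out := make([]int, 0, count)
	for i := 0; i < count; i++ {
		out = append(out, ids[i%len(ids)])
	}
	return out
}

func main() {
	fmt.Println(roundRobin([]int{1, 2}, 5)) // [1 2 1 2 1]
}
```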
| @@ -68,9 +68,9 @@ func Test_chooseSoManyNodes(t *testing.T) { | |||
| var t CheckPackageRedundancy | |||
| chosenNodes := t.chooseSoManyNodes(test.count, test.allNodes) | |||
| chosenNodeIDs := lo.Map(chosenNodes, func(item *StorageLoadInfo, idx int) cdssdk.NodeID { return item.Storage.NodeID }) | |||
| chosenHubIDs := lo.Map(chosenNodes, func(item *StorageLoadInfo, idx int) cdssdk.HubID { return item.Storage.HubID }) | |||
| So(chosenNodeIDs, ShouldResemble, test.expectedNodeIDs) | |||
| So(chosenHubIDs, ShouldResemble, test.expectedHubIDs) | |||
| }) | |||
| } | |||
| } | |||
| @@ -25,11 +25,11 @@ func (e *BatchAllAgentCheckCache) Execute(ctx ExecuteContext) { | |||
| if e.stgIDs == nil || len(e.stgIDs) == 0 { | |||
| ids, err := ctx.Args.DB.Storage().GetAllIDs(ctx.Args.DB.DefCtx()) | |||
| if err != nil { | |||
| log.Warnf("get all nodes failed, err: %s", err.Error()) | |||
| log.Warnf("get all storages failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| log.Debugf("new check start, get all nodes") | |||
| log.Debugf("new check start, get all storages") | |||
| e.stgIDs = ids | |||
| } | |||
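
One small Go note on the guard above: `len` of a nil slice is already 0, so `e.stgIDs == nil || len(e.stgIDs) == 0` could be shortened to just `len(e.stgIDs) == 0`:

```go
package main

import "fmt"

func main() {
	var ids []int64 // nil slice
	fmt.Println(ids == nil, len(ids) == 0) // true true — the nil check is redundant
}
```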
| @@ -18,14 +18,14 @@ func (e *CheckAgentState) Execute(ctx ExecuteContext) { | |||
| log.Debugf("begin") | |||
| defer log.Debugf("end") | |||
| nodes, err := ctx.Args.DB.Node().GetAllNodes(ctx.Args.DB.DefCtx()) | |||
| hubs, err := ctx.Args.DB.Hub().GetAllHubs(ctx.Args.DB.DefCtx()) | |||
| if err != nil { | |||
| log.Warnf("get all nodes failed, err: %s", err.Error()) | |||
| log.Warnf("get all hubs failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| for _, node := range nodes { | |||
| ctx.Args.EventExecutor.Post(event.NewAgentCheckState(scevt.NewAgentCheckState(node.NodeID)), event.ExecuteOption{ | |||
| for _, hub := range hubs { | |||
| ctx.Args.EventExecutor.Post(event.NewAgentCheckState(scevt.NewAgentCheckState(hub.HubID)), event.ExecuteOption{ | |||
| IsEmergency: true, | |||
| DontMerge: true, | |||
| }) | |||