@@ -2,26 +2,26 @@ package cmdline
 import (
 	"fmt"
+	"time"

 	"github.com/jedib0t/go-pretty/v6/table"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 )

 func BucketListUserBuckets(ctx CommandContext) error {
-	userID := cdssdk.UserID(1)
-	buckets, err := ctx.Cmdline.Svc.BucketSvc().GetUserBuckets(userID)
+	buckets, err := ctx.Cmdline.Svc.BucketSvc().GetUserBuckets()
 	if err != nil {
 		return err
 	}

-	fmt.Printf("Find %d buckets for user %d:\n", len(buckets), userID)
+	fmt.Printf("Find %d buckets:\n", len(buckets))

 	tb := table.NewWriter()
-	tb.AppendHeader(table.Row{"ID", "Name", "CreatorID"})
+	tb.AppendHeader(table.Row{"ID", "Name"})
 	for _, bucket := range buckets {
-		tb.AppendRow(table.Row{bucket.BucketID, bucket.Name, bucket.CreatorID})
+		tb.AppendRow(table.Row{bucket.BucketID, bucket.Name})
 	}
 	fmt.Print(tb.Render())
@@ -29,9 +29,7 @@ func BucketListUserBuckets(ctx CommandContext) error {
 }

 func BucketCreateBucket(ctx CommandContext, bucketName string) error {
-	userID := cdssdk.UserID(1)
-	bucketID, err := ctx.Cmdline.Svc.BucketSvc().CreateBucket(userID, bucketName)
+	bucketID, err := ctx.Cmdline.Svc.BucketSvc().CreateBucket(bucketName, time.Now())
 	if err != nil {
 		return err
 	}
@@ -41,9 +39,8 @@ func BucketCreateBucket(ctx CommandContext, bucketName string) error {
 }

 func BucketDeleteBucket(ctx CommandContext, bucketID cdssdk.BucketID) error {
-	userID := cdssdk.UserID(1)
-	err := ctx.Cmdline.Svc.BucketSvc().DeleteBucket(userID, bucketID)
+	err := ctx.Cmdline.Svc.BucketSvc().DeleteBucket(bucketID)
 	if err != nil {
 		return err
 	}
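Note: the bucket commands above no longer take an explicit cdssdk.UserID(1); the caller is implied by the service context, and CreateBucket now also takes a creation time. A minimal sketch of the new calling convention, written only against the signatures and fields visible in this hunk (the helper itself is illustrative and not part of the patch; it assumes the bucket record exposes BucketID and Name, as the table rendering above suggests):

func deleteBucketsNamed(ctx CommandContext, name string) error {
	// List the caller's buckets without passing a user ID.
	buckets, err := ctx.Cmdline.Svc.BucketSvc().GetUserBuckets()
	if err != nil {
		return err
	}
	for _, bkt := range buckets {
		if bkt.Name != name {
			continue
		}
		// DeleteBucket now identifies the bucket by ID alone.
		if err := ctx.Cmdline.Svc.BucketSvc().DeleteBucket(bkt.BucketID); err != nil {
			return fmt.Errorf("deleting bucket %v: %w", bkt.BucketID, err)
		}
	}
	return nil
}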
@@ -1,10 +1,11 @@
 package cmdline

+/*
 import (
 	"fmt"
 	"time"

-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 )

 func CacheMovePackage(ctx CommandContext, packageID cdssdk.PackageID, stgID cdssdk.StorageID) error {
@@ -43,3 +44,4 @@ func init() {
 	commands.Add(CacheRemovePackage, "cache", "remove")
 }
+*/
@@ -11,8 +11,8 @@ import (
 	"github.com/inhies/go-bytesize"
 	"github.com/spf13/cobra"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
 	"gitlink.org.cn/cloudream/storage2/client/internal/config"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/iterator"
 )

@@ -44,15 +44,13 @@ func init() {
 }

 func getpByPath(cmdCtx *CommandContext, path string, output string) {
-	userID := cdssdk.UserID(1)
 	comps := strings.Split(strings.Trim(path, cdssdk.ObjectPathSeparator), cdssdk.ObjectPathSeparator)
 	if len(comps) != 2 {
 		fmt.Printf("Package path must be in format of <bucket>/<package>")
 		return
 	}

-	pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1])
+	pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(comps[0], comps[1])
 	if err != nil {
 		fmt.Println(err)
 		return
@@ -62,10 +60,9 @@ func getpByPath(cmdCtx *CommandContext, path string, output string) {
 }

 func getpByID(cmdCtx *CommandContext, id cdssdk.PackageID, output string) {
-	userID := cdssdk.UserID(1)
 	startTime := time.Now()

-	objIter, err := cmdCtx.Cmdline.Svc.PackageSvc().DownloadPackage(userID, id)
+	objIter, err := cmdCtx.Cmdline.Svc.PackageSvc().DownloadPackage(id)
 	if err != nil {
 		fmt.Println(err)
 		return
@@ -7,7 +7,7 @@ import (
 	"time"

 	"github.com/spf13/cobra"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 )

 func init() {
@@ -41,21 +41,19 @@ func init() {
 }

 func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string, rootPath string) {
-	userID := cdssdk.UserID(1)
 	comps := strings.Split(strings.Trim(pkgPath, cdssdk.ObjectPathSeparator), cdssdk.ObjectPathSeparator)
 	if len(comps) != 2 {
 		fmt.Printf("Package path must be in format of <bucket>/<package>")
 		return
 	}

-	pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1])
+	pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(comps[0], comps[1])
 	if err != nil {
 		fmt.Println(err)
 		return
 	}

-	stg, err := cmdCtx.Cmdline.Svc.StorageSvc().GetByName(userID, stgName)
+	stg, err := cmdCtx.Cmdline.Svc.StorageSvc().GetByName(stgName)
 	if err != nil {
 		fmt.Println(err)
 		return
@@ -65,10 +63,9 @@ func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string, rootPath
 }

 func loadByID(cmdCtx *CommandContext, pkgID cdssdk.PackageID, stgID cdssdk.StorageID, rootPath string) {
-	userID := cdssdk.UserID(1)
 	startTime := time.Now()

-	err := cmdCtx.Cmdline.Svc.StorageSvc().LoadPackage(userID, pkgID, stgID, rootPath)
+	err := cmdCtx.Cmdline.Svc.StorageSvc().LoadPackage(pkgID, stgID, rootPath)
 	if err != nil {
 		fmt.Println(err)
 		return
@@ -7,7 +7,7 @@ import (
 	"github.com/jedib0t/go-pretty/v6/table"
 	"github.com/spf13/cobra"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 )

 func init() {
@@ -38,37 +38,33 @@ func init() {
 }

 func lspByPath(cmdCtx *CommandContext, path string) {
-	userID := cdssdk.UserID(1)
 	comps := strings.Split(strings.Trim(path, cdssdk.ObjectPathSeparator), cdssdk.ObjectPathSeparator)
 	if len(comps) != 2 {
 		fmt.Printf("Package path must be in format of <bucket>/<package>")
 		return
 	}

-	pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1])
+	pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(comps[0], comps[1])
 	if err != nil {
 		fmt.Println(err)
 		return
 	}

 	wr := table.NewWriter()
-	wr.AppendHeader(table.Row{"ID", "Name", "State"})
-	wr.AppendRow(table.Row{pkg.PackageID, pkg.Name, pkg.State})
+	wr.AppendHeader(table.Row{"ID", "Name"})
+	wr.AppendRow(table.Row{pkg.PackageID, pkg.Name})
 	fmt.Println(wr.Render())
 }

 func lspOneByID(cmdCtx *CommandContext, id cdssdk.PackageID) {
-	userID := cdssdk.UserID(1)
-	pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().Get(userID, id)
+	pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().Get(id)
 	if err != nil {
 		fmt.Println(err)
 		return
 	}

 	wr := table.NewWriter()
-	wr.AppendHeader(table.Row{"ID", "Name", "State"})
-	wr.AppendRow(table.Row{pkg.PackageID, pkg.Name, pkg.State})
+	wr.AppendHeader(table.Row{"ID", "Name"})
+	wr.AppendRow(table.Row{pkg.PackageID, pkg.Name})
 	fmt.Println(wr.Render())
 }
@@ -9,12 +9,12 @@ import (
 	"github.com/jedib0t/go-pretty/v6/table"
 	"github.com/spf13/cobra"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 )

 func init() {
 	cmd := &cobra.Command{
-		Use:   "newloadp localPath bucketID packageName storageID...",
+		Use:   "newloadp localPath bucketID packageName userSpaceID...",
 		Short: "Create a new package then upload and load files to it at the same time",
 		Args:  cobra.MinimumNArgs(4),
 		Run: func(cmd *cobra.Command, args []string) {
@@ -28,7 +28,7 @@ func init() {
 			}
 			packageName := args[2]

-			storageIDs := make([]cdssdk.StorageID, 0)
+			spaceIDs := make([]cdssdk.UserSpaceID, 0)
 			rootPathes := make([]string, 0)
 			for _, dst := range args[3:] {
 				comps := strings.Split(dst, ":")
@@ -42,21 +42,19 @@ func init() {
 					fmt.Println(err)
 					return
 				}
-				storageIDs = append(storageIDs, cdssdk.StorageID(sID))
+				spaceIDs = append(spaceIDs, cdssdk.UserSpaceID(sID))
 				rootPathes = append(rootPathes, comps[1])
 			}

-			newloadp(cmdCtx, localPath, cdssdk.BucketID(bktID), packageName, storageIDs, rootPathes)
+			newloadp(cmdCtx, localPath, cdssdk.BucketID(bktID), packageName, spaceIDs, rootPathes)
 		},
 	}
 	RootCmd.AddCommand(cmd)
 }

-func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, packageName string, storageIDs []cdssdk.StorageID, rootPathes []string) {
-	userID := cdssdk.UserID(1)
-	up, err := cmdCtx.Cmdline.Svc.Uploader.BeginCreateLoad(userID, bucketID, packageName, storageIDs, rootPathes)
+func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, packageName string, spaceIDs []cdssdk.UserSpaceID, rootPathes []string) {
+	up, err := cmdCtx.Cmdline.Svc.Uploader.BeginCreateLoad(bucketID, packageName, spaceIDs, rootPathes)
 	if err != nil {
 		fmt.Println(err)
 		return
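Note: the Run function above parses each trailing argument as <userSpaceID>:<rootPath>. A stand-alone sketch of that parsing, assuming cdssdk.UserSpaceID is an integer-typed ID as the conversion in the hunk suggests; the function name parseLoadDst is hypothetical and not part of the patch, and it relies on fmt, strconv, and strings, which the inline version presumably already imports:

func parseLoadDst(args []string) ([]cdssdk.UserSpaceID, []string, error) {
	spaceIDs := make([]cdssdk.UserSpaceID, 0, len(args))
	rootPathes := make([]string, 0, len(args))
	for _, dst := range args {
		// Each destination is "<userSpaceID>:<rootPath>".
		comps := strings.SplitN(dst, ":", 2)
		if len(comps) != 2 {
			return nil, nil, fmt.Errorf("invalid destination %q, expected <userSpaceID>:<rootPath>", dst)
		}
		sID, err := strconv.ParseInt(comps[0], 10, 64)
		if err != nil {
			return nil, nil, fmt.Errorf("parsing user space ID %q: %w", comps[0], err)
		}
		spaceIDs = append(spaceIDs, cdssdk.UserSpaceID(sID))
		rootPathes = append(rootPathes, comps[1])
	}
	return spaceIDs, rootPathes, nil
}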
@@ -6,7 +6,7 @@ import (
 	"path/filepath"
 	"time"

-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 )

 // Command function that must be added; it handles object uploads.
@@ -16,7 +16,7 @@ import (
 // rootPath: root directory in the local file system containing the files to upload.
 // storageAffinity: list of preferred node IDs; the upload task may be assigned to these nodes.
 // Returns: any error encountered during execution.
-var _ = MustAddCmd(func(ctx CommandContext, packageID cdssdk.PackageID, rootPath string, storageAffinity []cdssdk.StorageID) error {
+var _ = MustAddCmd(func(ctx CommandContext, packageID cdssdk.PackageID, rootPath string, spaceAffinity []cdssdk.UserSpaceID) error {
 	// Record the start time so the execution time can be computed.
 	startTime := time.Now()
 	defer func() {
@@ -24,16 +24,13 @@ var _ = MustAddCmd(func(ctx CommandContext, packageID cdssdk.PackageID, rootPath
 		fmt.Printf("%v\n", time.Since(startTime).Seconds())
 	}()

-	// Mock or fetch the user ID.
-	userID := cdssdk.UserID(1)

 	// Set the preferred upload node from the affinity list.
-	var storageAff cdssdk.StorageID
-	if len(storageAffinity) > 0 {
-		storageAff = storageAffinity[0]
+	var storageAff cdssdk.UserSpaceID
+	if len(spaceAffinity) > 0 {
+		storageAff = spaceAffinity[0]
 	}

-	up, err := ctx.Cmdline.Svc.Uploader.BeginUpdate(userID, packageID, storageAff, nil, nil)
+	up, err := ctx.Cmdline.Svc.Uploader.BeginUpdate(packageID, storageAff, nil, nil)
 	if err != nil {
 		return fmt.Errorf("begin updating package: %w", err)
 	}
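Note: the affinity handling above reduces the new []cdssdk.UserSpaceID parameter to a single preferred space: the first element if one was given, otherwise the type's zero value. The same pattern, extracted as a small generic helper purely for illustration (not part of the patch):

func firstOrZero[T any](vals []T) T {
	// Return the first element if present, otherwise T's zero value.
	if len(vals) > 0 {
		return vals[0]
	}
	var zero T
	return zero
}

With it, the selection above would read storageAff := firstOrZero(spaceAffinity).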
@@ -8,7 +8,7 @@ import (
 	"time"

 	"github.com/jedib0t/go-pretty/v6/table"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/iterator"
 )

@@ -23,20 +23,18 @@ import (
 //
 // error - any error that occurred during the operation.
 func PackageListBucketPackages(ctx CommandContext, bucketID cdssdk.BucketID) error {
-	userID := cdssdk.UserID(1)
-	packages, err := ctx.Cmdline.Svc.BucketSvc().GetBucketPackages(userID, bucketID)
+	packages, err := ctx.Cmdline.Svc.BucketSvc().GetBucketPackages(bucketID)
 	if err != nil {
 		return err
 	}

-	fmt.Printf("Find %d packages in bucket %d for user %d:\n", len(packages), bucketID, userID)
+	fmt.Printf("Find %d packages in bucket %d:\n", len(packages), bucketID)

 	tb := table.NewWriter()
-	tb.AppendHeader(table.Row{"ID", "Name", "BucketID", "State"})
+	tb.AppendHeader(table.Row{"ID", "Name", "BucketID"})
 	for _, obj := range packages {
-		tb.AppendRow(table.Row{obj.PackageID, obj.Name, obj.BucketID, obj.State})
+		tb.AppendRow(table.Row{obj.PackageID, obj.Name, obj.BucketID})
 	}
 	fmt.Println(tb.Render())
@@ -60,15 +58,13 @@ func PackageDownloadPackage(ctx CommandContext, packageID cdssdk.PackageID, outp
 		fmt.Printf("%v\n", time.Since(startTime).Seconds())
 	}()

-	userID := cdssdk.UserID(1)

 	err := os.MkdirAll(outputDir, os.ModePerm)
 	if err != nil {
 		return fmt.Errorf("create output directory %s failed, err: %w", outputDir, err)
 	}

 	// Initialize the file download iterator.
-	objIter, err := ctx.Cmdline.Svc.PackageSvc().DownloadPackage(userID, packageID)
+	objIter, err := ctx.Cmdline.Svc.PackageSvc().DownloadPackage(packageID)
 	if err != nil {
 		return fmt.Errorf("download object failed, err: %w", err)
 	}
@@ -131,9 +127,7 @@ func PackageDownloadPackage(ctx CommandContext, packageID cdssdk.PackageID, outp
 //
 // error - any error that occurred during the operation.
 func PackageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name string) error {
-	userID := cdssdk.UserID(1)
-	pkgID, err := ctx.Cmdline.Svc.PackageSvc().Create(userID, bucketID, name)
+	pkgID, err := ctx.Cmdline.Svc.PackageSvc().Create(bucketID, name)
 	if err != nil {
 		return err
 	}
@@ -153,8 +147,7 @@ func PackageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name str
 //
 // error - any error that occurred during the operation.
 func PackageDeletePackage(ctx CommandContext, packageID cdssdk.PackageID) error {
-	userID := cdssdk.UserID(1)
-	err := ctx.Cmdline.Svc.PackageSvc().DeletePackage(userID, packageID)
+	err := ctx.Cmdline.Svc.PackageSvc().DeletePackage(packageID)
 	if err != nil {
 		return fmt.Errorf("delete package %d failed, err: %w", packageID, err)
 	}
@@ -172,8 +165,7 @@ func PackageDeletePackage(ctx CommandContext, packageID cdssdk.PackageID) error
 //
 // error - any error that occurred during the operation.
 func PackageGetCachedStorages(ctx CommandContext, packageID cdssdk.PackageID) error {
-	userID := cdssdk.UserID(1)
-	resp, err := ctx.Cmdline.Svc.PackageSvc().GetCachedStorages(userID, packageID)
+	resp, err := ctx.Cmdline.Svc.PackageSvc().GetCachedStorages(packageID)
 	fmt.Printf("resp: %v\n", resp)
 	if err != nil {
 		return fmt.Errorf("get package %d cached storages failed, err: %w", packageID, err)
@@ -11,7 +11,7 @@ import (
 	"github.com/spf13/cobra"
 	"gitlink.org.cn/cloudream/common/consts/errorcode"
 	"gitlink.org.cn/cloudream/common/pkgs/mq"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 )

 func init() {
@@ -33,7 +33,6 @@ func init() {
 			return nil
 		},
 		Run: func(cmd *cobra.Command, args []string) {
-			userID := cdssdk.UserID(1)
 			cmdCtx := GetCmdCtx(cmd)

 			local := args[0]
@@ -42,16 +41,16 @@ func init() {
 			startTime := time.Now()

-			bkt, err := cmdCtx.Cmdline.Svc.BucketSvc().GetBucketByName(userID, comps[0])
+			bkt, err := cmdCtx.Cmdline.Svc.BucketSvc().GetBucketByName(comps[0])
 			if err != nil {
 				fmt.Printf("getting bucket: %v\n", err)
 				return
 			}

-			pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(userID, comps[0], comps[1])
+			pkg, err := cmdCtx.Cmdline.Svc.PackageSvc().GetByFullName(comps[0], comps[1])
 			if err != nil {
 				if codeMsg, ok := err.(*mq.CodeMessageError); ok && codeMsg.Code == errorcode.DataNotFound {
-					pkg2, err := cmdCtx.Cmdline.Svc.PackageSvc().Create(userID, bkt.BucketID, comps[1])
+					pkg2, err := cmdCtx.Cmdline.Svc.PackageSvc().Create(bkt.BucketID, comps[1])
 					if err != nil {
 						fmt.Printf("creating package: %v\n", err)
 						return
@@ -63,12 +62,12 @@ func init() {
 					return
 				}
 			}

-			var storageAff cdssdk.StorageID
+			var spaceAff cdssdk.UserSpaceID
 			if stgID != 0 {
-				storageAff = cdssdk.StorageID(stgID)
+				spaceAff = cdssdk.UserSpaceID(stgID)
 			}

-			up, err := cmdCtx.Cmdline.Svc.Uploader.BeginUpdate(userID, pkg.PackageID, storageAff, nil, nil)
+			up, err := cmdCtx.Cmdline.Svc.Uploader.BeginUpdate(pkg.PackageID, spaceAff, nil, nil)
 			if err != nil {
 				fmt.Printf("begin updating package: %v\n", err)
 				return
@@ -1,47 +0,0 @@
-package cmdline
-
-import (
-	"fmt"
-
-	"gitlink.org.cn/cloudream/common/pkgs/cmdtrie"
-	"gitlink.org.cn/cloudream/common/utils/reflect2"
-	scevt "gitlink.org.cn/cloudream/storage2/common/pkgs/mq/scanner/event"
-)
-
-var parseScannerEventCmdTrie cmdtrie.StaticCommandTrie[any] = cmdtrie.NewStaticCommandTrie[any]()
-
-func ScannerPostEvent(ctx CommandContext, args []string) error {
-	ret, err := parseScannerEventCmdTrie.Execute(args, cmdtrie.ExecuteOption{ReplaceEmptyArrayWithNil: true})
-	if err != nil {
-		return fmt.Errorf("execute parsing event command failed, err: %w", err)
-	}
-
-	err = ctx.Cmdline.Svc.ScannerSvc().PostEvent(ret.(scevt.Event), false, false)
-	if err != nil {
-		return fmt.Errorf("post event to scanner failed, err: %w", err)
-	}
-
-	return nil
-}
-
-func init() {
-	parseScannerEventCmdTrie.MustAdd(scevt.NewAgentShardStoreGC, reflect2.TypeNameOf[scevt.AgentShardStoreGC]())
-	parseScannerEventCmdTrie.MustAdd(scevt.NewAgentCheckShardStore, reflect2.TypeNameOf[scevt.AgentCheckShardStore]())
-	parseScannerEventCmdTrie.MustAdd(scevt.NewAgentCheckState, reflect2.TypeNameOf[scevt.AgentCheckState]())
-	parseScannerEventCmdTrie.MustAdd(scevt.NewAgentStorageGC, reflect2.TypeNameOf[scevt.AgentStorageGC]())
-	parseScannerEventCmdTrie.MustAdd(scevt.NewAgentCheckStorage, reflect2.TypeNameOf[scevt.AgentCheckStorage]())
-	parseScannerEventCmdTrie.MustAdd(scevt.NewCheckPackage, reflect2.TypeNameOf[scevt.CheckPackage]())
-	parseScannerEventCmdTrie.MustAdd(scevt.NewCheckPackageRedundancy, reflect2.TypeNameOf[scevt.CheckPackageRedundancy]())
-	parseScannerEventCmdTrie.MustAdd(scevt.NewCleanPinned, reflect2.TypeNameOf[scevt.CleanPinned]())
-	parseScannerEventCmdTrie.MustAdd(scevt.NewUpdatePackageAccessStatAmount, reflect2.TypeNameOf[scevt.UpdatePackageAccessStatAmount]())
-
-	commands.MustAdd(ScannerPostEvent, "scanner", "event")
-}
@@ -7,11 +7,11 @@ import (
 	"github.com/spf13/cobra"
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
 	"gitlink.org.cn/cloudream/storage2/client/internal/config"
 	"gitlink.org.cn/cloudream/storage2/client/internal/http"
 	"gitlink.org.cn/cloudream/storage2/client/internal/services"
 	"gitlink.org.cn/cloudream/storage2/client/internal/task"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 	stgglb "gitlink.org.cn/cloudream/storage2/common/globals"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/accessstat"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/connectivity"
@@ -4,17 +4,17 @@ import (
 	"fmt"
 	"time"

-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 )

-// StorageCreatePackage creates a new package and uploads it to the specified storage system.
+// UserSpaceCreatePackage creates a new package and uploads it to the specified storage system.
 // ctx: command context that provides the required services and environment configuration.
 // bucketID: unique ID of the bucket the package will be uploaded into.
 // name: name of the new package.
 // storageID: unique ID of the target storage system.
 // path: path of the package within the storage system.
 // Returns: any error encountered during execution.
-func StorageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string) error {
+func UserSpaceCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name string, spaceID cdssdk.UserSpaceID, path string) error {
 	startTime := time.Now()
 	defer func() {
 		// Print the function's execution time.
@@ -22,7 +22,7 @@ func StorageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name str
 	}()

 	// Start creating and uploading the package to the storage system.
-	pkg, err := ctx.Cmdline.Svc.StorageSvc().StorageCreatePackage(1, bucketID, name, storageID, path, 0)
+	pkg, err := ctx.Cmdline.Svc.StorageSvc().StorageCreatePackage(bucketID, name, spaceID, path, 0)
 	if err != nil {
 		return fmt.Errorf("start storage uploading package: %w", err)
 	}
@@ -34,5 +34,5 @@ func StorageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name str
 // init registers the load-package and create-package commands with the command-line parser.
 func init() {
 	// Register the create-package command.
-	commands.MustAdd(StorageCreatePackage, "stg", "pkg", "new")
+	commands.MustAdd(UserSpaceCreatePackage, "stg", "pkg", "new")
 }
@@ -5,6 +5,7 @@ import (
 	"strings"
 	"time"

+	"github.com/samber/lo"
 	"gorm.io/gorm"
 	"gorm.io/gorm/clause"
@@ -487,6 +488,91 @@ func (db *ObjectDB) DeleteInPackage(ctx SQLContext, packageID types.PackageID) e
 	return ctx.Table("Object").Where("PackageID = ?", packageID).Delete(&types.Object{}).Error
 }

+type UpdatingObjectRedundancy struct {
+	ObjectID   types.ObjectID      `json:"objectID"`
+	FileHash   types.FileHash      `json:"fileHash"`
+	Size       int64               `json:"size"`
+	Redundancy types.Redundancy    `json:"redundancy"`
+	PinnedAt   []types.UserSpaceID `json:"pinnedAt"`
+	Blocks     []types.ObjectBlock `json:"blocks"`
+}
+
+func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, updates []UpdatingObjectRedundancy) error {
+	objs := updates
+	nowTime := time.Now()
+
+	objIDs := make([]types.ObjectID, 0, len(objs))
+	for _, obj := range objs {
+		objIDs = append(objIDs, obj.ObjectID)
+	}
+
+	avaiIDs, err := db.Object().BatchTestObjectID(ctx, objIDs)
+	if err != nil {
+		return fmt.Errorf("batch test object id: %w", err)
+	}
+
+	// Filter out objects that no longer exist.
+	// Note: objIDs itself is not filtered, because the logic below still works correctly without filtering.
+	objs = lo.Filter(objs, func(obj UpdatingObjectRedundancy, _ int) bool {
+		return avaiIDs[obj.ObjectID]
+	})
+
+	dummyObjs := make([]types.Object, 0, len(objs))
+	for _, obj := range objs {
+		dummyObjs = append(dummyObjs, types.Object{
+			ObjectID:   obj.ObjectID,
+			FileHash:   obj.FileHash,
+			Size:       obj.Size,
+			Redundancy: obj.Redundancy,
+			CreateTime: nowTime, // Not actually updated; set only because it must not be the zero value.
+			UpdateTime: nowTime,
+		})
+	}
+
+	err = db.Object().BatchUpdateColumns(ctx, dummyObjs, []string{"FileHash", "Size", "Redundancy", "UpdateTime"})
+	if err != nil {
+		return fmt.Errorf("batch update object redundancy: %w", err)
+	}
+
+	// Delete all existing encoded-block records and re-add them.
+	err = db.ObjectBlock().BatchDeleteByObjectID(ctx, objIDs)
+	if err != nil {
+		return fmt.Errorf("batch delete object blocks: %w", err)
+	}
+
+	// Delete the previously pinned objects. The case where FileHash is unchanged is not handled for now.
+	err = db.PinnedObject().BatchDeleteByObjectID(ctx, objIDs)
+	if err != nil {
+		return fmt.Errorf("batch delete pinned object: %w", err)
+	}
+
+	blocks := make([]types.ObjectBlock, 0, len(objs))
+	for _, obj := range objs {
+		blocks = append(blocks, obj.Blocks...)
+	}
+	err = db.ObjectBlock().BatchCreate(ctx, blocks)
+	if err != nil {
+		return fmt.Errorf("batch create object blocks: %w", err)
+	}
+
+	pinneds := make([]types.PinnedObject, 0, len(objs))
+	for _, obj := range objs {
+		for _, p := range obj.PinnedAt {
+			pinneds = append(pinneds, types.PinnedObject{
+				ObjectID:    obj.ObjectID,
+				UserSpaceID: p,
+				CreateTime:  nowTime,
+			})
+		}
+	}
+	err = db.PinnedObject().BatchTryCreate(ctx, pinneds)
+	if err != nil {
+		return fmt.Errorf("batch create pinned objects: %w", err)
+	}
+
+	return nil
+}
+
 func (db *ObjectDB) DeleteByPath(ctx SQLContext, packageID types.PackageID, path string) error {
 	return ctx.Table("Object").Where("PackageID = ? AND Path = ?", packageID, path).Delete(&types.Object{}).Error
 }
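Note: BatchUpdateRedundancy rewrites an object's redundancy metadata in stages: update the Object row, drop and recreate its ObjectBlock rows, then drop and recreate its PinnedObject rows. A sketch of a caller, assuming the SQLContext comes from whatever transaction scope the caller already holds; the helper name applyRedundancy is hypothetical and not part of the patch:

func applyRedundancy(db *ObjectDB, ctx SQLContext, obj types.Object, blocks []types.ObjectBlock, pinnedAt []types.UserSpaceID) error {
	updates := []UpdatingObjectRedundancy{{
		ObjectID:   obj.ObjectID,
		FileHash:   obj.FileHash,
		Size:       obj.Size,
		Redundancy: obj.Redundancy,
		PinnedAt:   pinnedAt,
		Blocks:     blocks,
	}}
	// One call covers the Object, ObjectBlock and PinnedObject tables.
	return db.BatchUpdateRedundancy(ctx, updates)
}

Since blocks and pins are deleted and re-inserted rather than diffed, callers should pass the complete desired state for each object, not a delta.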
@@ -0,0 +1,49 @@
+package db
+
+import (
+	"gitlink.org.cn/cloudream/storage2/client/types"
+)
+
+type UserSpaceDB struct {
+	*DB
+}
+
+func (db *DB) UserSpace() *UserSpaceDB {
+	return &UserSpaceDB{DB: db}
+}
+
+func (db *UserSpaceDB) GetByID(ctx SQLContext, stgID types.UserSpaceID) (types.UserSpace, error) {
+	var stg types.UserSpace
+	err := ctx.Table("UserSpace").First(&stg, stgID).Error
+	return stg, err
+}
+
+func (UserSpaceDB) GetAllIDs(ctx SQLContext) ([]types.UserSpaceID, error) {
+	var stgs []types.UserSpaceID
+	err := ctx.Table("UserSpace").Select("UserSpaceID").Find(&stgs).Error
+	return stgs, err
+}
+
+func (db *UserSpaceDB) BatchGetByID(ctx SQLContext, stgIDs []types.UserSpaceID) ([]types.UserSpace, error) {
+	var stgs []types.UserSpace
+	err := ctx.Table("UserSpace").Find(&stgs, "UserSpaceID IN (?)", stgIDs).Error
+	return stgs, err
+}
+
+func (db *UserSpaceDB) GetAll(ctx SQLContext) ([]types.UserSpace, error) {
+	var stgs []types.UserSpace
+	err := ctx.Table("UserSpace").Find(&stgs).Error
+	return stgs, err
+}
+
+func (db *UserSpaceDB) BatchGetAllUserSpaceIDs(ctx SQLContext, start int, count int) ([]types.UserSpaceID, error) {
+	var ret []types.UserSpaceID
+	// Limit/Offset must be applied before Find so they take effect on this query.
+	err := ctx.Table("UserSpace").Select("UserSpaceID").Limit(count).Offset(start).Find(&ret).Error
+	return ret, err
+}
+
+func (db *UserSpaceDB) GetByName(ctx SQLContext, name string) (types.UserSpace, error) {
+	var stg types.UserSpace
+	err := ctx.Table("UserSpace").Where("Name = ?", name).First(&stg).Error
+	return stg, err
+}
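Note: the new UserSpaceDB mirrors the other DAO types: a thin wrapper over *DB whose methods run gorm queries against the UserSpace table. A pagination sketch built only from the helpers defined above; walkUserSpaces itself is illustrative and not part of the patch, and it relies on BatchGetAllUserSpaceIDs applying Limit/Offset before Find:

func walkUserSpaces(db *DB, ctx SQLContext, pageSize int, visit func(types.UserSpace)) error {
	for start := 0; ; start += pageSize {
		// Fetch one page of IDs, then resolve them to full records.
		ids, err := db.UserSpace().BatchGetAllUserSpaceIDs(ctx, start, pageSize)
		if err != nil {
			return err
		}
		if len(ids) == 0 {
			return nil
		}
		spaces, err := db.UserSpace().BatchGetByID(ctx, ids)
		if err != nil {
			return err
		}
		for _, space := range spaces {
			visit(space)
		}
	}
}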
@@ -2,11 +2,12 @@ package http
 import (
 	"net/http"
+	"time"

 	"github.com/gin-gonic/gin"
 	"gitlink.org.cn/cloudream/common/consts/errorcode"
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
-	"gitlink.org.cn/cloudream/common/sdks/storage/cdsapi"
+	cdsapi "gitlink.org.cn/cloudream/storage2/client/sdk/api"
 )

 type BucketService struct {
@@ -29,7 +30,7 @@ func (s *BucketService) GetByName(ctx *gin.Context) {
 		return
 	}

-	bucket, err := s.svc.BucketSvc().GetBucketByName(req.UserID, req.Name)
+	bucket, err := s.svc.BucketSvc().GetBucketByName(req.Name)
 	if err != nil {
 		log.Warnf("getting bucket by name: %s", err.Error())
 		ctx.JSON(http.StatusOK, FailedError(err))
@@ -51,7 +52,7 @@ func (s *BucketService) Create(ctx *gin.Context) {
 		return
 	}

-	bucket, err := s.svc.BucketSvc().CreateBucket(req.UserID, req.Name)
+	bucket, err := s.svc.BucketSvc().CreateBucket(req.Name, time.Now())
 	if err != nil {
 		log.Warnf("creating bucket: %s", err.Error())
 		ctx.JSON(http.StatusOK, FailedError(err))
@@ -73,7 +74,7 @@ func (s *BucketService) Delete(ctx *gin.Context) {
 		return
 	}

-	if err := s.svc.BucketSvc().DeleteBucket(req.UserID, req.BucketID); err != nil {
+	if err := s.svc.BucketSvc().DeleteBucket(req.BucketID); err != nil {
 		log.Warnf("deleting bucket: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "delete bucket failed"))
 		return
@@ -92,7 +93,7 @@ func (s *BucketService) ListUserBuckets(ctx *gin.Context) {
 		return
 	}

-	buckets, err := s.svc.BucketSvc().GetUserBuckets(req.UserID)
+	buckets, err := s.svc.BucketSvc().GetUserBuckets()
 	if err != nil {
 		log.Warnf("getting user buckets: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get user buckets failed"))
@@ -1,5 +1,6 @@
 package http

+/*
 import (
 	"net/http"
 	"time"
@@ -7,8 +8,8 @@ import (
 	"github.com/gin-gonic/gin"
 	"gitlink.org.cn/cloudream/common/consts/errorcode"
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
-	"gitlink.org.cn/cloudream/common/sdks/storage/cdsapi"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
+	cdsapi "gitlink.org.cn/cloudream/storage2/client/sdk/api"
 )

 type CacheService struct {
@@ -38,7 +39,7 @@ func (s *CacheService) MovePackage(ctx *gin.Context) {
 		return
 	}

-	hubID, taskID, err := s.svc.CacheSvc().StartCacheMovePackage(req.UserID, req.PackageID, req.StorageID)
+	hubID, taskID, err := s.svc.CacheSvc().StartCacheMovePackage(req.PackageID, req.StorageID)
 	if err != nil {
 		log.Warnf("start cache move package: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "cache move package failed"))
@@ -65,3 +66,4 @@ func (s *CacheService) MovePackage(ctx *gin.Context) {
 		}
 	}
 }
+*/
@@ -1,46 +0,0 @@
-package http
-
-import (
-	"net/http"
-
-	"github.com/gin-gonic/gin"
-	"gitlink.org.cn/cloudream/common/consts/errorcode"
-	"gitlink.org.cn/cloudream/common/pkgs/logger"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
-	"gitlink.org.cn/cloudream/common/sdks/storage/cdsapi"
-)
-
-type HubService struct {
-	*Server
-}
-
-func (s *Server) HubSvc() *HubService {
-	return &HubService{
-		Server: s,
-	}
-}
-
-type GetHubsReq struct {
-	HubIDs *[]cdssdk.HubID `form:"hubIDs" binding:"required"`
-}
-type GetHubsResp = cdsapi.HubGetHubsResp
-
-func (s *ObjectService) GetHubs(ctx *gin.Context) {
-	log := logger.WithField("HTTP", "Hub.GetHubs")
-
-	var req GetHubsReq
-	if err := ctx.ShouldBindQuery(&req); err != nil {
-		log.Warnf("binding body: %s", err.Error())
-		ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
-		return
-	}
-
-	hubs, err := s.svc.HubSvc().GetHubs(*req.HubIDs)
-	if err != nil {
-		log.Warnf("getting hubs: %s", err.Error())
-		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get hubs failed"))
-		return
-	}
-
-	ctx.JSON(http.StatusOK, OK(GetHubsResp{Hubs: hubs}))
-}
@@ -12,10 +12,10 @@ import (
 	"github.com/gin-gonic/gin"
 	"gitlink.org.cn/cloudream/common/consts/errorcode"
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
-	"gitlink.org.cn/cloudream/common/sdks/storage/cdsapi"
 	"gitlink.org.cn/cloudream/common/utils/math2"
 	"gitlink.org.cn/cloudream/storage2/client/internal/config"
+	cdsapi "gitlink.org.cn/cloudream/storage2/client/sdk/api"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/downloader"
 )

@@ -59,7 +59,7 @@ func (s *ObjectService) ListByIDs(ctx *gin.Context) {
 		return
 	}

-	objs, err := s.svc.ObjectSvc().GetByIDs(req.UserID, req.ObjectIDs)
+	objs, err := s.svc.ObjectSvc().GetByIDs(req.ObjectIDs)
 	if err != nil {
 		log.Warnf("listing objects: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("listing objects: %v", err)))
@@ -84,7 +84,7 @@ func (s *ObjectService) Upload(ctx *gin.Context) {
 		return
 	}

-	up, err := s.svc.Uploader.BeginUpdate(req.Info.UserID, req.Info.PackageID, req.Info.Affinity, req.Info.LoadTo, req.Info.LoadToPath)
+	up, err := s.svc.Uploader.BeginUpdate(req.Info.PackageID, req.Info.Affinity, req.Info.LoadTo, req.Info.LoadToPath)
 	if err != nil {
 		log.Warnf("begin update: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("begin update: %v", err)))
@@ -149,7 +149,7 @@ func (s *ObjectService) Download(ctx *gin.Context) {
 		len = *req.Length
 	}

-	file, err := s.svc.ObjectSvc().Download(req.UserID, downloader.DownloadReqeust{
+	file, err := s.svc.ObjectSvc().Download(downloader.DownloadReqeust{
 		ObjectID: req.ObjectID,
 		Offset:   off,
 		Length:   len,
@@ -192,7 +192,7 @@ func (s *ObjectService) DownloadByPath(ctx *gin.Context) {
 	}

 	resp, err := s.svc.ObjectSvc().GetByPath(cdsapi.ObjectListByPath{
-		UserID: req.UserID, PackageID: req.PackageID, Path: req.Path,
+		PackageID: req.PackageID, Path: req.Path,
 	})
 	if err != nil {
 		log.Warnf("getting object by path: %s", err.Error())
@@ -212,7 +212,7 @@ func (s *ObjectService) DownloadByPath(ctx *gin.Context) {
 		len = *req.Length
 	}

-	file, err := s.svc.ObjectSvc().Download(req.UserID, downloader.DownloadReqeust{
+	file, err := s.svc.ObjectSvc().Download(downloader.DownloadReqeust{
 		ObjectID: resp.Objects[0].ObjectID,
 		Offset:   off,
 		Length:   len,
@@ -248,7 +248,7 @@ func (s *ObjectService) UpdateInfo(ctx *gin.Context) {
 		return
 	}

-	sucs, err := s.svc.ObjectSvc().UpdateInfo(req.UserID, req.Updatings)
+	sucs, err := s.svc.ObjectSvc().UpdateInfo(req.Updatings)
 	if err != nil {
 		log.Warnf("updating objects: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "update objects failed"))
@@ -269,7 +269,7 @@ func (s *ObjectService) UpdateInfoByPath(ctx *gin.Context) {
 	}

 	resp, err := s.svc.ObjectSvc().GetByPath(cdsapi.ObjectListByPath{
-		UserID: req.UserID, PackageID: req.PackageID, Path: req.Path,
+		PackageID: req.PackageID, Path: req.Path,
 	})
 	if err != nil {
 		log.Warnf("getting object by path: %s", err.Error())
@@ -282,7 +282,7 @@ func (s *ObjectService) UpdateInfoByPath(ctx *gin.Context) {
 		return
 	}

-	sucs, err := s.svc.ObjectSvc().UpdateInfo(req.UserID, []cdsapi.UpdatingObject{{
+	sucs, err := s.svc.ObjectSvc().UpdateInfo([]cdsapi.UpdatingObject{{
 		ObjectID:   resp.Objects[0].ObjectID,
 		UpdateTime: req.UpdateTime,
 	}})
@@ -307,7 +307,7 @@ func (s *ObjectService) Move(ctx *gin.Context) {
 		return
 	}

-	sucs, err := s.svc.ObjectSvc().Move(req.UserID, req.Movings)
+	sucs, err := s.svc.ObjectSvc().Move(req.Movings)
 	if err != nil {
 		log.Warnf("moving objects: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "move objects failed"))
@@ -327,7 +327,7 @@ func (s *ObjectService) Delete(ctx *gin.Context) {
 		return
 	}

-	err := s.svc.ObjectSvc().Delete(req.UserID, req.ObjectIDs)
+	err := s.svc.ObjectSvc().Delete(req.ObjectIDs)
 	if err != nil {
 		log.Warnf("deleting objects: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "delete objects failed"))
@@ -348,7 +348,7 @@ func (s *ObjectService) DeleteByPath(ctx *gin.Context) {
 	}

 	resp, err := s.svc.ObjectSvc().GetByPath(cdsapi.ObjectListByPath{
-		UserID: req.UserID, PackageID: req.PackageID, Path: req.Path,
+		PackageID: req.PackageID, Path: req.Path,
 	})
 	if err != nil {
 		log.Warnf("getting object by path: %s", err.Error())
@@ -360,7 +360,7 @@ func (s *ObjectService) DeleteByPath(ctx *gin.Context) {
 		return
 	}

-	err = s.svc.ObjectSvc().Delete(req.UserID, []cdssdk.ObjectID{resp.Objects[0].ObjectID})
+	err = s.svc.ObjectSvc().Delete([]cdssdk.ObjectID{resp.Objects[0].ObjectID})
 	if err != nil {
 		log.Warnf("deleting objects: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "delete objects failed"))
@@ -380,7 +380,7 @@ func (s *ObjectService) Clone(ctx *gin.Context) {
 		return
 	}

-	objs, err := s.svc.ObjectSvc().Clone(req.UserID, req.Clonings)
+	objs, err := s.svc.ObjectSvc().Clone(req.Clonings)
 	if err != nil {
 		log.Warnf("cloning object: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "clone object failed"))
@@ -400,7 +400,7 @@ func (s *ObjectService) GetPackageObjects(ctx *gin.Context) {
 		return
 	}

-	objs, err := s.svc.ObjectSvc().GetPackageObjects(req.UserID, req.PackageID)
+	objs, err := s.svc.ObjectSvc().GetPackageObjects(req.PackageID)
 	if err != nil {
 		log.Warnf("getting package objects: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package object failed"))
@@ -420,7 +420,7 @@ func (s *ObjectService) NewMultipartUpload(ctx *gin.Context) {
 		return
 	}

-	obj, err := s.svc.ObjectSvc().NewMultipartUploadObject(req.UserID, req.PackageID, req.Path)
+	obj, err := s.svc.ObjectSvc().NewMultipartUploadObject(req.PackageID, req.Path)
 	if err != nil {
 		log.Warnf("new multipart upload object: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "new multipart upload object failed"))
@@ -453,7 +453,7 @@ func (s *ObjectService) UploadPart(ctx *gin.Context) {
 	}
 	defer file.Close()

-	err = s.svc.Uploader.UploadPart(req.Info.UserID, req.Info.ObjectID, req.Info.Index, file)
+	err = s.svc.Uploader.UploadPart(req.Info.ObjectID, req.Info.Index, file)
 	if err != nil {
 		log.Warnf("uploading part: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("upload part: %v", err)))
@@ -473,7 +473,7 @@ func (s *ObjectService) CompleteMultipartUpload(ctx *gin.Context) {
 		return
 	}

-	obj, err := s.svc.ObjectSvc().CompleteMultipartUpload(req.UserID, req.ObjectID, req.Indexes)
+	obj, err := s.svc.ObjectSvc().CompleteMultipartUpload(req.ObjectID, req.Indexes)
 	if err != nil {
 		log.Warnf("completing multipart upload: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("complete multipart upload: %v", err)))
@@ -10,8 +10,8 @@ import (
 	"github.com/gin-gonic/gin"
 	"gitlink.org.cn/cloudream/common/consts/errorcode"
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
-	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
-	"gitlink.org.cn/cloudream/common/sdks/storage/cdsapi"
+	cdsapi "gitlink.org.cn/cloudream/storage2/client/sdk/api"
+	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
 )

 // PackageService handles package-related HTTP requests.
@@ -36,7 +36,7 @@ func (s *PackageService) Get(ctx *gin.Context) {
 		return
 	}

-	pkg, err := s.svc.PackageSvc().Get(req.UserID, req.PackageID)
+	pkg, err := s.svc.PackageSvc().Get(req.PackageID)
 	if err != nil {
 		log.Warnf("getting package: %s", err.Error())
 		ctx.JSON(http.StatusOK, FailedError(err))
@@ -56,7 +56,7 @@ func (s *PackageService) GetByFullName(ctx *gin.Context) {
 		return
 	}

-	pkg, err := s.svc.PackageSvc().GetByFullName(req.UserID, req.BucketName, req.PackageName)
+	pkg, err := s.svc.PackageSvc().GetByFullName(req.BucketName, req.PackageName)
 	if err != nil {
 		log.Warnf("getting package by name: %s", err.Error())
 		ctx.JSON(http.StatusOK, FailedError(err))
@@ -76,7 +76,7 @@ func (s *PackageService) Create(ctx *gin.Context) {
 		return
 	}

-	pkg, err := s.svc.PackageSvc().Create(req.UserID, req.BucketID, req.Name)
+	pkg, err := s.svc.PackageSvc().Create(req.BucketID, req.Name)
 	if err != nil {
 		log.Warnf("creating package: %s", err.Error())
 		ctx.JSON(http.StatusOK, FailedError(err))
@@ -109,7 +109,7 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) {
 		return
 	}

-	up, err := s.svc.Uploader.BeginCreateLoad(req.Info.UserID, req.Info.BucketID, req.Info.Name, req.Info.LoadTo, req.Info.LoadToPath)
+	up, err := s.svc.Uploader.BeginCreateLoad(req.Info.BucketID, req.Info.Name, req.Info.LoadTo, req.Info.LoadToPath)
 	if err != nil {
 		log.Warnf("begin package create load: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("begin package create load: %v", err)))
@@ -168,7 +168,7 @@ func (s *PackageService) Delete(ctx *gin.Context) {
 		return
 	}

-	err := s.svc.PackageSvc().DeletePackage(req.UserID, req.PackageID)
+	err := s.svc.PackageSvc().DeletePackage(req.PackageID)
 	if err != nil {
 		log.Warnf("deleting package: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "delete package failed"))
@@ -188,7 +188,7 @@ func (s *PackageService) Clone(ctx *gin.Context) {
 		return
 	}

-	pkg, err := s.svc.PackageSvc().Clone(req.UserID, req.PackageID, req.BucketID, req.Name)
+	pkg, err := s.svc.PackageSvc().Clone(req.PackageID, req.BucketID, req.Name)
 	if err != nil {
 		log.Warnf("cloning package: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "clone package failed"))
@@ -210,7 +210,7 @@ func (s *PackageService) ListBucketPackages(ctx *gin.Context) {
 		return
 	}

-	pkgs, err := s.svc.PackageSvc().GetBucketPackages(req.UserID, req.BucketID)
+	pkgs, err := s.svc.PackageSvc().GetBucketPackages(req.BucketID)
 	if err != nil {
 		log.Warnf("getting bucket packages: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get bucket packages failed"))
@@ -222,6 +222,7 @@ func (s *PackageService) ListBucketPackages(ctx *gin.Context) {
 	}))
 }

+/*
 // GetCachedStorages handles HTTP requests for fetching a package's cached storage nodes.
 func (s *PackageService) GetCachedStorages(ctx *gin.Context) {
 	log := logger.WithField("HTTP", "Package.GetCachedStorages")
@@ -233,7 +234,7 @@ func (s *PackageService) GetCachedStorages(ctx *gin.Context) {
 		return
 	}

-	resp, err := s.svc.PackageSvc().GetCachedStorages(req.UserID, req.PackageID)
+	resp, err := s.svc.PackageSvc().GetCachedStorages(req.PackageID)
 	if err != nil {
 		log.Warnf("get package cached storages failed: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package cached storages failed"))
@@ -242,3 +243,4 @@ func (s *PackageService) GetCachedStorages(ctx *gin.Context) {
 	ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetCachedStoragesResp{PackageCachingInfo: resp}))
 }
+*/
| @@ -11,9 +11,9 @@ import ( | |||||
| "github.com/gin-gonic/gin" | "github.com/gin-gonic/gin" | ||||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | "gitlink.org.cn/cloudream/common/consts/errorcode" | ||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | "gitlink.org.cn/cloudream/common/pkgs/logger" | ||||
| "gitlink.org.cn/cloudream/common/sdks/storage/cdsapi" | |||||
| "gitlink.org.cn/cloudream/common/utils/math2" | "gitlink.org.cn/cloudream/common/utils/math2" | ||||
| "gitlink.org.cn/cloudream/storage2/client/internal/config" | "gitlink.org.cn/cloudream/storage2/client/internal/config" | ||||
| cdsapi "gitlink.org.cn/cloudream/storage2/client/sdk/api" | |||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader" | "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader" | ||||
| ) | ) | ||||
| @@ -58,7 +58,7 @@ func (s *PresignedService) ObjectDownloadByPath(ctx *gin.Context) { | |||||
| } | } | ||||
| resp, err := s.svc.ObjectSvc().GetByPath(cdsapi.ObjectListByPath{ | resp, err := s.svc.ObjectSvc().GetByPath(cdsapi.ObjectListByPath{ | ||||
| UserID: req.UserID, PackageID: req.PackageID, Path: req.Path, | |||||
| PackageID: req.PackageID, Path: req.Path, | |||||
| }) | }) | ||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("getting object by path: %s", err.Error()) | log.Warnf("getting object by path: %s", err.Error()) | ||||
| @@ -77,7 +77,7 @@ func (s *PresignedService) ObjectDownloadByPath(ctx *gin.Context) { | |||||
| len = *req.Length | len = *req.Length | ||||
| } | } | ||||
| file, err := s.svc.ObjectSvc().Download(req.UserID, downloader.DownloadReqeust{ | |||||
| file, err := s.svc.ObjectSvc().Download(downloader.DownloadReqeust{ | |||||
| ObjectID: resp.Objects[0].ObjectID, | ObjectID: resp.Objects[0].ObjectID, | ||||
| Offset: off, | Offset: off, | ||||
| Length: len, | Length: len, | ||||
| @@ -119,7 +119,7 @@ func (s *PresignedService) ObjectDownload(ctx *gin.Context) { | |||||
| len = *req.Length | len = *req.Length | ||||
| } | } | ||||
| file, err := s.svc.ObjectSvc().Download(req.UserID, downloader.DownloadReqeust{ | |||||
| file, err := s.svc.ObjectSvc().Download(downloader.DownloadReqeust{ | |||||
| ObjectID: req.ObjectID, | ObjectID: req.ObjectID, | ||||
| Offset: off, | Offset: off, | ||||
| Length: len, | Length: len, | ||||
| @@ -155,7 +155,7 @@ func (s *PresignedService) ObjectUpload(ctx *gin.Context) { | |||||
| return | return | ||||
| } | } | ||||
| up, err := s.svc.Uploader.BeginUpdate(req.UserID, req.PackageID, req.Affinity, req.LoadTo, req.LoadToPath) | |||||
| up, err := s.svc.Uploader.BeginUpdate(req.PackageID, req.Affinity, req.LoadTo, req.LoadToPath) | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("begin update: %s", err.Error()) | log.Warnf("begin update: %s", err.Error()) | ||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("begin update: %v", err))) | ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("begin update: %v", err))) | ||||
| @@ -192,7 +192,7 @@ func (s *PresignedService) ObjectNewMultipartUpload(ctx *gin.Context) { | |||||
| return | return | ||||
| } | } | ||||
| obj, err := s.svc.ObjectSvc().NewMultipartUploadObject(req.UserID, req.PackageID, req.Path) | |||||
| obj, err := s.svc.ObjectSvc().NewMultipartUploadObject(req.PackageID, req.Path) | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("new multipart upload: %s", err.Error()) | log.Warnf("new multipart upload: %s", err.Error()) | ||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("new multipart upload: %v", err))) | ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("new multipart upload: %v", err))) | ||||
| @@ -212,7 +212,7 @@ func (s *PresignedService) ObjectUploadPart(ctx *gin.Context) { | |||||
| return | return | ||||
| } | } | ||||
| err := s.svc.Uploader.UploadPart(req.UserID, req.ObjectID, req.Index, ctx.Request.Body) | |||||
| err := s.svc.Uploader.UploadPart(req.ObjectID, req.Index, ctx.Request.Body) | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("uploading part: %s", err.Error()) | log.Warnf("uploading part: %s", err.Error()) | ||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("upload part: %v", err))) | ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("upload part: %v", err))) | ||||
| @@ -232,7 +232,7 @@ func (s *PresignedService) ObjectCompleteMultipartUpload(ctx *gin.Context) { | |||||
| return | return | ||||
| } | } | ||||
| obj, err := s.svc.ObjectSvc().CompleteMultipartUpload(req.UserID, req.ObjectID, req.Indexes) | |||||
| obj, err := s.svc.ObjectSvc().CompleteMultipartUpload(req.ObjectID, req.Indexes) | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("completing multipart upload: %s", err.Error()) | log.Warnf("completing multipart upload: %s", err.Error()) | ||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("complete multipart upload: %v", err))) | ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("complete multipart upload: %v", err))) | ||||
| @@ -3,8 +3,8 @@ package http | |||||
| import ( | import ( | ||||
| "github.com/gin-gonic/gin" | "github.com/gin-gonic/gin" | ||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | "gitlink.org.cn/cloudream/common/pkgs/logger" | ||||
| "gitlink.org.cn/cloudream/common/sdks/storage/cdsapi" | |||||
| "gitlink.org.cn/cloudream/storage2/client/internal/services" | "gitlink.org.cn/cloudream/storage2/client/internal/services" | ||||
| cdsapi "gitlink.org.cn/cloudream/storage2/client/sdk/api" | |||||
| ) | ) | ||||
| type Server struct { | type Server struct { | ||||
| @@ -43,7 +43,7 @@ func (s *Server) Serve() error { | |||||
| func (s *Server) initRouters() { | func (s *Server) initRouters() { | ||||
| rt := s.engine.Use() | rt := s.engine.Use() | ||||
| initTemp(rt, s) | |||||
| // initTemp(rt, s) | |||||
| s.routeV1(s.engine, rt) | s.routeV1(s.engine, rt) | ||||
| @@ -67,13 +67,13 @@ func (s *Server) initRouters() { | |||||
| rt.POST(cdsapi.PackageDeletePath, s.Package().Delete) | rt.POST(cdsapi.PackageDeletePath, s.Package().Delete) | ||||
| rt.POST(cdsapi.PackageClonePath, s.Package().Clone) | rt.POST(cdsapi.PackageClonePath, s.Package().Clone) | ||||
| rt.GET(cdsapi.PackageListBucketPackagesPath, s.Package().ListBucketPackages) | rt.GET(cdsapi.PackageListBucketPackagesPath, s.Package().ListBucketPackages) | ||||
| rt.GET(cdsapi.PackageGetCachedStoragesPath, s.Package().GetCachedStorages) | |||||
| // rt.GET(cdsapi.PackageGetCachedStoragesPath, s.Package().GetCachedStorages) | |||||
| rt.POST(cdsapi.StorageLoadPackagePath, s.Storage().LoadPackage) | |||||
| rt.POST(cdsapi.StorageCreatePackagePath, s.Storage().CreatePackage) | |||||
| rt.GET(cdsapi.StorageGetPath, s.Storage().Get) | |||||
| rt.POST(cdsapi.UserSpaceLoadPackagePath, s.UserSpace().LoadPackage) | |||||
| rt.POST(cdsapi.UserSpaceCreatePackagePath, s.UserSpace().CreatePackage) | |||||
| rt.GET(cdsapi.UserSpaceGetPath, s.UserSpace().Get) | |||||
| rt.POST(cdsapi.CacheMovePackagePath, s.Cache().MovePackage) | |||||
| // rt.POST(cdsapi.CacheMovePackagePath, s.Cache().MovePackage) | |||||
| rt.GET(cdsapi.BucketGetByNamePath, s.Bucket().GetByName) | rt.GET(cdsapi.BucketGetByNamePath, s.Bucket().GetByName) | ||||
| rt.POST(cdsapi.BucketCreatePath, s.Bucket().Create) | rt.POST(cdsapi.BucketCreatePath, s.Bucket().Create) | ||||
| @@ -104,22 +104,19 @@ func (s *Server) routeV1(eg *gin.Engine, rt gin.IRoutes) { | |||||
| v1.POST(cdsapi.PackageDeletePath, s.awsAuth.Auth, s.Package().Delete) | v1.POST(cdsapi.PackageDeletePath, s.awsAuth.Auth, s.Package().Delete) | ||||
| v1.POST(cdsapi.PackageClonePath, s.awsAuth.Auth, s.Package().Clone) | v1.POST(cdsapi.PackageClonePath, s.awsAuth.Auth, s.Package().Clone) | ||||
| v1.GET(cdsapi.PackageListBucketPackagesPath, s.awsAuth.Auth, s.Package().ListBucketPackages) | v1.GET(cdsapi.PackageListBucketPackagesPath, s.awsAuth.Auth, s.Package().ListBucketPackages) | ||||
| v1.GET(cdsapi.PackageGetCachedStoragesPath, s.awsAuth.Auth, s.Package().GetCachedStorages) | |||||
| // v1.GET(cdsapi.PackageGetCachedStoragesPath, s.awsAuth.Auth, s.Package().GetCachedStorages) | |||||
| v1.POST(cdsapi.StorageLoadPackagePath, s.awsAuth.Auth, s.Storage().LoadPackage) | |||||
| v1.POST(cdsapi.StorageCreatePackagePath, s.awsAuth.Auth, s.Storage().CreatePackage) | |||||
| v1.GET(cdsapi.StorageGetPath, s.awsAuth.Auth, s.Storage().Get) | |||||
| v1.POST(cdsapi.UserSpaceLoadPackagePath, s.awsAuth.Auth, s.UserSpace().LoadPackage) | |||||
| v1.POST(cdsapi.UserSpaceCreatePackagePath, s.awsAuth.Auth, s.UserSpace().CreatePackage) | |||||
| v1.GET(cdsapi.UserSpaceGetPath, s.awsAuth.Auth, s.UserSpace().Get) | |||||
| v1.POST(cdsapi.CacheMovePackagePath, s.awsAuth.Auth, s.Cache().MovePackage) | |||||
| // v1.POST(cdsapi.CacheMovePackagePath, s.awsAuth.Auth, s.Cache().MovePackage) | |||||
| v1.GET(cdsapi.BucketGetByNamePath, s.awsAuth.Auth, s.Bucket().GetByName) | v1.GET(cdsapi.BucketGetByNamePath, s.awsAuth.Auth, s.Bucket().GetByName) | ||||
| v1.POST(cdsapi.BucketCreatePath, s.awsAuth.Auth, s.Bucket().Create) | v1.POST(cdsapi.BucketCreatePath, s.awsAuth.Auth, s.Bucket().Create) | ||||
| v1.POST(cdsapi.BucketDeletePath, s.awsAuth.Auth, s.Bucket().Delete) | v1.POST(cdsapi.BucketDeletePath, s.awsAuth.Auth, s.Bucket().Delete) | ||||
| v1.GET(cdsapi.BucketListUserBucketsPath, s.awsAuth.Auth, s.Bucket().ListUserBuckets) | v1.GET(cdsapi.BucketListUserBucketsPath, s.awsAuth.Auth, s.Bucket().ListUserBuckets) | ||||
| rt.POST(cdsapi.UserCreatePath, s.User().Create) | |||||
| rt.POST(cdsapi.UserDeletePath, s.User().Delete) | |||||
| rt.POST(cdsapi.ObjectNewMultipartUploadPath, s.Object().NewMultipartUpload) | rt.POST(cdsapi.ObjectNewMultipartUploadPath, s.Object().NewMultipartUpload) | ||||
| rt.POST(cdsapi.ObjectUploadPartPath, s.Object().UploadPart) | rt.POST(cdsapi.ObjectUploadPartPath, s.Object().UploadPart) | ||||
| rt.POST(cdsapi.ObjectCompleteMultipartUploadPath, s.Object().CompleteMultipartUpload) | rt.POST(cdsapi.ObjectCompleteMultipartUploadPath, s.Object().CompleteMultipartUpload) | ||||
| @@ -7,80 +7,80 @@ import ( | |||||
| "github.com/gin-gonic/gin" | "github.com/gin-gonic/gin" | ||||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | "gitlink.org.cn/cloudream/common/consts/errorcode" | ||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | "gitlink.org.cn/cloudream/common/pkgs/logger" | ||||
| "gitlink.org.cn/cloudream/common/sdks/storage/cdsapi" | |||||
| cdsapi "gitlink.org.cn/cloudream/storage2/client/sdk/api" | |||||
| ) | ) | ||||
| type StorageService struct { | |||||
| type UserSpaceService struct { | |||||
| *Server | *Server | ||||
| } | } | ||||
| func (s *Server) Storage() *StorageService { | |||||
| return &StorageService{ | |||||
| func (s *Server) UserSpace() *UserSpaceService { | |||||
| return &UserSpaceService{ | |||||
| Server: s, | Server: s, | ||||
| } | } | ||||
| } | } | ||||
| func (s *StorageService) LoadPackage(ctx *gin.Context) { | |||||
| log := logger.WithField("HTTP", "Storage.LoadPackage") | |||||
| func (s *UserSpaceService) LoadPackage(ctx *gin.Context) { | |||||
| log := logger.WithField("HTTP", "UserSpace.LoadPackage") | |||||
| var req cdsapi.StorageLoadPackageReq | |||||
| var req cdsapi.UserSpaceLoadPackageReq | |||||
| if err := ctx.ShouldBindJSON(&req); err != nil { | if err := ctx.ShouldBindJSON(&req); err != nil { | ||||
| log.Warnf("binding body: %s", err.Error()) | log.Warnf("binding body: %s", err.Error()) | ||||
| ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | ||||
| return | return | ||||
| } | } | ||||
| err := s.svc.StorageSvc().LoadPackage(req.UserID, req.PackageID, req.StorageID, req.RootPath) | |||||
| err := s.svc.UserSpaceSvc().LoadPackage(req.PackageID, req.UserSpaceID, req.RootPath) | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("loading package: %s", err.Error()) | log.Warnf("loading package: %s", err.Error()) | ||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "loading package failed")) | ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "loading package failed")) | ||||
| return | return | ||||
| } | } | ||||
| ctx.JSON(http.StatusOK, OK(cdsapi.StorageLoadPackageResp{})) | |||||
| ctx.JSON(http.StatusOK, OK(cdsapi.UserSpaceLoadPackageResp{})) | |||||
| } | } | ||||
| func (s *StorageService) CreatePackage(ctx *gin.Context) { | |||||
| log := logger.WithField("HTTP", "Storage.CreatePackage") | |||||
| func (s *UserSpaceService) CreatePackage(ctx *gin.Context) { | |||||
| log := logger.WithField("HTTP", "UserSpace.CreatePackage") | |||||
| var req cdsapi.StorageCreatePackageReq | |||||
| var req cdsapi.UserSpaceCreatePackageReq | |||||
| if err := ctx.ShouldBindJSON(&req); err != nil { | if err := ctx.ShouldBindJSON(&req); err != nil { | ||||
| log.Warnf("binding body: %s", err.Error()) | log.Warnf("binding body: %s", err.Error()) | ||||
| ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | ||||
| return | return | ||||
| } | } | ||||
| pkg, err := s.svc.StorageSvc().StorageCreatePackage( | |||||
| req.UserID, req.BucketID, req.Name, req.StorageID, req.Path, req.StorageAffinity) | |||||
| pkg, err := s.svc.UserSpaceSvc().UserSpaceCreatePackage( | |||||
| req.BucketID, req.Name, req.UserSpaceID, req.Path, req.SpaceAffinity) | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("storage create package: %s", err.Error()) | |||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("storage create package: %v", err))) | |||||
| log.Warnf("userspace create package: %s", err.Error()) | |||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("userspace create package: %v", err))) | |||||
| return | return | ||||
| } | } | ||||
| ctx.JSON(http.StatusOK, OK(cdsapi.StorageCreatePackageResp{ | |||||
| ctx.JSON(http.StatusOK, OK(cdsapi.UserSpaceCreatePackageResp{ | |||||
| Package: pkg, | Package: pkg, | ||||
| })) | })) | ||||
| } | } | ||||
| func (s *StorageService) Get(ctx *gin.Context) { | |||||
| log := logger.WithField("HTTP", "Storage.Get") | |||||
| func (s *UserSpaceService) Get(ctx *gin.Context) { | |||||
| log := logger.WithField("HTTP", "UserSpace.Get") | |||||
| var req cdsapi.StorageGet | |||||
| var req cdsapi.UserSpaceGet | |||||
| if err := ctx.ShouldBindQuery(&req); err != nil { | if err := ctx.ShouldBindQuery(&req); err != nil { | ||||
| log.Warnf("binding query: %s", err.Error()) | log.Warnf("binding query: %s", err.Error()) | ||||
| ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | ||||
| return | return | ||||
| } | } | ||||
| info, err := s.svc.StorageSvc().Get(req.UserID, req.StorageID) | |||||
| info, err := s.svc.UserSpaceSvc().Get(req.UserSpaceID) | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("getting info: %s", err.Error()) | log.Warnf("getting info: %s", err.Error()) | ||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get storage inf failed")) | |||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get userspace inf failed")) | |||||
| return | return | ||||
| } | } | ||||
| ctx.JSON(http.StatusOK, OK(cdsapi.StorageGetResp{ | |||||
| Storage: *info, | |||||
| ctx.JSON(http.StatusOK, OK(cdsapi.UserSpaceGetResp{ | |||||
| UserSpace: *info, | |||||
| })) | })) | ||||
| } | } | ||||
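Editor's note: the Storage handlers above are renamed to UserSpace end to end — the service struct, the log fields, the request/response types, and the ID field all switch from Storage to UserSpace, and the UserID parameter disappears from the service calls. A minimal hedged sketch of how a Go caller might build the renamed load request; the three field names are the ones the handler reads above, while the concrete ID types and everything else are assumptions:

```go
// Hedged sketch only: constructs the request bound by UserSpaceService.LoadPackage.
// PackageID, UserSpaceID and RootPath are read by the handler in this diff; the
// ID types (types.PackageID / types.UserSpaceID) are assumed from the rest of the change.
func buildLoadPackageReq(pkgID types.PackageID, spaceID types.UserSpaceID, root string) cdsapi.UserSpaceLoadPackageReq {
	return cdsapi.UserSpaceLoadPackageReq{
		PackageID:   pkgID,   // package to load
		UserSpaceID: spaceID, // replaces the old StorageID
		RootPath:    root,    // destination path inside the user space
	}
}
```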
| @@ -1,5 +1,6 @@ | |||||
| package http | package http | ||||
| /* | |||||
| import ( | import ( | ||||
| "net/http" | "net/http" | ||||
| @@ -7,7 +8,7 @@ import ( | |||||
| "github.com/samber/lo" | "github.com/samber/lo" | ||||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | "gitlink.org.cn/cloudream/common/consts/errorcode" | ||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | "gitlink.org.cn/cloudream/common/pkgs/logger" | ||||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||||
| "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| ) | ) | ||||
| type TempService struct { | type TempService struct { | ||||
| @@ -24,9 +25,9 @@ type TempListDetailsResp struct { | |||||
| Buckets []BucketDetail `json:"buckets"` | Buckets []BucketDetail `json:"buckets"` | ||||
| } | } | ||||
| type BucketDetail struct { | type BucketDetail struct { | ||||
| BucketID cdssdk.BucketID `json:"bucketID"` | |||||
| Name string `json:"name"` | |||||
| ObjectCount int `json:"objectCount"` | |||||
| BucketID types.BucketID `json:"bucketID"` | |||||
| Name string `json:"name"` | |||||
| ObjectCount int `json:"objectCount"` | |||||
| } | } | ||||
| func (s *TempService) ListDetails(ctx *gin.Context) { | func (s *TempService) ListDetails(ctx *gin.Context) { | ||||
| @@ -58,10 +59,10 @@ func (s *TempService) ListDetails(ctx *gin.Context) { | |||||
| } | } | ||||
| type TempGetObjects struct { | type TempGetObjects struct { | ||||
| BucketID cdssdk.BucketID `form:"bucketID"` | |||||
| BucketID types.BucketID `form:"bucketID"` | |||||
| } | } | ||||
| type BucketGetObjectsResp struct { | type BucketGetObjectsResp struct { | ||||
| Objects []cdssdk.Object `json:"objects"` | |||||
| Objects []types.Object `json:"objects"` | |||||
| } | } | ||||
| func (s *TempService) GetObjects(ctx *gin.Context) { | func (s *TempService) GetObjects(ctx *gin.Context) { | ||||
| @@ -87,17 +88,17 @@ func (s *TempService) GetObjects(ctx *gin.Context) { | |||||
| } | } | ||||
| type TempGetObjectDetail struct { | type TempGetObjectDetail struct { | ||||
| ObjectID cdssdk.ObjectID `form:"objectID"` | |||||
| ObjectID types.ObjectID `form:"objectID"` | |||||
| } | } | ||||
| type TempGetObjectDetailResp struct { | type TempGetObjectDetailResp struct { | ||||
| Blocks []ObjectBlockDetail `json:"blocks"` | Blocks []ObjectBlockDetail `json:"blocks"` | ||||
| } | } | ||||
| type ObjectBlockDetail struct { | type ObjectBlockDetail struct { | ||||
| ObjectID cdssdk.ObjectID `json:"objectID"` | |||||
| Type string `json:"type"` | |||||
| FileHash cdssdk.FileHash `json:"fileHash"` | |||||
| LocationType string `json:"locationType"` | |||||
| LocationName string `json:"locationName"` | |||||
| ObjectID types.ObjectID `json:"objectID"` | |||||
| Type string `json:"type"` | |||||
| FileHash types.FileHash `json:"fileHash"` | |||||
| LocationType string `json:"locationType"` | |||||
| LocationName string `json:"locationName"` | |||||
| } | } | ||||
| func (s *TempService) GetObjectDetail(ctx *gin.Context) { | func (s *TempService) GetObjectDetail(ctx *gin.Context) { | ||||
| @@ -122,70 +123,70 @@ func (s *TempService) GetObjectDetail(ctx *gin.Context) { | |||||
| return | return | ||||
| } | } | ||||
| var allStgIDs []cdssdk.StorageID | |||||
| allStgIDs = append(allStgIDs, details.PinnedAt...) | |||||
| var allSpaceIDs []types.UserSpaceID | |||||
| allSpaceIDs = append(allSpaceIDs, details.PinnedAt...) | |||||
| for _, b := range details.Blocks { | for _, b := range details.Blocks { | ||||
| allStgIDs = append(allStgIDs, b.StorageID) | |||||
| allSpaceIDs = append(allSpaceIDs, b.UserSpaceID) | |||||
| } | } | ||||
| allStgIDs = lo.Uniq(allStgIDs) | |||||
| allSpaceIDs = lo.Uniq(allSpaceIDs) | |||||
| getStgs, err := s.svc.StorageSvc().GetDetails(allStgIDs) | |||||
| getStgs, err := s.svc.StorageSvc().GetDetails(allSpaceIDs) | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("getting nodes: %s", err.Error()) | log.Warnf("getting nodes: %s", err.Error()) | ||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get nodes failed")) | ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get nodes failed")) | ||||
| return | return | ||||
| } | } | ||||
| allStgs := make(map[cdssdk.StorageID]cdssdk.Storage) | |||||
| allStgs := make(map[types.UserSpaceID]types.UserSpace) | |||||
| for _, n := range getStgs { | for _, n := range getStgs { | ||||
| if n != nil { | if n != nil { | ||||
| allStgs[n.Storage.StorageID] = n.Storage | |||||
| allStgs[n.Storage.UserSpaceID] = n.UserSpace | |||||
| } | } | ||||
| } | } | ||||
| var blocks []ObjectBlockDetail | var blocks []ObjectBlockDetail | ||||
| for _, stgID := range details.PinnedAt { | |||||
| for _, spaceID := range details.PinnedAt { | |||||
| blocks = append(blocks, ObjectBlockDetail{ | blocks = append(blocks, ObjectBlockDetail{ | ||||
| Type: "Rep", | Type: "Rep", | ||||
| FileHash: details.Object.FileHash, | FileHash: details.Object.FileHash, | ||||
| LocationType: "Agent", | LocationType: "Agent", | ||||
| LocationName: allStgs[stgID].Name, | |||||
| LocationName: allStgs[spaceID].Name, | |||||
| }) | }) | ||||
| } | } | ||||
| switch details.Object.Redundancy.(type) { | switch details.Object.Redundancy.(type) { | ||||
| case *cdssdk.NoneRedundancy: | |||||
| case *types.NoneRedundancy: | |||||
| for _, blk := range details.Blocks { | for _, blk := range details.Blocks { | ||||
| if !lo.Contains(details.PinnedAt, blk.StorageID) { | |||||
| if !lo.Contains(details.PinnedAt, blk.UserSpaceID) { | |||||
| blocks = append(blocks, ObjectBlockDetail{ | blocks = append(blocks, ObjectBlockDetail{ | ||||
| Type: "Rep", | Type: "Rep", | ||||
| FileHash: blk.FileHash, | FileHash: blk.FileHash, | ||||
| LocationType: "Agent", | LocationType: "Agent", | ||||
| LocationName: allStgs[blk.StorageID].Name, | |||||
| LocationName: allStgs[blk.UserSpaceID].Name, | |||||
| }) | }) | ||||
| } | } | ||||
| } | } | ||||
| case *cdssdk.RepRedundancy: | |||||
| case *types.RepRedundancy: | |||||
| for _, blk := range details.Blocks { | for _, blk := range details.Blocks { | ||||
| if !lo.Contains(details.PinnedAt, blk.StorageID) { | |||||
| if !lo.Contains(details.PinnedAt, blk.UserSpaceID) { | |||||
| blocks = append(blocks, ObjectBlockDetail{ | blocks = append(blocks, ObjectBlockDetail{ | ||||
| Type: "Rep", | Type: "Rep", | ||||
| FileHash: blk.FileHash, | FileHash: blk.FileHash, | ||||
| LocationType: "Agent", | LocationType: "Agent", | ||||
| LocationName: allStgs[blk.StorageID].Name, | |||||
| LocationName: allStgs[blk.UserSpaceID].Name, | |||||
| }) | }) | ||||
| } | } | ||||
| } | } | ||||
| case *cdssdk.ECRedundancy: | |||||
| case *types.ECRedundancy: | |||||
| for _, blk := range details.Blocks { | for _, blk := range details.Blocks { | ||||
| blocks = append(blocks, ObjectBlockDetail{ | blocks = append(blocks, ObjectBlockDetail{ | ||||
| Type: "Block", | Type: "Block", | ||||
| FileHash: blk.FileHash, | FileHash: blk.FileHash, | ||||
| LocationType: "Agent", | LocationType: "Agent", | ||||
| LocationName: allStgs[blk.StorageID].Name, | |||||
| LocationName: allStgs[blk.UserSpaceID].Name, | |||||
| }) | }) | ||||
| } | } | ||||
| } | } | ||||
| @@ -195,13 +196,13 @@ func (s *TempService) GetObjectDetail(ctx *gin.Context) { | |||||
| })) | })) | ||||
| } | } | ||||
| func (s *TempService) getBucketObjects(bktID cdssdk.BucketID) ([]cdssdk.Object, error) { | |||||
| func (s *TempService) getBucketObjects(bktID types.BucketID) ([]types.Object, error) { | |||||
| pkgs, err := s.svc.PackageSvc().GetBucketPackages(1, bktID) | pkgs, err := s.svc.PackageSvc().GetBucketPackages(1, bktID) | ||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| var allObjs []cdssdk.Object | |||||
| var allObjs []types.Object | |||||
| for _, pkg := range pkgs { | for _, pkg := range pkgs { | ||||
| objs, err := s.svc.ObjectSvc().GetPackageObjects(1, pkg.PackageID) | objs, err := s.svc.ObjectSvc().GetPackageObjects(1, pkg.PackageID) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -221,8 +222,8 @@ type TempGetDatabaseAllResp struct { | |||||
| Blocks []ObjectBlockDetail `json:"blocks"` | Blocks []ObjectBlockDetail `json:"blocks"` | ||||
| } | } | ||||
| type BucketObject struct { | type BucketObject struct { | ||||
| cdssdk.Object | |||||
| BucketID cdssdk.BucketID `json:"bucketID"` | |||||
| types.Object | |||||
| BucketID types.BucketID `json:"bucketID"` | |||||
| } | } | ||||
| func (s *TempService) GetDatabaseAll(ctx *gin.Context) { | func (s *TempService) GetDatabaseAll(ctx *gin.Context) { | ||||
| @@ -235,28 +236,28 @@ func (s *TempService) GetDatabaseAll(ctx *gin.Context) { | |||||
| return | return | ||||
| } | } | ||||
| var allStgIDs []cdssdk.StorageID | |||||
| var allSpaceIDs []types.UserSpaceID | |||||
| for _, obj := range db.Objects { | for _, obj := range db.Objects { | ||||
| allStgIDs = append(allStgIDs, obj.PinnedAt...) | |||||
| allSpaceIDs = append(allSpaceIDs, obj.PinnedAt...) | |||||
| for _, blk := range obj.Blocks { | for _, blk := range obj.Blocks { | ||||
| allStgIDs = append(allStgIDs, blk.StorageID) | |||||
| allSpaceIDs = append(allSpaceIDs, blk.UserSpaceID) | |||||
| } | } | ||||
| } | } | ||||
| getStgs, err := s.svc.StorageSvc().GetDetails(allStgIDs) | |||||
| getStgs, err := s.svc.StorageSvc().GetDetails(allSpaceIDs) | |||||
| if err != nil { | if err != nil { | ||||
| log.Warnf("getting nodes: %s", err.Error()) | log.Warnf("getting nodes: %s", err.Error()) | ||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get nodes failed")) | ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get nodes failed")) | ||||
| return | return | ||||
| } | } | ||||
| allStgs := make(map[cdssdk.StorageID]cdssdk.Storage) | |||||
| allStgs := make(map[types.UserSpaceID]types.Storage) | |||||
| for _, n := range getStgs { | for _, n := range getStgs { | ||||
| if n != nil { | if n != nil { | ||||
| allStgs[n.Storage.StorageID] = n.Storage | |||||
| allStgs[n.Storage.UserSpaceID] = n.Storage | |||||
| } | } | ||||
| } | } | ||||
| bkts := make(map[cdssdk.BucketID]*BucketDetail) | |||||
| bkts := make(map[types.BucketID]*BucketDetail) | |||||
| for _, bkt := range db.Buckets { | for _, bkt := range db.Buckets { | ||||
| bkts[bkt.BucketID] = &BucketDetail{ | bkts[bkt.BucketID] = &BucketDetail{ | ||||
| BucketID: bkt.BucketID, | BucketID: bkt.BucketID, | ||||
| @@ -266,14 +267,14 @@ func (s *TempService) GetDatabaseAll(ctx *gin.Context) { | |||||
| } | } | ||||
| type PackageDetail struct { | type PackageDetail struct { | ||||
| Package cdssdk.Package | |||||
| // Loaded []cdssdk.Node | |||||
| Package types.Package | |||||
| // Loaded []types.Node | |||||
| } | } | ||||
| pkgs := make(map[cdssdk.PackageID]*PackageDetail) | |||||
| pkgs := make(map[types.PackageID]*PackageDetail) | |||||
| for _, pkg := range db.Packages { | for _, pkg := range db.Packages { | ||||
| p := PackageDetail{ | p := PackageDetail{ | ||||
| Package: pkg, | Package: pkg, | ||||
| // Loaded: make([]cdssdk.Node, 0), | |||||
| // Loaded: make([]types.Node, 0), | |||||
| } | } | ||||
| // loaded, err := s.svc.PackageSvc().GetLoadedNodes(1, pkg.PackageID) | // loaded, err := s.svc.PackageSvc().GetLoadedNodes(1, pkg.PackageID) | ||||
| @@ -314,39 +315,39 @@ func (s *TempService) GetDatabaseAll(ctx *gin.Context) { | |||||
| } | } | ||||
| switch obj.Object.Redundancy.(type) { | switch obj.Object.Redundancy.(type) { | ||||
| case *cdssdk.NoneRedundancy: | |||||
| case *types.NoneRedundancy: | |||||
| for _, blk := range obj.Blocks { | for _, blk := range obj.Blocks { | ||||
| if !lo.Contains(obj.PinnedAt, blk.StorageID) { | |||||
| if !lo.Contains(obj.PinnedAt, blk.UserSpaceID) { | |||||
| blocks = append(blocks, ObjectBlockDetail{ | blocks = append(blocks, ObjectBlockDetail{ | ||||
| ObjectID: obj.Object.ObjectID, | ObjectID: obj.Object.ObjectID, | ||||
| Type: "Rep", | Type: "Rep", | ||||
| FileHash: blk.FileHash, | FileHash: blk.FileHash, | ||||
| LocationType: "Agent", | LocationType: "Agent", | ||||
| LocationName: allStgs[blk.StorageID].Name, | |||||
| LocationName: allStgs[blk.UserSpaceID].Name, | |||||
| }) | }) | ||||
| } | } | ||||
| } | } | ||||
| case *cdssdk.RepRedundancy: | |||||
| case *types.RepRedundancy: | |||||
| for _, blk := range obj.Blocks { | for _, blk := range obj.Blocks { | ||||
| if !lo.Contains(obj.PinnedAt, blk.StorageID) { | |||||
| if !lo.Contains(obj.PinnedAt, blk.UserSpaceID) { | |||||
| blocks = append(blocks, ObjectBlockDetail{ | blocks = append(blocks, ObjectBlockDetail{ | ||||
| ObjectID: obj.Object.ObjectID, | ObjectID: obj.Object.ObjectID, | ||||
| Type: "Rep", | Type: "Rep", | ||||
| FileHash: blk.FileHash, | FileHash: blk.FileHash, | ||||
| LocationType: "Agent", | LocationType: "Agent", | ||||
| LocationName: allStgs[blk.StorageID].Name, | |||||
| LocationName: allStgs[blk.UserSpaceID].Name, | |||||
| }) | }) | ||||
| } | } | ||||
| } | } | ||||
| case *cdssdk.ECRedundancy: | |||||
| case *types.ECRedundancy: | |||||
| for _, blk := range obj.Blocks { | for _, blk := range obj.Blocks { | ||||
| blocks = append(blocks, ObjectBlockDetail{ | blocks = append(blocks, ObjectBlockDetail{ | ||||
| ObjectID: obj.Object.ObjectID, | ObjectID: obj.Object.ObjectID, | ||||
| Type: "Block", | Type: "Block", | ||||
| FileHash: blk.FileHash, | FileHash: blk.FileHash, | ||||
| LocationType: "Agent", | LocationType: "Agent", | ||||
| LocationName: allStgs[blk.StorageID].Name, | |||||
| LocationName: allStgs[blk.UserSpaceID].Name, | |||||
| }) | }) | ||||
| } | } | ||||
| } | } | ||||
| @@ -383,3 +384,4 @@ func auth(ctx *gin.Context) { | |||||
| ctx.AbortWithStatus(http.StatusUnauthorized) | ctx.AbortWithStatus(http.StatusUnauthorized) | ||||
| } | } | ||||
| } | } | ||||
| */ | |||||
| @@ -1,57 +0,0 @@ | |||||
| package http | |||||
| import ( | |||||
| "net/http" | |||||
| "github.com/gin-gonic/gin" | |||||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||||
| "gitlink.org.cn/cloudream/common/sdks/storage/cdsapi" | |||||
| ) | |||||
| type UserService struct { | |||||
| *Server | |||||
| } | |||||
| func (s *Server) User() *UserService { | |||||
| return &UserService{ | |||||
| Server: s, | |||||
| } | |||||
| } | |||||
| func (s *UserService) Create(ctx *gin.Context) { | |||||
| log := logger.WithField("HTTP", "User.Create") | |||||
| var req cdsapi.UserCreate | |||||
| if err := ctx.ShouldBindJSON(&req); err != nil { | |||||
| log.Warnf("binding body: %s", err.Error()) | |||||
| ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | |||||
| return | |||||
| } | |||||
| user, err := s.svc.UserSvc().Create(req.Name) | |||||
| if err != nil { | |||||
| log.Warnf("create user: %s", err.Error()) | |||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, err.Error())) | |||||
| return | |||||
| } | |||||
| ctx.JSON(http.StatusOK, OK(cdsapi.UserCreateResp{User: user})) | |||||
| } | |||||
| func (s *UserService) Delete(ctx *gin.Context) { | |||||
| log := logger.WithField("HTTP", "User.Delete") | |||||
| var req cdsapi.UserDelete | |||||
| if err := ctx.ShouldBindJSON(&req); err != nil { | |||||
| log.Warnf("binding body: %s", err.Error()) | |||||
| ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | |||||
| return | |||||
| } | |||||
| if err := s.svc.UserSvc().Delete(req.UserID); err != nil { | |||||
| log.Warnf("delete user: %s", err.Error()) | |||||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, err.Error())) | |||||
| return | |||||
| } | |||||
| ctx.JSON(http.StatusOK, OK(nil)) | |||||
| } | |||||
| @@ -1,20 +0,0 @@ | |||||
| // services 包提供了与代理服务相关的功能。 | |||||
| package services | |||||
| type AgentService struct { | |||||
| *Service // Service 是嵌入的基服务类型,为AgentService提供基本功能。 | |||||
| } | |||||
| // AgentSvc 是Service类型的一个方法,用于返回一个AgentService的实例。 | |||||
| // 该方法允许通过Service实例来访问或操作AgentService相关功能。 | |||||
| // | |||||
| // 参数: | |||||
| // | |||||
| // svc *Service - 指向当前Service实例的指针。 | |||||
| // | |||||
| // 返回值: | |||||
| // | |||||
| // *AgentService - 指向新创建的AgentService实例的指针。 | |||||
| func (svc *Service) AgentSvc() *AgentService { | |||||
| return &AgentService{Service: svc} | |||||
| } | |||||
| @@ -1,11 +1,9 @@ | |||||
| package services | package services | ||||
| import ( | import ( | ||||
| "fmt" | |||||
| "time" | |||||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||||
| stgglb "gitlink.org.cn/cloudream/storage2/common/globals" | |||||
| coormq "gitlink.org.cn/cloudream/storage2/common/pkgs/mq/coordinator" | |||||
| "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| ) | ) | ||||
| // BucketService provides operations on buckets. | // BucketService provides operations on buckets. | ||||
| @@ -22,96 +20,34 @@ func (svc *Service) BucketSvc() *BucketService { | |||||
| // userID: the unique identifier of the user | // userID: the unique identifier of the user | ||||
| // bucketID: the unique identifier of the bucket | // bucketID: the unique identifier of the bucket | ||||
| // Returns: the bucket information and any error that may occur | // Returns: the bucket information and any error that may occur | ||||
| func (svc *BucketService) GetBucket(userID cdssdk.UserID, bucketID cdssdk.BucketID) (cdssdk.Bucket, error) { | |||||
| // TODO: 此函数尚未实现 | |||||
| panic("not implement yet") | |||||
| func (svc *BucketService) GetBucket(bucketID types.BucketID) (types.Bucket, error) { | |||||
| return svc.DB.Bucket().GetByID(svc.DB.DefCtx(), bucketID) | |||||
| } | } | ||||
| func (svc *BucketService) GetBucketByName(userID cdssdk.UserID, bucketName string) (cdssdk.Bucket, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return cdssdk.Bucket{}, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| resp, err := coorCli.GetBucketByName(coormq.ReqGetBucketByName(userID, bucketName)) | |||||
| if err != nil { | |||||
| return cdssdk.Bucket{}, err | |||||
| } | |||||
| return resp.Bucket, nil | |||||
| func (svc *BucketService) GetBucketByName(bucketName string) (types.Bucket, error) { | |||||
| return svc.DB.Bucket().GetByName(svc.DB.DefCtx(), bucketName) | |||||
| } | } | ||||
| func (svc *BucketService) GetUserBuckets(userID cdssdk.UserID) ([]cdssdk.Bucket, error) { | |||||
| // 从CoordinatorMQPool中获取Coordinator客户端 | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) // 确保客户端被释放 | |||||
| // 向Coordinator发送请求获取用户桶信息 | |||||
| resp, err := coorCli.GetUserBuckets(coormq.NewGetUserBuckets(userID)) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("get user buckets failed, err: %w", err) | |||||
| } | |||||
| return resp.Buckets, nil | |||||
| func (svc *BucketService) GetUserBuckets() ([]types.Bucket, error) { | |||||
| return svc.DB.Bucket().GetUserBuckets(svc.DB.DefCtx()) | |||||
| } | } | ||||
| // GetBucketPackages gets all packages for the specified user and bucket | // GetBucketPackages gets all packages for the specified user and bucket | ||||
| // userID: the unique identifier of the user | // userID: the unique identifier of the user | ||||
| // bucketID: the unique identifier of the bucket | // bucketID: the unique identifier of the bucket | ||||
| // Returns: the list of all packages in the bucket and any error that may occur | // Returns: the list of all packages in the bucket and any error that may occur | ||||
| func (svc *BucketService) GetBucketPackages(userID cdssdk.UserID, bucketID cdssdk.BucketID) ([]cdssdk.Package, error) { | |||||
| // 获取Coordinator客户端 | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) // 确保客户端被释放 | |||||
| // 请求Coordinator获取指定桶的包信息 | |||||
| resp, err := coorCli.GetBucketPackages(coormq.NewGetBucketPackages(userID, bucketID)) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("get bucket packages failed, err: %w", err) | |||||
| } | |||||
| return resp.Packages, nil | |||||
| func (svc *BucketService) GetBucketPackages(bucketID types.BucketID) ([]types.Package, error) { | |||||
| return svc.DB.Package().GetBucketPackages(svc.DB.DefCtx(), bucketID) | |||||
| } | } | ||||
| func (svc *BucketService) CreateBucket(userID cdssdk.UserID, bucketName string) (cdssdk.Bucket, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return cdssdk.Bucket{}, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) // 确保客户端被释放 | |||||
| // 请求Coordinator创建新桶 | |||||
| resp, err := coorCli.CreateBucket(coormq.NewCreateBucket(userID, bucketName)) | |||||
| if err != nil { | |||||
| return cdssdk.Bucket{}, err | |||||
| } | |||||
| return resp.Bucket, nil | |||||
| func (svc *BucketService) CreateBucket(bucketName string, createTime time.Time) (types.Bucket, error) { | |||||
| return svc.DB.Bucket().Create(svc.DB.DefCtx(), bucketName, createTime) | |||||
| } | } | ||||
| // DeleteBucket deletes the specified bucket | // DeleteBucket deletes the specified bucket | ||||
| // userID: the unique identifier of the user | // userID: the unique identifier of the user | ||||
| // bucketID: the unique identifier of the bucket | // bucketID: the unique identifier of the bucket | ||||
| // Returns: any error that may occur | // Returns: any error that may occur | ||||
| func (svc *BucketService) DeleteBucket(userID cdssdk.UserID, bucketID cdssdk.BucketID) error { | |||||
| // 获取Coordinator客户端 | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) // 确保客户端被释放 | |||||
| _, err = coorCli.DeleteBucket(coormq.NewDeleteBucket(userID, bucketID)) | |||||
| if err != nil { | |||||
| return fmt.Errorf("request to coordinator failed, err: %w", err) | |||||
| } | |||||
| return nil | |||||
| func (svc *BucketService) DeleteBucket(bucketID types.BucketID) error { | |||||
| return svc.DB.Bucket().DeleteComplete(svc.DB.DefCtx(), bucketID) | |||||
| } | } | ||||
| @@ -1,5 +1,6 @@ | |||||
| package services | package services | ||||
| /* | |||||
| import ( | import ( | ||||
| "fmt" | "fmt" | ||||
| "time" | "time" | ||||
| @@ -87,3 +88,4 @@ func (svc *CacheService) CacheRemovePackage(packageID cdssdk.PackageID, stgID cd | |||||
| return nil | return nil | ||||
| } | } | ||||
| */ | |||||
| @@ -1,47 +0,0 @@ | |||||
| package services | |||||
| import ( | |||||
| "fmt" | |||||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||||
| stgglb "gitlink.org.cn/cloudream/storage2/common/globals" | |||||
| coormq "gitlink.org.cn/cloudream/storage2/common/pkgs/mq/coordinator" | |||||
| ) | |||||
| // HubService 是关于节点操作的服务结构体 | |||||
| type HubService struct { | |||||
| *Service | |||||
| } | |||||
| // HubSvc 创建并返回一个HubService的实例 | |||||
| func (svc *Service) HubSvc() *HubService { | |||||
| return &HubService{Service: svc} | |||||
| } | |||||
| // GetHubs 根据提供的节点ID列表,获取对应的节点信息 | |||||
| // 参数: | |||||
| // | |||||
| // hubIDs []cdssdk.HubID - 需要查询的节点ID列表 | |||||
| // | |||||
| // 返回值: | |||||
| // | |||||
| // []cdssdk.Hub - 获取到的节点信息列表 | |||||
| // error - 如果过程中发生错误,则返回错误信息 | |||||
| func (svc *HubService) GetHubs(hubIDs []cdssdk.HubID) ([]*cdssdk.Hub, error) { | |||||
| // 从协调器MQ池中获取一个客户端实例 | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| // 确保在函数结束时释放客户端实例回池 | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| // 向协调器发送获取节点信息的请求 | |||||
| getResp, err := coorCli.GetHubs(coormq.NewGetHubs(hubIDs)) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("requesting to coordinator: %w", err) | |||||
| } | |||||
| // 返回获取到的节点信息 | |||||
| return getResp.Hubs, nil | |||||
| } | |||||
| @@ -2,18 +2,23 @@ package services | |||||
| import ( | import ( | ||||
| "context" | "context" | ||||
| "errors" | |||||
| "fmt" | "fmt" | ||||
| "time" | |||||
| "github.com/samber/lo" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" | "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" | ||||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||||
| "gitlink.org.cn/cloudream/common/sdks/storage/cdsapi" | |||||
| stgglb "gitlink.org.cn/cloudream/storage2/common/globals" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||||
| "gitlink.org.cn/cloudream/common/utils/sort2" | |||||
| "gitlink.org.cn/cloudream/storage2/client/internal/db" | |||||
| "gitlink.org.cn/cloudream/storage2/client/sdk/api" | |||||
| "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| stgmod "gitlink.org.cn/cloudream/storage2/common/models" | stgmod "gitlink.org.cn/cloudream/storage2/common/models" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/db2/model" | |||||
| "gitlink.org.cn/cloudream/storage2/common/models/datamap" | |||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader" | "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2" | "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/plans" | "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/plans" | ||||
| coormq "gitlink.org.cn/cloudream/storage2/common/pkgs/mq/coordinator" | |||||
| "gorm.io/gorm" | |||||
| ) | ) | ||||
| // ObjectService defines the object service, which manages operations such as object upload and download. | // ObjectService defines the object service, which manages operations such as object upload and download. | ||||
| @@ -26,69 +31,228 @@ func (svc *Service) ObjectSvc() *ObjectService { | |||||
| return &ObjectService{Service: svc} | return &ObjectService{Service: svc} | ||||
| } | } | ||||
| func (svc *ObjectService) GetByPath(req cdsapi.ObjectListByPath) (cdsapi.ObjectListByPathResp, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return cdsapi.ObjectListByPathResp{}, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| func (svc *ObjectService) GetByPath(req api.ObjectListByPath) (api.ObjectListByPathResp, error) { | |||||
| var resp api.ObjectListByPathResp | |||||
| listResp, err := coorCli.ListObjectsByPath(coormq.ReqListObjectsByPath(req)) | |||||
| if err != nil { | |||||
| return cdsapi.ObjectListByPathResp{}, fmt.Errorf("requsting to coodinator: %w", err) | |||||
| maxKeys := 1000 | |||||
| if req.MaxKeys > 0 { | |||||
| maxKeys = req.MaxKeys | |||||
| } | } | ||||
| return listResp.ObjectListByPathResp, nil | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| var err error | |||||
| _, err = svc.DB.Package().GetByID(tx, req.PackageID) | |||||
| if err != nil { | |||||
| return fmt.Errorf("getting package by id: %w", err) | |||||
| } | |||||
| if !req.IsPrefix { | |||||
| obj, err := svc.DB.Object().GetByPath(tx, req.PackageID, req.Path) | |||||
| if err != nil { | |||||
| return fmt.Errorf("getting object by path: %w", err) | |||||
| } | |||||
| resp.Objects = append(resp.Objects, obj) | |||||
| return nil | |||||
| } | |||||
| if !req.NoRecursive { | |||||
| resp.Objects, err = svc.DB.Object().GetWithPathPrefixPaged(tx, req.PackageID, req.Path, req.ContinuationToken, maxKeys) | |||||
| if err != nil { | |||||
| return fmt.Errorf("getting objects with prefix: %w", err) | |||||
| } | |||||
| if len(resp.Objects) > 0 { | |||||
| resp.NextContinuationToken = resp.Objects[len(resp.Objects)-1].Path | |||||
| } | |||||
| return nil | |||||
| } | |||||
| resp.Objects, resp.CommonPrefixes, resp.NextContinuationToken, err = svc.DB.Object().GetByPrefixGroupedPaged(tx, req.PackageID, req.Path, req.ContinuationToken, maxKeys) | |||||
| return err | |||||
| }) | |||||
| return resp, err | |||||
| } | } | ||||
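Editor's note: GetByPath now runs entirely against the client database inside one DoTx transaction and pages large listings — MaxKeys caps a page (defaulting to 1000), and for recursive prefix listings the path of the last returned object is handed back as NextContinuationToken. A hedged caller-side sketch of that paging contract, reusing only the request and response fields visible above; the loop itself is illustrative, not part of this change:

```go
// Drains a recursive prefix listing page by page. Assumes the api.ObjectListByPath
// fields shown in this diff (PackageID, Path, IsPrefix, ContinuationToken, MaxKeys)
// and that each page starts after the previously returned token.
func listAllUnderPrefix(svc *ObjectService, pkgID types.PackageID, prefix string) ([]types.Object, error) {
	var all []types.Object
	token := ""
	for {
		resp, err := svc.GetByPath(api.ObjectListByPath{
			PackageID:         pkgID,
			Path:              prefix,
			IsPrefix:          true,
			ContinuationToken: token,
			MaxKeys:           1000,
		})
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Objects...)
		if len(resp.Objects) == 0 || resp.NextContinuationToken == "" {
			return all, nil // an empty page (or no token) means the listing is done
		}
		token = resp.NextContinuationToken
	}
}
```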
| func (svc *ObjectService) GetByIDs(userID cdssdk.UserID, objectIDs []cdssdk.ObjectID) ([]*cdssdk.Object, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| func (svc *ObjectService) GetByIDs(objectIDs []types.ObjectID) ([]*types.Object, error) { | |||||
| var ret []*types.Object | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| // TODO Should check that the user has permission on the package each object belongs to | |||||
| objs, err := svc.DB.Object().BatchGet(tx, objectIDs) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| listResp, err := coorCli.GetObjects(coormq.ReqGetObjects(userID, objectIDs)) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("requsting to coodinator: %w", err) | |||||
| } | |||||
| objMp := make(map[types.ObjectID]types.Object) | |||||
| for _, obj := range objs { | |||||
| objMp[obj.ObjectID] = obj | |||||
| } | |||||
| return listResp.Objects, nil | |||||
| for _, objID := range objectIDs { | |||||
| o, ok := objMp[objID] | |||||
| if ok { | |||||
| ret = append(ret, &o) | |||||
| } else { | |||||
| ret = append(ret, nil) | |||||
| } | |||||
| } | |||||
| return err | |||||
| }) | |||||
| return ret, err | |||||
| } | } | ||||
| func (svc *ObjectService) UpdateInfo(userID cdssdk.UserID, updatings []cdsapi.UpdatingObject) ([]cdssdk.ObjectID, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| func (svc *ObjectService) UpdateInfo(updatings []api.UpdatingObject) ([]types.ObjectID, error) { | |||||
| var sucs []types.ObjectID | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| updatings = sort2.Sort(updatings, func(o1, o2 api.UpdatingObject) int { | |||||
| return sort2.Cmp(o1.ObjectID, o2.ObjectID) | |||||
| }) | |||||
| resp, err := coorCli.UpdateObjectInfos(coormq.ReqUpdateObjectInfos(userID, updatings)) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("requsting to coodinator: %w", err) | |||||
| } | |||||
| objIDs := make([]types.ObjectID, len(updatings)) | |||||
| for i, obj := range updatings { | |||||
| objIDs[i] = obj.ObjectID | |||||
| } | |||||
| oldObjs, err := svc.DB.Object().BatchGet(tx, objIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch getting objects: %w", err) | |||||
| } | |||||
| oldObjIDs := make([]types.ObjectID, len(oldObjs)) | |||||
| for i, obj := range oldObjs { | |||||
| oldObjIDs[i] = obj.ObjectID | |||||
| } | |||||
| avaiUpdatings, notExistsObjs := pickByObjectIDs(updatings, oldObjIDs, func(obj api.UpdatingObject) types.ObjectID { return obj.ObjectID }) | |||||
| if len(notExistsObjs) > 0 { | |||||
| // TODO Some of the objects no longer exist | |||||
| } | |||||
| // TODO Consider emitting an Update event | |||||
| newObjs := make([]types.Object, len(avaiUpdatings)) | |||||
| for i := range newObjs { | |||||
| newObjs[i] = oldObjs[i] | |||||
| avaiUpdatings[i].ApplyTo(&newObjs[i]) | |||||
| } | |||||
| err = svc.DB.Object().BatchUpdate(tx, newObjs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch create or update: %w", err) | |||||
| } | |||||
| return resp.Successes, nil | |||||
| sucs = lo.Map(newObjs, func(obj types.Object, _ int) types.ObjectID { return obj.ObjectID }) | |||||
| return nil | |||||
| }) | |||||
| return sucs, err | |||||
| } | } | ||||
| func (svc *ObjectService) Move(userID cdssdk.UserID, movings []cdsapi.MovingObject) ([]cdssdk.ObjectID, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| // Picks objects from objs according to objIDs. | |||||
| // len(objs) >= len(objIDs) | |||||
| func pickByObjectIDs[T any](objs []T, objIDs []types.ObjectID, getID func(T) types.ObjectID) (picked []T, notFound []T) { | |||||
| objIdx := 0 | |||||
| idIdx := 0 | |||||
| for idIdx < len(objIDs) && objIdx < len(objs) { | |||||
| if getID(objs[objIdx]) < objIDs[idIdx] { | |||||
| notFound = append(notFound, objs[objIdx]) | |||||
| objIdx++ | |||||
| continue | |||||
| } | |||||
| picked = append(picked, objs[objIdx]) | |||||
| objIdx++ | |||||
| idIdx++ | |||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| resp, err := coorCli.MoveObjects(coormq.ReqMoveObjects(userID, movings)) | |||||
| return | |||||
| } | |||||
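Editor's note: pickByObjectIDs is a two-cursor merge, so it only pairs requests with database rows correctly when both inputs are ordered by object ID; this is why UpdateInfo and Move sort the incoming requests with sort2.Sort before calling it. A hypothetical illustration in the same package (the literal values are made up):

```go
// Hypothetical example of the helper's contract.
func examplePick() {
	// Requests, already sorted by ObjectID (as UpdateInfo and Move guarantee).
	reqs := []api.UpdatingObject{{ObjectID: 1}, {ObjectID: 2}, {ObjectID: 4}, {ObjectID: 5}}
	// IDs that BatchGet found in the database, assumed here to come back in ascending order.
	existing := []types.ObjectID{2, 5}

	picked, notFound := pickByObjectIDs(reqs, existing,
		func(o api.UpdatingObject) types.ObjectID { return o.ObjectID })

	// picked   -> the requests for objects 2 and 5 (still present)
	// notFound -> the requests for objects 1 and 4 (no longer in the database)
	_ = picked
	_ = notFound
}
```

Note that the loop stops once every ID in objIDs has been matched, so requests sorting after the last existing ID are neither picked nor reported as missing.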
| func (svc *ObjectService) Move(movings []api.MovingObject) ([]types.ObjectID, error) { | |||||
| var sucs []types.ObjectID | |||||
| var evt []*datamap.BodyObjectInfoUpdated | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| movings = sort2.Sort(movings, func(o1, o2 api.MovingObject) int { | |||||
| return sort2.Cmp(o1.ObjectID, o2.ObjectID) | |||||
| }) | |||||
| objIDs := make([]types.ObjectID, len(movings)) | |||||
| for i, obj := range movings { | |||||
| objIDs[i] = obj.ObjectID | |||||
| } | |||||
| oldObjs, err := svc.DB.Object().BatchGet(tx, objIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch getting objects: %w", err) | |||||
| } | |||||
| oldObjIDs := make([]types.ObjectID, len(oldObjs)) | |||||
| for i, obj := range oldObjs { | |||||
| oldObjIDs[i] = obj.ObjectID | |||||
| } | |||||
| // Find the objects that still exist in the database | |||||
| avaiMovings, notExistsObjs := pickByObjectIDs(movings, oldObjIDs, func(obj api.MovingObject) types.ObjectID { return obj.ObjectID }) | |||||
| if len(notExistsObjs) > 0 { | |||||
| // TODO Some of the objects no longer exist | |||||
| } | |||||
| // Pick out the objects whose PackageID changed and those whose Path changed; both kinds must be checked for conflicts after the change | |||||
| var pkgIDChangedObjs []types.Object | |||||
| var pathChangedObjs []types.Object | |||||
| for i := range avaiMovings { | |||||
| if avaiMovings[i].PackageID != oldObjs[i].PackageID { | |||||
| newObj := oldObjs[i] | |||||
| avaiMovings[i].ApplyTo(&newObj) | |||||
| pkgIDChangedObjs = append(pkgIDChangedObjs, newObj) | |||||
| } else if avaiMovings[i].Path != oldObjs[i].Path { | |||||
| newObj := oldObjs[i] | |||||
| avaiMovings[i].ApplyTo(&newObj) | |||||
| pathChangedObjs = append(pathChangedObjs, newObj) | |||||
| } | |||||
| } | |||||
| var newObjs []types.Object | |||||
| // For objects whose PackageID changed, check whether the target package already contains an object with the same path | |||||
| checkedObjs, err := svc.checkPackageChangedObjects(tx, pkgIDChangedObjs) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| newObjs = append(newObjs, checkedObjs...) | |||||
| // For objects whose Path alone changed, check whether the same package already contains an object with the same path | |||||
| checkedObjs, err = svc.checkPathChangedObjects(tx, pathChangedObjs) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| newObjs = append(newObjs, checkedObjs...) | |||||
| err = svc.DB.Object().BatchUpdate(tx, newObjs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch create or update: %w", err) | |||||
| } | |||||
| sucs = lo.Map(newObjs, func(obj types.Object, _ int) types.ObjectID { return obj.ObjectID }) | |||||
| evt = lo.Map(newObjs, func(obj types.Object, _ int) *datamap.BodyObjectInfoUpdated { | |||||
| return &datamap.BodyObjectInfoUpdated{ | |||||
| Object: obj, | |||||
| } | |||||
| }) | |||||
| return nil | |||||
| }) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, fmt.Errorf("requsting to coodinator: %w", err) | |||||
| logger.Warn(err.Error()) | |||||
| return nil, err | |||||
| } | |||||
| for _, e := range evt { | |||||
| svc.evtPub.Publish(e) | |||||
| } | } | ||||
| return resp.Successes, nil | |||||
| return sucs, nil | |||||
| } | } | ||||
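Editor's note: Move follows the same pattern — sort the requests, drop the ones whose objects are gone, split the remainder into package-changed and path-changed groups, discard conflicting targets, write the survivors in a single transaction, and only publish the BodyObjectInfoUpdated events once the commit has succeeded. A hedged usage sketch; the three MovingObject fields are the ones Move reads above, anything further is an assumption:

```go
// Illustrative only: move object 42 into package 7 under a new path.
func exampleMove(svc *Service) error {
	sucs, err := svc.ObjectSvc().Move([]api.MovingObject{{
		ObjectID:  types.ObjectID(42),
		PackageID: types.PackageID(7), // destination package
		Path:      "docs/report.txt",  // destination path inside that package
	}})
	if err != nil {
		return err // the transaction failed as a whole; nothing was moved
	}
	// sucs holds the IDs that were actually updated; requests that hit a path
	// conflict or whose object no longer exists are skipped rather than failed.
	_ = sucs
	return nil
}
```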
| func (svc *ObjectService) Download(userID cdssdk.UserID, req downloader.DownloadReqeust) (*downloader.Downloading, error) { | |||||
| func (svc *ObjectService) Download(req downloader.DownloadReqeust) (*downloader.Downloading, error) { | |||||
| // TODO Check the user ID | // TODO Check the user ID | ||||
| iter := svc.Downloader.DownloadObjects([]downloader.DownloadReqeust{req}) | iter := svc.Downloader.DownloadObjects([]downloader.DownloadReqeust{req}) | ||||
| @@ -104,133 +268,520 @@ func (svc *ObjectService) Download(userID cdssdk.UserID, req downloader.Download | |||||
| return downloading, nil | return downloading, nil | ||||
| } | } | ||||
| func (svc *ObjectService) Delete(userID cdssdk.UserID, objectIDs []cdssdk.ObjectID) error { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| func (svc *Service) checkPackageChangedObjects(tx db.SQLContext, objs []types.Object) ([]types.Object, error) { | |||||
| if len(objs) == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| type PackageObjects struct { | |||||
| PackageID types.PackageID | |||||
| ObjectByPath map[string]*types.Object | |||||
| } | |||||
| packages := make(map[types.PackageID]*PackageObjects) | |||||
| for _, obj := range objs { | |||||
| pkg, ok := packages[obj.PackageID] | |||||
| if !ok { | |||||
| pkg = &PackageObjects{ | |||||
| PackageID: obj.PackageID, | |||||
| ObjectByPath: make(map[string]*types.Object), | |||||
| } | |||||
| packages[obj.PackageID] = pkg | |||||
| } | |||||
| if pkg.ObjectByPath[obj.Path] == nil { | |||||
| o := obj | |||||
| pkg.ObjectByPath[obj.Path] = &o | |||||
| } else { | |||||
| // TODO Two objects are being moved to the same path, which is a conflict | |||||
| } | |||||
| } | |||||
| var willUpdateObjs []types.Object | |||||
| for _, pkg := range packages { | |||||
| _, err := svc.DB.Package().GetByID(tx, pkg.PackageID) | |||||
| if errors.Is(err, gorm.ErrRecordNotFound) { | |||||
| continue | |||||
| } | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("getting package by id: %w", err) | |||||
| } | |||||
| existsObjs, err := svc.DB.Object().BatchGetByPackagePath(tx, pkg.PackageID, lo.Keys(pkg.ObjectByPath)) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("batch getting objects by package path: %w", err) | |||||
| } | |||||
| // Mark the conflicting objects | |||||
| for _, obj := range existsObjs { | |||||
| pkg.ObjectByPath[obj.Path] = nil | |||||
| // TODO There is a conflicting object in the target package | |||||
| } | |||||
| for _, obj := range pkg.ObjectByPath { | |||||
| if obj == nil { | |||||
| continue | |||||
| } | |||||
| willUpdateObjs = append(willUpdateObjs, *obj) | |||||
| } | |||||
| } | |||||
| return willUpdateObjs, nil | |||||
| } | |||||
| func (svc *Service) checkPathChangedObjects(tx db.SQLContext, objs []types.Object) ([]types.Object, error) { | |||||
| if len(objs) == 0 { | |||||
| return nil, nil | |||||
| } | |||||
| objByPath := make(map[string]*types.Object) | |||||
| for _, obj := range objs { | |||||
| if objByPath[obj.Path] == nil { | |||||
| o := obj | |||||
| objByPath[obj.Path] = &o | |||||
| } else { | |||||
| // TODO Two objects are being moved to the same path, which is a conflict | |||||
| } | |||||
| } | |||||
| _, err := svc.DB.Package().GetByID(tx, objs[0].PackageID) | |||||
| if errors.Is(err, gorm.ErrRecordNotFound) { | |||||
| return nil, nil | |||||
| } | |||||
| if err != nil { | if err != nil { | ||||
| return fmt.Errorf("new coordinator client: %w", err) | |||||
| return nil, fmt.Errorf("getting package by id: %w", err) | |||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| _, err = coorCli.DeleteObjects(coormq.ReqDeleteObjects(userID, objectIDs)) | |||||
| existsObjs, err := svc.DB.Object().BatchGetByPackagePath(tx, objs[0].PackageID, lo.Map(objs, func(obj types.Object, idx int) string { return obj.Path })) | |||||
| if err != nil { | if err != nil { | ||||
| return fmt.Errorf("requsting to coodinator: %w", err) | |||||
| return nil, fmt.Errorf("batch getting objects by package path: %w", err) | |||||
| } | } | ||||
| return nil | |||||
| // Swapping the paths of two objects is not supported, because the database does not support it | |||||
| for _, obj := range existsObjs { | |||||
| objByPath[obj.Path] = nil | |||||
| } | |||||
| var willMoveObjs []types.Object | |||||
| for _, obj := range objByPath { | |||||
| if obj == nil { | |||||
| continue | |||||
| } | |||||
| willMoveObjs = append(willMoveObjs, *obj) | |||||
| } | |||||
| return willMoveObjs, nil | |||||
| } | } | ||||
| func (svc *ObjectService) Clone(userID cdssdk.UserID, clonings []cdsapi.CloningObject) ([]*cdssdk.Object, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| func (svc *ObjectService) Delete(objectIDs []types.ObjectID) error { | |||||
| var sucs []types.ObjectID | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| avaiIDs, err := svc.DB.Object().BatchTestObjectID(tx, objectIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch testing object id: %w", err) | |||||
| } | |||||
| sucs = lo.Keys(avaiIDs) | |||||
| err = svc.DB.Object().BatchDelete(tx, objectIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch deleting objects: %w", err) | |||||
| } | |||||
| err = svc.DB.ObjectBlock().BatchDeleteByObjectID(tx, objectIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch deleting object blocks: %w", err) | |||||
| } | |||||
| err = svc.DB.PinnedObject().BatchDeleteByObjectID(tx, objectIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch deleting pinned objects: %w", err) | |||||
| } | |||||
| err = svc.DB.ObjectAccessStat().BatchDeleteByObjectID(tx, objectIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch deleting object access stats: %w", err) | |||||
| } | |||||
| return nil | |||||
| }) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| return err | |||||
| } | |||||
| for _, objID := range sucs { | |||||
| svc.evtPub.Publish(&datamap.BodyObjectDeleted{ | |||||
| ObjectID: objID, | |||||
| }) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func (svc *ObjectService) Clone(clonings []api.CloningObject) ([]*types.Object, error) { | |||||
| type CloningObject struct { | |||||
| Cloning api.CloningObject | |||||
| OrgIndex int | |||||
| } | |||||
| type PackageClonings struct { | |||||
| PackageID types.PackageID | |||||
| Clonings map[string]CloningObject | |||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| resp, err := coorCli.CloneObjects(coormq.ReqCloneObjects(userID, clonings)) | |||||
| var evt []*datamap.BodyNewOrUpdateObject | |||||
| // TODO: check whether the user has permission on the Object and the Package | |||||
| cloningMap := make(map[types.PackageID]*PackageClonings) | |||||
| for i, cloning := range clonings { | |||||
| pkg, ok := cloningMap[cloning.NewPackageID] | |||||
| if !ok { | |||||
| pkg = &PackageClonings{ | |||||
| PackageID: cloning.NewPackageID, | |||||
| Clonings: make(map[string]CloningObject), | |||||
| } | |||||
| cloningMap[cloning.NewPackageID] = pkg | |||||
| } | |||||
| pkg.Clonings[cloning.NewPath] = CloningObject{ | |||||
| Cloning: cloning, | |||||
| OrgIndex: i, | |||||
| } | |||||
| } | |||||
| // One slot per requested cloning; entries stay nil for clonings that are skipped | |||||
| ret := make([]*types.Object, len(clonings)) | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| // Drop clonings whose new path already exists in the target package | |||||
| for _, pkg := range cloningMap { | |||||
| exists, err := svc.DB.Object().BatchGetByPackagePath(tx, pkg.PackageID, lo.Keys(pkg.Clonings)) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch getting objects by package path: %w", err) | |||||
| } | |||||
| for _, obj := range exists { | |||||
| delete(pkg.Clonings, obj.Path) | |||||
| } | |||||
| } | |||||
| // Drop clonings whose destination Package does not exist | |||||
| newPkg, err := svc.DB.Package().BatchTestPackageID(tx, lo.Keys(cloningMap)) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch testing package id: %w", err) | |||||
| } | |||||
| for _, pkg := range cloningMap { | |||||
| if !newPkg[pkg.PackageID] { | |||||
| delete(cloningMap, pkg.PackageID) | |||||
| } | |||||
| } | |||||
| var avaiClonings []CloningObject | |||||
| var avaiObjIDs []types.ObjectID | |||||
| for _, pkg := range cloningMap { | |||||
| for _, cloning := range pkg.Clonings { | |||||
| avaiClonings = append(avaiClonings, cloning) | |||||
| avaiObjIDs = append(avaiObjIDs, cloning.Cloning.ObjectID) | |||||
| } | |||||
| } | |||||
| avaiDetails, err := svc.DB.Object().BatchGetDetails(tx, avaiObjIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch getting object details: %w", err) | |||||
| } | |||||
| avaiDetailsMap := make(map[types.ObjectID]types.ObjectDetail) | |||||
| for _, detail := range avaiDetails { | |||||
| avaiDetailsMap[detail.Object.ObjectID] = detail | |||||
| } | |||||
| oldAvaiClonings := avaiClonings | |||||
| avaiClonings = nil | |||||
| var newObjs []types.Object | |||||
| for _, cloning := range oldAvaiClonings { | |||||
| // Further drop clonings whose source object does not exist | |||||
| detail, ok := avaiDetailsMap[cloning.Cloning.ObjectID] | |||||
| if !ok { | |||||
| continue | |||||
| } | |||||
| avaiClonings = append(avaiClonings, cloning) | |||||
| newObj := detail.Object | |||||
| newObj.ObjectID = 0 | |||||
| newObj.Path = cloning.Cloning.NewPath | |||||
| newObj.PackageID = cloning.Cloning.NewPackageID | |||||
| newObjs = append(newObjs, newObj) | |||||
| } | |||||
| // Create the new objects first | |||||
| err = svc.DB.Object().BatchCreate(tx, &newObjs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch creating objects: %w", err) | |||||
| } | |||||
| // Creating the new objects yields their new object IDs, which are then used to create the new object blocks | |||||
| var newBlks []types.ObjectBlock | |||||
| for i, cloning := range avaiClonings { | |||||
| oldBlks := avaiDetailsMap[cloning.Cloning.ObjectID].Blocks | |||||
| for _, blk := range oldBlks { | |||||
| newBlk := blk | |||||
| newBlk.ObjectID = newObjs[i].ObjectID | |||||
| newBlks = append(newBlks, newBlk) | |||||
| } | |||||
| } | |||||
| err = svc.DB.ObjectBlock().BatchCreate(tx, newBlks) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch creating object blocks: %w", err) | |||||
| } | |||||
| for i, cloning := range avaiClonings { | |||||
| ret[cloning.OrgIndex] = &newObjs[i] | |||||
| } | |||||
| for i, cloning := range avaiClonings { | |||||
| var evtBlks []datamap.BlockDistributionObjectInfo | |||||
| blkType := getBlockTypeFromRed(newObjs[i].Redundancy) | |||||
| oldBlks := avaiDetailsMap[cloning.Cloning.ObjectID].Blocks | |||||
| for _, blk := range oldBlks { | |||||
| evtBlks = append(evtBlks, datamap.BlockDistributionObjectInfo{ | |||||
| BlockType: blkType, | |||||
| Index: blk.Index, | |||||
| StorageID: blk.StorageID, | |||||
| }) | |||||
| } | |||||
| evt = append(evt, &datamap.BodyNewOrUpdateObject{ | |||||
| Info: newObjs[i], | |||||
| BlockDistribution: evtBlks, | |||||
| }) | |||||
| } | |||||
| return nil | |||||
| }) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, fmt.Errorf("requsting to coodinator: %w", err) | |||||
| logger.Warnf("cloning objects: %s", err.Error()) | |||||
| return nil, err | |||||
| } | |||||
| for _, e := range evt { | |||||
| svc.evtPub.Publish(e) | |||||
| } | } | ||||
| return resp.Objects, nil | |||||
| return ret, nil | |||||
| } | } | ||||
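For orientation, a rough caller-side sketch of the new transactional Clone (the IDs, path and helper name are placeholders; assumes a *Service wired up as in service.go). The result is index-aligned with the request, and an entry stays nil when the cloning was skipped because of a path conflict or a missing package/object:

func cloneSketch(svc *services.Service) error {
	// Clone one object into another package under a new path.
	cloned, err := svc.ObjectSvc().Clone([]api.CloningObject{
		{ObjectID: 10, NewPackageID: 20, NewPath: "copies/a.txt"},
	})
	if err != nil {
		return err
	}
	// A nil entry means the cloning was skipped.
	if cloned[0] != nil {
		fmt.Println("cloned as object", cloned[0].ObjectID)
	}
	return nil
}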
| // GetPackageObjects gets the list of objects in a package. | // GetPackageObjects gets the list of objects in a package. | ||||
| // userID: the user ID. | // userID: the user ID. | ||||
| // packageID: the package ID. | // packageID: the package ID. | ||||
| // Returns: the object list and an error. | // Returns: the object list and an error. | ||||
| func (svc *ObjectService) GetPackageObjects(userID cdssdk.UserID, packageID cdssdk.PackageID) ([]model.Object, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() // acquire the coordinator client | |||||
| func (svc *ObjectService) GetPackageObjects(packageID types.PackageID) ([]types.Object, error) { | |||||
| return svc.DB.Object().GetPackageObjects(svc.DB.DefCtx(), packageID) | |||||
| } | |||||
| func (svc *ObjectService) GetObjectDetails(objectIDs []types.ObjectID) ([]*types.ObjectDetail, error) { | |||||
| detailsMp := make(map[types.ObjectID]*types.ObjectDetail) | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| var err error | |||||
| objectIDs = sort2.SortAsc(objectIDs) | |||||
| // Query Object, ObjectBlock and PinnedObject by ID in turn, then merge them, relying on the ascending order | |||||
| objs, err := svc.DB.Object().BatchGet(tx, objectIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch get objects: %w", err) | |||||
| } | |||||
| for _, obj := range objs { | |||||
| detailsMp[obj.ObjectID] = &types.ObjectDetail{ | |||||
| Object: obj, | |||||
| } | |||||
| } | |||||
| // Query and merge | |||||
| blocks, err := svc.DB.ObjectBlock().BatchGetByObjectID(tx, objectIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch get object blocks: %w", err) | |||||
| } | |||||
| for _, block := range blocks { | |||||
| d := detailsMp[block.ObjectID] | |||||
| d.Blocks = append(d.Blocks, block) | |||||
| } | |||||
| // Query and merge | |||||
| pinneds, err := svc.DB.PinnedObject().BatchGetByObjectID(tx, objectIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch get pinned objects: %w", err) | |||||
| } | |||||
| for _, pinned := range pinneds { | |||||
| d := detailsMp[pinned.ObjectID] | |||||
| d.PinnedAt = append(d.PinnedAt, pinned.UserSpaceID) | |||||
| } | |||||
| return nil | |||||
| }) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| logger.Warn(err.Error()) | |||||
| return nil, err | |||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) // release the coordinator client | |||||
| getResp, err := coorCli.GetPackageObjects(coormq.ReqGetPackageObjects(userID, packageID)) // ask the coordinator for the package's objects | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("requesting to coordinator: %w", err) | |||||
| details := make([]*types.ObjectDetail, len(objectIDs)) | |||||
| for i, objID := range objectIDs { | |||||
| details[i] = detailsMp[objID] | |||||
| } | } | ||||
| return getResp.Objects, nil | |||||
| return details, nil | |||||
| } | } | ||||
| func (svc *ObjectService) GetObjectDetail(objectID cdssdk.ObjectID) (*stgmod.ObjectDetail, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| func (svc *ObjectService) NewMultipartUploadObject(packageID types.PackageID, path string) (types.Object, error) { | |||||
| var obj types.Object | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| oldObj, err := svc.DB.Object().GetByPath(tx, packageID, path) | |||||
| if err == nil { | |||||
| obj = oldObj | |||||
| err := svc.DB.ObjectBlock().DeleteByObjectID(tx, obj.ObjectID) | |||||
| if err != nil { | |||||
| return fmt.Errorf("delete object blocks: %w", err) | |||||
| } | |||||
| obj.FileHash = types.EmptyHash | |||||
| obj.Size = 0 | |||||
| obj.Redundancy = types.NewMultipartUploadRedundancy() | |||||
| obj.UpdateTime = time.Now() | |||||
| err = svc.DB.Object().BatchUpdate(tx, []types.Object{obj}) | |||||
| if err != nil { | |||||
| return fmt.Errorf("update object: %w", err) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| obj = types.Object{ | |||||
| PackageID: packageID, | |||||
| Path: path, | |||||
| FileHash: types.EmptyHash, | |||||
| Size: 0, | |||||
| Redundancy: types.NewMultipartUploadRedundancy(), | |||||
| CreateTime: time.Now(), | |||||
| UpdateTime: time.Now(), | |||||
| } | |||||
| objID, err := svc.DB.Object().Create(tx, obj) | |||||
| if err != nil { | |||||
| return fmt.Errorf("create object: %w", err) | |||||
| } | |||||
| getResp, err := coorCli.GetObjectDetails(coormq.ReqGetObjectDetails([]cdssdk.ObjectID{objectID})) | |||||
| obj.ObjectID = objID | |||||
| return nil | |||||
| }) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, fmt.Errorf("requsting to coodinator: %w", err) | |||||
| logger.Warnf("new multipart upload object: %s", err.Error()) | |||||
| return types.Object{}, err | |||||
| } | } | ||||
| return getResp.Objects[0], nil | |||||
| return obj, nil | |||||
| } | } | ||||
| func (svc *ObjectService) NewMultipartUploadObject(userID cdssdk.UserID, pkgID cdssdk.PackageID, path string) (cdssdk.Object, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return cdssdk.Object{}, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| func (svc *Service) AddMultipartUploadPart(objectID types.ObjectID, block types.ObjectBlock) error { | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| obj, err := svc.DB.Object().GetByID(tx, objectID) | |||||
| if err != nil { | |||||
| return fmt.Errorf("getting object by id: %w", err) | |||||
| } | |||||
| _, ok := obj.Redundancy.(*types.MultipartUploadRedundancy) | |||||
| if !ok { | |||||
| return fmt.Errorf("object is not a multipart upload object") | |||||
| } | |||||
| blks, err := svc.DB.ObjectBlock().BatchGetByObjectID(tx, []types.ObjectID{obj.ObjectID}) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch getting object blocks: %w", err) | |||||
| } | |||||
| blks = lo.Reject(blks, func(blk types.ObjectBlock, idx int) bool { return blk.Index == block.Index }) | |||||
| blks = append(blks, block) | |||||
| blks = sort2.Sort(blks, func(a, b types.ObjectBlock) int { return a.Index - b.Index }) | |||||
| totalSize := int64(0) | |||||
| var hashes [][]byte | |||||
| for _, blk := range blks { | |||||
| totalSize += blk.Size | |||||
| hashes = append(hashes, blk.FileHash.GetHashBytes()) | |||||
| } | |||||
| newObjHash := types.CalculateCompositeHash(hashes) | |||||
| obj.Size = totalSize | |||||
| obj.FileHash = newObjHash | |||||
| obj.UpdateTime = time.Now() | |||||
| resp, err := coorCli.NewMultipartUploadObject(coormq.ReqNewMultipartUploadObject(userID, pkgID, path)) | |||||
| err = svc.DB.ObjectBlock().DeleteByObjectIDIndex(tx, objectID, block.Index) | |||||
| if err != nil { | |||||
| return fmt.Errorf("delete object block: %w", err) | |||||
| } | |||||
| err = svc.DB.ObjectBlock().Create(tx, objectID, block.Index, block.UserSpaceID, block.FileHash, block.Size) | |||||
| if err != nil { | |||||
| return fmt.Errorf("create object block: %w", err) | |||||
| } | |||||
| err = svc.DB.Object().BatchUpdate(tx, []types.Object{obj}) | |||||
| if err != nil { | |||||
| return fmt.Errorf("update object: %w", err) | |||||
| } | |||||
| return nil | |||||
| }) | |||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Object{}, err | |||||
| logger.Warnf("add multipart upload part: %s", err.Error()) | |||||
| return err | |||||
| } | } | ||||
| return resp.Object, nil | |||||
| return nil | |||||
| } | } | ||||
| func (svc *ObjectService) CompleteMultipartUpload(userID cdssdk.UserID, objectID cdssdk.ObjectID, indexes []int) (cdssdk.Object, error) { | |||||
| func (svc *ObjectService) CompleteMultipartUpload(objectID types.ObjectID, indexes []int) (types.Object, error) { | |||||
| if len(indexes) == 0 { | if len(indexes) == 0 { | ||||
| return cdssdk.Object{}, fmt.Errorf("no block indexes specified") | |||||
| } | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return cdssdk.Object{}, fmt.Errorf("new coordinator client: %w", err) | |||||
| return types.Object{}, fmt.Errorf("no block indexes specified") | |||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| details, err := coorCli.GetObjectDetails(coormq.ReqGetObjectDetails([]cdssdk.ObjectID{objectID})) | |||||
| details, err := svc.ObjectSvc().GetObjectDetails([]types.ObjectID{objectID}) | |||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Object{}, err | |||||
| return types.Object{}, err | |||||
| } | } | ||||
| if details.Objects[0] == nil { | |||||
| return cdssdk.Object{}, fmt.Errorf("object %v not found", objectID) | |||||
| if details[0] == nil { | |||||
| return types.Object{}, fmt.Errorf("object %v not found", objectID) | |||||
| } | } | ||||
| objDe := details.Objects[0] | |||||
| objDe := details[0] | |||||
| _, ok := objDe.Object.Redundancy.(*cdssdk.MultipartUploadRedundancy) | |||||
| _, ok := objDe.Object.Redundancy.(*types.MultipartUploadRedundancy) | |||||
| if !ok { | if !ok { | ||||
| return cdssdk.Object{}, fmt.Errorf("object %v is not a multipart upload", objectID) | |||||
| return types.Object{}, fmt.Errorf("object %v is not a multipart upload", objectID) | |||||
| } | } | ||||
| if len(objDe.Blocks) == 0 { | if len(objDe.Blocks) == 0 { | ||||
| return cdssdk.Object{}, fmt.Errorf("object %v has no blocks", objectID) | |||||
| return types.Object{}, fmt.Errorf("object %v has no blocks", objectID) | |||||
| } | } | ||||
| objBlkMap := make(map[int]stgmod.ObjectBlock) | |||||
| objBlkMap := make(map[int]types.ObjectBlock) | |||||
| for _, blk := range objDe.Blocks { | for _, blk := range objDe.Blocks { | ||||
| objBlkMap[blk.Index] = blk | objBlkMap[blk.Index] = blk | ||||
| } | } | ||||
| var compBlks []stgmod.ObjectBlock | |||||
| var compBlks []types.ObjectBlock | |||||
| var compBlkStgs []stgmod.StorageDetail | var compBlkStgs []stgmod.StorageDetail | ||||
| var targetStg stgmod.StorageDetail | var targetStg stgmod.StorageDetail | ||||
| for i, idx := range indexes { | for i, idx := range indexes { | ||||
| blk, ok := objBlkMap[idx] | blk, ok := objBlkMap[idx] | ||||
| if !ok { | if !ok { | ||||
| return cdssdk.Object{}, fmt.Errorf("block %d not found in object %v", idx, objectID) | |||||
| return types.Object{}, fmt.Errorf("block %d not found in object %v", idx, objectID) | |||||
| } | } | ||||
| stg := svc.StorageMeta.Get(blk.StorageID) | |||||
| stg := svc.StorageMeta.Get(blk.UserSpaceID) | |||||
| if stg == nil { | if stg == nil { | ||||
| return cdssdk.Object{}, fmt.Errorf("storage %d not found", blk.StorageID) | |||||
| return types.Object{}, fmt.Errorf("storage of user space %d not found", blk.UserSpaceID) | |||||
| } | } | ||||
| compBlks = append(compBlks, blk) | compBlks = append(compBlks, blk) | ||||
| @@ -243,44 +794,40 @@ func (svc *ObjectService) CompleteMultipartUpload(userID cdssdk.UserID, objectID | |||||
| bld := exec.NewPlanBuilder() | bld := exec.NewPlanBuilder() | ||||
| err = plans.CompleteMultipart(compBlks, compBlkStgs, targetStg, "shard", bld) | err = plans.CompleteMultipart(compBlks, compBlkStgs, targetStg, "shard", bld) | ||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Object{}, err | |||||
| return types.Object{}, err | |||||
| } | } | ||||
| exeCtx := exec.NewExecContext() | exeCtx := exec.NewExecContext() | ||||
| ret, err := bld.Execute(exeCtx).Wait(context.Background()) | ret, err := bld.Execute(exeCtx).Wait(context.Background()) | ||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Object{}, err | |||||
| return types.Object{}, err | |||||
| } | } | ||||
| shardInfo := ret["shard"].(*ops2.ShardInfoValue) | shardInfo := ret["shard"].(*ops2.ShardInfoValue) | ||||
| _, err = coorCli.UpdateObjectRedundancy(coormq.ReqUpdateObjectRedundancy([]coormq.UpdatingObjectRedundancy{ | |||||
| err = svc.DB.Object().BatchUpdateRedundancy([]db.UpdatingObjectRedundancy{ | |||||
| { | { | ||||
| ObjectID: objectID, | ObjectID: objectID, | ||||
| FileHash: shardInfo.Hash, | FileHash: shardInfo.Hash, | ||||
| Size: shardInfo.Size, | Size: shardInfo.Size, | ||||
| Redundancy: cdssdk.NewNoneRedundancy(), | |||||
| Blocks: []stgmod.ObjectBlock{{ | |||||
| ObjectID: objectID, | |||||
| Index: 0, | |||||
| StorageID: targetStg.Storage.StorageID, | |||||
| FileHash: shardInfo.Hash, | |||||
| Size: shardInfo.Size, | |||||
| Redundancy: types.NewNoneRedundancy(), | |||||
| Blocks: []types.ObjectBlock{{ | |||||
| ObjectID: objectID, | |||||
| Index: 0, | |||||
| UserSpaceID: targetStg.Storage.StorageID, | |||||
| FileHash: shardInfo.Hash, | |||||
| Size: shardInfo.Size, | |||||
| }}, | }}, | ||||
| }, | }, | ||||
| })) | |||||
| }) | |||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Object{}, err | |||||
| return types.Object{}, err | |||||
| } | } | ||||
| getObj, err := coorCli.GetObjects(coormq.ReqGetObjects(userID, []cdssdk.ObjectID{objectID})) | |||||
| obj, err := svc.DB.Object().GetByID(svc.DB.DefCtx(), objectID) | |||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Object{}, err | |||||
| } | |||||
| if getObj.Objects[0] == nil { | |||||
| return cdssdk.Object{}, fmt.Errorf("object %v not found", objectID) | |||||
| return types.Object{}, err | |||||
| } | } | ||||
| return *getObj.Objects[0], nil | |||||
| return obj, nil | |||||
| } | } | ||||
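Putting the three multipart entry points together, a rough caller-side sketch of the intended flow (the function name and path are placeholders; the part block is assumed to have been produced by writing the part data into a user space beforehand, which is what yields its FileHash and Size):

func multipartSketch(svc *services.Service, pkgID types.PackageID, part types.ObjectBlock) error {
	// Create (or reset) the multipart-upload placeholder object.
	obj, err := svc.ObjectSvc().NewMultipartUploadObject(pkgID, "big/file.bin")
	if err != nil {
		return err
	}
	// Register one uploaded part as an object block; the object's composite
	// hash and total size are recomputed on every call.
	part.Index = 0
	if err := svc.AddMultipartUploadPart(obj.ObjectID, part); err != nil {
		return err
	}
	// Merge the listed part indexes into a single none-redundancy object.
	_, err = svc.ObjectSvc().CompleteMultipartUpload(obj.ObjectID, []int{0})
	return err
}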
| @@ -3,11 +3,12 @@ package services | |||||
| import ( | import ( | ||||
| "fmt" | "fmt" | ||||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||||
| stgglb "gitlink.org.cn/cloudream/storage2/common/globals" | |||||
| "gitlink.org.cn/cloudream/storage2/client/internal/db" | |||||
| "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| "gitlink.org.cn/cloudream/storage2/common/models/datamap" | |||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader" | "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader" | ||||
| coormq "gitlink.org.cn/cloudream/storage2/common/pkgs/mq/coordinator" | |||||
| ) | ) | ||||
| // PackageService provides the service interface for package-related operations | // PackageService provides the service interface for package-related operations | ||||
| @@ -20,126 +21,161 @@ func (svc *Service) PackageSvc() *PackageService { | |||||
| return &PackageService{Service: svc} | return &PackageService{Service: svc} | ||||
| } | } | ||||
| func (svc *PackageService) Get(userID cdssdk.UserID, packageID cdssdk.PackageID) (*cdssdk.Package, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| // Request the package info from the coordinator | |||||
| getResp, err := coorCli.GetPackage(coormq.NewGetPackage(userID, packageID)) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return &getResp.Package, nil | |||||
| func (svc *PackageService) Get(packageID types.PackageID) (types.Package, error) { | |||||
| return svc.DB.Package().GetByID(svc.DB.DefCtx(), packageID) | |||||
| } | } | ||||
| func (svc *PackageService) GetByFullName(userID cdssdk.UserID, bucketName string, packageName string) (*cdssdk.Package, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| getResp, err := coorCli.GetPackageByName(coormq.ReqGetPackageByName(userID, bucketName, packageName)) | |||||
| if err != nil { | |||||
| // TODO: attach log context, but do not wrap with %w directly, because callers need to inspect the error | |||||
| return nil, err | |||||
| } | |||||
| return &getResp.Package, nil | |||||
| func (svc *PackageService) GetByFullName(bucketName string, packageName string) (types.Package, error) { | |||||
| return svc.DB.Package().GetUserPackageByName(svc.DB.DefCtx(), bucketName, packageName) | |||||
| } | } | ||||
| func (svc *PackageService) GetBucketPackages(userID cdssdk.UserID, bucketID cdssdk.BucketID) ([]cdssdk.Package, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| getResp, err := coorCli.GetBucketPackages(coormq.NewGetBucketPackages(userID, bucketID)) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("requsting to coodinator: %w", err) | |||||
| } | |||||
| return getResp.Packages, nil | |||||
| func (svc *PackageService) GetBucketPackages(bucketID types.BucketID) ([]types.Package, error) { | |||||
| return svc.DB.Package().GetBucketPackages(svc.DB.DefCtx(), bucketID) | |||||
| } | } | ||||
| func (svc *PackageService) Create(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string) (cdssdk.Package, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| func (svc *PackageService) Create(bucketID types.BucketID, name string) (types.Package, error) { | |||||
| pkg, err := svc.DB.Package().Create(svc.DB.DefCtx(), bucketID, name) | |||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Package{}, fmt.Errorf("new coordinator client: %w", err) | |||||
| return types.Package{}, err | |||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| // Send the create-package request to the coordinator | |||||
| resp, err := coorCli.CreatePackage(coormq.NewCreatePackage(userID, bucketID, name)) | |||||
| if err != nil { | |||||
| return cdssdk.Package{}, err | |||||
| } | |||||
| svc.evtPub.Publish(&datamap.BodyNewPackage{ | |||||
| Info: pkg, | |||||
| }) | |||||
| return resp.Package, nil | |||||
| return pkg, nil | |||||
| } | } | ||||
| func (svc *PackageService) DownloadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID) (downloader.DownloadIterator, error) { | |||||
| func (svc *PackageService) DownloadPackage(packageID types.PackageID) (downloader.DownloadIterator, error) { | |||||
| // TODO: check the user ID | // TODO: check the user ID | ||||
| return svc.Downloader.DownloadPackage(packageID), nil | return svc.Downloader.DownloadPackage(packageID), nil | ||||
| } | } | ||||
| // DeletePackage deletes the specified package | // DeletePackage deletes the specified package | ||||
| func (svc *PackageService) DeletePackage(userID cdssdk.UserID, packageID cdssdk.PackageID) error { | |||||
| // Acquire a client from the coordinator MQ pool | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| func (svc *PackageService) DeletePackage(packageID types.PackageID) error { | |||||
| err := svc.DB.Package().DeleteComplete(svc.DB.DefCtx(), packageID) | |||||
| if err != nil { | if err != nil { | ||||
| return fmt.Errorf("new coordinator client: %w", err) | |||||
| return err | |||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| // Send the delete-package request to the coordinator | |||||
| _, err = coorCli.DeletePackage(coormq.NewDeletePackage(userID, packageID)) | |||||
| if err != nil { | |||||
| return fmt.Errorf("deleting package: %w", err) | |||||
| } | |||||
| svc.evtPub.Publish(&datamap.BodyPackageDeleted{ | |||||
| PackageID: packageID, | |||||
| }) | |||||
| return nil | return nil | ||||
| } | } | ||||
| func (svc *PackageService) Clone(userID cdssdk.UserID, packageID cdssdk.PackageID, bucketID cdssdk.BucketID, name string) (cdssdk.Package, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| func (svc *PackageService) Clone(packageID types.PackageID, bucketID types.BucketID, name string) (types.Package, error) { | |||||
| var pkg types.Package | |||||
| var oldObjIDs []types.ObjectID | |||||
| var newObjIDs []types.ObjectID | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| var err error | |||||
| pkg, err = svc.DB.Package().Create(tx, bucketID, name) | |||||
| if err != nil { | |||||
| return fmt.Errorf("creating package: %w", err) | |||||
| } | |||||
| objs, err := svc.DB.Object().GetPackageObjects(tx, packageID) | |||||
| if err != nil { | |||||
| return fmt.Errorf("getting package objects: %w", err) | |||||
| } | |||||
| objBlks, err := svc.DB.ObjectBlock().GetInPackageID(tx, packageID) | |||||
| if err != nil { | |||||
| return fmt.Errorf("getting object blocks: %w", err) | |||||
| } | |||||
| clonedObjs := make([]types.Object, len(objs)) | |||||
| for i, obj := range objs { | |||||
| clonedObjs[i] = obj | |||||
| clonedObjs[i].ObjectID = 0 | |||||
| clonedObjs[i].PackageID = pkg.PackageID | |||||
| } | |||||
| err = svc.DB.Object().BatchCreate(tx, &clonedObjs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch creating objects: %w", err) | |||||
| } | |||||
| oldToNew := make(map[types.ObjectID]types.ObjectID) | |||||
| for i, obj := range clonedObjs { | |||||
| oldToNew[objs[i].ObjectID] = obj.ObjectID | |||||
| oldObjIDs = append(oldObjIDs, objs[i].ObjectID) | |||||
| newObjIDs = append(newObjIDs, obj.ObjectID) | |||||
| } | |||||
| clonedBlks := make([]types.ObjectBlock, len(objBlks)) | |||||
| for i, blk := range objBlks { | |||||
| clonedBlks[i] = blk | |||||
| clonedBlks[i].ObjectID = oldToNew[blk.ObjectID] | |||||
| } | |||||
| err = svc.DB.ObjectBlock().BatchCreate(tx, clonedBlks) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch creating object blocks: %w", err) | |||||
| } | |||||
| return nil | |||||
| }) | |||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Package{}, fmt.Errorf("new coordinator client: %w", err) | |||||
| return types.Package{}, err | |||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| resp, err := coorCli.ClonePackage(coormq.ReqClonePackage(userID, packageID, bucketID, name)) | |||||
| if err != nil { | |||||
| return cdssdk.Package{}, fmt.Errorf("cloning package: %w", err) | |||||
| } | |||||
| svc.evtPub.Publish(&datamap.BodyPackageCloned{ | |||||
| SourcePackageID: packageID, | |||||
| NewPackage: pkg, | |||||
| SourceObjectIDs: oldObjIDs, | |||||
| NewObjectIDs: newObjIDs, | |||||
| }) | |||||
| return resp.Package, nil | |||||
| return pkg, nil | |||||
| } | } | ||||
| // GetCachedStorages gets the cache node info of the specified package | |||||
| func (svc *PackageService) GetCachedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) (cdssdk.PackageCachingInfo, error) { | |||||
| // Acquire a client from the coordinator MQ pool | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return cdssdk.PackageCachingInfo{}, fmt.Errorf("new coordinator client: %w", err) | |||||
| func (svc *PackageService) AddAccessStat(entries []db.AddAccessStatEntry) { | |||||
| pkgIDs := make([]types.PackageID, len(entries)) | |||||
| objIDs := make([]types.ObjectID, len(entries)) | |||||
| for i, e := range entries { | |||||
| pkgIDs[i] = e.PackageID | |||||
| objIDs[i] = e.ObjectID | |||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| // Ask the coordinator for the package's cache node info | |||||
| resp, err := coorCli.GetPackageCachedStorages(coormq.ReqGetPackageCachedStorages(userID, packageID)) | |||||
| if err != nil { | |||||
| return cdssdk.PackageCachingInfo{}, fmt.Errorf("get package cached storages: %w", err) | |||||
| } | |||||
| err := svc.DB.DoTx(func(tx db.SQLContext) error { | |||||
| avaiPkgIDs, err := svc.DB.Package().BatchTestPackageID(tx, pkgIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch test package id: %w", err) | |||||
| } | |||||
| avaiObjIDs, err := svc.DB.Object().BatchTestObjectID(tx, objIDs) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch test object id: %w", err) | |||||
| } | |||||
| var willAdds []db.AddAccessStatEntry | |||||
| for _, e := range entries { | |||||
| if avaiPkgIDs[e.PackageID] && avaiObjIDs[e.ObjectID] { | |||||
| willAdds = append(willAdds, e) | |||||
| } | |||||
| } | |||||
| if len(willAdds) > 0 { | |||||
| err := svc.DB.PackageAccessStat().BatchAddCounter(tx, willAdds) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch add package access stat counter: %w", err) | |||||
| } | |||||
| err = svc.DB.ObjectAccessStat().BatchAddCounter(tx, willAdds) | |||||
| if err != nil { | |||||
| return fmt.Errorf("batch add object access stat counter: %w", err) | |||||
| } | |||||
| } | |||||
| return nil | |||||
| }) | |||||
| // Build and return the caching info | |||||
| tmp := cdssdk.PackageCachingInfo{ | |||||
| StorageInfos: resp.StorageInfos, | |||||
| PackageSize: resp.PackageSize, | |||||
| if err != nil { | |||||
| logger.Warn(err.Error()) | |||||
| } | } | ||||
| return tmp, nil | |||||
| } | } | ||||
| @@ -1,42 +0,0 @@ | |||||
| package services | |||||
| import ( | |||||
| "fmt" | |||||
| stgglb "gitlink.org.cn/cloudream/storage2/common/globals" | |||||
| scmq "gitlink.org.cn/cloudream/storage2/common/pkgs/mq/scanner" | |||||
| scevt "gitlink.org.cn/cloudream/storage2/common/pkgs/mq/scanner/event" | |||||
| ) | |||||
| // ScannerService is the scanner service struct, wrapping scanner-related functionality. | |||||
| type ScannerService struct { | |||||
| *Service | |||||
| } | |||||
| // ScannerSvc returns a ScannerService instance that provides the scanner service. | |||||
| func (svc *Service) ScannerSvc() *ScannerService { | |||||
| return &ScannerService{Service: svc} | |||||
| } | |||||
| // PostEvent posts a data-inspection event | |||||
| // event: the event object to send. | |||||
| // isEmergency: whether this is an emergency event, which affects its handling priority. | |||||
| // dontMerge: whether this event must not be merged with other events. | |||||
| // Returns: any error encountered while sending the event. | |||||
| func (svc *ScannerService) PostEvent(event scevt.Event, isEmergency bool, dontMerge bool) error { | |||||
| // Get a client instance from the scanner message pool | |||||
| scCli, err := stgglb.ScannerMQPool.Acquire() | |||||
| if err != nil { | |||||
| return fmt.Errorf("new scanner client: %w", err) | |||||
| } | |||||
| // Make sure the scanner client is released before the function returns | |||||
| defer stgglb.ScannerMQPool.Release(scCli) | |||||
| // Send the event to the scanner client | |||||
| err = scCli.PostEvent(scmq.NewPostEvent(event, isEmergency, dontMerge)) | |||||
| if err != nil { | |||||
| return fmt.Errorf("request to scanner failed, err: %w", err) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| @@ -4,11 +4,13 @@ package services | |||||
| import ( | import ( | ||||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | "gitlink.org.cn/cloudream/common/pkgs/distlock" | ||||
| "gitlink.org.cn/cloudream/storage2/client/internal/db" | |||||
| "gitlink.org.cn/cloudream/storage2/client/internal/task" | "gitlink.org.cn/cloudream/storage2/client/internal/task" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/accessstat" | "gitlink.org.cn/cloudream/storage2/common/pkgs/accessstat" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader" | "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader/strategy" | "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader/strategy" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/metacache" | "gitlink.org.cn/cloudream/storage2/common/pkgs/metacache" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/sysevent" | |||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/uploader" | "gitlink.org.cn/cloudream/storage2/common/pkgs/uploader" | ||||
| ) | ) | ||||
| @@ -21,6 +23,8 @@ type Service struct { | |||||
| Uploader *uploader.Uploader | Uploader *uploader.Uploader | ||||
| StrategySelector *strategy.Selector | StrategySelector *strategy.Selector | ||||
| StorageMeta *metacache.StorageMeta | StorageMeta *metacache.StorageMeta | ||||
| DB *db.DB | |||||
| evtPub *sysevent.Publisher | |||||
| } | } | ||||
| func NewService( | func NewService( | ||||
| @@ -31,6 +35,8 @@ func NewService( | |||||
| uploder *uploader.Uploader, | uploder *uploader.Uploader, | ||||
| strategySelector *strategy.Selector, | strategySelector *strategy.Selector, | ||||
| storageMeta *metacache.StorageMeta, | storageMeta *metacache.StorageMeta, | ||||
| db *db.DB, | |||||
| evtPub *sysevent.Publisher, | |||||
| ) (*Service, error) { | ) (*Service, error) { | ||||
| return &Service{ | return &Service{ | ||||
| DistLock: distlock, | DistLock: distlock, | ||||
| @@ -40,5 +46,7 @@ func NewService( | |||||
| Uploader: uploder, | Uploader: uploder, | ||||
| StrategySelector: strategySelector, | StrategySelector: strategySelector, | ||||
| StorageMeta: storageMeta, | StorageMeta: storageMeta, | ||||
| DB: db, | |||||
| evtPub: evtPub, | |||||
| }, nil | }, nil | ||||
| } | } | ||||
| @@ -6,11 +6,11 @@ import ( | |||||
| "path" | "path" | ||||
| "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" | "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" | ||||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||||
| cdssdk "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| stgglb "gitlink.org.cn/cloudream/storage2/common/globals" | stgglb "gitlink.org.cn/cloudream/storage2/common/globals" | ||||
| stgmod "gitlink.org.cn/cloudream/storage2/common/models" | stgmod "gitlink.org.cn/cloudream/storage2/common/models" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/db2/model" | |||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/distlock/reqbuilder" | "gitlink.org.cn/cloudream/storage2/common/pkgs/distlock/reqbuilder" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader/strategy" | "gitlink.org.cn/cloudream/storage2/common/pkgs/downloader/strategy" | ||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2" | "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2" | ||||
| @@ -20,72 +20,39 @@ import ( | |||||
| "gitlink.org.cn/cloudream/storage2/common/pkgs/storage/factory" | "gitlink.org.cn/cloudream/storage2/common/pkgs/storage/factory" | ||||
| ) | ) | ||||
| type StorageService struct { | |||||
| type UserSpaceService struct { | |||||
| *Service | *Service | ||||
| } | } | ||||
| func (svc *Service) StorageSvc() *StorageService { | |||||
| return &StorageService{Service: svc} | |||||
| func (svc *Service) UserSpaceSvc() *UserSpaceService { | |||||
| return &UserSpaceService{Service: svc} | |||||
| } | } | ||||
| func (svc *StorageService) Get(userID cdssdk.UserID, storageID cdssdk.StorageID) (*model.Storage, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| getResp, err := coorCli.GetStorage(coormq.ReqGetStorage(userID, storageID)) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("request to coordinator: %w", err) | |||||
| } | |||||
| return &getResp.Storage, nil | |||||
| func (svc *UserSpaceService) Get(userspaceID cdssdk.UserSpaceID) (types.UserSpace, error) { | |||||
| return svc.DB.UserSpace().GetByID(svc.DB.DefCtx(), userspaceID) | |||||
| } | } | ||||
| func (svc *StorageService) GetByName(userID cdssdk.UserID, name string) (*model.Storage, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| getResp, err := coorCli.GetStorageByName(coormq.ReqGetStorageByName(userID, name)) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("request to coordinator: %w", err) | |||||
| } | |||||
| return &getResp.Storage, nil | |||||
| func (svc *UserSpaceService) GetByName(name string) (types.UserSpace, error) { | |||||
| return svc.DB.UserSpace().GetByName(svc.DB.DefCtx(), name) | |||||
| } | } | ||||
| func (svc *StorageService) GetDetails(stgIDs []cdssdk.StorageID) ([]*stgmod.StorageDetail, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| getResp, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(stgIDs)) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("request to coordinator: %w", err) | |||||
| } | |||||
| func (svc *UserSpaceService) GetDetails(stgIDs []cdssdk.UserSpaceID) ([]*stgmod.UserSpaceDetail, error) { | |||||
| return getResp.Storages, nil | |||||
| } | } | ||||
| func (svc *StorageService) LoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID, rootPath string) error { | |||||
| func (svc *UserSpaceService) LoadPackage(packageID cdssdk.PackageID, userspaceID cdssdk.UserSpaceID, rootPath string) error { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | coorCli, err := stgglb.CoordinatorMQPool.Acquire() | ||||
| if err != nil { | if err != nil { | ||||
| return fmt.Errorf("new coordinator client: %w", err) | return fmt.Errorf("new coordinator client: %w", err) | ||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | defer stgglb.CoordinatorMQPool.Release(coorCli) | ||||
| destStg := svc.StorageMeta.Get(storageID) | |||||
| destStg := svc.UserSpaceMeta.Get(userspaceID) | |||||
| if destStg == nil { | if destStg == nil { | ||||
| return fmt.Errorf("storage not found: %d", storageID) | |||||
| return fmt.Errorf("userspace not found: %d", userspaceID) | |||||
| } | } | ||||
| if destStg.MasterHub == nil { | if destStg.MasterHub == nil { | ||||
| return fmt.Errorf("storage %v has no master hub", storageID) | |||||
| return fmt.Errorf("userspace %v has no master hub", userspaceID) | |||||
| } | } | ||||
| details, err := coorCli.GetPackageObjectDetails(coormq.ReqGetPackageObjectDetails(packageID)) | details, err := coorCli.GetPackageObjectDetails(coormq.ReqGetPackageObjectDetails(packageID)) | ||||
| @@ -107,11 +74,11 @@ func (svc *StorageService) LoadPackage(userID cdssdk.UserID, packageID cdssdk.Pa | |||||
| ft := ioswitch2.NewFromTo() | ft := ioswitch2.NewFromTo() | ||||
| switch strg := strg.(type) { | switch strg := strg.(type) { | ||||
| case *strategy.DirectStrategy: | case *strategy.DirectStrategy: | ||||
| ft.AddFrom(ioswitch2.NewFromShardstore(strg.Detail.Object.FileHash, *strg.Storage.MasterHub, strg.Storage, ioswitch2.RawStream())) | |||||
| ft.AddFrom(ioswitch2.NewFromShardstore(strg.Detail.Object.FileHash, *strg.UserSpace.MasterHub, strg.UserSpace, ioswitch2.RawStream())) | |||||
| case *strategy.ECReconstructStrategy: | case *strategy.ECReconstructStrategy: | ||||
| for i, b := range strg.Blocks { | for i, b := range strg.Blocks { | ||||
| ft.AddFrom(ioswitch2.NewFromShardstore(b.FileHash, *strg.Storages[i].MasterHub, strg.Storages[i], ioswitch2.ECStream(b.Index))) | |||||
| ft.AddFrom(ioswitch2.NewFromShardstore(b.FileHash, *strg.UserSpaces[i].MasterHub, strg.UserSpaces[i], ioswitch2.ECStream(b.Index))) | |||||
| ft.ECParam = &strg.Redundancy | ft.ECParam = &strg.Redundancy | ||||
| } | } | ||||
| default: | default: | ||||
| @@ -132,10 +99,10 @@ func (svc *StorageService) LoadPackage(userID cdssdk.UserID, packageID cdssdk.Pa | |||||
| } | } | ||||
| mutex, err := reqbuilder.NewBuilder(). | mutex, err := reqbuilder.NewBuilder(). | ||||
| // Protect the files downloaded into the storage directory | |||||
| Storage().Buzy(storageID). | |||||
| // Protect the files downloaded into the userspace directory | |||||
| UserSpace().Buzy(userspaceID). | |||||
| // Protect the files that are also saved to IPFS while downloading | // Protect the files that are also saved to IPFS while downloading | ||||
| Shard().Buzy(storageID). | |||||
| Shard().Buzy(userspaceID). | |||||
| MutexLock(svc.DistLock) | MutexLock(svc.DistLock) | ||||
| if err != nil { | if err != nil { | ||||
| return fmt.Errorf("acquire locks failed, err: %w", err) | return fmt.Errorf("acquire locks failed, err: %w", err) | ||||
| @@ -143,7 +110,7 @@ func (svc *StorageService) LoadPackage(userID cdssdk.UserID, packageID cdssdk.Pa | |||||
| // Record access statistics | // Record access statistics | ||||
| for _, obj := range details.Objects { | for _, obj := range details.Objects { | ||||
| svc.AccessStat.AddAccessCounter(obj.Object.ObjectID, packageID, storageID, 1) | |||||
| svc.AccessStat.AddAccessCounter(obj.Object.ObjectID, packageID, userspaceID, 1) | |||||
| } | } | ||||
| defer mutex.Unlock() | defer mutex.Unlock() | ||||
| @@ -155,34 +122,34 @@ func (svc *StorageService) LoadPackage(userID cdssdk.UserID, packageID cdssdk.Pa | |||||
| } | } | ||||
| // It is fine if this call fails | // It is fine if this call fails | ||||
| coorCli.StoragePackageLoaded(coormq.ReqStoragePackageLoaded(userID, storageID, packageID, rootPath, pinned)) | |||||
| coorCli.UserSpacePackageLoaded(coormq.ReqUserSpacePackageLoaded(userID, userspaceID, packageID, rootPath, pinned)) | |||||
| return nil | return nil | ||||
| } | } | ||||
| // Ask a node to start the task of uploading files from the Storage. Returns the node ID and the task ID | |||||
| func (svc *StorageService) StorageCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string, storageAffinity cdssdk.StorageID) (cdssdk.Package, error) { | |||||
| // Ask a node to start the task of uploading files from the UserSpace. Returns the node ID and the task ID | |||||
| func (svc *UserSpaceService) UserSpaceCreatePackage(bucketID cdssdk.BucketID, name string, userspaceID cdssdk.UserSpaceID, path string, userspaceAffinity cdssdk.UserSpaceID) (cdssdk.Package, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | coorCli, err := stgglb.CoordinatorMQPool.Acquire() | ||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Package{}, fmt.Errorf("new coordinator client: %w", err) | return cdssdk.Package{}, fmt.Errorf("new coordinator client: %w", err) | ||||
| } | } | ||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | defer stgglb.CoordinatorMQPool.Release(coorCli) | ||||
| stgResp, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{storageID})) | |||||
| stgResp, err := coorCli.GetUserSpaceDetails(coormq.ReqGetUserSpaceDetails([]cdssdk.UserSpaceID{userspaceID})) | |||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Package{}, fmt.Errorf("getting storage info: %w", err) | |||||
| return cdssdk.Package{}, fmt.Errorf("getting userspace info: %w", err) | |||||
| } | } | ||||
| if stgResp.Storages[0].Storage.ShardStore == nil { | |||||
| return cdssdk.Package{}, fmt.Errorf("shard storage is not enabled") | |||||
| if stgResp.UserSpaces[0].UserSpace.ShardStore == nil { | |||||
| return cdssdk.Package{}, fmt.Errorf("shard userspace is not enabled") | |||||
| } | } | ||||
| agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storages[0].MasterHub.HubID) | |||||
| agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.UserSpaces[0].MasterHub.HubID) | |||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Package{}, fmt.Errorf("new agent client: %w", err) | return cdssdk.Package{}, fmt.Errorf("new agent client: %w", err) | ||||
| } | } | ||||
| defer stgglb.AgentMQPool.Release(agentCli) | defer stgglb.AgentMQPool.Release(agentCli) | ||||
| createResp, err := agentCli.StorageCreatePackage(agtmq.ReqStorageCreatePackage(userID, bucketID, name, storageID, path, storageAffinity)) | |||||
| createResp, err := agentCli.UserSpaceCreatePackage(agtmq.ReqUserSpaceCreatePackage(bucketID, name, userspaceID, path, userspaceAffinity)) | |||||
| if err != nil { | if err != nil { | ||||
| return cdssdk.Package{}, err | return cdssdk.Package{}, err | ||||
| } | } | ||||
| @@ -1,5 +1,6 @@ | |||||
| package services | package services | ||||
| /* | |||||
| import ( | import ( | ||||
| "fmt" | "fmt" | ||||
| @@ -21,3 +22,4 @@ func (svc *ObjectService) GetDatabaseAll() (*coormq.GetDatabaseAllResp, error) { | |||||
| return getResp, nil | return getResp, nil | ||||
| } | } | ||||
| */ | |||||
| @@ -1,43 +0,0 @@ | |||||
| package services | |||||
| import ( | |||||
| "fmt" | |||||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||||
| stgglb "gitlink.org.cn/cloudream/storage2/common/globals" | |||||
| coormq "gitlink.org.cn/cloudream/storage2/common/pkgs/mq/coordinator" | |||||
| ) | |||||
| type UserService struct { | |||||
| *Service | |||||
| } | |||||
| func (svc *Service) UserSvc() *UserService { | |||||
| return &UserService{Service: svc} | |||||
| } | |||||
| func (svc *UserService) Create(name string) (cdssdk.User, error) { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return cdssdk.User{}, fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| resp, err := coorCli.CreateUser(coormq.ReqCreateUser(name)) | |||||
| if err != nil { | |||||
| return cdssdk.User{}, err | |||||
| } | |||||
| return resp.User, nil | |||||
| } | |||||
| func (svc *UserService) Delete(userID cdssdk.UserID) error { | |||||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||||
| if err != nil { | |||||
| return fmt.Errorf("new coordinator client: %w", err) | |||||
| } | |||||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||||
| _, err = coorCli.DeleteUser(coormq.ReqDeleteUser(userID)) | |||||
| return err | |||||
| } | |||||
| @@ -0,0 +1,23 @@ | |||||
| package services | |||||
| import ( | |||||
| cdssdk "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| "gitlink.org.cn/cloudream/storage2/common/models/datamap" | |||||
| ) | |||||
| func getBlockTypeFromRed(red cdssdk.Redundancy) string { | |||||
| switch red.(type) { | |||||
| case *cdssdk.NoneRedundancy: | |||||
| return datamap.BlockTypeRaw | |||||
| case *cdssdk.ECRedundancy: | |||||
| return datamap.BlockTypeEC | |||||
| case *cdssdk.LRCRedundancy: | |||||
| return datamap.BlockTypeEC | |||||
| case *cdssdk.SegmentRedundancy: | |||||
| return datamap.BlockTypeSegment | |||||
| } | |||||
| return "" | |||||
| } | |||||
| @@ -0,0 +1,101 @@ | |||||
| package api | |||||
| import ( | |||||
| "net/http" | |||||
| "gitlink.org.cn/cloudream/common/sdks" | |||||
| cdssdk "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| ) | |||||
| type BucketService struct { | |||||
| *Client | |||||
| } | |||||
| func (c *Client) Bucket() *BucketService { | |||||
| return &BucketService{c} | |||||
| } | |||||
| const BucketGetByNamePath = "/bucket/getByName" | |||||
| type BucketGetByName struct { | |||||
| Name string `url:"name" form:"name" binding:"required"` | |||||
| } | |||||
| func (r *BucketGetByName) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, BucketGetByNamePath, r) | |||||
| } | |||||
| type BucketGetByNameResp struct { | |||||
| Bucket cdssdk.Bucket `json:"bucket"` | |||||
| } | |||||
| func (r *BucketGetByNameResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *BucketService) GetByName(req BucketGetByName) (*BucketGetByNameResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &BucketGetByNameResp{}) | |||||
| } | |||||
| const BucketCreatePath = "/bucket/create" | |||||
| type BucketCreate struct { | |||||
| Name string `json:"name" binding:"required"` | |||||
| } | |||||
| func (r *BucketCreate) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, BucketCreatePath, r) | |||||
| } | |||||
| type BucketCreateResp struct { | |||||
| Bucket cdssdk.Bucket `json:"bucket"` | |||||
| } | |||||
| func (r *BucketCreateResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *BucketService) Create(req BucketCreate) (*BucketCreateResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &BucketCreateResp{}) | |||||
| } | |||||
| const BucketDeletePath = "/bucket/delete" | |||||
| type BucketDelete struct { | |||||
| BucketID cdssdk.BucketID `json:"bucketID" binding:"required"` | |||||
| } | |||||
| func (r *BucketDelete) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, BucketDeletePath, r) | |||||
| } | |||||
| type BucketDeleteResp struct{} | |||||
| func (r *BucketDeleteResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *BucketService) Delete(req BucketDelete) error { | |||||
| return JSONAPINoData(c.cfg, http.DefaultClient, &req) | |||||
| } | |||||
| const BucketListUserBucketsPath = "/bucket/listUserBuckets" | |||||
| type BucketListUserBucketsReq struct { | |||||
| } | |||||
| func (r *BucketListUserBucketsReq) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, BucketListUserBucketsPath, r) | |||||
| } | |||||
| type BucketListUserBucketsResp struct { | |||||
| Buckets []cdssdk.Bucket `json:"buckets"` | |||||
| } | |||||
| func (r *BucketListUserBucketsResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *BucketService) ListUserBuckets(req BucketListUserBucketsReq) (*BucketListUserBucketsResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &BucketListUserBucketsResp{}) | |||||
| } | |||||
| @@ -0,0 +1,31 @@ | |||||
| package api | |||||
| /* | |||||
| import ( | |||||
| "net/http" | |||||
| "gitlink.org.cn/cloudream/common/sdks" | |||||
| cdssdk "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| ) | |||||
| const CacheMovePackagePath = "/cache/movePackage" | |||||
| type CacheMovePackageReq struct { | |||||
| PackageID cdssdk.PackageID `json:"packageID"` | |||||
| StorageID cdssdk.StorageID `json:"storageID"` | |||||
| } | |||||
| func (r *CacheMovePackageReq) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, CacheMovePackagePath, r) | |||||
| } | |||||
| type CacheMovePackageResp struct{} | |||||
| func (r *CacheMovePackageResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *Client) CacheMovePackage(req CacheMovePackageReq) (*CacheMovePackageResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &CacheMovePackageResp{}) | |||||
| } | |||||
| */ | |||||
| @@ -0,0 +1,51 @@ | |||||
| package api | |||||
| import ( | |||||
| "gitlink.org.cn/cloudream/common/sdks" | |||||
| ) | |||||
| type response[T any] struct { | |||||
| Code string `json:"code"` | |||||
| Message string `json:"message"` | |||||
| Data T `json:"data"` | |||||
| } | |||||
| func (r *response[T]) ToError() *sdks.CodeMessageError { | |||||
| return &sdks.CodeMessageError{ | |||||
| Code: r.Code, | |||||
| Message: r.Message, | |||||
| } | |||||
| } | |||||
| type Client struct { | |||||
| cfg *Config | |||||
| } | |||||
| func NewClient(cfg *Config) *Client { | |||||
| return &Client{ | |||||
| cfg: cfg, | |||||
| } | |||||
| } | |||||
| type Pool interface { | |||||
| Acquire() (*Client, error) | |||||
| Release(cli *Client) | |||||
| } | |||||
| type pool struct { | |||||
| cfg *Config | |||||
| } | |||||
| func NewPool(cfg *Config) Pool { | |||||
| return &pool{ | |||||
| cfg: cfg, | |||||
| } | |||||
| } | |||||
| func (p *pool) Acquire() (*Client, error) { | |||||
| cli := NewClient(p.cfg) | |||||
| return cli, nil | |||||
| } | |||||
| func (p *pool) Release(cli *Client) { | |||||
| } | |||||
| @@ -0,0 +1,7 @@ | |||||
| package api | |||||
| type Config struct { | |||||
| URL string `json:"url"` | |||||
| AccessKey string `json:"accessKey"` | |||||
| SecretKey string `json:"secretKey"` | |||||
| } | |||||
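A short sketch of how the new HTTP API client is meant to be used end to end (the URL, keys, bucket name and function name are placeholders):

func apiSketch() error {
	cli := api.NewClient(&api.Config{
		URL:       "http://127.0.0.1:7890",
		AccessKey: "ak",
		SecretKey: "sk",
	})

	// Create a bucket, then list the caller's buckets.
	created, err := cli.Bucket().Create(api.BucketCreate{Name: "demo"})
	if err != nil {
		return err
	}
	fmt.Println("created bucket", created.Bucket.BucketID)

	listed, err := cli.Bucket().ListUserBuckets(api.BucketListUserBucketsReq{})
	if err != nil {
		return err
	}
	fmt.Println("bucket count:", len(listed.Buckets))
	return nil
}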
| @@ -0,0 +1,556 @@ | |||||
| package api | |||||
| import ( | |||||
| "context" | |||||
| "fmt" | |||||
| "io" | |||||
| "mime" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "strings" | |||||
| "time" | |||||
| v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" | |||||
| "github.com/aws/aws-sdk-go-v2/credentials" | |||||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/iterator" | |||||
| "gitlink.org.cn/cloudream/common/sdks" | |||||
| "gitlink.org.cn/cloudream/common/utils/http2" | |||||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||||
| "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| ) | |||||
| type ObjectService struct { | |||||
| *Client | |||||
| } | |||||
| func (c *Client) Object() *ObjectService { | |||||
| return &ObjectService{ | |||||
| Client: c, | |||||
| } | |||||
| } | |||||
| const ObjectListPathByPath = "/object/listByPath" | |||||
| type ObjectListByPath struct { | |||||
| PackageID types.PackageID `form:"packageID" binding:"required" url:"packageID" json:"packageID"` | |||||
| Path string `form:"path" url:"path" json:"path"` // an empty string is allowed | |||||
| IsPrefix bool `form:"isPrefix" url:"isPrefix" json:"isPrefix"` | |||||
| NoRecursive bool `form:"noRecursive" url:"noRecursive" json:"noRecursive"` // only effective when isPrefix is true: return only the objects directly under the Prefix, and the common prefixes of deeper objects | |||||
| MaxKeys int `form:"maxKeys" url:"maxKeys" json:"maxKeys"` | |||||
| ContinuationToken string `form:"continuationToken" url:"continuationToken" json:"continuationToken"` // used for pagination; an empty string means starting from the beginning | |||||
| } | |||||
| func (r *ObjectListByPath) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, ObjectListPathByPath, r) | |||||
| } | |||||
| type ObjectListByPathResp struct { | |||||
| CommonPrefixes []string `json:"commonPrefixes"` // only meaningful when IsPrefix and NoRecursive are both true; contains the shared prefixes of deeper objects | |||||
| Objects []types.Object `json:"objects"` // if IsPrefix is true and NoRecursive is false, all matching objects are returned; otherwise only the objects directly under the Prefix | |||||
| IsTruncated bool `json:"isTruncated"` // whether there are more objects | |||||
| NextContinuationToken string `json:"nextContinuationToken"` // used for pagination; when IsTruncated is true, pass this value as the ContinuationToken of the next request | |||||
| } | |||||
| func (r *ObjectListByPathResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) ListByPath(req ObjectListByPath) (*ObjectListByPathResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &ObjectListByPathResp{}) | |||||
| } | |||||
| const ObjectListByIDsPath = "/object/listByIDs" | |||||
| type ObjectListByIDs struct { | |||||
| ObjectIDs []types.ObjectID `form:"objectIDs" binding:"required" url:"objectIDs"` | |||||
| } | |||||
| func (r *ObjectListByIDs) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, ObjectListByIDsPath, r) | |||||
| } | |||||
| type ObjectListByIDsResp struct { | |||||
| Objects []*types.Object `json:"object"` // one entry per requested ObjectID; the entry is nil when that ID does not exist | |||||
| } | |||||
| func (r *ObjectListByIDsResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) ListByIDs(req ObjectListByIDs) (*ObjectListByIDsResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &ObjectListByIDsResp{}) | |||||
| } | |||||
| const ObjectUploadPath = "/object/upload" | |||||
| type ObjectUpload struct { | |||||
| ObjectUploadInfo | |||||
| Files UploadObjectIterator `json:"-"` | |||||
| } | |||||
| type ObjectUploadInfo struct { | |||||
| PackageID types.PackageID `json:"packageID" binding:"required"` | |||||
| Affinity types.UserSpaceID `json:"affinity"` | |||||
| LoadTo []types.UserSpaceID `json:"loadTo"` | |||||
| LoadToPath []string `json:"loadToPath"` | |||||
| } | |||||
| type UploadingObject struct { | |||||
| Path string | |||||
| File io.ReadCloser | |||||
| } | |||||
| type UploadObjectIterator = iterator.Iterator[*UploadingObject] | |||||
| type ObjectUploadResp struct { | |||||
| Uploadeds []types.Object `json:"uploadeds"` | |||||
| } | |||||
| func (c *ObjectService) Upload(req ObjectUpload) (*ObjectUploadResp, error) { | |||||
| type uploadInfo struct { | |||||
| Info string `url:"info"` | |||||
| } | |||||
| url, err := url.JoinPath(c.cfg.URL, ObjectUploadPath) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| infoJSON, err := serder.ObjectToJSON(req) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("upload info to json: %w", err) | |||||
| } | |||||
| resp, err := PostMultiPart(c.cfg, url, | |||||
| uploadInfo{Info: string(infoJSON)}, | |||||
| iterator.Map(req.Files, func(src *UploadingObject) (*http2.IterMultiPartFile, error) { | |||||
| return &http2.IterMultiPartFile{ | |||||
| FieldName: "files", | |||||
| FileName: src.Path, | |||||
| File: src.File, | |||||
| }, nil | |||||
| })) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| contType := resp.Header.Get("Content-Type") | |||||
| if strings.Contains(contType, http2.ContentTypeJSON) { | |||||
| var err error | |||||
| var codeResp response[ObjectUploadResp] | |||||
| if codeResp, err = serder.JSONToObjectStreamEx[response[ObjectUploadResp]](resp.Body); err != nil { | |||||
| return nil, fmt.Errorf("parsing response: %w", err) | |||||
| } | |||||
| if codeResp.Code == errorcode.OK { | |||||
| return &codeResp.Data, nil | |||||
| } | |||||
| return nil, codeResp.ToError() | |||||
| } | |||||
| return nil, fmt.Errorf("unknow response content type: %s", contType) | |||||
| } | |||||
| const ObjectDownloadPath = "/object/download" | |||||
| type ObjectDownload struct { | |||||
| ObjectID types.ObjectID `form:"objectID" url:"objectID" binding:"required"` | |||||
| Offset int64 `form:"offset" url:"offset,omitempty"` | |||||
| Length *int64 `form:"length" url:"length,omitempty"` | |||||
| } | |||||
| func (r *ObjectDownload) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, ObjectDownloadPath, r) | |||||
| } | |||||
| type DownloadingObject struct { | |||||
| Path string | |||||
| File io.ReadCloser | |||||
| } | |||||
| func (c *ObjectService) Download(req ObjectDownload) (*DownloadingObject, error) { | |||||
| httpReq, err := req.MakeParam().MakeRequest(c.cfg.URL) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if c.cfg.AccessKey != "" && c.cfg.SecretKey != "" { | |||||
| prod := credentials.NewStaticCredentialsProvider(c.cfg.AccessKey, c.cfg.SecretKey, "") | |||||
| cred, err := prod.Retrieve(context.TODO()) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| signer := v4.NewSigner() | |||||
| err = signer.SignHTTP(context.Background(), cred, httpReq, "", AuthService, AuthRegion, time.Now()) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| resp, err := http.DefaultClient.Do(httpReq) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| contType := resp.Header.Get("Content-Type") | |||||
| if strings.Contains(contType, http2.ContentTypeJSON) { | |||||
| var codeResp response[any] | |||||
| if err := serder.JSONToObjectStream(resp.Body, &codeResp); err != nil { | |||||
| return nil, fmt.Errorf("parsing response: %w", err) | |||||
| } | |||||
| return nil, codeResp.ToError() | |||||
| } | |||||
| _, params, err := mime.ParseMediaType(resp.Header.Get("Content-Disposition")) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("parsing content disposition: %w", err) | |||||
| } | |||||
| return &DownloadingObject{ | |||||
| Path: params["filename"], | |||||
| File: resp.Body, | |||||
| }, nil | |||||
| } | |||||
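A small sketch of consuming the returned stream, assuming the caller is responsible for closing File; large objects should be streamed rather than buffered:

// readDownload drains a DownloadingObject into memory and closes the
// underlying stream.
func readDownload(obj *DownloadingObject) ([]byte, error) {
	defer obj.File.Close()
	return io.ReadAll(obj.File)
}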
| const ObjectDownloadByPathPath = "/object/downloadByPath" | |||||
| type ObjectDownloadByPath struct { | |||||
| PackageID types.PackageID `form:"packageID" url:"packageID" binding:"required"` | |||||
| Path string `form:"path" url:"path" binding:"required"` | |||||
| Offset int64 `form:"offset" url:"offset,omitempty"` | |||||
| Length *int64 `form:"length" url:"length,omitempty"` | |||||
| } | |||||
| func (r *ObjectDownloadByPath) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, ObjectDownloadByPathPath, r) | |||||
| } | |||||
| func (c *ObjectService) DownloadByPath(req ObjectDownloadByPath) (*DownloadingObject, error) { | |||||
| httpReq, err := req.MakeParam().MakeRequest(c.cfg.URL) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if c.cfg.AccessKey != "" && c.cfg.SecretKey != "" { | |||||
| prod := credentials.NewStaticCredentialsProvider(c.cfg.AccessKey, c.cfg.SecretKey, "") | |||||
| cred, err := prod.Retrieve(context.TODO()) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| signer := v4.NewSigner() | |||||
| err = signer.SignHTTP(context.Background(), cred, httpReq, "", AuthService, AuthRegion, time.Now()) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| resp, err := http.DefaultClient.Do(httpReq) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| contType := resp.Header.Get("Content-Type") | |||||
| if strings.Contains(contType, http2.ContentTypeJSON) { | |||||
| var codeResp response[any] | |||||
| if err := serder.JSONToObjectStream(resp.Body, &codeResp); err != nil { | |||||
| return nil, fmt.Errorf("parsing response: %w", err) | |||||
| } | |||||
| return nil, codeResp.ToError() | |||||
| } | |||||
| _, params, err := mime.ParseMediaType(resp.Header.Get("Content-Disposition")) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("parsing content disposition: %w", err) | |||||
| } | |||||
| return &DownloadingObject{ | |||||
| Path: params["filename"], | |||||
| File: resp.Body, | |||||
| }, nil | |||||
| } | |||||
| const ObjectUpdateInfoPath = "/object/updateInfo" | |||||
| type UpdatingObject struct { | |||||
| ObjectID types.ObjectID `json:"objectID" binding:"required"` | |||||
| UpdateTime time.Time `json:"updateTime" binding:"required"` | |||||
| } | |||||
| func (u *UpdatingObject) ApplyTo(obj *types.Object) { | |||||
| obj.UpdateTime = u.UpdateTime | |||||
| } | |||||
| type ObjectUpdateInfo struct { | |||||
| Updatings []UpdatingObject `json:"updatings" binding:"required"` | |||||
| } | |||||
| func (r *ObjectUpdateInfo) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, ObjectUpdateInfoPath, r) | |||||
| } | |||||
| type ObjectUpdateInfoResp struct { | |||||
| Successes []types.ObjectID `json:"successes"` | |||||
| } | |||||
| func (r *ObjectUpdateInfoResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) UpdateInfo(req ObjectUpdateInfo) (*ObjectUpdateInfoResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &ObjectUpdateInfoResp{}) | |||||
| } | |||||
| const ObjectUpdateInfoByPathPath = "/object/updateInfoByPath" | |||||
| type ObjectUpdateInfoByPath struct { | |||||
| PackageID types.PackageID `json:"packageID" binding:"required"` | |||||
| Path string `json:"path" binding:"required"` | |||||
| UpdateTime time.Time `json:"updateTime" binding:"required"` | |||||
| } | |||||
| func (r *ObjectUpdateInfoByPath) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, ObjectUpdateInfoByPathPath, r) | |||||
| } | |||||
| type ObjectUpdateInfoByPathResp struct{} | |||||
| func (r *ObjectUpdateInfoByPathResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) UpdateInfoByPath(req ObjectUpdateInfoByPath) (*ObjectUpdateInfoByPathResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &ObjectUpdateInfoByPathResp{}) | |||||
| } | |||||
| const ObjectMovePath = "/object/move" | |||||
| type MovingObject struct { | |||||
| ObjectID types.ObjectID `json:"objectID" binding:"required"` | |||||
| PackageID types.PackageID `json:"packageID" binding:"required"` | |||||
| Path string `json:"path" binding:"required"` | |||||
| } | |||||
| func (m *MovingObject) ApplyTo(obj *types.Object) { | |||||
| obj.PackageID = m.PackageID | |||||
| obj.Path = m.Path | |||||
| } | |||||
| type ObjectMove struct { | |||||
| Movings []MovingObject `json:"movings" binding:"required"` | |||||
| } | |||||
| func (r *ObjectMove) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, ObjectMovePath, r) | |||||
| } | |||||
| type ObjectMoveResp struct { | |||||
| Successes []types.ObjectID `json:"successes"` | |||||
| } | |||||
| func (r *ObjectMoveResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) Move(req ObjectMove) (*ObjectMoveResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &ObjectMoveResp{}) | |||||
| } | |||||
| const ObjectDeletePath = "/object/delete" | |||||
| type ObjectDelete struct { | |||||
| ObjectIDs []types.ObjectID `json:"objectIDs" binding:"required"` | |||||
| } | |||||
| func (r *ObjectDelete) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, ObjectDeletePath, r) | |||||
| } | |||||
| type ObjectDeleteResp struct{} | |||||
| func (r *ObjectDeleteResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) Delete(req ObjectDelete) error { | |||||
| return JSONAPINoData(c.cfg, http.DefaultClient, &req) | |||||
| } | |||||
| const ObjectDeleteByPathPath = "/object/deleteByPath" | |||||
| type ObjectDeleteByPath struct { | |||||
| PackageID types.PackageID `json:"packageID" binding:"required"` | |||||
| Path string `json:"path" binding:"required"` | |||||
| } | |||||
| func (r *ObjectDeleteByPath) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, ObjectDeleteByPathPath, r) | |||||
| } | |||||
| type ObjectDeleteByPathResp struct{} | |||||
| func (r *ObjectDeleteByPathResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) DeleteByPath(req ObjectDeleteByPath) error { | |||||
| return JSONAPINoData(c.cfg, http.DefaultClient, &req) | |||||
| } | |||||
| const ObjectClonePath = "/object/clone" | |||||
| type ObjectClone struct { | |||||
| Clonings []CloningObject `json:"clonings" binding:"required"` | |||||
| } | |||||
| func (r *ObjectClone) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, ObjectClonePath, r) | |||||
| } | |||||
| type CloningObject struct { | |||||
| ObjectID types.ObjectID `json:"objectID" binding:"required"` | |||||
| NewPath string `json:"newPath" binding:"required"` | |||||
| NewPackageID types.PackageID `json:"newPackageID" binding:"required"` | |||||
| } | |||||
| type ObjectCloneResp struct { | |||||
| Objects []*types.Object `json:"objects"` | |||||
| } | |||||
| func (r *ObjectCloneResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) Clone(req ObjectClone) (*ObjectCloneResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &ObjectCloneResp{}) | |||||
| } | |||||
| const ObjectGetPackageObjectsPath = "/object/getPackageObjects" | |||||
| type ObjectGetPackageObjects struct { | |||||
| PackageID types.PackageID `form:"packageID" url:"packageID" binding:"required"` | |||||
| } | |||||
| func (r *ObjectGetPackageObjects) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, ObjectGetPackageObjectsPath, r) | |||||
| } | |||||
| type ObjectGetPackageObjectsResp struct { | |||||
| Objects []types.Object `json:"objects"` | |||||
| } | |||||
| func (r *ObjectGetPackageObjectsResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) GetPackageObjects(req ObjectGetPackageObjects) (*ObjectGetPackageObjectsResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &ObjectGetPackageObjectsResp{}) | |||||
| } | |||||
| const ObjectNewMultipartUploadPath = "/v1/object/newMultipartUpload" | |||||
| type ObjectNewMultipartUpload struct { | |||||
| PackageID types.PackageID `json:"packageID" binding:"required"` | |||||
| Path string `json:"path" binding:"required"` | |||||
| } | |||||
| func (r *ObjectNewMultipartUpload) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, ObjectNewMultipartUploadPath, r) | |||||
| } | |||||
| type ObjectNewMultipartUploadResp struct { | |||||
| Object types.Object `json:"object"` | |||||
| } | |||||
| func (r *ObjectNewMultipartUploadResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) NewMultipartUpload(req ObjectNewMultipartUpload) (*ObjectNewMultipartUploadResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &ObjectNewMultipartUploadResp{}) | |||||
| } | |||||
| const ObjectUploadPartPath = "/v1/object/uploadPart" | |||||
| type ObjectUploadPart struct { | |||||
| ObjectUploadPartInfo | |||||
| File io.ReadCloser `json:"-"` | |||||
| } | |||||
| type ObjectUploadPartInfo struct { | |||||
| ObjectID types.ObjectID `json:"objectID" binding:"required"` | |||||
| Index int `json:"index"` | |||||
| } | |||||
| type ObjectUploadPartResp struct{} | |||||
| func (c *ObjectService) UploadPart(req ObjectUploadPart) (*ObjectUploadPartResp, error) { | |||||
| url, err := url.JoinPath(c.cfg.URL, ObjectUploadPartPath) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| infoJSON, err := serder.ObjectToJSON(req) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("upload info to json: %w", err) | |||||
| } | |||||
| resp, err := http2.PostMultiPart(url, http2.MultiPartRequestParam{ | |||||
| Form: map[string]string{"info": string(infoJSON)}, | |||||
| Files: iterator.Array(&http2.IterMultiPartFile{ | |||||
| FieldName: "file", | |||||
| File: req.File, | |||||
| }), | |||||
| }) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| contType := resp.Header.Get("Content-Type") | |||||
| if strings.Contains(contType, http2.ContentTypeJSON) { | |||||
| var err error | |||||
| var codeResp response[ObjectUploadPartResp] | |||||
| if codeResp, err = serder.JSONToObjectStreamEx[response[ObjectUploadPartResp]](resp.Body); err != nil { | |||||
| return nil, fmt.Errorf("parsing response: %w", err) | |||||
| } | |||||
| if codeResp.Code == errorcode.OK { | |||||
| return &codeResp.Data, nil | |||||
| } | |||||
| return nil, codeResp.ToError() | |||||
| } | |||||
| return nil, fmt.Errorf("unknow response content type: %s", contType) | |||||
| } | |||||
| const ObjectCompleteMultipartUploadPath = "/v1/object/completeMultipartUpload" | |||||
| type ObjectCompleteMultipartUpload struct { | |||||
| ObjectID types.ObjectID `json:"objectID" binding:"required"` | |||||
| Indexes []int `json:"indexes" binding:"required"` | |||||
| } | |||||
| func (r *ObjectCompleteMultipartUpload) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, ObjectCompleteMultipartUploadPath, r) | |||||
| } | |||||
| type ObjectCompleteMultipartUploadResp struct { | |||||
| Object types.Object `json:"object"` | |||||
| } | |||||
| func (r *ObjectCompleteMultipartUploadResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *ObjectService) CompleteMultipartUpload(req ObjectCompleteMultipartUpload) (*ObjectCompleteMultipartUploadResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &ObjectCompleteMultipartUploadResp{}) | |||||
| } | |||||
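A hedged sketch of the multipart flow built from the three calls above. It assumes types.Object exposes an ObjectID field and that part indexes start at 1, matching the values used in the presigned tests later in this diff; both are assumptions, not confirmed by this file:

// uploadInParts creates the multipart object, uploads each part in order,
// then merges the uploaded indexes into the final object.
func uploadInParts(svc *ObjectService, packageID types.PackageID, path string, parts []io.ReadCloser) (*types.Object, error) {
	initResp, err := svc.NewMultipartUpload(ObjectNewMultipartUpload{PackageID: packageID, Path: path})
	if err != nil {
		return nil, err
	}
	objID := initResp.Object.ObjectID // assumes types.Object carries its ObjectID
	var indexes []int
	for i, part := range parts {
		_, err := svc.UploadPart(ObjectUploadPart{
			ObjectUploadPartInfo: ObjectUploadPartInfo{ObjectID: objID, Index: i + 1},
			File:                 part,
		})
		if err != nil {
			return nil, err
		}
		indexes = append(indexes, i+1)
	}
	compResp, err := svc.CompleteMultipartUpload(ObjectCompleteMultipartUpload{ObjectID: objID, Indexes: indexes})
	if err != nil {
		return nil, err
	}
	return &compResp.Object, nil
}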
| @@ -0,0 +1,233 @@ | |||||
| package api | |||||
| import ( | |||||
| "fmt" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/iterator" | |||||
| "gitlink.org.cn/cloudream/common/sdks" | |||||
| "gitlink.org.cn/cloudream/common/utils/http2" | |||||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||||
| cdssdk "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| ) | |||||
| type PackageService struct { | |||||
| *Client | |||||
| } | |||||
| func (c *Client) Package() *PackageService { | |||||
| return &PackageService{c} | |||||
| } | |||||
| const PackageGetPath = "/package/get" | |||||
| type PackageGetReq struct { | |||||
| PackageID cdssdk.PackageID `form:"packageID" url:"packageID" binding:"required"` | |||||
| } | |||||
| func (r *PackageGetReq) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, PackageGetPath, r) | |||||
| } | |||||
| type PackageGetResp struct { | |||||
| cdssdk.Package | |||||
| } | |||||
| func (r *PackageGetResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *PackageService) Get(req PackageGetReq) (*PackageGetResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &PackageGetResp{}) | |||||
| } | |||||
| const PackageGetByFullNamePath = "/package/getByFullName" | |||||
| type PackageGetByFullName struct { | |||||
| BucketName string `form:"bucketName" url:"bucketName" binding:"required"` | |||||
| PackageName string `form:"packageName" url:"packageName" binding:"required"` | |||||
| } | |||||
| func (r *PackageGetByFullName) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, PackageGetByFullNamePath, r) | |||||
| } | |||||
| type PackageGetByFullNameResp struct { | |||||
| Package cdssdk.Package `json:"package"` | |||||
| } | |||||
| func (r *PackageGetByFullNameResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *PackageService) GetByName(req PackageGetByFullName) (*PackageGetByFullNameResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &PackageGetByFullNameResp{}) | |||||
| } | |||||
| const PackageCreatePath = "/package/create" | |||||
| type PackageCreate struct { | |||||
| BucketID cdssdk.BucketID `json:"bucketID"` | |||||
| Name string `json:"name"` | |||||
| } | |||||
| func (r *PackageCreate) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, PackageCreatePath, r) | |||||
| } | |||||
| type PackageCreateResp struct { | |||||
| Package cdssdk.Package `json:"package"` | |||||
| } | |||||
| func (r *PackageCreateResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (s *PackageService) Create(req PackageCreate) (*PackageCreateResp, error) { | |||||
| return JSONAPI(s.cfg, http.DefaultClient, &req, &PackageCreateResp{}) | |||||
| } | |||||
| const PackageCreateLoadPath = "/package/createLoad" | |||||
| type PackageCreateLoad struct { | |||||
| PackageCreateLoadInfo | |||||
| Files UploadObjectIterator `json:"-"` | |||||
| } | |||||
| type PackageCreateLoadInfo struct { | |||||
| BucketID cdssdk.BucketID `json:"bucketID" binding:"required"` | |||||
| Name string `json:"name" binding:"required"` | |||||
| LoadTo []cdssdk.UserSpaceID `json:"loadTo"` | |||||
| LoadToPath []string `json:"loadToPath"` | |||||
| } | |||||
| type PackageCreateLoadResp struct { | |||||
| Package cdssdk.Package `json:"package"` | |||||
| Objects []cdssdk.Object `json:"objects"` | |||||
| } | |||||
| func (c *PackageService) CreateLoad(req PackageCreateLoad) (*PackageCreateLoadResp, error) { | |||||
| url, err := url.JoinPath(c.cfg.URL, PackageCreateLoadPath) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| infoJSON, err := serder.ObjectToJSON(req) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("upload info to json: %w", err) | |||||
| } | |||||
| resp, err := PostMultiPart(c.cfg, url, | |||||
| map[string]string{"info": string(infoJSON)}, | |||||
| iterator.Map(req.Files, func(src *UploadingObject) (*http2.IterMultiPartFile, error) { | |||||
| return &http2.IterMultiPartFile{ | |||||
| FieldName: "files", | |||||
| FileName: src.Path, | |||||
| File: src.File, | |||||
| }, nil | |||||
| })) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| codeResp, err := ParseJSONResponse[response[PackageCreateLoadResp]](resp) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if codeResp.Code == errorcode.OK { | |||||
| return &codeResp.Data, nil | |||||
| } | |||||
| return nil, codeResp.ToError() | |||||
| } | |||||
| const PackageDeletePath = "/package/delete" | |||||
| type PackageDelete struct { | |||||
| PackageID cdssdk.PackageID `json:"packageID" binding:"required"` | |||||
| } | |||||
| func (r *PackageDelete) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, PackageDeletePath, r) | |||||
| } | |||||
| type PackageDeleteResp struct{} | |||||
| func (r *PackageDeleteResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *PackageService) Delete(req PackageDelete) error { | |||||
| return JSONAPINoData(c.cfg, http.DefaultClient, &req) | |||||
| } | |||||
| const PackageClonePath = "/package/clone" | |||||
| type PackageClone struct { | |||||
| PackageID cdssdk.PackageID `json:"packageID" binding:"required"` | |||||
| BucketID cdssdk.BucketID `json:"bucketID" binding:"required"` | |||||
| Name string `json:"name" binding:"required"` | |||||
| } | |||||
| func (r *PackageClone) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, PackageClonePath, r) | |||||
| } | |||||
| type PackageCloneResp struct { | |||||
| Package cdssdk.Package `json:"package"` | |||||
| } | |||||
| func (r *PackageCloneResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *PackageService) Clone(req PackageClone) (*PackageCloneResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &PackageCloneResp{}) | |||||
| } | |||||
| const PackageListBucketPackagesPath = "/package/listBucketPackages" | |||||
| type PackageListBucketPackages struct { | |||||
| BucketID cdssdk.BucketID `form:"bucketID" url:"bucketID" binding:"required"` | |||||
| } | |||||
| func (r *PackageListBucketPackages) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, PackageListBucketPackagesPath, r) | |||||
| } | |||||
| type PackageListBucketPackagesResp struct { | |||||
| Packages []cdssdk.Package `json:"packages"` | |||||
| } | |||||
| func (r *PackageListBucketPackagesResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *PackageService) ListBucketPackages(req PackageListBucketPackages) (*PackageListBucketPackagesResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &PackageListBucketPackagesResp{}) | |||||
| } | |||||
| const PackageGetCachedStoragesPath = "/package/getCachedStorages" | |||||
| type PackageGetCachedStoragesReq struct { | |||||
| PackageID cdssdk.PackageID `form:"packageID" url:"packageID" binding:"required"` | |||||
| } | |||||
| func (r *PackageGetCachedStoragesReq) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, PackageGetCachedStoragesPath, r) | |||||
| } | |||||
| /* | |||||
| type PackageGetCachedStoragesResp struct { | |||||
| cdssdk.PackageCachingInfo | |||||
| } | |||||
| func (r *PackageGetCachedStoragesResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *PackageService) GetCachedStorages(req PackageGetCachedStoragesReq) (*PackageGetCachedStoragesResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &PackageGetCachedStoragesResp{}) | |||||
| } | |||||
| */ | |||||
| @@ -0,0 +1,151 @@ | |||||
| package api | |||||
| import ( | |||||
| "context" | |||||
| "fmt" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "time" | |||||
| v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" | |||||
| "github.com/aws/aws-sdk-go-v2/credentials" | |||||
| "github.com/google/go-querystring/query" | |||||
| cdssdk "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| ) | |||||
| type PresignedService struct { | |||||
| *Client | |||||
| } | |||||
| func (c *Client) Presigned() *PresignedService { | |||||
| return &PresignedService{ | |||||
| Client: c, | |||||
| } | |||||
| } | |||||
| const PresignedObjectListByPathPath = "/v1/presigned/object/listByPath" | |||||
| type PresignedObjectListByPath struct { | |||||
| ObjectListByPath | |||||
| } | |||||
| func (c *PresignedService) ObjectListByPath(req PresignedObjectListByPath, expireIn int) (string, error) { | |||||
| return c.presign(req, PresignedObjectListByPathPath, http.MethodGet, expireIn) | |||||
| } | |||||
| const PresignedObjectDownloadByPathPath = "/v1/presigned/object/downloadByPath" | |||||
| type PresignedObjectDownloadByPath struct { | |||||
| PackageID cdssdk.PackageID `form:"packageID" url:"packageID" binding:"required"` | |||||
| Path string `form:"path" url:"path" binding:"required"` | |||||
| Offset int64 `form:"offset" url:"offset,omitempty"` | |||||
| Length *int64 `form:"length" url:"length,omitempty"` | |||||
| } | |||||
| func (c *PresignedService) ObjectDownloadByPath(req PresignedObjectDownloadByPath, expireIn int) (string, error) { | |||||
| return c.presign(req, PresignedObjectDownloadByPathPath, http.MethodGet, expireIn) | |||||
| } | |||||
| const PresignedObjectDownloadPath = "/v1/presigned/object/download" | |||||
| type PresignedObjectDownload struct { | |||||
| ObjectID cdssdk.ObjectID `form:"objectID" url:"objectID" binding:"required"` | |||||
| Offset int64 `form:"offset" url:"offset,omitempty"` | |||||
| Length *int64 `form:"length" url:"length,omitempty"` | |||||
| } | |||||
| func (c *PresignedService) ObjectDownload(req PresignedObjectDownload, expireIn int) (string, error) { | |||||
| return c.presign(req, PresignedObjectDownloadPath, http.MethodGet, expireIn) | |||||
| } | |||||
| const PresignedObjectUploadPath = "/v1/presigned/object/upload" | |||||
| type PresignedObjectUpload struct { | |||||
| PackageID cdssdk.PackageID `form:"packageID" binding:"required" url:"packageID"` | |||||
| Path string `form:"path" binding:"required" url:"path"` | |||||
| Affinity cdssdk.UserSpaceID `form:"affinity" url:"affinity,omitempty"` | |||||
| LoadTo []cdssdk.UserSpaceID `form:"loadTo" url:"loadTo,omitempty"` | |||||
| LoadToPath []string `form:"loadToPath" url:"loadToPath,omitempty"` | |||||
| } | |||||
| type PresignedObjectUploadResp struct { | |||||
| Object cdssdk.Object `json:"object"` | |||||
| } | |||||
| func (c *PresignedService) ObjectUpload(req PresignedObjectUpload, expireIn int) (string, error) { | |||||
| return c.presign(req, PresignedObjectUploadPath, http.MethodPost, expireIn) | |||||
| } | |||||
| const PresignedObjectNewMultipartUploadPath = "/v1/presigned/object/newMultipartUpload" | |||||
| type PresignedObjectNewMultipartUpload struct { | |||||
| PackageID cdssdk.PackageID `form:"packageID" binding:"required" url:"packageID"` | |||||
| Path string `form:"path" binding:"required" url:"path"` | |||||
| } | |||||
| type PresignedObjectNewMultipartUploadResp struct { | |||||
| Object cdssdk.Object `json:"object"` | |||||
| } | |||||
| func (c *PresignedService) ObjectNewMultipartUpload(req PresignedObjectNewMultipartUpload, expireIn int) (string, error) { | |||||
| return c.presign(req, PresignedObjectNewMultipartUploadPath, http.MethodPost, expireIn) | |||||
| } | |||||
| const PresignedObjectUploadPartPath = "/v1/presigned/object/uploadPart" | |||||
| type PresignedObjectUploadPart struct { | |||||
| ObjectID cdssdk.ObjectID `form:"objectID" binding:"required" url:"objectID"` | |||||
| Index int `form:"index" binding:"required" url:"index"` | |||||
| } | |||||
| type PresignedUploadPartResp struct{} | |||||
| func (c *PresignedService) ObjectUploadPart(req PresignedObjectUploadPart, expireIn int) (string, error) { | |||||
| return c.presign(req, PresignedObjectUploadPartPath, http.MethodPost, expireIn) | |||||
| } | |||||
| const PresignedObjectCompleteMultipartUploadPath = "/v1/presigned/object/completeMultipartUpload" | |||||
| type PresignedObjectCompleteMultipartUpload struct { | |||||
| ObjectID cdssdk.ObjectID `form:"objectID" binding:"required" url:"objectID"` | |||||
| Indexes []int `form:"indexes" binding:"required" url:"indexes"` | |||||
| } | |||||
| type PresignedObjectCompleteMultipartUploadResp struct { | |||||
| Object cdssdk.Object `json:"object"` | |||||
| } | |||||
| func (c *PresignedService) ObjectCompleteMultipartUpload(req PresignedObjectCompleteMultipartUpload, expireIn int) (string, error) { | |||||
| return c.presign(req, PresignedObjectCompleteMultipartUploadPath, http.MethodPost, expireIn) | |||||
| } | |||||
| func (c *PresignedService) presign(req any, path string, method string, expireIn int) (string, error) { | |||||
| u, err := url.Parse(c.cfg.URL) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| u = u.JoinPath(path) | |||||
| us, err := query.Values(req) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| us.Add("X-Expires", fmt.Sprintf("%v", expireIn)) | |||||
| u.RawQuery = us.Encode() | |||||
| prod := credentials.NewStaticCredentialsProvider(c.cfg.AccessKey, c.cfg.SecretKey, "") | |||||
| cred, err := prod.Retrieve(context.TODO()) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| r, err := http.NewRequest(method, u.String(), nil) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| signer := v4.NewSigner() | |||||
| signedURL, _, err := signer.PresignHTTP(context.Background(), cred, r, "", AuthService, AuthRegion, time.Now()) | |||||
| return signedURL, err | |||||
| } | |||||
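A short usage sketch: a URL returned by the presign helpers can be fetched with a plain GET; per the note on Presign, no extra headers should be added beyond the automatically set Host:

// getPresigned issues a plain GET against a presigned URL and returns the
// response; callers must close resp.Body.
func getPresigned(signedURL string) (*http.Response, error) {
	resp, err := http.Get(signedURL)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		resp.Body.Close()
		return nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return resp, nil
}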
| @@ -0,0 +1,169 @@ | |||||
| package api | |||||
| import ( | |||||
| "testing" | |||||
| . "github.com/smartystreets/goconvey/convey" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/types" | |||||
| ) | |||||
| func Test_Presigned(t *testing.T) { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| AccessKey: "123456", | |||||
| SecretKey: "123456", | |||||
| }) | |||||
| Convey("下载文件", t, func() { | |||||
| pre := cli.Presigned() | |||||
| url, err := pre.ObjectDownloadByPath(PresignedObjectDownloadByPath{ | |||||
| PackageID: 3, | |||||
| Path: "example.java", | |||||
| Offset: 1, | |||||
| Length: types.Ref(int64(100)), | |||||
| }, 100) | |||||
| So(err, ShouldEqual, nil) | |||||
| t.Logf("url: %s", url) | |||||
| }) | |||||
| Convey("上传文件", t, func() { | |||||
| pre := cli.Presigned() | |||||
| url, err := pre.ObjectUpload(PresignedObjectUpload{ | |||||
| PackageID: 3, | |||||
| Path: "example.java", | |||||
| }, 100) | |||||
| So(err, ShouldEqual, nil) | |||||
| t.Logf("url: %s", url) | |||||
| }) | |||||
| } | |||||
| func Test_PresignedObjectListByPath(t *testing.T) { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| AccessKey: "123456", | |||||
| SecretKey: "123456", | |||||
| }) | |||||
| Convey("下载文件", t, func() { | |||||
| pre := cli.Presigned() | |||||
| url, err := pre.ObjectListByPath(PresignedObjectListByPath{ | |||||
| ObjectListByPath: ObjectListByPath{ | |||||
| PackageID: 12, | |||||
| Path: "a/", | |||||
| IsPrefix: true, | |||||
| NoRecursive: true, | |||||
| MaxKeys: 10, | |||||
| ContinuationToken: "123456", | |||||
| }, | |||||
| }, 100) | |||||
| So(err, ShouldEqual, nil) | |||||
| t.Logf("url: %s", url) | |||||
| }) | |||||
| } | |||||
| func Test_PresignedObjectDownloadByPath(t *testing.T) { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| AccessKey: "123456", | |||||
| SecretKey: "123456", | |||||
| }) | |||||
| Convey("下载文件", t, func() { | |||||
| pre := cli.Presigned() | |||||
| url, err := pre.ObjectDownloadByPath(PresignedObjectDownloadByPath{ | |||||
| PackageID: 3, | |||||
| Path: "example.java", | |||||
| // Offset: 1, | |||||
| // Length: types.Ref(int64(100)), | |||||
| }, 100) | |||||
| So(err, ShouldEqual, nil) | |||||
| t.Logf("url: %s", url) | |||||
| }) | |||||
| } | |||||
| func Test_PresignedObjectDownload(t *testing.T) { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| AccessKey: "123456", | |||||
| SecretKey: "123456", | |||||
| }) | |||||
| Convey("下载文件", t, func() { | |||||
| pre := cli.Presigned() | |||||
| url, err := pre.ObjectDownload(PresignedObjectDownload{ | |||||
| ObjectID: 1039, | |||||
| // Offset: 1, | |||||
| // Length: types.Ref(int64(100)), | |||||
| }, 100) | |||||
| So(err, ShouldEqual, nil) | |||||
| t.Logf("url: %s", url) | |||||
| }) | |||||
| } | |||||
| func Test_PresignedObjectUpload(t *testing.T) { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| }) | |||||
| Convey("上传文件", t, func() { | |||||
| pre := cli.Presigned() | |||||
| url, err := pre.ObjectUpload(PresignedObjectUpload{ | |||||
| PackageID: 3, | |||||
| Path: "example.java", | |||||
| }, 100) | |||||
| So(err, ShouldEqual, nil) | |||||
| t.Logf("url: %s", url) | |||||
| }) | |||||
| } | |||||
| func Test_PresignedNewMultipartUpload(t *testing.T) { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| }) | |||||
| Convey("启动分片上传", t, func() { | |||||
| pre := cli.Presigned() | |||||
| url, err := pre.ObjectNewMultipartUpload(PresignedObjectNewMultipartUpload{ | |||||
| PackageID: 3, | |||||
| Path: "example.java", | |||||
| }, 600) | |||||
| So(err, ShouldEqual, nil) | |||||
| t.Logf("url: %s", url) | |||||
| }) | |||||
| } | |||||
| func Test_PresignedObjectUploadPart(t *testing.T) { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| AccessKey: "123456", | |||||
| SecretKey: "123456", | |||||
| }) | |||||
| Convey("上传分片", t, func() { | |||||
| pre := cli.Presigned() | |||||
| url, err := pre.ObjectUploadPart(PresignedObjectUploadPart{ | |||||
| ObjectID: 7, | |||||
| Index: 3, | |||||
| }, 600) | |||||
| So(err, ShouldEqual, nil) | |||||
| t.Logf("url: %s", url) | |||||
| }) | |||||
| } | |||||
| func Test_PresignedCompleteMultipartUpload(t *testing.T) { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| AccessKey: "123456", | |||||
| SecretKey: "123456", | |||||
| }) | |||||
| Convey("合并分片", t, func() { | |||||
| pre := cli.Presigned() | |||||
| url, err := pre.ObjectCompleteMultipartUpload(PresignedObjectCompleteMultipartUpload{ | |||||
| ObjectID: 7, | |||||
| Indexes: []int{1, 2, 3}, | |||||
| }, 600) | |||||
| So(err, ShouldEqual, nil) | |||||
| t.Logf("url: %s", url) | |||||
| }) | |||||
| } | |||||
| @@ -0,0 +1,114 @@ | |||||
| package api | |||||
| import ( | |||||
| "bytes" | |||||
| "context" | |||||
| "crypto/sha256" | |||||
| "encoding/hex" | |||||
| "fmt" | |||||
| "io" | |||||
| "net/http" | |||||
| "time" | |||||
| v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" | |||||
| "github.com/aws/aws-sdk-go-v2/credentials" | |||||
| ) | |||||
| const ( | |||||
| AuthService = "jcs" | |||||
| AuthRegion = "any" | |||||
| ) | |||||
| // Sign a request and add the signature to the request headers. | |||||
| // | |||||
| // The request body is read to compute its SHA-256 hash. If the hash is already known, use the SignWithPayloadHash method instead. | |||||
| func Sign(req *http.Request, accessKey, secretKey string) error { | |||||
| prod := credentials.NewStaticCredentialsProvider(accessKey, secretKey, "") | |||||
| cred, err := prod.Retrieve(context.TODO()) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| payloadHash := "" | |||||
| if req.Body != nil { | |||||
| data, err := io.ReadAll(req.Body) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| req.Body.Close() | |||||
| req.Body = io.NopCloser(bytes.NewReader(data)) | |||||
| hasher := sha256.New() | |||||
| hasher.Write(data) | |||||
| payloadHash = hex.EncodeToString(hasher.Sum(nil)) | |||||
| } else { | |||||
| hash := sha256.Sum256([]byte("")) | |||||
| payloadHash = hex.EncodeToString(hash[:]) | |||||
| } | |||||
| signer := v4.NewSigner() | |||||
| err = signer.SignHTTP(context.Background(), cred, req, payloadHash, AuthService, AuthRegion, time.Now()) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // Sign a request without computing the hash of the request body; suitable for file-upload APIs. | |||||
| func SignWithoutBody(req *http.Request, accessKey, secretKey string) error { | |||||
| prod := credentials.NewStaticCredentialsProvider(accessKey, secretKey, "") | |||||
| cred, err := prod.Retrieve(context.TODO()) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| signer := v4.NewSigner() | |||||
| err = signer.SignHTTP(context.Background(), cred, req, "", AuthService, AuthRegion, time.Now()) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // Sign a request, using the given hash as the hash of the request body. | |||||
| // | |||||
| // The payloadHash parameter must be the hexadecimal string of a SHA-256 hash, in all lowercase. | |||||
| func SignWithPayloadHash(req *http.Request, payloadHash string, accessKey, secretKey string) error { | |||||
| prod := credentials.NewStaticCredentialsProvider(accessKey, secretKey, "") | |||||
| cred, err := prod.Retrieve(context.TODO()) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| signer := v4.NewSigner() | |||||
| err = signer.SignHTTP(context.Background(), cred, req, payloadHash, AuthService, AuthRegion, time.Now()) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // Generate a presigned URL. | |||||
| // | |||||
| // expiration is the expiration time of the signature, in seconds. | |||||
| // | |||||
| // The request body hash is not included in the signature. Note: do not set any additional headers (besides the automatically added Host), or signature verification will fail. | |||||
| func Presign(req *http.Request, accessKey, secretKey string, expiration int) (string, error) { | |||||
| query := req.URL.Query() | |||||
| query.Add("X-Expires", fmt.Sprintf("%v", expiration)) | |||||
| req.URL.RawQuery = query.Encode() | |||||
| prod := credentials.NewStaticCredentialsProvider(accessKey, secretKey, "") | |||||
| cred, err := prod.Retrieve(context.TODO()) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| signer := v4.NewSigner() | |||||
| signedURL, _, err := signer.PresignHTTP(context.Background(), cred, req, "", AuthService, AuthRegion, time.Now()) | |||||
| return signedURL, err | |||||
| } | |||||
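A minimal sketch of signing an ad-hoc request with Sign; the URL and credentials are placeholders supplied by the caller:

// signedGet builds a GET request, signs its (empty) body, and sends it with
// the default HTTP client.
func signedGet(rawURL, accessKey, secretKey string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodGet, rawURL, nil)
	if err != nil {
		return nil, err
	}
	if err := Sign(req, accessKey, secretKey); err != nil {
		return nil, err
	}
	return http.DefaultClient.Do(req)
}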
| @@ -0,0 +1,283 @@ | |||||
| package api | |||||
| import ( | |||||
| "bytes" | |||||
| "fmt" | |||||
| "io" | |||||
| "testing" | |||||
| "github.com/google/uuid" | |||||
| . "github.com/smartystreets/goconvey/convey" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/iterator" | |||||
| cdssdk "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| ) | |||||
| func Test_PackageGet(t *testing.T) { | |||||
| Convey("上传后获取Package信息", t, func() { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| }) | |||||
| fileData := make([]byte, 4096) | |||||
| for i := 0; i < len(fileData); i++ { | |||||
| fileData[i] = byte(i) | |||||
| } | |||||
| pkgName := uuid.NewString() | |||||
| createResp, err := cli.Package().Create(PackageCreate{ | |||||
| BucketID: 1, | |||||
| Name: pkgName, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| _, err = cli.Object().Upload(ObjectUpload{ | |||||
| ObjectUploadInfo: ObjectUploadInfo{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }, | |||||
| Files: iterator.Array( | |||||
| &UploadingObject{ | |||||
| Path: "abc/test", | |||||
| File: io.NopCloser(bytes.NewBuffer(fileData)), | |||||
| }, | |||||
| &UploadingObject{ | |||||
| Path: "test2", | |||||
| File: io.NopCloser(bytes.NewBuffer(fileData)), | |||||
| }, | |||||
| ), | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| getResp, err := cli.Package().Get(PackageGetReq{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| So(getResp.PackageID, ShouldEqual, createResp.Package.PackageID) | |||||
| So(getResp.Package.Name, ShouldEqual, pkgName) | |||||
| err = cli.Package().Delete(PackageDelete{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| }) | |||||
| } | |||||
| func Test_Object(t *testing.T) { | |||||
| Convey("上传,下载,删除", t, func() { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| }) | |||||
| fileData := make([]byte, 4096) | |||||
| for i := 0; i < len(fileData); i++ { | |||||
| fileData[i] = byte(i) | |||||
| } | |||||
| stgAff := cdssdk.UserSpaceID(2) | |||||
| pkgName := uuid.NewString() | |||||
| createResp, err := cli.Package().Create(PackageCreate{ | |||||
| BucketID: 1, | |||||
| Name: pkgName, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| _, err = cli.Object().Upload(ObjectUpload{ | |||||
| ObjectUploadInfo: ObjectUploadInfo{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| Affinity: stgAff, | |||||
| }, | |||||
| Files: iterator.Array( | |||||
| &UploadingObject{ | |||||
| Path: "test", | |||||
| File: io.NopCloser(bytes.NewBuffer(fileData)), | |||||
| }, | |||||
| &UploadingObject{ | |||||
| Path: "test2", | |||||
| File: io.NopCloser(bytes.NewBuffer(fileData)), | |||||
| }, | |||||
| ), | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| // downFs, err := cli.ObjectDownload(ObjectDownloadReq{ | |||||
| // ObjectID: upResp.ObjectID, | |||||
| // }) | |||||
| // So(err, ShouldBeNil) | |||||
| // | |||||
| // downFileData, err := io.ReadAll(downFs) | |||||
| // So(err, ShouldBeNil) | |||||
| // So(downFileData, ShouldResemble, fileData) | |||||
| // downFs.Close() | |||||
| err = cli.Package().Delete(PackageDelete{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| }) | |||||
| } | |||||
| func Test_ObjectList(t *testing.T) { | |||||
| Convey("路径查询", t, func() { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| }) | |||||
| resp, err := cli.Object().ListByPath(ObjectListByPath{ | |||||
| PackageID: 10, | |||||
| Path: "100x100K/zexema", | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| fmt.Printf("\n") | |||||
| fmt.Printf("%+v\n", resp.Objects[0]) | |||||
| }) | |||||
| } | |||||
| func Test_Storage(t *testing.T) { | |||||
| Convey("上传后调度文件", t, func() { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| }) | |||||
| fileData := make([]byte, 4096) | |||||
| for i := 0; i < len(fileData); i++ { | |||||
| fileData[i] = byte(i) | |||||
| } | |||||
| pkgName := uuid.NewString() | |||||
| createResp, err := cli.Package().Create(PackageCreate{ | |||||
| BucketID: 1, | |||||
| Name: pkgName, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| _, err = cli.Object().Upload(ObjectUpload{ | |||||
| ObjectUploadInfo: ObjectUploadInfo{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }, | |||||
| Files: iterator.Array( | |||||
| &UploadingObject{ | |||||
| Path: "test", | |||||
| File: io.NopCloser(bytes.NewBuffer(fileData)), | |||||
| }, | |||||
| &UploadingObject{ | |||||
| Path: "test2", | |||||
| File: io.NopCloser(bytes.NewBuffer(fileData)), | |||||
| }, | |||||
| ), | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| _, err = cli.UserSpaceLoadPackage(UserSpaceLoadPackageReq{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| UserSpaceID: 1, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| err = cli.Package().Delete(PackageDelete{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| }) | |||||
| } | |||||
| /* | |||||
| func Test_Cache(t *testing.T) { | |||||
| Convey("上传后移动文件", t, func() { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890", | |||||
| }) | |||||
| fileData := make([]byte, 4096) | |||||
| for i := 0; i < len(fileData); i++ { | |||||
| fileData[i] = byte(i) | |||||
| } | |||||
| pkgName := uuid.NewString() | |||||
| createResp, err := cli.Package().Create(PackageCreate{ | |||||
| BucketID: 1, | |||||
| Name: pkgName, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| _, err = cli.Object().Upload(ObjectUpload{ | |||||
| ObjectUploadInfo: ObjectUploadInfo{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }, | |||||
| Files: iterator.Array( | |||||
| &UploadingObject{ | |||||
| Path: "test.txt", | |||||
| File: io.NopCloser(bytes.NewBuffer(fileData)), | |||||
| }, | |||||
| &UploadingObject{ | |||||
| Path: "test2.txt", | |||||
| File: io.NopCloser(bytes.NewBuffer(fileData)), | |||||
| }, | |||||
| ), | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| _, err = cli.CacheMovePackage(CacheMovePackageReq{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| StorageID: 1, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| err = cli.Package().Delete(PackageDelete{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| }) | |||||
| } | |||||
| */ | |||||
| func Test_Sign(t *testing.T) { | |||||
| Convey("签名接口", t, func() { | |||||
| cli := NewClient(&Config{ | |||||
| URL: "http://localhost:7890/v1", | |||||
| AccessKey: "123456", | |||||
| SecretKey: "123456", | |||||
| }) | |||||
| fileData := make([]byte, 4096) | |||||
| for i := 0; i < len(fileData); i++ { | |||||
| fileData[i] = byte(i) | |||||
| } | |||||
| pkgName := uuid.NewString() | |||||
| createResp, err := cli.Package().Create(PackageCreate{ | |||||
| BucketID: 1, | |||||
| Name: pkgName, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| _, err = cli.Object().Upload(ObjectUpload{ | |||||
| ObjectUploadInfo: ObjectUploadInfo{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }, | |||||
| Files: iterator.Array( | |||||
| &UploadingObject{ | |||||
| Path: "abc/test", | |||||
| File: io.NopCloser(bytes.NewBuffer(fileData)), | |||||
| }, | |||||
| &UploadingObject{ | |||||
| Path: "test4", | |||||
| File: io.NopCloser(bytes.NewBuffer(fileData)), | |||||
| }, | |||||
| ), | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| getResp, err := cli.Package().Get(PackageGetReq{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| So(getResp.PackageID, ShouldEqual, createResp.Package.PackageID) | |||||
| So(getResp.Package.Name, ShouldEqual, pkgName) | |||||
| err = cli.Package().Delete(PackageDelete{ | |||||
| PackageID: createResp.Package.PackageID, | |||||
| }) | |||||
| So(err, ShouldBeNil) | |||||
| }) | |||||
| } | |||||
| @@ -0,0 +1,78 @@ | |||||
| package api | |||||
| import ( | |||||
| "net/http" | |||||
| "gitlink.org.cn/cloudream/common/sdks" | |||||
| cdssdk "gitlink.org.cn/cloudream/storage2/client/types" | |||||
| ) | |||||
| const UserSpaceLoadPackagePath = "/userspace/loadPackage" | |||||
| type UserSpaceLoadPackageReq struct { | |||||
| PackageID cdssdk.PackageID `json:"packageID" binding:"required"` | |||||
| UserSpaceID cdssdk.UserSpaceID `json:"userSpaceID" binding:"required"` | |||||
| RootPath string `json:"rootPath"` | |||||
| } | |||||
| func (r *UserSpaceLoadPackageReq) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, UserSpaceLoadPackagePath, r) | |||||
| } | |||||
| type UserSpaceLoadPackageResp struct{} | |||||
| func (r *UserSpaceLoadPackageResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *Client) UserSpaceLoadPackage(req UserSpaceLoadPackageReq) (*UserSpaceLoadPackageResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &UserSpaceLoadPackageResp{}) | |||||
| } | |||||
| const UserSpaceCreatePackagePath = "/userspace/createPackage" | |||||
| type UserSpaceCreatePackageReq struct { | |||||
| UserSpaceID cdssdk.UserSpaceID `json:"userSpaceID" binding:"required"` | |||||
| Path string `json:"path" binding:"required"` | |||||
| BucketID cdssdk.BucketID `json:"bucketID" binding:"required"` | |||||
| Name string `json:"name" binding:"required"` | |||||
| SpaceAffinity cdssdk.UserSpaceID `json:"spaceAffinity"` | |||||
| } | |||||
| func (r *UserSpaceCreatePackageReq) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeJSONParam(http.MethodPost, UserSpaceCreatePackagePath, r) | |||||
| } | |||||
| type UserSpaceCreatePackageResp struct { | |||||
| Package cdssdk.Package `json:"package"` | |||||
| } | |||||
| func (r *UserSpaceCreatePackageResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *Client) UserSpaceCreatePackage(req UserSpaceCreatePackageReq) (*UserSpaceCreatePackageResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &UserSpaceCreatePackageResp{}) | |||||
| } | |||||
| const UserSpaceGetPath = "/userspace/get" | |||||
| type UserSpaceGet struct { | |||||
| UserSpaceID cdssdk.UserSpaceID `form:"userSpaceID" url:"userSpaceID" binding:"required"` | |||||
| } | |||||
| func (r *UserSpaceGet) MakeParam() *sdks.RequestParam { | |||||
| return sdks.MakeQueryParam(http.MethodGet, UserSpaceGetPath, r) | |||||
| } | |||||
| type UserSpaceGetResp struct { | |||||
| cdssdk.UserSpace | |||||
| } | |||||
| func (r *UserSpaceGetResp) ParseResponse(resp *http.Response) error { | |||||
| return sdks.ParseCodeDataJSONResponse(resp, r) | |||||
| } | |||||
| func (c *Client) UserSpaceGet(req UserSpaceGet) (*UserSpaceGetResp, error) { | |||||
| return JSONAPI(c.cfg, http.DefaultClient, &req, &UserSpaceGetResp{}) | |||||
| } | |||||
| @@ -0,0 +1,197 @@ | |||||
| package api | |||||
| import ( | |||||
| "crypto/sha256" | |||||
| "encoding/hex" | |||||
| "fmt" | |||||
| "io" | |||||
| "mime/multipart" | |||||
| "net/http" | |||||
| ul "net/url" | |||||
| "path/filepath" | |||||
| "strings" | |||||
| "github.com/google/go-querystring/query" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/iterator" | |||||
| "gitlink.org.cn/cloudream/common/sdks" | |||||
| "gitlink.org.cn/cloudream/common/utils/http2" | |||||
| "gitlink.org.cn/cloudream/common/utils/math2" | |||||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||||
| ) | |||||
| func MakeIPFSFilePath(fileHash string) string { | |||||
| return filepath.Join("ipfs", fileHash) | |||||
| } | |||||
| func ParseJSONResponse[TBody any](resp *http.Response) (TBody, error) { | |||||
| var ret TBody | |||||
| contType := resp.Header.Get("Content-Type") | |||||
| if strings.Contains(contType, http2.ContentTypeJSON) { | |||||
| var err error | |||||
| if ret, err = serder.JSONToObjectStreamEx[TBody](resp.Body); err != nil { | |||||
| return ret, fmt.Errorf("parsing response: %w", err) | |||||
| } | |||||
| return ret, nil | |||||
| } | |||||
| cont, err := io.ReadAll(resp.Body) | |||||
| if err != nil { | |||||
| return ret, fmt.Errorf("unknow response content type: %s, status: %d", contType, resp.StatusCode) | |||||
| } | |||||
| strCont := string(cont) | |||||
| return ret, fmt.Errorf("unknow response content type: %s, status: %d, body(prefix): %s", contType, resp.StatusCode, strCont[:math2.Min(len(strCont), 200)]) | |||||
| } | |||||
| func JSONAPI[Resp sdks.APIResponse, Req sdks.APIRequest](cfg *Config, cli *http.Client, req Req, resp Resp) (Resp, error) { | |||||
| param := req.MakeParam() | |||||
| httpReq, err := param.MakeRequest(cfg.URL) | |||||
| if err != nil { | |||||
| return resp, err | |||||
| } | |||||
| if cfg.AccessKey != "" && cfg.SecretKey != "" { | |||||
| err = SignWithPayloadHash(httpReq, calcSha256(param.Body), cfg.AccessKey, cfg.SecretKey) | |||||
| if err != nil { | |||||
| return resp, err | |||||
| } | |||||
| } | |||||
| httpResp, err := cli.Do(httpReq) | |||||
| if err != nil { | |||||
| return resp, err | |||||
| } | |||||
| err = resp.ParseResponse(httpResp) | |||||
| return resp, err | |||||
| } | |||||
| func JSONAPINoData[Req sdks.APIRequest](cfg *Config, cli *http.Client, req Req) error { | |||||
| param := req.MakeParam() | |||||
| httpReq, err := param.MakeRequest(cfg.URL) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if cfg.AccessKey != "" && cfg.SecretKey != "" { | |||||
| err = SignWithPayloadHash(httpReq, calcSha256(param.Body), cfg.AccessKey, cfg.SecretKey) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| } | |||||
| resp, err := cli.Do(httpReq) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return sdks.ParseCodeDataJSONResponse(resp, any(nil)) | |||||
| } | |||||
| func calcSha256(body sdks.RequestBody) string { | |||||
| hasher := sha256.New() | |||||
| switch body := body.(type) { | |||||
| case *sdks.StringBody: | |||||
| hasher.Write([]byte(body.Value)) | |||||
| return hex.EncodeToString(hasher.Sum(nil)) | |||||
| case *sdks.BytesBody: | |||||
| hasher.Write(body.Value) | |||||
| return hex.EncodeToString(hasher.Sum(nil)) | |||||
| case *sdks.StreamBody: | |||||
| return "" | |||||
| default: | |||||
| hash := sha256.Sum256([]byte("")) | |||||
| return hex.EncodeToString(hash[:]) | |||||
| } | |||||
| } | |||||
| func PostMultiPart(cfg *Config, url string, info any, files http2.MultiPartFileIterator) (*http.Response, error) { | |||||
| req, err := http.NewRequest(http.MethodPost, url, nil) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| pr, pw := io.Pipe() | |||||
| muWriter := multipart.NewWriter(pw) | |||||
| req.Header.Set("Content-Type", fmt.Sprintf("%s;boundary=%s", http2.ContentTypeMultiPart, muWriter.Boundary())) | |||||
| writeResult := make(chan error, 1) | |||||
| go func() { | |||||
| writeResult <- func() error { | |||||
| defer pw.Close() | |||||
| defer muWriter.Close() | |||||
| if info != nil { | |||||
| mp, err := query.Values(info) | |||||
| if err != nil { | |||||
| return fmt.Errorf("formValues object to map failed, err: %w", err) | |||||
| } | |||||
| for k, v := range mp { | |||||
| err := muWriter.WriteField(k, v[0]) | |||||
| if err != nil { | |||||
| return fmt.Errorf("write form field failed, err: %w", err) | |||||
| } | |||||
| } | |||||
| } | |||||
| for { | |||||
| file, err := files.MoveNext() | |||||
| if err == iterator.ErrNoMoreItem { | |||||
| break | |||||
| } | |||||
| if err != nil { | |||||
| return fmt.Errorf("opening file: %w", err) | |||||
| } | |||||
| err = sendFileOnePart(muWriter, file.FieldName, file.FileName, file.File) | |||||
| file.File.Close() | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| } | |||||
| return nil | |||||
| }() | |||||
| }() | |||||
| req.Body = pr | |||||
| if cfg.AccessKey != "" && cfg.SecretKey != "" { | |||||
| err = SignWithoutBody(req, cfg.AccessKey, cfg.SecretKey) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| cli := http.Client{} | |||||
| resp, err := cli.Do(req) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| writeErr := <-writeResult | |||||
| if writeErr != nil { | |||||
| return nil, writeErr | |||||
| } | |||||
| return resp, nil | |||||
| } | |||||
| func sendFileOnePart(muWriter *multipart.Writer, fieldName, fileName string, file io.ReadCloser) error { | |||||
| w, err := muWriter.CreateFormFile(fieldName, ul.PathEscape(fileName)) | |||||
| if err != nil { | |||||
| return fmt.Errorf("create form file failed, err: %w", err) | |||||
| } | |||||
| _, err = io.Copy(w, file) | |||||
| return err | |||||
| } | |||||
| @@ -0,0 +1,91 @@ | |||||
| package types | |||||
| import ( | |||||
| "crypto/sha256" | |||||
| "encoding/hex" | |||||
| "fmt" | |||||
| "strings" | |||||
| ) | |||||
| // The hash of a file, in the format: [prefix: 4 characters][hash: 64 characters]. | |||||
| // The prefix distinguishes the type of hash: | |||||
| // | |||||
| // - "Full": the hash of the complete file | |||||
| // | |||||
| // - "Comp": the file is split into segments, each segment is hashed, and the segment hashes are combined into a single hash | |||||
| // | |||||
| // Hash value: a SHA-256 hash, as an all-uppercase hexadecimal string | |||||
| type FileHash string | |||||
| const ( | |||||
| FullHashPrefix = "Full" | |||||
| CompositeHashPrefix = "Comp" | |||||
| EmptyHash = FileHash("Full0000000000000000000000000000000000000000000000000000000000000000") | |||||
| ) | |||||
| func (h *FileHash) GetPrefix() string { | |||||
| return string((*h)[:4]) | |||||
| } | |||||
| func (h *FileHash) GetHash() string { | |||||
| return string((*h)[4:]) | |||||
| } | |||||
| // The caller must guarantee that the hash value is valid | |||||
| func (h *FileHash) GetHashBytes() []byte { | |||||
| bytes, _ := hex.DecodeString(h.GetHash()) | |||||
| return bytes | |||||
| } | |||||
| func (h *FileHash) GetHashPrefix(n int) string { | |||||
| return string((*h)[4 : 4+n]) | |||||
| } | |||||
| func (h *FileHash) IsFullHash() bool { | |||||
| return (*h)[:4] == FullHashPrefix | |||||
| } | |||||
| func (h *FileHash) IsCompositeHash() bool { | |||||
| return (*h)[:4] == CompositeHashPrefix | |||||
| } | |||||
| func ParseHash(hashStr string) (FileHash, error) { | |||||
| if len(hashStr) != 4+64 { | |||||
| return "", fmt.Errorf("hash string length should be 4+64, but got %d", len(hashStr)) | |||||
| } | |||||
| prefix := hashStr[:4] | |||||
| hash := hashStr[4:] | |||||
| if prefix != FullHashPrefix && prefix != CompositeHashPrefix { | |||||
| return "", fmt.Errorf("invalid hash prefix: %s", prefix) | |||||
| } | |||||
| if len(hash) != 64 { | |||||
| return "", fmt.Errorf("invalid hash length: %d", len(hash)) | |||||
| } | |||||
| for _, c := range hash { | |||||
| if (c < '0' || c > '9') && (c < 'A' || c > 'F') { | |||||
| return "", fmt.Errorf("invalid hash character: %c", c) | |||||
| } | |||||
| } | |||||
| return FileHash(hashStr), nil | |||||
| } | |||||
| func NewFullHash(hash []byte) FileHash { | |||||
| return FileHash(FullHashPrefix + strings.ToUpper(hex.EncodeToString(hash))) | |||||
| } | |||||
| func NewFullHashFromString(hashStr string) FileHash { | |||||
| return FileHash(FullHashPrefix + strings.ToUpper(hashStr)) | |||||
| } | |||||
| func CalculateCompositeHash(segmentHashes [][]byte) FileHash { | |||||
| data := make([]byte, len(segmentHashes)*32) | |||||
| for i, segmentHash := range segmentHashes { | |||||
| copy(data[i*32:], segmentHash) | |||||
| } | |||||
| hash := sha256.Sum256(data) | |||||
| return FileHash(CompositeHashPrefix + strings.ToUpper(hex.EncodeToString(hash[:]))) | |||||
| } | |||||
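A minimal usage sketch of the FileHash helpers above. The cdssdk alias follows the import convention used elsewhere in this change; the sample input is illustrative only.

package main

import (
	"crypto/sha256"
	"fmt"

	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
)

func main() {
	sum := sha256.Sum256([]byte("example data"))

	// A "Full" hash is the prefix plus 64 uppercase hex characters.
	full := cdssdk.NewFullHash(sum[:])
	fmt.Println(full.IsFullHash(), full.GetPrefix(), full.GetHashPrefix(8))

	// ParseHash validates the prefix, the total length and the uppercase hex digits.
	if _, err := cdssdk.ParseHash(string(full)); err != nil {
		panic(err)
	}

	// A "Comp" hash is the SHA256 of the concatenated per-segment digests.
	comp := cdssdk.CalculateCompositeHash([][]byte{sum[:], sum[:]})
	fmt.Println(comp.IsCompositeHash())
}

Note that NewFullHashFromString performs no validation, so hashes received from outside callers should go through ParseHash instead.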
| @@ -0,0 +1,213 @@ | |||||
| package types | |||||
| import ( | |||||
| "github.com/samber/lo" | |||||
| "gitlink.org.cn/cloudream/common/pkgs/types" | |||||
| "gitlink.org.cn/cloudream/common/utils/math2" | |||||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||||
| ) | |||||
| type Redundancy interface { | |||||
| GetRedundancyType() string | |||||
| } | |||||
| var RedundancyUnion = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[Redundancy]( | |||||
| (*NoneRedundancy)(nil), | |||||
| (*RepRedundancy)(nil), | |||||
| (*ECRedundancy)(nil), | |||||
| (*LRCRedundancy)(nil), | |||||
| (*SegmentRedundancy)(nil), | |||||
| (*MultipartUploadRedundancy)(nil), | |||||
| )), "type") | |||||
| type NoneRedundancy struct { | |||||
| Redundancy | |||||
| serder.Metadata `union:"none"` | |||||
| Type string `json:"type"` | |||||
| } | |||||
| func NewNoneRedundancy() *NoneRedundancy { | |||||
| return &NoneRedundancy{ | |||||
| Type: "none", | |||||
| } | |||||
| } | |||||
| var DefaultRepRedundancy = *NewRepRedundancy(2) | |||||
| type RepRedundancy struct { | |||||
| Redundancy | |||||
| serder.Metadata `union:"rep"` | |||||
| Type string `json:"type"` | |||||
| RepCount int `json:"repCount"` | |||||
| } | |||||
| func NewRepRedundancy(repCount int) *RepRedundancy { | |||||
| return &RepRedundancy{ | |||||
| Type: "rep", | |||||
| RepCount: repCount, | |||||
| } | |||||
| } | |||||
| var DefaultECRedundancy = *NewECRedundancy(2, 3, 1024*1024*5) | |||||
| type ECRedundancy struct { | |||||
| Redundancy | |||||
| serder.Metadata `union:"ec"` | |||||
| Type string `json:"type"` | |||||
| K int `json:"k"` | |||||
| N int `json:"n"` | |||||
| ChunkSize int `json:"chunkSize"` | |||||
| } | |||||
| func NewECRedundancy(k int, n int, chunkSize int) *ECRedundancy { | |||||
| return &ECRedundancy{ | |||||
| Type: "ec", | |||||
| K: k, | |||||
| N: n, | |||||
| ChunkSize: chunkSize, | |||||
| } | |||||
| } | |||||
| func (b *ECRedundancy) StripSize() int64 { | |||||
| return int64(b.ChunkSize) * int64(b.K) | |||||
| } | |||||
| var DefaultLRCRedundancy = *NewLRCRedundancy(2, 4, []int{2}, 1024*1024*5) | |||||
| type LRCRedundancy struct { | |||||
| Redundancy | |||||
| serder.Metadata `union:"lrc"` | |||||
| Type string `json:"type"` | |||||
| K int `json:"k"` | |||||
| N int `json:"n"` | |||||
| Groups []int `json:"groups"` | |||||
| ChunkSize int `json:"chunkSize"` | |||||
| } | |||||
| func NewLRCRedundancy(k int, n int, groups []int, chunkSize int) *LRCRedundancy { | |||||
| return &LRCRedundancy{ | |||||
| Type: "lrc", | |||||
| K: k, | |||||
| N: n, | |||||
| Groups: groups, | |||||
| ChunkSize: chunkSize, | |||||
| } | |||||
| } | |||||
| // FindGroup reports which group the block at the given index belongs to. If it belongs to none, -1 is returned. | |||||
| func (b *LRCRedundancy) FindGroup(idx int) int { | |||||
| if idx >= b.N-len(b.Groups) { | |||||
| return idx - (b.N - len(b.Groups)) | |||||
| } | |||||
| for i, group := range b.Groups { | |||||
| if idx < group { | |||||
| return i | |||||
| } | |||||
| idx -= group | |||||
| } | |||||
| return -1 | |||||
| } | |||||
| // M = N - len(Groups), i.e. the total number of data blocks plus parity blocks, excluding the group parity blocks. | |||||
| func (b *LRCRedundancy) M() int { | |||||
| return b.N - len(b.Groups) | |||||
| } | |||||
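| // GetGroupElements returns the indexes of the blocks that belong to group grp, followed by the index of the group's parity block (N-len(Groups)+grp). | |||||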
| func (b *LRCRedundancy) GetGroupElements(grp int) []int { | |||||
| var idxes []int | |||||
| grpStart := 0 | |||||
| for i := 0; i < grp; i++ { | |||||
| grpStart += b.Groups[i] | |||||
| } | |||||
| for i := 0; i < b.Groups[grp]; i++ { | |||||
| idxes = append(idxes, grpStart+i) | |||||
| } | |||||
| idxes = append(idxes, b.N-len(b.Groups)+grp) | |||||
| return idxes | |||||
| } | |||||
| type SegmentRedundancy struct { | |||||
| Redundancy | |||||
| serder.Metadata `union:"segment"` | |||||
| Type string `json:"type"` | |||||
| Segments []int64 `json:"segments"` // the size of each segment | |||||
| } | |||||
| func NewSegmentRedundancy(totalSize int64, segmentCount int) *SegmentRedundancy { | |||||
| return &SegmentRedundancy{ | |||||
| Type: "segment", | |||||
| Segments: math2.SplitN(totalSize, segmentCount), | |||||
| } | |||||
| } | |||||
| func (r *SegmentRedundancy) SegmentCount() int { | |||||
| return len(r.Segments) | |||||
| } | |||||
| func (r *SegmentRedundancy) CalcSegmentStart(index int) int64 { | |||||
| return lo.Sum(r.Segments[:index]) | |||||
| } | |||||
| // FloorSegmentPosition rounds the given position down to the start of the segment that contains it. | |||||
| func (r *SegmentRedundancy) FloorSegmentPosition(pos int64) int64 { | |||||
| fpos := int64(0) | |||||
| for _, segLen := range r.Segments { | |||||
| segEnd := fpos + segLen | |||||
| if pos < segEnd { | |||||
| break | |||||
| } | |||||
| fpos += segLen | |||||
| } | |||||
| return fpos | |||||
| } | |||||
| // CalcSegmentRange computes the range of segment indexes that covers the given byte range; both the input range and the returned range are half-open intervals. | |||||
| // If end is nil, the range extends from start to the last byte. | |||||
| func (b *SegmentRedundancy) CalcSegmentRange(start int64, end *int64) (segIdxStart int, segIdxEnd int) { | |||||
| segIdxStart = len(b.Segments) | |||||
| segIdxEnd = len(b.Segments) | |||||
| // find the index of the first segment that contains start | |||||
| segStart := int64(0) | |||||
| for i, segLen := range b.Segments { | |||||
| segEnd := segStart + segLen | |||||
| if start < segEnd { | |||||
| segIdxStart = i | |||||
| break | |||||
| } | |||||
| segStart += segLen | |||||
| } | |||||
| if end != nil { | |||||
| // find the index of the first segment that contains end | |||||
| segStart = int64(0) | |||||
| for i, segLen := range b.Segments { | |||||
| segEnd := segStart + segLen | |||||
| if *end <= segEnd { | |||||
| segIdxEnd = i + 1 | |||||
| break | |||||
| } | |||||
| segStart += segLen | |||||
| } | |||||
| } | |||||
| return | |||||
| } | |||||
| type MultipartUploadRedundancy struct { | |||||
| Redundancy | |||||
| serder.Metadata `union:"multipartUpload"` | |||||
| Type string `json:"type"` | |||||
| } | |||||
| func NewMultipartUploadRedundancy() *MultipartUploadRedundancy { | |||||
| return &MultipartUploadRedundancy{ | |||||
| Type: "multipartUpload", | |||||
| } | |||||
| } | |||||
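A small sketch of how the segment helpers above behave. The SegmentRedundancy value is built directly with made-up segment sizes so the example does not depend on math2.SplitN; the cdssdk alias matches the import convention used elsewhere in this change.

package main

import (
	"fmt"

	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
)

func main() {
	// Three segments covering the byte ranges [0,100), [100,200) and [200,250).
	r := cdssdk.SegmentRedundancy{Type: "segment", Segments: []int64{100, 100, 50}}

	fmt.Println(r.SegmentCount())            // 3
	fmt.Println(r.CalcSegmentStart(2))       // 200
	fmt.Println(r.FloorSegmentPosition(150)) // 100

	// The half-open byte range [50,150) touches segments 0 and 1,
	// so the returned index range is [0,2).
	end := int64(150)
	fmt.Println(r.CalcSegmentRange(50, &end)) // 0 2

	// With end == nil the range extends to the last byte: [0,3).
	fmt.Println(r.CalcSegmentRange(50, nil)) // 0 3
}

Both the byte range passed in and the index range returned are half-open, matching the doc comment on CalcSegmentRange.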
| @@ -0,0 +1,201 @@ | |||||
| package types | |||||
| import ( | |||||
| "time" | |||||
| "github.com/samber/lo" | |||||
| "gitlink.org.cn/cloudream/common/utils/sort2" | |||||
| cotypes "gitlink.org.cn/cloudream/storage2/coordinator/types" | |||||
| ) | |||||
| const ( | |||||
| ObjectPathSeparator = "/" | |||||
| ) | |||||
| type PackageID int64 | |||||
| type ObjectID int64 | |||||
| type BucketID int64 | |||||
| type UserSpaceID int64 | |||||
| type Bucket struct { | |||||
| BucketID BucketID `gorm:"column:BucketID; primaryKey; type:bigint; autoIncrement" json:"bucketID"` | |||||
| Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"` | |||||
| CreateTime time.Time `gorm:"column:CreateTime; type:datetime; not null" json:"createTime"` | |||||
| } | |||||
| func (Bucket) TableName() string { | |||||
| return "Bucket" | |||||
| } | |||||
| type Package struct { | |||||
| PackageID PackageID `gorm:"column:PackageID; primaryKey; type:bigint; autoIncrement" json:"packageID"` | |||||
| Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"` | |||||
| BucketID BucketID `gorm:"column:BucketID; type:bigint; not null" json:"bucketID"` | |||||
| CreateTime time.Time `gorm:"column:CreateTime; type:datetime; not null" json:"createTime"` | |||||
| } | |||||
| func (Package) TableName() string { | |||||
| return "Package" | |||||
| } | |||||
| type Object struct { | |||||
| ObjectID ObjectID `json:"objectID" gorm:"column:ObjectID; primaryKey; type:bigint; autoIncrement" ` | |||||
| PackageID PackageID `json:"packageID" gorm:"column:PackageID; type:bigint; not null"` | |||||
| Path string `json:"path" gorm:"column:Path; type:varchar(1024); not null"` | |||||
| Size int64 `json:"size,string" gorm:"column:Size; type:bigint; not null"` | |||||
| FileHash FileHash `json:"fileHash" gorm:"column:FileHash; type:char(68); not null"` | |||||
| Redundancy Redundancy `json:"redundancy" gorm:"column:Redundancy; type: json; serializer:union"` | |||||
| CreateTime time.Time `json:"createTime" gorm:"column:CreateTime; type:datetime; not null"` | |||||
| UpdateTime time.Time `json:"updateTime" gorm:"column:UpdateTime; type:datetime; not null"` | |||||
| } | |||||
| func (Object) TableName() string { | |||||
| return "Object" | |||||
| } | |||||
| type ObjectBlock struct { | |||||
| ObjectID ObjectID `gorm:"column:ObjectID; primaryKey; type:bigint" json:"objectID"` | |||||
| Index int `gorm:"column:Index; primaryKey; type:int" json:"index"` | |||||
| // the user space in which this block should reside | |||||
| UserSpaceID UserSpaceID `gorm:"column:UserSpaceID; primaryKey; type:bigint" json:"userSpaceID"` | |||||
| FileHash FileHash `gorm:"column:FileHash; type:char(68); not null" json:"fileHash"` | |||||
| Size int64 `gorm:"column:Size; type:bigint" json:"size"` | |||||
| } | |||||
| func (ObjectBlock) TableName() string { | |||||
| return "ObjectBlock" | |||||
| } | |||||
| type UserSpace struct { | |||||
| UserSpaceID UserSpaceID `gorm:"column:UserSpaceID; primaryKey; type:bigint" json:"userSpaceID"` | |||||
| // the storage node where the user space resides | |||||
| StorageID cotypes.StorageID `gorm:"column:StorageID; type:bigint; not null" json:"storageID"` | |||||
| } | |||||
| func (UserSpace) TableName() string { | |||||
| return "UserSpace" | |||||
| } | |||||
| type PackageAccessStat struct { | |||||
| PackageID PackageID `gorm:"column:PackageID; primaryKey; type:bigint" json:"packageID"` | |||||
| // ID of the user space that initiated the read (scheduling) | |||||
| UserSpaceID UserSpaceID `gorm:"column:UserSpaceID; primaryKey; type:bigint" json:"userSpaceID"` | |||||
| // moving average of the previous day's read volume | |||||
| Amount float64 `gorm:"column:Amount; type:double" json:"amount"` | |||||
| // read volume for the current day | |||||
| Counter float64 `gorm:"column:Counter; type:double" json:"counter"` | |||||
| } | |||||
| func (PackageAccessStat) TableName() string { | |||||
| return "PackageAccessStat" | |||||
| } | |||||
| type ObjectAccessStat struct { | |||||
| ObjectID ObjectID `gorm:"column:ObjectID; primaryKey; type:bigint" json:"objectID"` | |||||
| // ID of the user space that initiated the read (scheduling) | |||||
| UserSpaceID UserSpaceID `gorm:"column:UserSpaceID; primaryKey; type:bigint" json:"userSpaceID"` | |||||
| // moving average of the previous day's read volume | |||||
| Amount float64 `gorm:"column:Amount; type:float; not null" json:"amount"` | |||||
| // read volume for the current day | |||||
| Counter float64 `gorm:"column:Counter; type:float; not null" json:"counter"` | |||||
| } | |||||
| func (ObjectAccessStat) TableName() string { | |||||
| return "ObjectAccessStat" | |||||
| } | |||||
| type PinnedObject struct { | |||||
| ObjectID ObjectID `gorm:"column:ObjectID; primaryKey; type:bigint" json:"objectID"` | |||||
| UserSpaceID UserSpaceID `gorm:"column:UserSpaceID; primaryKey; type:bigint" json:"userSpaceID"` | |||||
| CreateTime time.Time `gorm:"column:CreateTime; type:datetime; not null" json:"createTime"` | |||||
| } | |||||
| func (PinnedObject) TableName() string { | |||||
| return "PinnedObject" | |||||
| } | |||||
| type ObjectDetail struct { | |||||
| Object Object `json:"object"` | |||||
| PinnedAt []UserSpaceID `json:"pinnedAt"` | |||||
| Blocks []ObjectBlock `json:"blocks"` | |||||
| } | |||||
| func NewObjectDetail(object Object, pinnedAt []UserSpaceID, blocks []ObjectBlock) ObjectDetail { | |||||
| return ObjectDetail{ | |||||
| Object: object, | |||||
| PinnedAt: pinnedAt, | |||||
| Blocks: blocks, | |||||
| } | |||||
| } | |||||
| func DetailsFromObjects(objects []Object) []ObjectDetail { | |||||
| details := make([]ObjectDetail, len(objects)) | |||||
| for i, object := range objects { | |||||
| details[i] = ObjectDetail{ | |||||
| Object: object, | |||||
| } | |||||
| } | |||||
| return details | |||||
| } | |||||
| // DetailsFillObjectBlocks puts each block into its corresponding object. Both objs and blocks must be sorted by ObjectID in ascending order. | |||||
| func DetailsFillObjectBlocks(objs []ObjectDetail, blocks []ObjectBlock) { | |||||
| blksCur := 0 | |||||
| for i := range objs { | |||||
| obj := &objs[i] | |||||
| // 1. Both the Object and the ObjectBlock queries are sorted by ObjectID in ascending order. | |||||
| // 2. The distinct ObjectIDs in the ObjectBlock result set can only be a subset of those in the Object result set. | |||||
| // Therefore, when walking both result sets from the beginning, if the ObjectID fields on the two sides differ, the ObjectID on the ObjectBlock side must be greater than the one on the Object side; | |||||
| // in that case, advance the Object cursor until the two ObjectIDs are equal again. | |||||
| for ; blksCur < len(blocks); blksCur++ { | |||||
| if blocks[blksCur].ObjectID != obj.Object.ObjectID { | |||||
| break | |||||
| } | |||||
| obj.Blocks = append(obj.Blocks, blocks[blksCur]) | |||||
| } | |||||
| } | |||||
| } | |||||
| // DetailsFillPinnedAt puts each pinnedAt entry into its corresponding object. Both objs and pinnedAt must be sorted by ObjectID in ascending order. | |||||
| func DetailsFillPinnedAt(objs []ObjectDetail, pinnedAt []PinnedObject) { | |||||
| pinnedCur := 0 | |||||
| for i := range objs { | |||||
| obj := &objs[i] | |||||
| for ; pinnedCur < len(pinnedAt); pinnedCur++ { | |||||
| if pinnedAt[pinnedCur].ObjectID != obj.Object.ObjectID { | |||||
| break | |||||
| } | |||||
| obj.PinnedAt = append(obj.PinnedAt, pinnedAt[pinnedCur].UserSpaceID) | |||||
| } | |||||
| } | |||||
| } | |||||
| type GrouppedObjectBlock struct { | |||||
| ObjectID ObjectID | |||||
| Index int | |||||
| FileHash FileHash | |||||
| Size int64 | |||||
| UserSpaceIDs []UserSpaceID | |||||
| } | |||||
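| // GroupBlocks merges blocks that share the same index into a single entry per index, collecting the user spaces that hold a copy of that block, sorted by index. | |||||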
| func (o *ObjectDetail) GroupBlocks() []GrouppedObjectBlock { | |||||
| grps := make(map[int]GrouppedObjectBlock) | |||||
| for _, block := range o.Blocks { | |||||
| grp, ok := grps[block.Index] | |||||
| if !ok { | |||||
| grp = GrouppedObjectBlock{ | |||||
| ObjectID: block.ObjectID, | |||||
| Index: block.Index, | |||||
| FileHash: block.FileHash, | |||||
| Size: block.Size, | |||||
| } | |||||
| } | |||||
| grp.UserSpaceIDs = append(grp.UserSpaceIDs, block.UserSpaceID) | |||||
| grps[block.Index] = grp | |||||
| } | |||||
| return sort2.Sort(lo.Values(grps), func(l, r GrouppedObjectBlock) int { return l.Index - r.Index }) | |||||
| } | |||||
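A short sketch of how the detail helpers above fit together; the IDs and sizes are illustrative only, and the cdssdk alias matches the import convention used elsewhere in this change.

package main

import (
	"fmt"

	cdssdk "gitlink.org.cn/cloudream/storage2/client/types"
)

func main() {
	// Both slices must be sorted by ObjectID in ascending order.
	objs := cdssdk.DetailsFromObjects([]cdssdk.Object{{ObjectID: 1}, {ObjectID: 2}})
	blocks := []cdssdk.ObjectBlock{
		{ObjectID: 1, Index: 0, UserSpaceID: 10, Size: 4096},
		{ObjectID: 1, Index: 0, UserSpaceID: 11, Size: 4096},
		{ObjectID: 2, Index: 0, UserSpaceID: 10, Size: 1024},
	}
	cdssdk.DetailsFillObjectBlocks(objs, blocks)

	// The two copies of block 0 of object 1 are merged into a single group
	// that lists both user spaces.
	for _, grp := range objs[0].GroupBlocks() {
		fmt.Println(grp.Index, grp.UserSpaceIDs) // 0 [10 11]
	}
}

DetailsFillPinnedAt follows the same contract for PinnedObject rows.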