From 92f5c18e9de735d9355659148ca697971268addb Mon Sep 17 00:00:00 2001 From: Sydonian <794346190@qq.com> Date: Wed, 9 Apr 2025 15:11:49 +0800 Subject: [PATCH] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=B8=80=E4=BA=9B=E8=B0=83?= =?UTF-8?q?=E8=AF=95=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- client/internal/db/json_serializer.go | 44 ++++++++ client/internal/http/aws_auth.go | 31 ++++-- .../http/{storage.go => user_space.go} | 0 client/internal/uploader/create_load.go | 13 ++- client/internal/uploader/update.go | 13 ++- client/internal/uploader/uploader.go | 32 +++--- client/types/redundancy.go | 12 +-- client/types/types.go | 2 +- .../{agent.config.json => hub.config.json} | 0 coordinator/internal/db/json_serializer.go | 44 ++++++++ coordinator/types/public_storage.go | 50 --------- coordinator/types/shard_storage.go | 51 --------- coordinator/types/storage_credential.go | 100 +++++++++--------- hub/internal/cmd/serve.go | 2 + hub/main.go | 5 +- 15 files changed, 205 insertions(+), 194 deletions(-) create mode 100644 client/internal/db/json_serializer.go rename client/internal/http/{storage.go => user_space.go} (100%) rename common/assets/confs/{agent.config.json => hub.config.json} (100%) create mode 100644 coordinator/internal/db/json_serializer.go delete mode 100644 coordinator/types/public_storage.go delete mode 100644 coordinator/types/shard_storage.go diff --git a/client/internal/db/json_serializer.go b/client/internal/db/json_serializer.go new file mode 100644 index 0000000..b9a9cdd --- /dev/null +++ b/client/internal/db/json_serializer.go @@ -0,0 +1,44 @@ +package db + +import ( + "context" + "fmt" + "reflect" + + "gitlink.org.cn/cloudream/common/utils/serder" + "gorm.io/gorm/schema" +) + +type JSONSerializer struct { +} + +func (JSONSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error { + fieldValue := reflect.New(field.FieldType) + if dbValue != nil { + var data []byte + switch v := dbValue.(type) { + case []byte: + data = v + case string: + data = []byte(v) + default: + return fmt.Errorf("failed to unmarshal JSONB value: %#v", dbValue) + } + + err := serder.JSONToObject(data, fieldValue.Interface()) + if err != nil { + return err + } + } + + field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem()) + return nil +} + +func (JSONSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) { + return serder.ObjectToJSON(fieldValue) +} + +func init() { + schema.RegisterSerializer("json", JSONSerializer{}) +} diff --git a/client/internal/http/aws_auth.go b/client/internal/http/aws_auth.go index e8b97c5..9353175 100644 --- a/client/internal/http/aws_auth.go +++ b/client/internal/http/aws_auth.go @@ -87,10 +87,14 @@ func (a *AWSAuth) Auth(c *gin.Context) { return } for _, h := range headers { - verifyReq.Header.Add(h, c.Request.Header.Get(h)) + if strings.EqualFold(h, "content-length") { + verifyReq.ContentLength = c.Request.ContentLength + } else if strings.EqualFold(h, "host") { + verifyReq.Host = c.Request.Host + } else { + verifyReq.Header.Add(h, c.Request.Header.Get(h)) + } } - verifyReq.Host = c.Request.Host - verifyReq.ContentLength = c.Request.ContentLength signer := v4.NewSigner() err = signer.SignHTTP(context.TODO(), a.cred, verifyReq, hexPayloadHash, AuthService, AuthRegion, timestamp) @@ -102,7 +106,7 @@ func (a *AWSAuth) Auth(c *gin.Context) { verifySig := getSignatureFromAWSHeader(verifyReq) if 
!strings.EqualFold(verifySig, reqSig) { - logger.Warnf("signature mismatch, input header: %s, verify: %s", authorizationHeader, verifySig) + logger.Warnf("signature mismatch, input header: %s, verify: %s", authorizationHeader, verifyReq.Header.Get(AuthorizationHeader)) c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.Unauthorized, "signature mismatch")) return } @@ -143,10 +147,14 @@ func (a *AWSAuth) AuthWithoutBody(c *gin.Context) { return } for _, h := range headers { - verifyReq.Header.Add(h, c.Request.Header.Get(h)) + if strings.EqualFold(h, "content-length") { + verifyReq.ContentLength = c.Request.ContentLength + } else if strings.EqualFold(h, "host") { + verifyReq.Host = c.Request.Host + } else { + verifyReq.Header.Add(h, c.Request.Header.Get(h)) + } } - verifyReq.Host = c.Request.Host - verifyReq.ContentLength = c.Request.ContentLength err = a.signer.SignHTTP(context.TODO(), a.cred, verifyReq, "", AuthService, AuthRegion, timestamp) @@ -197,9 +205,14 @@ func (a *AWSAuth) PresignedAuth(c *gin.Context) { return } for _, h := range signedHeaders { - verifyReq.Header.Add(h, c.Request.Header.Get(h)) + if strings.EqualFold(h, "content-length") { + verifyReq.ContentLength = c.Request.ContentLength + } else if strings.EqualFold(h, "host") { + verifyReq.Host = c.Request.Host + } else { + verifyReq.Header.Add(h, c.Request.Header.Get(h)) + } } - verifyReq.Host = c.Request.Host timestamp, err := time.Parse("20060102T150405Z", date) if err != nil { diff --git a/client/internal/http/storage.go b/client/internal/http/user_space.go similarity index 100% rename from client/internal/http/storage.go rename to client/internal/http/user_space.go diff --git a/client/internal/uploader/create_load.go b/client/internal/uploader/create_load.go index f92515a..2f18521 100644 --- a/client/internal/uploader/create_load.go +++ b/client/internal/uploader/create_load.go @@ -11,7 +11,6 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/storage2/client/internal/db" "gitlink.org.cn/cloudream/storage2/client/types" - "gitlink.org.cn/cloudream/storage2/common/pkgs/distlock" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/parser" @@ -22,10 +21,10 @@ type CreateLoadUploader struct { targetSpaces []types.UserSpaceDetail loadRoots []string uploader *Uploader - distlock *distlock.Mutex - successes []db.AddObjectEntry - lock sync.Mutex - commited bool + // distlock *distlock.Mutex + successes []db.AddObjectEntry + lock sync.Mutex + commited bool } type CreateLoadResult struct { @@ -85,7 +84,7 @@ func (u *CreateLoadUploader) Commit() (CreateLoadResult, error) { } u.commited = true - defer u.distlock.Unlock() + // defer u.distlock.Unlock() var addedObjs []types.Object err := u.uploader.db.DoTx(func(tx db.SQLContext) error { @@ -118,7 +117,7 @@ func (u *CreateLoadUploader) Abort() { } u.commited = true - u.distlock.Unlock() + // u.distlock.Unlock() // TODO 可以考虑删除PackageID } diff --git a/client/internal/uploader/update.go b/client/internal/uploader/update.go index 956b902..4465780 100644 --- a/client/internal/uploader/update.go +++ b/client/internal/uploader/update.go @@ -12,17 +12,16 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/storage2/client/internal/db" "gitlink.org.cn/cloudream/storage2/client/types" - "gitlink.org.cn/cloudream/storage2/common/pkgs/distlock" 
"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/parser" ) type UpdateUploader struct { - uploader *Uploader - pkgID types.PackageID - targetSpace types.UserSpaceDetail - distMutex *distlock.Mutex + uploader *Uploader + pkgID types.PackageID + targetSpace types.UserSpaceDetail + // distMutex *distlock.Mutex loadToSpaces []types.UserSpaceDetail loadToPath []string successes []db.AddObjectEntry @@ -115,7 +114,7 @@ func (w *UpdateUploader) Commit() (UpdateResult, error) { } w.commited = true - defer w.distMutex.Unlock() + // defer w.distMutex.Unlock() var addedObjs []types.Object err := w.uploader.db.DoTx(func(tx db.SQLContext) error { @@ -147,5 +146,5 @@ func (w *UpdateUploader) Abort() { } w.commited = true - w.distMutex.Unlock() + // w.distMutex.Unlock() } diff --git a/client/internal/uploader/uploader.go b/client/internal/uploader/uploader.go index 5da253b..1299953 100644 --- a/client/internal/uploader/uploader.go +++ b/client/internal/uploader/uploader.go @@ -14,7 +14,7 @@ import ( "gitlink.org.cn/cloudream/common/utils/sort2" "gitlink.org.cn/cloudream/storage2/client/internal/db" "gitlink.org.cn/cloudream/storage2/client/internal/metacache" - "gitlink.org.cn/cloudream/storage2/client/types" + clitypes "gitlink.org.cn/cloudream/storage2/client/types" stgglb "gitlink.org.cn/cloudream/storage2/common/globals" "gitlink.org.cn/cloudream/storage2/common/pkgs/connectivity" "gitlink.org.cn/cloudream/storage2/common/pkgs/distlock" @@ -38,10 +38,11 @@ func NewUploader(distlock *distlock.Service, connectivity *connectivity.Collecto connectivity: connectivity, stgPool: stgPool, spaceMeta: spaceMeta, + db: db, } } -func (u *Uploader) BeginUpdate(pkgID types.PackageID, affinity types.UserSpaceID, loadTo []types.UserSpaceID, loadToPath []string) (*UpdateUploader, error) { +func (u *Uploader) BeginUpdate(pkgID clitypes.PackageID, affinity clitypes.UserSpaceID, loadTo []clitypes.UserSpaceID, loadToPath []string) (*UpdateUploader, error) { spaceIDs, err := u.db.UserSpace().GetAllIDs(u.db.DefCtx()) if err != nil { return nil, fmt.Errorf("getting user space ids: %w", err) @@ -75,9 +76,9 @@ func (u *Uploader) BeginUpdate(pkgID types.PackageID, affinity types.UserSpaceID return nil, fmt.Errorf("user no available storages") } - loadToSpaces := make([]types.UserSpaceDetail, len(loadTo)) + loadToSpaces := make([]clitypes.UserSpaceDetail, len(loadTo)) for i, spaceID := range loadTo { - space, ok := lo.Find(spaceDetails, func(space *types.UserSpaceDetail) bool { + space, ok := lo.Find(spaceDetails, func(space *clitypes.UserSpaceDetail) bool { return space.UserSpace.UserSpaceID == spaceID }) if !ok { @@ -115,7 +116,7 @@ func (u *Uploader) BeginUpdate(pkgID types.PackageID, affinity types.UserSpaceID // 1. 选择设置了亲和性的节点 // 2. 从与当前客户端相同地域的节点中随机选一个 // 3. 
没有的话从所有节点选择延迟最低的节点 -func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity types.UserSpaceID) UploadSpaceInfo { +func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity clitypes.UserSpaceID) UploadSpaceInfo { if spaceAffinity > 0 { aff, ok := lo.Find(spaces, func(space UploadSpaceInfo) bool { return space.Space.UserSpace.UserSpaceID == spaceAffinity }) if ok { @@ -134,10 +135,10 @@ func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity t return spaces[0] } -func (u *Uploader) BeginCreateLoad(bktID types.BucketID, pkgName string, loadTo []types.UserSpaceID, loadToPath []string) (*CreateLoadUploader, error) { +func (u *Uploader) BeginCreateLoad(bktID clitypes.BucketID, pkgName string, loadTo []clitypes.UserSpaceID, loadToPath []string) (*CreateLoadUploader, error) { getSpaces := u.spaceMeta.GetMany(loadTo) - spacesStgs := make([]types.UserSpaceDetail, len(loadTo)) + spacesStgs := make([]clitypes.UserSpaceDetail, len(loadTo)) for i, stg := range getSpaces { if stg == nil { return nil, fmt.Errorf("storage %v not found", loadTo[i]) @@ -145,7 +146,14 @@ func (u *Uploader) BeginCreateLoad(bktID types.BucketID, pkgName string, loadTo spacesStgs[i] = *stg } - pkg, err := u.db.Package().Create(u.db.DefCtx(), bktID, pkgName) + pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (clitypes.Package, error) { + _, err := u.db.Bucket().GetByID(tx, bktID) + if err != nil { + return clitypes.Package{}, err + } + + return u.db.Package().Create(u.db.DefCtx(), bktID, pkgName) + }) if err != nil { return nil, fmt.Errorf("create package: %w", err) } @@ -170,19 +178,19 @@ func (u *Uploader) BeginCreateLoad(bktID types.BucketID, pkgName string, loadTo }, nil } -func (u *Uploader) UploadPart(objID types.ObjectID, index int, stream io.Reader) error { +func (u *Uploader) UploadPart(objID clitypes.ObjectID, index int, stream io.Reader) error { detail, err := u.db.Object().GetDetail(u.db.DefCtx(), objID) if err != nil { return fmt.Errorf("getting object detail: %w", err) } objDe := detail - _, ok := objDe.Object.Redundancy.(*types.MultipartUploadRedundancy) + _, ok := objDe.Object.Redundancy.(*clitypes.MultipartUploadRedundancy) if !ok { return fmt.Errorf("object %v is not a multipart upload", objID) } - var space types.UserSpaceDetail + var space clitypes.UserSpaceDetail if len(objDe.Blocks) > 0 { cstg := u.spaceMeta.Get(objDe.Blocks[0].UserSpaceID) if cstg == nil { @@ -257,7 +265,7 @@ func (u *Uploader) UploadPart(objID types.ObjectID, index int, stream io.Reader) shardInfo := ret["shard"].(*ops2.ShardInfoValue) err = u.db.DoTx(func(tx db.SQLContext) error { - return u.db.Object().AppendPart(tx, types.ObjectBlock{ + return u.db.Object().AppendPart(tx, clitypes.ObjectBlock{ ObjectID: objID, Index: index, UserSpaceID: space.UserSpace.UserSpaceID, diff --git a/client/types/redundancy.go b/client/types/redundancy.go index e42e81d..c60e6f8 100644 --- a/client/types/redundancy.go +++ b/client/types/redundancy.go @@ -21,7 +21,7 @@ var RedundancyUnion = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTyp )), "type") type NoneRedundancy struct { - Redundancy + Redundancy `json:"-"` serder.Metadata `union:"none"` Type string `json:"type"` } @@ -35,7 +35,7 @@ func NewNoneRedundancy() *NoneRedundancy { var DefaultRepRedundancy = *NewRepRedundancy(2) type RepRedundancy struct { - Redundancy + Redundancy `json:"-"` serder.Metadata `union:"rep"` Type string `json:"type"` RepCount int `json:"repCount"` @@ -51,7 +51,7 @@ func NewRepRedundancy(repCount int) 
*RepRedundancy { var DefaultECRedundancy = *NewECRedundancy(2, 3, 1024*1024*5) type ECRedundancy struct { - Redundancy + Redundancy `json:"-"` serder.Metadata `union:"ec"` Type string `json:"type"` K int `json:"k"` @@ -75,7 +75,7 @@ func (b *ECRedundancy) StripSize() int64 { var DefaultLRCRedundancy = *NewLRCRedundancy(2, 4, []int{2}, 1024*1024*5) type LRCRedundancy struct { - Redundancy + Redundancy `json:"-"` serder.Metadata `union:"lrc"` Type string `json:"type"` K int `json:"k"` @@ -132,7 +132,7 @@ func (b *LRCRedundancy) GetGroupElements(grp int) []int { } type SegmentRedundancy struct { - Redundancy + Redundancy `json:"-"` serder.Metadata `union:"segment"` Type string `json:"type"` Segments []int64 `json:"segments"` // 每一段的大小 @@ -201,7 +201,7 @@ func (b *SegmentRedundancy) CalcSegmentRange(start int64, end *int64) (segIdxSta } type MultipartUploadRedundancy struct { - Redundancy + Redundancy `json:"-"` serder.Metadata `union:"multipartUpload"` Type string `json:"type"` } diff --git a/client/types/types.go b/client/types/types.go index 337ca69..056822b 100644 --- a/client/types/types.go +++ b/client/types/types.go @@ -79,7 +79,7 @@ type UserSpace struct { // 用户在指定存储节点的凭证信息,比如用户账户,AK/SK等 Credential cotypes.StorageCredential `gorm:"column:Credential; type:json; not null; serializer:union" json:"credential"` // 用户空间的分片存储配置,如果为空,则表示不使用分片存储 - ShardStore *cotypes.ShardStoreUserConfig `gorm:"column:ShardStore; type:json;" json:"shardStore"` + ShardStore *cotypes.ShardStoreUserConfig `gorm:"column:ShardStore; type:json; serializer:json" json:"shardStore"` // 用户空间信息的版本号,每一次更改都需要更新版本号 Revision int64 `gorm:"column:Revision; type:bigint; not null" json:"revision"` } diff --git a/common/assets/confs/agent.config.json b/common/assets/confs/hub.config.json similarity index 100% rename from common/assets/confs/agent.config.json rename to common/assets/confs/hub.config.json diff --git a/coordinator/internal/db/json_serializer.go b/coordinator/internal/db/json_serializer.go new file mode 100644 index 0000000..b9a9cdd --- /dev/null +++ b/coordinator/internal/db/json_serializer.go @@ -0,0 +1,44 @@ +package db + +import ( + "context" + "fmt" + "reflect" + + "gitlink.org.cn/cloudream/common/utils/serder" + "gorm.io/gorm/schema" +) + +type JSONSerializer struct { +} + +func (JSONSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error { + fieldValue := reflect.New(field.FieldType) + if dbValue != nil { + var data []byte + switch v := dbValue.(type) { + case []byte: + data = v + case string: + data = []byte(v) + default: + return fmt.Errorf("failed to unmarshal JSONB value: %#v", dbValue) + } + + err := serder.JSONToObject(data, fieldValue.Interface()) + if err != nil { + return err + } + } + + field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem()) + return nil +} + +func (JSONSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) { + return serder.ObjectToJSON(fieldValue) +} + +func init() { + schema.RegisterSerializer("json", JSONSerializer{}) +} diff --git a/coordinator/types/public_storage.go b/coordinator/types/public_storage.go deleted file mode 100644 index 9ed5be3..0000000 --- a/coordinator/types/public_storage.go +++ /dev/null @@ -1,50 +0,0 @@ -package types - -/* -import ( - "fmt" - - "gitlink.org.cn/cloudream/common/pkgs/types" - "gitlink.org.cn/cloudream/common/utils/serder" -) - -type PublicStoreConfig interface { - GetPublicStoreType() string - // 输出调试用的字符串,不要包含敏感信息 - String() 
string -} - -var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[PublicStoreConfig]( - (*LocalPublicStorage)(nil), - (*S3PublicStorage)(nil), -)), "type") - -type LocalPublicStorage struct { - serder.Metadata `union:"Local"` - Type string `json:"type"` - // 调度Package时的Package的根路径 - LoadBase string `json:"loadBase"` -} - -func (s *LocalPublicStorage) GetPublicStoreType() string { - return "Local" -} - -func (s *LocalPublicStorage) String() string { - return fmt.Sprintf("Local[LoadBase=%v]", s.LoadBase) -} - -type S3PublicStorage struct { - serder.Metadata `union:"S3"` - Type string `json:"type"` - LoadBase string `json:"loadBase"` -} - -func (s *S3PublicStorage) GetPublicStoreType() string { - return "S3" -} - -func (s *S3PublicStorage) String() string { - return fmt.Sprintf("S3[LoadBase=%v]", s.LoadBase) -} -*/ diff --git a/coordinator/types/shard_storage.go b/coordinator/types/shard_storage.go deleted file mode 100644 index 04be462..0000000 --- a/coordinator/types/shard_storage.go +++ /dev/null @@ -1,51 +0,0 @@ -package types - -/* -import ( - "fmt" - - "gitlink.org.cn/cloudream/common/pkgs/types" - "gitlink.org.cn/cloudream/common/utils/serder" -) - -// 分片存储服务的配置数据 -type ShardStoreConfig interface { - GetShardStoreType() string - // 输出调试用的字符串,不要包含敏感信息 - String() string -} - -var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[ShardStoreConfig]( - (*LocalShardStorage)(nil), - (*S3ShardStorage)(nil), -)), "type") - -type LocalShardStorage struct { - serder.Metadata `union:"Local"` - Type string `json:"type"` - Root string `json:"root"` - MaxSize int64 `json:"maxSize"` -} - -func (s *LocalShardStorage) GetShardStoreType() string { - return "Local" -} - -func (s *LocalShardStorage) String() string { - return fmt.Sprintf("Local[root=%s, maxSize=%d]", s.Root, s.MaxSize) -} - -type S3ShardStorage struct { - serder.Metadata `union:"S3"` - Type string `json:"type"` - Root string `json:"root"` -} - -func (s *S3ShardStorage) GetShardStoreType() string { - return "S3" -} - -func (s *S3ShardStorage) String() string { - return fmt.Sprintf("S3[root=%s]", s.Root) -} -*/ diff --git a/coordinator/types/storage_credential.go b/coordinator/types/storage_credential.go index 98dbcfb..3a838e7 100644 --- a/coordinator/types/storage_credential.go +++ b/coordinator/types/storage_credential.go @@ -20,73 +20,73 @@ var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[Storage )), "type") type LocalCred struct { - StorageCredential - serder.Metadata `union:"Local"` - Type string `json:"type"` + StorageCredential `json:"-"` + serder.Metadata `union:"Local"` + Type string `json:"type"` } type MashupCred struct { - StorageCredential - serder.Metadata `union:"Mashup"` - Store StorageCredential `json:"store"` - Feature StorageCredential `json:"feature"` + StorageCredential `json:"-"` + serder.Metadata `union:"Mashup"` + Store StorageCredential `json:"store"` + Feature StorageCredential `json:"feature"` } type OSSCred struct { - StorageCredential - serder.Metadata `union:"OSS"` - Type string `json:"type"` - Region string `json:"region"` - AK string `json:"accessKeyId"` - SK string `json:"secretAccessKey"` - Endpoint string `json:"endpoint"` - Bucket string `json:"bucket"` + StorageCredential `json:"-"` + serder.Metadata `union:"OSS"` + Type string `json:"type"` + Region string `json:"region"` + AK string `json:"accessKeyId"` + SK string `json:"secretAccessKey"` + Endpoint string `json:"endpoint"` + Bucket string `json:"bucket"` } type OBSCred struct { - 
StorageCredential - serder.Metadata `union:"OBS"` - Type string `json:"type"` - Region string `json:"region"` - AK string `json:"accessKeyId"` - SK string `json:"secretAccessKey"` - Endpoint string `json:"endpoint"` - Bucket string `json:"bucket"` - ProjectID string `json:"projectID"` + StorageCredential `json:"-"` + serder.Metadata `union:"OBS"` + Type string `json:"type"` + Region string `json:"region"` + AK string `json:"accessKeyId"` + SK string `json:"secretAccessKey"` + Endpoint string `json:"endpoint"` + Bucket string `json:"bucket"` + ProjectID string `json:"projectID"` } type COSCred struct { - StorageCredential - serder.Metadata `union:"COS"` - Type string `json:"type"` - Region string `json:"region"` - AK string `json:"accessKeyId"` - SK string `json:"secretAccessKey"` - Endpoint string `json:"endpoint"` - Bucket string `json:"bucket"` + StorageCredential `json:"-"` + serder.Metadata `union:"COS"` + Type string `json:"type"` + Region string `json:"region"` + AK string `json:"accessKeyId"` + SK string `json:"secretAccessKey"` + Endpoint string `json:"endpoint"` + Bucket string `json:"bucket"` } type EFileCred struct { - StorageCredential - serder.Metadata `union:"EFile"` - Type string `json:"type"` - TokenURL string `json:"tokenURL"` - APIURL string `json:"apiURL"` - TokenExpire int `json:"tokenExpire"` // 单位秒 - User string `json:"user"` - Password string `json:"password"` - OrgID string `json:"orgID"` - ClusterID string `json:"clusterID"` + StorageCredential `json:"-"` + serder.Metadata `union:"EFile"` + Type string `json:"type"` + TokenURL string `json:"tokenURL"` + APIURL string `json:"apiURL"` + TokenExpire int `json:"tokenExpire"` // 单位秒 + User string `json:"user"` + Password string `json:"password"` + OrgID string `json:"orgID"` + ClusterID string `json:"clusterID"` } // 通用的S3协议的存储服务 type S3Cred struct { - StorageCredential - serder.Metadata `union:"S3"` - Type string `json:"type"` - Region string `json:"region"` - AK string `json:"accessKeyId"` - SK string `json:"secretAccessKey"` - Endpoint string `json:"endpoint"` - Bucket string `json:"bucket"` + StorageCredential `json:"-"` + serder.Metadata `union:"S3"` + Type string `json:"type"` + Region string `json:"region"` + AK string `json:"accessKeyId"` + SK string `json:"secretAccessKey"` + Endpoint string `json:"endpoint"` + Bucket string `json:"bucket"` } diff --git a/hub/internal/cmd/serve.go b/hub/internal/cmd/serve.go index c93513d..85827a5 100644 --- a/hub/internal/cmd/serve.go +++ b/hub/internal/cmd/serve.go @@ -36,6 +36,8 @@ func init() { var httpAddr string cmd := &cobra.Command{ + Use: "serve", + Short: "start storage2 hub service", Run: func(cmd *cobra.Command, args []string) { serve(configPath, httpAddr) }, diff --git a/hub/main.go b/hub/main.go index 02f6831..75b4835 100644 --- a/hub/main.go +++ b/hub/main.go @@ -1,6 +1,9 @@ package main -import "gitlink.org.cn/cloudream/storage2/hub/internal/cmd" +import ( + _ "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2" + "gitlink.org.cn/cloudream/storage2/hub/internal/cmd" +) func main() { cmd.RootCmd.Execute()
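Note on the new client/internal/db/json_serializer.go (and its coordinator twin) together with the `serializer:json` tag added to UserSpace.ShardStore: the file registers a custom GORM serializer under the name "json" so that JSON columns are decoded through the project's serder helpers instead of gorm's built-in serializer. The sketch below is only an illustration of that wiring, not part of the patch: it substitutes encoding/json for serder, uses gorm.io/driver/sqlite purely for the demo, and the ShardStoreUserConfig field shown is hypothetical.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"reflect"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/schema"
)

// JSONSerializer mirrors the Scan/Value shape of the serializer added in this
// patch, with encoding/json standing in for the project's serder helpers.
type JSONSerializer struct{}

func (JSONSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
	fieldValue := reflect.New(field.FieldType)
	if dbValue != nil {
		var data []byte
		switch v := dbValue.(type) {
		case []byte:
			data = v
		case string:
			data = []byte(v)
		default:
			return fmt.Errorf("failed to unmarshal JSON value: %#v", dbValue)
		}
		if err := json.Unmarshal(data, fieldValue.Interface()); err != nil {
			return err
		}
	}
	field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
	return nil
}

func (JSONSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
	return json.Marshal(fieldValue)
}

// ShardStoreUserConfig is a stand-in for cotypes.ShardStoreUserConfig; the
// MaxSize field is hypothetical, only the tag layout matters here.
type ShardStoreUserConfig struct {
	MaxSize int64 `json:"maxSize"`
}

// UserSpace shows the tag change from client/types/types.go: adding
// `serializer:json` routes the column through the serializer registered below.
type UserSpace struct {
	ID         int64                 `gorm:"primaryKey"`
	ShardStore *ShardStoreUserConfig `gorm:"type:json;serializer:json"`
}

func main() {
	// Same registration as the init() in json_serializer.go; registering the
	// name "json" overrides gorm's built-in JSON serializer.
	schema.RegisterSerializer("json", JSONSerializer{})

	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&UserSpace{}); err != nil {
		panic(err)
	}

	db.Create(&UserSpace{ID: 1, ShardStore: &ShardStoreUserConfig{MaxSize: 1 << 30}})

	var got UserSpace
	db.First(&got, 1)
	fmt.Printf("%+v\n", *got.ShardStore) // {MaxSize:1073741824}
}

The nil check in Scan matters for nullable columns such as ShardStore: when the database value is NULL, the field is left as the zero value (a nil pointer) instead of failing to unmarshal.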
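Note on the aws_auth.go hunks: the handlers verify an incoming SigV4 request by rebuilding it, re-signing it with the server-side credentials, and comparing signatures. The fix special-cases Host and Content-Length because net/http keeps them in http.Request.Host and http.Request.ContentLength rather than in the Header map, so copying them with Header.Add left the rebuilt request incomplete and the recomputed signature never matched. The sketch below shows the idea outside of gin; the helper names (buildVerifyRequest, verifySigV4) are illustrative, not the functions in this patch, and it assumes the aws-sdk-go-v2 v4 signer API.

package awsauth

import (
	"context"
	"net/http"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

// buildVerifyRequest copies the client's signed headers onto a fresh request.
// Host and Content-Length must go to the struct fields, not the Header map,
// otherwise the signer sees empty values for them.
func buildVerifyRequest(in *http.Request, signedHeaders []string) (*http.Request, error) {
	out, err := http.NewRequest(in.Method, in.URL.String(), nil)
	if err != nil {
		return nil, err
	}
	for _, h := range signedHeaders {
		switch {
		case strings.EqualFold(h, "content-length"):
			out.ContentLength = in.ContentLength
		case strings.EqualFold(h, "host"):
			out.Host = in.Host
		default:
			out.Header.Add(h, in.Header.Get(h))
		}
	}
	return out, nil
}

// verifySigV4 re-signs the rebuilt request with the server-side credentials and
// compares the resulting signature against the one the client sent.
// payloadHash is the hex SHA-256 the client declared for the body.
func verifySigV4(cred aws.Credentials, in *http.Request, signedHeaders []string,
	payloadHash, service, region, clientSig string, ts time.Time) (bool, error) {

	verifyReq, err := buildVerifyRequest(in, signedHeaders)
	if err != nil {
		return false, err
	}
	if err := v4.NewSigner().SignHTTP(context.TODO(), cred, verifyReq, payloadHash, service, region, ts); err != nil {
		return false, err
	}
	// The signature is the trailing "Signature=" component of the
	// Authorization header produced by the signer.
	auth := verifyReq.Header.Get("Authorization")
	const marker = "Signature="
	i := strings.LastIndex(auth, marker)
	if i < 0 {
		return false, nil
	}
	return strings.EqualFold(auth[i+len(marker):], clientSig), nil
}

The related logging change in the patch follows the same logic: on mismatch it now prints the full Authorization header of the re-signed request rather than only the extracted signature, which makes it easier to spot which signed header diverged.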