Browse Source

修复一些调试问题

gitlink
Sydonian 7 months ago
parent
commit
92f5c18e9d
15 changed files with 205 additions and 194 deletions
  1. +44
    -0
      client/internal/db/json_serializer.go
  2. +22
    -9
      client/internal/http/aws_auth.go
  3. +0
    -0
      client/internal/http/user_space.go
  4. +6
    -7
      client/internal/uploader/create_load.go
  5. +6
    -7
      client/internal/uploader/update.go
  6. +20
    -12
      client/internal/uploader/uploader.go
  7. +6
    -6
      client/types/redundancy.go
  8. +1
    -1
      client/types/types.go
  9. +0
    -0
      common/assets/confs/hub.config.json
  10. +44
    -0
      coordinator/internal/db/json_serializer.go
  11. +0
    -50
      coordinator/types/public_storage.go
  12. +0
    -51
      coordinator/types/shard_storage.go
  13. +50
    -50
      coordinator/types/storage_credential.go
  14. +2
    -0
      hub/internal/cmd/serve.go
  15. +4
    -1
      hub/main.go

+ 44
- 0
client/internal/db/json_serializer.go View File

@@ -0,0 +1,44 @@
package db

import (
"context"
"fmt"
"reflect"

"gitlink.org.cn/cloudream/common/utils/serder"
"gorm.io/gorm/schema"
)

// JSONSerializer is a GORM field serializer that stores struct fields as
// JSON using the project's serder codec. It is registered under the
// "json" serializer name in this package's init.
type JSONSerializer struct {
}

// Scan implements GORM's serializer interface for reading. It decodes the
// raw database value (either []byte or string) into a freshly allocated
// value of the field's type via serder.JSONToObject, then stores it into
// the destination struct field.
//
// A nil dbValue — and, as a fix, an empty one — leaves the field at its
// zero value instead of failing the scan.
func (JSONSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
	fieldValue := reflect.New(field.FieldType)
	if dbValue != nil {
		var data []byte
		switch v := dbValue.(type) {
		case []byte:
			data = v
		case string:
			data = []byte(v)
		default:
			return fmt.Errorf("failed to unmarshal JSONB value: %#v", dbValue)
		}

		// Fix: tolerate an empty column value ("" or a zero-length blob) by
		// treating it like NULL; previously it was passed to the JSON decoder,
		// which rejects empty input and failed the whole row scan.
		if len(data) > 0 {
			if err := serder.JSONToObject(data, fieldValue.Interface()); err != nil {
				return err
			}
		}
	}

	field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
	return nil
}

// Value implements GORM's serializer interface for writing. It encodes the
// field's current value to JSON via the project's serder codec and returns
// the bytes to be stored in the column.
// NOTE(review): a nil fieldValue is serialized as the JSON literal rather
// than SQL NULL — confirm that is the intended representation for
// nullable fields such as pointer-typed columns.
func (JSONSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
	return serder.ObjectToJSON(fieldValue)
}

// init registers JSONSerializer with GORM under the "json" tag so model
// fields can opt in with `serializer:json` (e.g. UserSpace.ShardStore in
// client/types/types.go).
func init() {
	schema.RegisterSerializer("json", JSONSerializer{})
}

+ 22
- 9
client/internal/http/aws_auth.go View File

@@ -87,10 +87,14 @@ func (a *AWSAuth) Auth(c *gin.Context) {
return return
} }
for _, h := range headers { for _, h := range headers {
verifyReq.Header.Add(h, c.Request.Header.Get(h))
if strings.EqualFold(h, "content-length") {
verifyReq.ContentLength = c.Request.ContentLength
} else if strings.EqualFold(h, "host") {
verifyReq.Host = c.Request.Host
} else {
verifyReq.Header.Add(h, c.Request.Header.Get(h))
}
} }
verifyReq.Host = c.Request.Host
verifyReq.ContentLength = c.Request.ContentLength


signer := v4.NewSigner() signer := v4.NewSigner()
err = signer.SignHTTP(context.TODO(), a.cred, verifyReq, hexPayloadHash, AuthService, AuthRegion, timestamp) err = signer.SignHTTP(context.TODO(), a.cred, verifyReq, hexPayloadHash, AuthService, AuthRegion, timestamp)
@@ -102,7 +106,7 @@ func (a *AWSAuth) Auth(c *gin.Context) {


verifySig := getSignatureFromAWSHeader(verifyReq) verifySig := getSignatureFromAWSHeader(verifyReq)
if !strings.EqualFold(verifySig, reqSig) { if !strings.EqualFold(verifySig, reqSig) {
logger.Warnf("signature mismatch, input header: %s, verify: %s", authorizationHeader, verifySig)
logger.Warnf("signature mismatch, input header: %s, verify: %s", authorizationHeader, verifyReq.Header.Get(AuthorizationHeader))
c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.Unauthorized, "signature mismatch")) c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.Unauthorized, "signature mismatch"))
return return
} }
@@ -143,10 +147,14 @@ func (a *AWSAuth) AuthWithoutBody(c *gin.Context) {
return return
} }
for _, h := range headers { for _, h := range headers {
verifyReq.Header.Add(h, c.Request.Header.Get(h))
if strings.EqualFold(h, "content-length") {
verifyReq.ContentLength = c.Request.ContentLength
} else if strings.EqualFold(h, "host") {
verifyReq.Host = c.Request.Host
} else {
verifyReq.Header.Add(h, c.Request.Header.Get(h))
}
} }
verifyReq.Host = c.Request.Host
verifyReq.ContentLength = c.Request.ContentLength


err = a.signer.SignHTTP(context.TODO(), a.cred, verifyReq, "", AuthService, AuthRegion, timestamp) err = a.signer.SignHTTP(context.TODO(), a.cred, verifyReq, "", AuthService, AuthRegion, timestamp)


@@ -197,9 +205,14 @@ func (a *AWSAuth) PresignedAuth(c *gin.Context) {
return return
} }
for _, h := range signedHeaders { for _, h := range signedHeaders {
verifyReq.Header.Add(h, c.Request.Header.Get(h))
if strings.EqualFold(h, "content-length") {
verifyReq.ContentLength = c.Request.ContentLength
} else if strings.EqualFold(h, "host") {
verifyReq.Host = c.Request.Host
} else {
verifyReq.Header.Add(h, c.Request.Header.Get(h))
}
} }
verifyReq.Host = c.Request.Host


timestamp, err := time.Parse("20060102T150405Z", date) timestamp, err := time.Parse("20060102T150405Z", date)
if err != nil { if err != nil {


client/internal/http/storage.go → client/internal/http/user_space.go View File


+ 6
- 7
client/internal/uploader/create_load.go View File

@@ -11,7 +11,6 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/storage2/client/internal/db" "gitlink.org.cn/cloudream/storage2/client/internal/db"
"gitlink.org.cn/cloudream/storage2/client/types" "gitlink.org.cn/cloudream/storage2/client/types"
"gitlink.org.cn/cloudream/storage2/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/parser"
@@ -22,10 +21,10 @@ type CreateLoadUploader struct {
targetSpaces []types.UserSpaceDetail targetSpaces []types.UserSpaceDetail
loadRoots []string loadRoots []string
uploader *Uploader uploader *Uploader
distlock *distlock.Mutex
successes []db.AddObjectEntry
lock sync.Mutex
commited bool
// distlock *distlock.Mutex
successes []db.AddObjectEntry
lock sync.Mutex
commited bool
} }


type CreateLoadResult struct { type CreateLoadResult struct {
@@ -85,7 +84,7 @@ func (u *CreateLoadUploader) Commit() (CreateLoadResult, error) {
} }
u.commited = true u.commited = true


defer u.distlock.Unlock()
// defer u.distlock.Unlock()


var addedObjs []types.Object var addedObjs []types.Object
err := u.uploader.db.DoTx(func(tx db.SQLContext) error { err := u.uploader.db.DoTx(func(tx db.SQLContext) error {
@@ -118,7 +117,7 @@ func (u *CreateLoadUploader) Abort() {
} }
u.commited = true u.commited = true


u.distlock.Unlock()
// u.distlock.Unlock()


// TODO 可以考虑删除PackageID // TODO 可以考虑删除PackageID
} }

+ 6
- 7
client/internal/uploader/update.go View File

@@ -12,17 +12,16 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/storage2/client/internal/db" "gitlink.org.cn/cloudream/storage2/client/internal/db"
"gitlink.org.cn/cloudream/storage2/client/types" "gitlink.org.cn/cloudream/storage2/client/types"
"gitlink.org.cn/cloudream/storage2/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/parser"
) )


type UpdateUploader struct { type UpdateUploader struct {
uploader *Uploader
pkgID types.PackageID
targetSpace types.UserSpaceDetail
distMutex *distlock.Mutex
uploader *Uploader
pkgID types.PackageID
targetSpace types.UserSpaceDetail
// distMutex *distlock.Mutex
loadToSpaces []types.UserSpaceDetail loadToSpaces []types.UserSpaceDetail
loadToPath []string loadToPath []string
successes []db.AddObjectEntry successes []db.AddObjectEntry
@@ -115,7 +114,7 @@ func (w *UpdateUploader) Commit() (UpdateResult, error) {
} }
w.commited = true w.commited = true


defer w.distMutex.Unlock()
// defer w.distMutex.Unlock()


var addedObjs []types.Object var addedObjs []types.Object
err := w.uploader.db.DoTx(func(tx db.SQLContext) error { err := w.uploader.db.DoTx(func(tx db.SQLContext) error {
@@ -147,5 +146,5 @@ func (w *UpdateUploader) Abort() {
} }


w.commited = true w.commited = true
w.distMutex.Unlock()
// w.distMutex.Unlock()
} }

+ 20
- 12
client/internal/uploader/uploader.go View File

@@ -14,7 +14,7 @@ import (
"gitlink.org.cn/cloudream/common/utils/sort2" "gitlink.org.cn/cloudream/common/utils/sort2"
"gitlink.org.cn/cloudream/storage2/client/internal/db" "gitlink.org.cn/cloudream/storage2/client/internal/db"
"gitlink.org.cn/cloudream/storage2/client/internal/metacache" "gitlink.org.cn/cloudream/storage2/client/internal/metacache"
"gitlink.org.cn/cloudream/storage2/client/types"
clitypes "gitlink.org.cn/cloudream/storage2/client/types"
stgglb "gitlink.org.cn/cloudream/storage2/common/globals" stgglb "gitlink.org.cn/cloudream/storage2/common/globals"
"gitlink.org.cn/cloudream/storage2/common/pkgs/connectivity" "gitlink.org.cn/cloudream/storage2/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage2/common/pkgs/distlock" "gitlink.org.cn/cloudream/storage2/common/pkgs/distlock"
@@ -38,10 +38,11 @@ func NewUploader(distlock *distlock.Service, connectivity *connectivity.Collecto
connectivity: connectivity, connectivity: connectivity,
stgPool: stgPool, stgPool: stgPool,
spaceMeta: spaceMeta, spaceMeta: spaceMeta,
db: db,
} }
} }


func (u *Uploader) BeginUpdate(pkgID types.PackageID, affinity types.UserSpaceID, loadTo []types.UserSpaceID, loadToPath []string) (*UpdateUploader, error) {
func (u *Uploader) BeginUpdate(pkgID clitypes.PackageID, affinity clitypes.UserSpaceID, loadTo []clitypes.UserSpaceID, loadToPath []string) (*UpdateUploader, error) {
spaceIDs, err := u.db.UserSpace().GetAllIDs(u.db.DefCtx()) spaceIDs, err := u.db.UserSpace().GetAllIDs(u.db.DefCtx())
if err != nil { if err != nil {
return nil, fmt.Errorf("getting user space ids: %w", err) return nil, fmt.Errorf("getting user space ids: %w", err)
@@ -75,9 +76,9 @@ func (u *Uploader) BeginUpdate(pkgID types.PackageID, affinity types.UserSpaceID
return nil, fmt.Errorf("user no available storages") return nil, fmt.Errorf("user no available storages")
} }


loadToSpaces := make([]types.UserSpaceDetail, len(loadTo))
loadToSpaces := make([]clitypes.UserSpaceDetail, len(loadTo))
for i, spaceID := range loadTo { for i, spaceID := range loadTo {
space, ok := lo.Find(spaceDetails, func(space *types.UserSpaceDetail) bool {
space, ok := lo.Find(spaceDetails, func(space *clitypes.UserSpaceDetail) bool {
return space.UserSpace.UserSpaceID == spaceID return space.UserSpace.UserSpaceID == spaceID
}) })
if !ok { if !ok {
@@ -115,7 +116,7 @@ func (u *Uploader) BeginUpdate(pkgID types.PackageID, affinity types.UserSpaceID
// 1. 选择设置了亲和性的节点 // 1. 选择设置了亲和性的节点
// 2. 从与当前客户端相同地域的节点中随机选一个 // 2. 从与当前客户端相同地域的节点中随机选一个
// 3. 没有的话从所有节点选择延迟最低的节点 // 3. 没有的话从所有节点选择延迟最低的节点
func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity types.UserSpaceID) UploadSpaceInfo {
func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity clitypes.UserSpaceID) UploadSpaceInfo {
if spaceAffinity > 0 { if spaceAffinity > 0 {
aff, ok := lo.Find(spaces, func(space UploadSpaceInfo) bool { return space.Space.UserSpace.UserSpaceID == spaceAffinity }) aff, ok := lo.Find(spaces, func(space UploadSpaceInfo) bool { return space.Space.UserSpace.UserSpaceID == spaceAffinity })
if ok { if ok {
@@ -134,10 +135,10 @@ func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity t
return spaces[0] return spaces[0]
} }


func (u *Uploader) BeginCreateLoad(bktID types.BucketID, pkgName string, loadTo []types.UserSpaceID, loadToPath []string) (*CreateLoadUploader, error) {
func (u *Uploader) BeginCreateLoad(bktID clitypes.BucketID, pkgName string, loadTo []clitypes.UserSpaceID, loadToPath []string) (*CreateLoadUploader, error) {
getSpaces := u.spaceMeta.GetMany(loadTo) getSpaces := u.spaceMeta.GetMany(loadTo)


spacesStgs := make([]types.UserSpaceDetail, len(loadTo))
spacesStgs := make([]clitypes.UserSpaceDetail, len(loadTo))
for i, stg := range getSpaces { for i, stg := range getSpaces {
if stg == nil { if stg == nil {
return nil, fmt.Errorf("storage %v not found", loadTo[i]) return nil, fmt.Errorf("storage %v not found", loadTo[i])
@@ -145,7 +146,14 @@ func (u *Uploader) BeginCreateLoad(bktID types.BucketID, pkgName string, loadTo
spacesStgs[i] = *stg spacesStgs[i] = *stg
} }


pkg, err := u.db.Package().Create(u.db.DefCtx(), bktID, pkgName)
pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (clitypes.Package, error) {
_, err := u.db.Bucket().GetByID(tx, bktID)
if err != nil {
return clitypes.Package{}, err
}

return u.db.Package().Create(u.db.DefCtx(), bktID, pkgName)
})
if err != nil { if err != nil {
return nil, fmt.Errorf("create package: %w", err) return nil, fmt.Errorf("create package: %w", err)
} }
@@ -170,19 +178,19 @@ func (u *Uploader) BeginCreateLoad(bktID types.BucketID, pkgName string, loadTo
}, nil }, nil
} }


func (u *Uploader) UploadPart(objID types.ObjectID, index int, stream io.Reader) error {
func (u *Uploader) UploadPart(objID clitypes.ObjectID, index int, stream io.Reader) error {
detail, err := u.db.Object().GetDetail(u.db.DefCtx(), objID) detail, err := u.db.Object().GetDetail(u.db.DefCtx(), objID)
if err != nil { if err != nil {
return fmt.Errorf("getting object detail: %w", err) return fmt.Errorf("getting object detail: %w", err)
} }


objDe := detail objDe := detail
_, ok := objDe.Object.Redundancy.(*types.MultipartUploadRedundancy)
_, ok := objDe.Object.Redundancy.(*clitypes.MultipartUploadRedundancy)
if !ok { if !ok {
return fmt.Errorf("object %v is not a multipart upload", objID) return fmt.Errorf("object %v is not a multipart upload", objID)
} }


var space types.UserSpaceDetail
var space clitypes.UserSpaceDetail
if len(objDe.Blocks) > 0 { if len(objDe.Blocks) > 0 {
cstg := u.spaceMeta.Get(objDe.Blocks[0].UserSpaceID) cstg := u.spaceMeta.Get(objDe.Blocks[0].UserSpaceID)
if cstg == nil { if cstg == nil {
@@ -257,7 +265,7 @@ func (u *Uploader) UploadPart(objID types.ObjectID, index int, stream io.Reader)


shardInfo := ret["shard"].(*ops2.ShardInfoValue) shardInfo := ret["shard"].(*ops2.ShardInfoValue)
err = u.db.DoTx(func(tx db.SQLContext) error { err = u.db.DoTx(func(tx db.SQLContext) error {
return u.db.Object().AppendPart(tx, types.ObjectBlock{
return u.db.Object().AppendPart(tx, clitypes.ObjectBlock{
ObjectID: objID, ObjectID: objID,
Index: index, Index: index,
UserSpaceID: space.UserSpace.UserSpaceID, UserSpaceID: space.UserSpace.UserSpaceID,


+ 6
- 6
client/types/redundancy.go View File

@@ -21,7 +21,7 @@ var RedundancyUnion = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTyp
)), "type") )), "type")


type NoneRedundancy struct { type NoneRedundancy struct {
Redundancy
Redundancy `json:"-"`
serder.Metadata `union:"none"` serder.Metadata `union:"none"`
Type string `json:"type"` Type string `json:"type"`
} }
@@ -35,7 +35,7 @@ func NewNoneRedundancy() *NoneRedundancy {
var DefaultRepRedundancy = *NewRepRedundancy(2) var DefaultRepRedundancy = *NewRepRedundancy(2)


type RepRedundancy struct { type RepRedundancy struct {
Redundancy
Redundancy `json:"-"`
serder.Metadata `union:"rep"` serder.Metadata `union:"rep"`
Type string `json:"type"` Type string `json:"type"`
RepCount int `json:"repCount"` RepCount int `json:"repCount"`
@@ -51,7 +51,7 @@ func NewRepRedundancy(repCount int) *RepRedundancy {
var DefaultECRedundancy = *NewECRedundancy(2, 3, 1024*1024*5) var DefaultECRedundancy = *NewECRedundancy(2, 3, 1024*1024*5)


type ECRedundancy struct { type ECRedundancy struct {
Redundancy
Redundancy `json:"-"`
serder.Metadata `union:"ec"` serder.Metadata `union:"ec"`
Type string `json:"type"` Type string `json:"type"`
K int `json:"k"` K int `json:"k"`
@@ -75,7 +75,7 @@ func (b *ECRedundancy) StripSize() int64 {
var DefaultLRCRedundancy = *NewLRCRedundancy(2, 4, []int{2}, 1024*1024*5) var DefaultLRCRedundancy = *NewLRCRedundancy(2, 4, []int{2}, 1024*1024*5)


type LRCRedundancy struct { type LRCRedundancy struct {
Redundancy
Redundancy `json:"-"`
serder.Metadata `union:"lrc"` serder.Metadata `union:"lrc"`
Type string `json:"type"` Type string `json:"type"`
K int `json:"k"` K int `json:"k"`
@@ -132,7 +132,7 @@ func (b *LRCRedundancy) GetGroupElements(grp int) []int {
} }


type SegmentRedundancy struct { type SegmentRedundancy struct {
Redundancy
Redundancy `json:"-"`
serder.Metadata `union:"segment"` serder.Metadata `union:"segment"`
Type string `json:"type"` Type string `json:"type"`
Segments []int64 `json:"segments"` // 每一段的大小 Segments []int64 `json:"segments"` // 每一段的大小
@@ -201,7 +201,7 @@ func (b *SegmentRedundancy) CalcSegmentRange(start int64, end *int64) (segIdxSta
} }


type MultipartUploadRedundancy struct { type MultipartUploadRedundancy struct {
Redundancy
Redundancy `json:"-"`
serder.Metadata `union:"multipartUpload"` serder.Metadata `union:"multipartUpload"`
Type string `json:"type"` Type string `json:"type"`
} }


+ 1
- 1
client/types/types.go View File

@@ -79,7 +79,7 @@ type UserSpace struct {
// 用户在指定存储节点的凭证信息,比如用户账户,AK/SK等 // 用户在指定存储节点的凭证信息,比如用户账户,AK/SK等
Credential cotypes.StorageCredential `gorm:"column:Credential; type:json; not null; serializer:union" json:"credential"` Credential cotypes.StorageCredential `gorm:"column:Credential; type:json; not null; serializer:union" json:"credential"`
// 用户空间的分片存储配置,如果为空,则表示不使用分片存储 // 用户空间的分片存储配置,如果为空,则表示不使用分片存储
ShardStore *cotypes.ShardStoreUserConfig `gorm:"column:ShardStore; type:json;" json:"shardStore"`
ShardStore *cotypes.ShardStoreUserConfig `gorm:"column:ShardStore; type:json; serializer:json" json:"shardStore"`
// 用户空间信息的版本号,每一次更改都需要更新版本号 // 用户空间信息的版本号,每一次更改都需要更新版本号
Revision int64 `gorm:"column:Revision; type:bigint; not null" json:"revision"` Revision int64 `gorm:"column:Revision; type:bigint; not null" json:"revision"`
} }


common/assets/confs/agent.config.json → common/assets/confs/hub.config.json View File


+ 44
- 0
coordinator/internal/db/json_serializer.go View File

@@ -0,0 +1,44 @@
package db

import (
"context"
"fmt"
"reflect"

"gitlink.org.cn/cloudream/common/utils/serder"
"gorm.io/gorm/schema"
)

// JSONSerializer is a GORM field serializer that stores struct fields as
// JSON using the project's serder codec. It is registered under the
// "json" serializer name in this package's init.
type JSONSerializer struct {
}

// Scan implements GORM's serializer interface for reading. It decodes the
// raw database value (either []byte or string) into a freshly allocated
// value of the field's type via serder.JSONToObject, then stores it into
// the destination struct field.
//
// A nil dbValue — and, as a fix, an empty one — leaves the field at its
// zero value instead of failing the scan.
func (JSONSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
	fieldValue := reflect.New(field.FieldType)
	if dbValue != nil {
		var data []byte
		switch v := dbValue.(type) {
		case []byte:
			data = v
		case string:
			data = []byte(v)
		default:
			return fmt.Errorf("failed to unmarshal JSONB value: %#v", dbValue)
		}

		// Fix: tolerate an empty column value ("" or a zero-length blob) by
		// treating it like NULL; previously it was passed to the JSON decoder,
		// which rejects empty input and failed the whole row scan.
		if len(data) > 0 {
			if err := serder.JSONToObject(data, fieldValue.Interface()); err != nil {
				return err
			}
		}
	}

	field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
	return nil
}

// Value implements GORM's serializer interface for writing. It encodes the
// field's current value to JSON via the project's serder codec and returns
// the bytes to be stored in the column.
// NOTE(review): a nil fieldValue is serialized as the JSON literal rather
// than SQL NULL — confirm that is the intended representation for
// nullable fields such as pointer-typed columns.
func (JSONSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
	return serder.ObjectToJSON(fieldValue)
}

// init registers JSONSerializer with GORM under the "json" tag so model
// fields can opt in with `serializer:json` on their gorm struct tags.
func init() {
	schema.RegisterSerializer("json", JSONSerializer{})
}

+ 0
- 50
coordinator/types/public_storage.go View File

@@ -1,50 +0,0 @@
package types

/*
import (
"fmt"

"gitlink.org.cn/cloudream/common/pkgs/types"
"gitlink.org.cn/cloudream/common/utils/serder"
)

type PublicStoreConfig interface {
GetPublicStoreType() string
// 输出调试用的字符串,不要包含敏感信息
String() string
}

var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[PublicStoreConfig](
(*LocalPublicStorage)(nil),
(*S3PublicStorage)(nil),
)), "type")

type LocalPublicStorage struct {
serder.Metadata `union:"Local"`
Type string `json:"type"`
// 调度Package时的Package的根路径
LoadBase string `json:"loadBase"`
}

func (s *LocalPublicStorage) GetPublicStoreType() string {
return "Local"
}

func (s *LocalPublicStorage) String() string {
return fmt.Sprintf("Local[LoadBase=%v]", s.LoadBase)
}

type S3PublicStorage struct {
serder.Metadata `union:"S3"`
Type string `json:"type"`
LoadBase string `json:"loadBase"`
}

func (s *S3PublicStorage) GetPublicStoreType() string {
return "S3"
}

func (s *S3PublicStorage) String() string {
return fmt.Sprintf("S3[LoadBase=%v]", s.LoadBase)
}
*/

+ 0
- 51
coordinator/types/shard_storage.go View File

@@ -1,51 +0,0 @@
package types

/*
import (
"fmt"

"gitlink.org.cn/cloudream/common/pkgs/types"
"gitlink.org.cn/cloudream/common/utils/serder"
)

// 分片存储服务的配置数据
type ShardStoreConfig interface {
GetShardStoreType() string
// 输出调试用的字符串,不要包含敏感信息
String() string
}

var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[ShardStoreConfig](
(*LocalShardStorage)(nil),
(*S3ShardStorage)(nil),
)), "type")

type LocalShardStorage struct {
serder.Metadata `union:"Local"`
Type string `json:"type"`
Root string `json:"root"`
MaxSize int64 `json:"maxSize"`
}

func (s *LocalShardStorage) GetShardStoreType() string {
return "Local"
}

func (s *LocalShardStorage) String() string {
return fmt.Sprintf("Local[root=%s, maxSize=%d]", s.Root, s.MaxSize)
}

type S3ShardStorage struct {
serder.Metadata `union:"S3"`
Type string `json:"type"`
Root string `json:"root"`
}

func (s *S3ShardStorage) GetShardStoreType() string {
return "S3"
}

func (s *S3ShardStorage) String() string {
return fmt.Sprintf("S3[root=%s]", s.Root)
}
*/

+ 50
- 50
coordinator/types/storage_credential.go View File

@@ -20,73 +20,73 @@ var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[Storage
)), "type") )), "type")


type LocalCred struct { type LocalCred struct {
StorageCredential
serder.Metadata `union:"Local"`
Type string `json:"type"`
StorageCredential `json:"-"`
serder.Metadata `union:"Local"`
Type string `json:"type"`
} }


type MashupCred struct { type MashupCred struct {
StorageCredential
serder.Metadata `union:"Mashup"`
Store StorageCredential `json:"store"`
Feature StorageCredential `json:"feature"`
StorageCredential `json:"-"`
serder.Metadata `union:"Mashup"`
Store StorageCredential `json:"store"`
Feature StorageCredential `json:"feature"`
} }


type OSSCred struct { type OSSCred struct {
StorageCredential
serder.Metadata `union:"OSS"`
Type string `json:"type"`
Region string `json:"region"`
AK string `json:"accessKeyId"`
SK string `json:"secretAccessKey"`
Endpoint string `json:"endpoint"`
Bucket string `json:"bucket"`
StorageCredential `json:"-"`
serder.Metadata `union:"OSS"`
Type string `json:"type"`
Region string `json:"region"`
AK string `json:"accessKeyId"`
SK string `json:"secretAccessKey"`
Endpoint string `json:"endpoint"`
Bucket string `json:"bucket"`
} }


type OBSCred struct { type OBSCred struct {
StorageCredential
serder.Metadata `union:"OBS"`
Type string `json:"type"`
Region string `json:"region"`
AK string `json:"accessKeyId"`
SK string `json:"secretAccessKey"`
Endpoint string `json:"endpoint"`
Bucket string `json:"bucket"`
ProjectID string `json:"projectID"`
StorageCredential `json:"-"`
serder.Metadata `union:"OBS"`
Type string `json:"type"`
Region string `json:"region"`
AK string `json:"accessKeyId"`
SK string `json:"secretAccessKey"`
Endpoint string `json:"endpoint"`
Bucket string `json:"bucket"`
ProjectID string `json:"projectID"`
} }


type COSCred struct { type COSCred struct {
StorageCredential
serder.Metadata `union:"COS"`
Type string `json:"type"`
Region string `json:"region"`
AK string `json:"accessKeyId"`
SK string `json:"secretAccessKey"`
Endpoint string `json:"endpoint"`
Bucket string `json:"bucket"`
StorageCredential `json:"-"`
serder.Metadata `union:"COS"`
Type string `json:"type"`
Region string `json:"region"`
AK string `json:"accessKeyId"`
SK string `json:"secretAccessKey"`
Endpoint string `json:"endpoint"`
Bucket string `json:"bucket"`
} }


type EFileCred struct { type EFileCred struct {
StorageCredential
serder.Metadata `union:"EFile"`
Type string `json:"type"`
TokenURL string `json:"tokenURL"`
APIURL string `json:"apiURL"`
TokenExpire int `json:"tokenExpire"` // 单位秒
User string `json:"user"`
Password string `json:"password"`
OrgID string `json:"orgID"`
ClusterID string `json:"clusterID"`
StorageCredential `json:"-"`
serder.Metadata `union:"EFile"`
Type string `json:"type"`
TokenURL string `json:"tokenURL"`
APIURL string `json:"apiURL"`
TokenExpire int `json:"tokenExpire"` // 单位秒
User string `json:"user"`
Password string `json:"password"`
OrgID string `json:"orgID"`
ClusterID string `json:"clusterID"`
} }


// 通用的S3协议的存储服务 // 通用的S3协议的存储服务
type S3Cred struct { type S3Cred struct {
StorageCredential
serder.Metadata `union:"S3"`
Type string `json:"type"`
Region string `json:"region"`
AK string `json:"accessKeyId"`
SK string `json:"secretAccessKey"`
Endpoint string `json:"endpoint"`
Bucket string `json:"bucket"`
StorageCredential `json:"-"`
serder.Metadata `union:"S3"`
Type string `json:"type"`
Region string `json:"region"`
AK string `json:"accessKeyId"`
SK string `json:"secretAccessKey"`
Endpoint string `json:"endpoint"`
Bucket string `json:"bucket"`
} }

+ 2
- 0
hub/internal/cmd/serve.go View File

@@ -36,6 +36,8 @@ func init() {
var httpAddr string var httpAddr string


cmd := &cobra.Command{ cmd := &cobra.Command{
Use: "serve",
Short: "start storage2 hub service",
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
serve(configPath, httpAddr) serve(configPath, httpAddr)
}, },


+ 4
- 1
hub/main.go View File

@@ -1,6 +1,9 @@
package main package main


import "gitlink.org.cn/cloudream/storage2/hub/internal/cmd"
import (
_ "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage2/hub/internal/cmd"
)


func main() { func main() {
cmd.RootCmd.Execute() cmd.RootCmd.Execute()


Loading…
Cancel
Save