
Merge branch 'master' into gitlink

Branch: gitlink
Sydonian committed 6 months ago
Commit: 9b5c236360
100 changed files with 1874 additions and 4017 deletions
  1. +0 -2  client/internal/cmdline/migrate.go
  2. +7 -28  client/internal/cmdline/serve.go
  3. +150 -257  client/internal/cmdline/test.go
  4. +6 -12  client/internal/cmdline/vfstest.go
  5. +6 -6  client/internal/config/config.go
  6. +6 -0  client/internal/db/db.go
  7. +3 -3  client/internal/db/package.go
  8. +1 -1  client/internal/downloader/iterator.go
  9. +1 -6  client/internal/http/server.go
  10. +24 -3  client/internal/http/user_space.go
  11. +8 -10  client/internal/metacache/connectivity.go
  12. +7 -10  client/internal/metacache/hubmeta.go
  13. +7 -10  client/internal/metacache/storagemeta.go
  14. +1 -4  client/internal/mount/vfs/fuse_bucket.go
  15. +0 -1  client/internal/mount/vfs/fuse_root.go
  16. +1 -1  client/internal/repl/sysevent.go
  17. +0 -3  client/internal/services/object.go
  18. +3 -3  client/internal/services/package.go
  19. +4 -4  client/internal/services/service.go
  20. +0 -156  client/internal/services/storage.go
  21. +269 -0  client/internal/services/user_space.go
  22. +27 -23  client/internal/ticktock/change_redundancy.go
  23. +18 -11  client/internal/ticktock/check_shardstore.go
  24. +13 -13  client/internal/ticktock/redundancy_shrink.go
  25. +27 -24  client/internal/ticktock/shardstore_gc.go
  26. +4 -1  client/internal/ticktock/ticktock.go
  27. +2 -4  client/internal/ticktock/update_package_access_stat_amount.go
  28. +10 -8  client/internal/uploader/create_load.go
  29. +8 -7  client/internal/uploader/update.go
  30. +27 -32  client/internal/uploader/uploader.go
  31. +181 -0  client/internal/uploader/user_space_upload.go
  32. +25 -0  client/sdk/api/userspace.go
  33. +5 -0  client/types/types.go
  34. +0 -1  common/README.md
  35. +7 -15  common/assets/confs/client.config.json
  36. +3 -10  common/assets/confs/coordinator.config.json
  37. +14 -18  common/assets/confs/hub.config.json
  38. +0 -35  common/assets/confs/scanner.config.json
  39. +11 -30  common/globals/pools.go
  40. +1 -1  common/globals/utils.go
  41. +15 -4  common/magefiles/main.go
  42. +11 -20  common/pkgs/connectivity/collector.go
  43. +14 -0  common/pkgs/distlock/lockprovider/empty_target.go
  44. +5 -5  common/pkgs/distlock/lockprovider/lock_compatibility_table.go
  45. +5 -5  common/pkgs/distlock/lockprovider/lock_compatibility_table_test.go
  46. +0 -122  common/pkgs/distlock/lockprovider/metadata_lock.go
  47. +9 -20  common/pkgs/distlock/lockprovider/shard_store.go
  48. +13 -13  common/pkgs/distlock/lockprovider/shard_store_test.go
  49. +0 -140  common/pkgs/distlock/lockprovider/storage_lock.go
  50. +20 -0  common/pkgs/distlock/lockprovider/string_lock_target.go
  51. +15 -0  common/pkgs/distlock/mutex.go
  52. +53 -0  common/pkgs/distlock/reentrant.go
  53. +11 -12  common/pkgs/distlock/reqbuilder/lock_request_builder.go
  54. +0 -17  common/pkgs/distlock/reqbuilder/metadata.go
  55. +0 -24  common/pkgs/distlock/reqbuilder/metadata_object.go
  56. +11 -11  common/pkgs/distlock/reqbuilder/shard_store.go
  57. +0 -39  common/pkgs/distlock/reqbuilder/storage.go
  58. +165 -33  common/pkgs/distlock/service.go
  59. +60 -0  common/pkgs/distlock/types/models.go
  60. +0 -12  common/pkgs/grpc/config.go
  61. +0 -206  common/pkgs/grpc/hub/client.go
  62. +0 -983  common/pkgs/grpc/hub/hub.pb.go
  63. +0 -75  common/pkgs/grpc/hub/hub.proto
  64. +0 -358  common/pkgs/grpc/hub/hub_grpc.pb.go
  65. +0 -60  common/pkgs/grpc/hub/pool.go
  66. +36 -16  common/pkgs/ioswitch2/fromto.go
  67. +32 -19  common/pkgs/ioswitch2/hub_worker.go
  68. +167 -15  common/pkgs/ioswitch2/ops2/bypass.go
  69. +3 -3  common/pkgs/ioswitch2/ops2/ec.go
  70. +2 -2  common/pkgs/ioswitch2/ops2/multipart.go
  71. +101 -15  common/pkgs/ioswitch2/ops2/public_store.go
  72. +8 -4  common/pkgs/ioswitch2/ops2/s2s.go
  73. +21 -3  common/pkgs/ioswitch2/parser/gen/generator.go
  74. +1 -1  common/pkgs/ioswitch2/parser/opt/ec.go
  75. +178 -91  common/pkgs/ioswitch2/parser/opt/s2s.go
  76. +31 -13  common/pkgs/ioswitchlrc/hub_worker.go
  77. +0 -13  common/pkgs/mq/consts.go
  78. +0 -60  common/pkgs/mq/coordinator/client.go
  79. +0 -15  common/pkgs/mq/coordinator/coordinator_test.go
  80. +0 -100  common/pkgs/mq/coordinator/hub.go
  81. +0 -72  common/pkgs/mq/coordinator/server.go
  82. +0 -36  common/pkgs/mq/coordinator/storage.go
  83. +0 -61  common/pkgs/mq/hub/cache.go
  84. +0 -68  common/pkgs/mq/hub/client.go
  85. +0 -29  common/pkgs/mq/hub/hub.go
  86. +0 -75  common/pkgs/mq/hub/server.go
  87. +0 -48  common/pkgs/mq/hub/storage.go
  88. +0 -60  common/pkgs/mq/scanner/client.go
  89. +0 -31  common/pkgs/mq/scanner/event.go
  90. +0 -18  common/pkgs/mq/scanner/event/agent_check_shardstore.go
  91. +0 -18  common/pkgs/mq/scanner/event/agent_check_state.go
  92. +0 -18  common/pkgs/mq/scanner/event/agent_check_storage.go
  93. +0 -18  common/pkgs/mq/scanner/event/agent_shardstore_gc.go
  94. +0 -18  common/pkgs/mq/scanner/event/agent_storage_gc.go
  95. +0 -18  common/pkgs/mq/scanner/event/check_package.go
  96. +0 -18  common/pkgs/mq/scanner/event/check_package_redundancy.go
  97. +0 -18  common/pkgs/mq/scanner/event/clean_pinned.go
  98. +0 -23  common/pkgs/mq/scanner/event/event.go
  99. +0 -18  common/pkgs/mq/scanner/event/update_package_access_stat_amount.go
  100. +0 -70  common/pkgs/mq/scanner/server.go

client/internal/cmdline/migrate.go  +0 -2

@@ -25,8 +25,6 @@ func init() {
 }
 
 func migrate(configPath string) {
-    // TODO: gradually move the contents of create_database.sql here
-
     err := config.Init(configPath)
     if err != nil {
         fmt.Println(err)

client/internal/cmdline/serve.go  +7 -28

@@ -67,8 +67,7 @@ func serveHTTP(configPath string, opts serveHTTPOptions) {
 	}
 
 	stgglb.InitLocal(config.Cfg().Local)
-	stgglb.InitMQPool(config.Cfg().RabbitMQ)
-	stgglb.InitHubRPCPool(&config.Cfg().HubGRPC)
+	stgglb.InitPools(&config.Cfg().HubRPC, &config.Cfg().CoordinatorRPC)
 
 	// database
 	db, err := db.NewDB(&config.Cfg().DB)
@@ -77,7 +76,7 @@ func serveHTTP(configPath string, opts serveHTTPOptions) {
 	}
 
 	// initialize the system event publisher
-	evtPub, err := sysevent.NewPublisher(sysevent.ConfigFromMQConfig(config.Cfg().RabbitMQ), &datamap.SourceClient{
+	evtPub, err := sysevent.NewPublisher(config.Cfg().SysEvent, &datamap.SourceClient{
 		UserID: config.Cfg().Local.UserID,
 	})
 	if err != nil {
@@ -98,13 +97,8 @@ func serveHTTP(configPath string, opts serveHTTPOptions) {
 	hubMeta := metaCacheHost.AddHubMeta()
 	conMeta := metaCacheHost.AddConnectivity()
 
-	// distributed lock
-	distlockSvc, err := distlock.NewService(&config.Cfg().DistLock)
-	if err != nil {
-		logger.Warnf("new distlock service failed, err: %s", err.Error())
-		os.Exit(1)
-	}
-	go serveDistLock(distlockSvc)
+	// public lock
+	publock := distlock.NewService()
 
 	// access statistics
 	acStat := accessstat.NewAccessStat(accessstat.Config{
@@ -124,10 +118,10 @@ func serveHTTP(configPath string, opts serveHTTPOptions) {
 	dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgPool, strgSel, db)
 
 	// uploader
-	uploader := uploader.NewUploader(distlockSvc, &conCol, stgPool, stgMeta, db)
+	uploader := uploader.NewUploader(publock, &conCol, stgPool, stgMeta, db)
 
 	// scheduled tasks
-	tktk := ticktock.New(config.Cfg().TickTock, db, stgMeta, stgPool, evtPub)
+	tktk := ticktock.New(config.Cfg().TickTock, db, stgMeta, stgPool, evtPub, publock)
 	tktk.Start()
 	defer tktk.Stop()
 
@@ -148,7 +142,7 @@ func serveHTTP(configPath string, opts serveHTTPOptions) {
 	mntChan := mnt.Start()
 	defer mnt.Stop()
 
-	svc := services.NewService(distlockSvc, dlder, acStat, uploader, strgSel, stgMeta, db, evtPub, mnt)
+	svc := services.NewService(publock, dlder, acStat, uploader, strgSel, stgMeta, db, evtPub, mnt)
 
 	// HTTP API
 	httpCfg := config.Cfg().HTTP
@@ -250,18 +244,3 @@ loop:
 		}
 	}
 }
-
-func serveDistLock(svc *distlock.Service) {
-	logger.Info("start serving distlock")
-
-	err := svc.Serve()
-
-	if err != nil {
-		logger.Errorf("distlock stopped with error: %s", err.Error())
-	}
-
-	logger.Info("distlock stopped")
-
-	// TODO: for now this just terminates the program
-	os.Exit(1)
-}
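The net effect of this file's changes is easier to see in one place: the configured distlock service and its background serveDistLock goroutine are replaced by a locally constructed lock value that is threaded through the existing constructors. A condensed sketch of the new wiring, taken from the hunks above (other arguments and error handling unchanged; not a standalone program):

// Condensed from serveHTTP above.
publock := distlock.NewService() // no config argument and no Serve() goroutine anymore

uploader := uploader.NewUploader(publock, &conCol, stgPool, stgMeta, db)
tktk := ticktock.New(config.Cfg().TickTock, db, stgMeta, stgPool, evtPub, publock)
svc := services.NewService(publock, dlder, acStat, uploader, strgSel, stgMeta, db, evtPub, mnt)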

client/internal/cmdline/test.go  +150 -257

@@ -1,293 +1,186 @@
package cmdline package cmdline


/*
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"os"
"time"


"github.com/spf13/cobra" "github.com/spf13/cobra"
"gitlink.org.cn/cloudream/common/pkgs/future"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/accessstat"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/config"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader/strategy"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/metacache"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/services"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/uploader"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
"gitlink.org.cn/cloudream/jcs-pub/common/models/datamap"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
coormq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent"
) )


func init() { func init() {
RootCmd.AddCommand(&cobra.Command{
var configPath string
cmd := cobra.Command{
Use: "test", Use: "test",
Short: "test", Short: "test",
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
panic(err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

stgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{1, 2, 3, 4, 5}))
if err != nil {
panic(err)
}

ft := ioswitch2.NewFromTo()
ft.ECParam = cdssdk.NewECRedundancy(3, 6, 1024*1024*5)
// ft.SegmentParam = cdssdk.NewSegmentRedundancy(1024*100*3, 3)
ft.AddFrom(ioswitch2.NewFromShardstore("FullC036CBB7553A909F8B8877D4461924307F27ECB66CFF928EEEAFD569C3887E29", *stgs.Storages[3].MasterHub, *stgs.Storages[3], ioswitch2.ECStream(2)))
ft.AddFrom(ioswitch2.NewFromShardstore("Full543F38D9F524238AC0239263AA0DD4B4328763818EA98A7A5F72E59748FDA27A", *stgs.Storages[3].MasterHub, *stgs.Storages[3], ioswitch2.ECStream(3)))
ft.AddFrom(ioswitch2.NewFromShardstore("Full50B464DB2FDDC29D0380D9FFAB6D944FAF5C7624955D757939280590F01F3ECD", *stgs.Storages[3].MasterHub, *stgs.Storages[3], ioswitch2.ECStream(4)))
// ft.AddFrom(ioswitch2.NewFromShardstore("Full4D142C458F2399175232D5636235B09A84664D60869E925EB20FFBE931045BDD", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2)))
// ft.AddFrom(ioswitch2.NewFromShardstore("Full03B5CF4B57251D7BB4308FE5C81AF5A21E2B28994CC7CB1FB37698DAE271DC22", *stgs.Storages[2].MasterHub, *stgs.Storages[2], ioswitch2.RawStream()))
// ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[3].MasterHub, *stgs.Storages[3], ioswitch2.RawStream(), "0"))
// ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0), "0"))
// ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1", math2.Range{Offset: 1}))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[4].MasterHub, *stgs.Storages[4], ioswitch2.ECStream(0), "0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[4].MasterHub, *stgs.Storages[4], ioswitch2.ECStream(1), "1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[4].MasterHub, *stgs.Storages[4], ioswitch2.ECStream(5), "2"))

err = parser.Parse(ft, plans)
if err != nil {
panic(err)
}

fmt.Printf("plans: %v\n", plans)

exec := plans.Execute(exec.NewExecContext())

fut := future.NewSetVoid()
go func() {
mp, err := exec.Wait(context.Background())
if err != nil {
panic(err)
}

fmt.Printf("0: %v, 1: %v, 2: %v\n", mp["0"], mp["1"], mp["2"])
fut.SetVoid()
}()

fut.Wait(context.TODO())
test(configPath)
}, },
})

RootCmd.AddCommand(&cobra.Command{
Use: "test32",
Short: "test32",
Run: func(cmd *cobra.Command, args []string) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
panic(err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

stgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{1, 2}))
if err != nil {
panic(err)
}

ft := ioswitch2.NewFromTo()
ft.SegmentParam = cdssdk.NewSegmentRedundancy(1293, 3)
ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.RawStream()))
// ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.SegmentStream(0), "0"))
ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1), "1", math2.Range{Offset: 1}))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2), "2"))

plans := exec.NewPlanBuilder()
err = parser.Parse(ft, plans)
if err != nil {
panic(err)
}

fmt.Printf("plans: %v\n", plans)

exec := plans.Execute(exec.NewExecContext())

fut := future.NewSetVoid()
go func() {
mp, err := exec.Wait(context.Background())
if err != nil {
panic(err)
}

fmt.Printf("0: %v, 1: %v, 2: %v\n", mp["0"], mp["1"], mp["2"])
fut.SetVoid()
}()

fut.Wait(context.TODO())
},
})
RootCmd.AddCommand(&cobra.Command{
Use: "test1",
Short: "test1",
Run: func(cmd *cobra.Command, args []string) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
panic(err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

stgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{1, 2}))
if err != nil {
panic(err)
}

ft := ioswitch2.NewFromTo()
ft.SegmentParam = cdssdk.NewSegmentRedundancy(1293, 3)
ft.ECParam = &cdssdk.DefaultECRedundancy
ft.AddFrom(ioswitch2.NewFromShardstore("22CC59CE3297F78F2D20DC1E33181B77F21E6782097C94E1664F99F129834069", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(0)))
ft.AddFrom(ioswitch2.NewFromShardstore("5EAC20EB3EBC7B5FA176C5BD1C01041FB2A6D14C35D6A232CA83D7F1E4B01ADE", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1)))
ft.AddFrom(ioswitch2.NewFromShardstore("A9BC1802F37100C80C72A1D6E8F53C0E0B73F85F99153D8C78FB01CEC9D8D903", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2)))

toDrv, drvStr := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), math2.NewRange(0, 1293))
ft.AddTo(toDrv)
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(0), "EC0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(1), "EC1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(2), "EC2"))

plans := exec.NewPlanBuilder()
err = parser.Parse(ft, plans)
if err != nil {
panic(err)
}
}
cmd.Flags().StringVarP(&configPath, "config", "c", "", "config file path")
RootCmd.AddCommand(&cmd)
}


fmt.Printf("plans: %v\n", plans)
func doTest(svc *services.Service) {
ft := ioswitch2.NewFromTo()


exec := plans.Execute(exec.NewExecContext())
space1 := svc.UserSpaceMeta.Get(3)
space2 := svc.UserSpaceMeta.Get(4)


fut := future.NewSetVoid()
go func() {
mp, err := exec.Wait(context.Background())
if err != nil {
panic(err)
}
// ft.AddFrom(ioswitch2.NewFromPublicStore(*space1.MasterHub, *space1, "space3/blocks/1A/Full1AE5436AF72D8EF93923486E0E167315CEF0C91898064DADFAC22216FFBC5E3D"))
// ft.AddTo(ioswitch2.NewToPublicStore(*space2.MasterHub, *space2, "block"))
// plans := exec.NewPlanBuilder()
// parser.Parse(ft, plans)
// fmt.Println(plans)


for k, v := range mp {
fmt.Printf("%s: %v\n", k, v)
}
// _, err := plans.Execute(exec.NewExecContext()).Wait(context.Background())
// fmt.Println(err)


fut.SetVoid()
}()
go func() {
str, err := exec.BeginRead(drvStr)
if err != nil {
panic(err)
}
ft = ioswitch2.NewFromTo()
ft.AddFrom(ioswitch2.NewFromShardstore("Full1AE5436AF72D8EF93923486E0E167315CEF0C91898064DADFAC22216FFBC5E3D", *space1.MasterHub, *space1, ioswitch2.RawStream()))
ft.AddTo(ioswitch2.NewToPublicStore(*space2.MasterHub, *space2, "test3.txt"))
plans := exec.NewPlanBuilder()
parser.Parse(ft, plans)
fmt.Println(plans)
_, err := plans.Execute(exec.NewExecContext()).Wait(context.Background())
fmt.Println(err)


data, err := io.ReadAll(str)
if err != nil {
panic(err)
}

fmt.Printf("read(%v): %s\n", len(data), string(data))
}()
}


fut.Wait(context.TODO())
},
func test(configPath string) {
err := config.Init(configPath)
if err != nil {
fmt.Printf("init config failed, err: %s", err.Error())
os.Exit(1)
}

err = logger.Init(&config.Cfg().Logger)
if err != nil {
fmt.Printf("init logger failed, err: %s", err.Error())
os.Exit(1)
}

stgglb.InitLocal(config.Cfg().Local)
stgglb.InitPools(&config.Cfg().HubRPC, &config.Cfg().CoordinatorRPC)

// 数据库
db, err := db.NewDB(&config.Cfg().DB)
if err != nil {
logger.Fatalf("new db failed, err: %s", err.Error())
}

// 初始化系统事件发布器
evtPub, err := sysevent.NewPublisher(config.Cfg().SysEvent, &datamap.SourceClient{
UserID: config.Cfg().Local.UserID,
}) })

RootCmd.AddCommand(&cobra.Command{
Use: "test4",
Short: "test4",
Run: func(cmd *cobra.Command, args []string) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
panic(err)
if err != nil {
logger.Errorf("new sysevent publisher: %v", err)
os.Exit(1)
}
evtPubChan := evtPub.Start()
defer evtPub.Stop()

// 连接性信息收集
conCol := connectivity.NewCollector(&config.Cfg().Connectivity, nil)
conCol.CollecNow()

// 元数据缓存
metaCacheHost := metacache.NewHost(db)
go metaCacheHost.Serve()
stgMeta := metaCacheHost.AddStorageMeta()
hubMeta := metaCacheHost.AddHubMeta()
conMeta := metaCacheHost.AddConnectivity()

// 公共锁
publock := distlock.NewService()

// 访问统计
acStat := accessstat.NewAccessStat(accessstat.Config{
// TODO 考虑放到配置里
ReportInterval: time.Second * 10,
}, db)
acStatChan := acStat.Start()
defer acStat.Stop()

// 存储管理器
stgPool := pool.NewPool()

// 下载策略
strgSel := strategy.NewSelector(config.Cfg().DownloadStrategy, stgMeta, hubMeta, conMeta)

// 下载器
dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgPool, strgSel, db)

// 上传器
uploader := uploader.NewUploader(publock, &conCol, stgPool, stgMeta, db)

svc := services.NewService(publock, dlder, acStat, uploader, strgSel, stgMeta, db, evtPub, nil)

go func() {
doTest(svc)
os.Exit(0)
}()
/// 开始监听各个模块的事件

evtPubEvt := evtPubChan.Receive()
acStatEvt := acStatChan.Receive()

loop:
for {
select {
case e := <-evtPubEvt.Chan():
if e.Err != nil {
logger.Errorf("receive publisher event: %v", err)
break loop
} }
defer stgglb.CoordinatorMQPool.Release(coorCli)

stgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{1, 2}))
if err != nil {
panic(err)
}

ft := ioswitch2.NewFromTo()
ft.ECParam = &cdssdk.DefaultECRedundancy
ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.RawStream()))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(0), "EC0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(1), "EC1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(2), "EC2"))

plans := exec.NewPlanBuilder()
err = parser.Parse(ft, plans)
if err != nil {
panic(err)
}

fmt.Printf("plans: %v\n", plans)

exec := plans.Execute(exec.NewExecContext())


fut := future.NewSetVoid()
go func() {
mp, err := exec.Wait(context.Background())
if err != nil {
panic(err)
}
switch val := e.Value.(type) {
case sysevent.PublishError:
logger.Errorf("publishing event: %v", val)


for k, v := range mp {
fmt.Printf("%s: %v\n", k, v)
case sysevent.PublisherExited:
if val.Err != nil {
logger.Errorf("publisher exited with error: %v", val.Err)
} else {
logger.Info("publisher exited")
} }
break loop


fut.SetVoid()
}()

fut.Wait(context.TODO())
},
})

RootCmd.AddCommand(&cobra.Command{
RootCmd.AddCommand(&cobra.Command{
Use: "test11",
Short: "test11",
Run: func(cmd *cobra.Command, args []string) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
panic(err)
case sysevent.OtherError:
logger.Errorf("sysevent: %v", val)
} }
defer stgglb.CoordinatorMQPool.Release(coorCli)
evtPubEvt = evtPubChan.Receive()


stgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{1, 2}))
if err != nil {
panic(err)
case e := <-acStatEvt.Chan():
if e.Err != nil {
logger.Errorf("receive access stat event: %v", err)
break loop
} }

ft := ioswitch2.NewFromTo()
ft.SegmentParam = cdssdk.NewSegmentRedundancy(1293, 3)
ft.ECParam = &cdssdk.DefaultECRedundancy
ft.AddFrom(ioswitch2.NewFromShardstore("22CC59CE3297F78F2D20DC1E33181B77F21E6782097C94E1664F99F129834069", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(0)))
ft.AddFrom(ioswitch2.NewFromShardstore("5EAC20EB3EBC7B5FA176C5BD1C01041FB2A6D14C35D6A232CA83D7F1E4B01ADE", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(1)))
ft.AddFrom(ioswitch2.NewFromShardstore("A9BC1802F37100C80C72A1D6E8F53C0E0B73F85F99153D8C78FB01CEC9D8D903", *stgs.Storages[1].MasterHub, *stgs.Storages[1], ioswitch2.SegmentStream(2)))
ft.AddTo(ioswitch2.NewToShardStoreWithRange(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.RawStream(), "raw", math2.NewRange(10, 645)))

plans := exec.NewPlanBuilder()
err = parser.Parse(ft, plans)
if err != nil {
panic(err)
switch e := e.Value.(type) {
case accessstat.ExitEvent:
logger.Infof("access stat exited, err: %v", e.Err)
break loop
} }

fmt.Printf("plans: %v\n", plans)

exec := plans.Execute(exec.NewExecContext())

fut := future.NewSetVoid()
go func() {
mp, err := exec.Wait(context.Background())
if err != nil {
panic(err)
}

for k, v := range mp {
fmt.Printf("%s: %v\n", k, v)
}

fut.SetVoid()
}()

fut.Wait(context.TODO())
},
})
acStatEvt = acStatChan.Receive()
}
}
} }
*/

client/internal/cmdline/vfstest.go  +6 -12

@@ -60,8 +60,7 @@ func vfsTest(configPath string, opts serveHTTPOptions) {
 	}
 
 	stgglb.InitLocal(config.Cfg().Local)
-	stgglb.InitMQPool(config.Cfg().RabbitMQ)
-	stgglb.InitHubRPCPool(&config.Cfg().HubGRPC)
+	stgglb.InitPools(&config.Cfg().HubRPC, &config.Cfg().CoordinatorRPC)
 
 	// database
 	db, err := db.NewDB(&config.Cfg().DB)
@@ -70,7 +69,7 @@ func vfsTest(configPath string, opts serveHTTPOptions) {
 	}
 
 	// initialize the system event publisher
-	evtPub, err := sysevent.NewPublisher(sysevent.ConfigFromMQConfig(config.Cfg().RabbitMQ), &datamap.SourceClient{
+	evtPub, err := sysevent.NewPublisher(config.Cfg().SysEvent, &datamap.SourceClient{
 		UserID: config.Cfg().Local.UserID,
 	})
 	if err != nil {
@@ -91,13 +90,8 @@ func vfsTest(configPath string, opts serveHTTPOptions) {
 	hubMeta := metaCacheHost.AddHubMeta()
 	conMeta := metaCacheHost.AddConnectivity()
 
-	// distributed lock
-	distlockSvc, err := distlock.NewService(&config.Cfg().DistLock)
-	if err != nil {
-		logger.Warnf("new distlock service failed, err: %s", err.Error())
-		os.Exit(1)
-	}
-	go serveDistLock(distlockSvc)
+	// public lock
+	publock := distlock.NewService()
 
 	// access statistics
 	acStat := accessstat.NewAccessStat(accessstat.Config{
@@ -117,7 +111,7 @@ func vfsTest(configPath string, opts serveHTTPOptions) {
 	dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgPool, strgSel, db)
 
 	// uploader
-	uploader := uploader.NewUploader(distlockSvc, &conCol, stgPool, stgMeta, db)
+	uploader := uploader.NewUploader(publock, &conCol, stgPool, stgMeta, db)
 
 	// mount
 	mntCfg := config.Cfg().Mount
@@ -132,7 +126,7 @@ func vfsTest(configPath string, opts serveHTTPOptions) {
 	mntChan := mnt.Start()
 	defer mnt.Stop()
 
-	svc := services.NewService(distlockSvc, dlder, acStat, uploader, strgSel, stgMeta, db, evtPub, mnt)
+	svc := services.NewService(publock, dlder, acStat, uploader, strgSel, stgMeta, db, evtPub, mnt)
 
 	// HTTP API
 	httpCfg := config.Cfg().HTTP


client/internal/config/config.go  +6 -6

@@ -1,9 +1,7 @@
 package config
 
 import (
-	"gitlink.org.cn/cloudream/common/pkgs/distlock"
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
-	"gitlink.org.cn/cloudream/common/pkgs/mq"
 	"gitlink.org.cn/cloudream/common/utils/config"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader"
@@ -13,16 +11,18 @@ import (
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/ticktock"
 	stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
 	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/connectivity"
-	hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/grpc/hub"
+	corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
+	hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
+	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent"
 )
 
 type Config struct {
 	Local            stgglb.LocalMachineInfo `json:"local"`
-	HubGRPC          hubrpc.PoolConfig       `json:"hubGRPC"`
+	HubRPC           hubrpc.PoolConfig       `json:"hubRPC"`
+	CoordinatorRPC   corrpc.PoolConfig       `json:"coordinatorRPC"`
 	Logger           logger.Config           `json:"logger"`
 	DB               db.Config               `json:"db"`
-	RabbitMQ         mq.Config               `json:"rabbitMQ"`
-	DistLock         distlock.Config         `json:"distlock"`
+	SysEvent         sysevent.Config         `json:"sysEvent"`
 	Connectivity     connectivity.Config     `json:"connectivity"`
 	Downloader       downloader.Config       `json:"downloader"`
 	DownloadStrategy strategy.Config         `json:"downloadStrategy"`


client/internal/db/db.go  +6 -0

@@ -54,6 +54,12 @@ func DoTx11[T any, R any](db *DB, do func(tx SQLContext, t T) (R, error), t T) (
 	return ret, err
 }
 
+func DoTx20[T1 any, T2 any](db *DB, do func(tx SQLContext, t1 T1, t2 T2) error, t1 T1, t2 T2) error {
+	return db.db.Transaction(func(tx *gorm.DB) error {
+		return do(SQLContext{tx}, t1, t2)
+	})
+}
+
 func DoTx21[T1 any, T2 any, R any](db *DB, do func(tx SQLContext, t1 T1, t2 T2) (R, error), t1 T1, t2 T2) (R, error) {
 	var ret R
 	err := db.db.Transaction(func(tx *gorm.DB) error {
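DoTx20 completes the DoTxNM helper family for callbacks that take two extra arguments and return only an error. A hedged usage sketch — moveObject, its arguments, and the wrapper name are illustrative, not part of this commit:

// Hypothetical callback with the shape DoTx20 expects:
// func(tx SQLContext, t1 T1, t2 T2) error.
func moveObject(tx db.SQLContext, objID clitypes.ObjectID, newPath string) error {
	// ... update rows using tx ...
	return nil
}

func moveOneObject(d *db.DB) error {
	// The callback runs inside a single transaction: committed when it
	// returns nil, rolled back when it returns an error.
	return db.DoTx20(d, moveObject, clitypes.ObjectID(1), "new/path")
}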


client/internal/db/package.go  +3 -3

@@ -162,7 +162,7 @@ func (*PackageDB) GetByFullName(ctx SQLContext, bucketName string, packageName s
 	return ret, err
 }
 
-func (db *PackageDB) Create(ctx SQLContext, bucketID types.BucketID, name string) (types.Package, error) {
+func (db *PackageDB) Create(ctx SQLContext, bucketID types.BucketID, name string, createTime time.Time) (types.Package, error) {
 	var packageID int64
 	err := ctx.Table("Package").
 		Select("PackageID").
@@ -176,7 +176,7 @@ func (db *PackageDB) Create(ctx SQLContext, bucketID types.BucketID, name string
 		return types.Package{}, gorm.ErrDuplicatedKey
 	}
 
-	newPackage := types.Package{Name: name, BucketID: bucketID, CreateTime: time.Now()}
+	newPackage := types.Package{Name: name, BucketID: bucketID, CreateTime: createTime}
 	if err := ctx.Create(&newPackage).Error; err != nil {
 		return types.Package{}, fmt.Errorf("insert package failed, err: %w", err)
 	}
@@ -301,7 +301,7 @@ func (db *PackageDB) TryCreateAll(ctx SQLContext, bktName string, pkgName string
 		return types.Package{}, fmt.Errorf("get package by name: %w", err)
 	}
 
-	pkg, err = db.Create(ctx, bkt.BucketID, pkgName)
+	pkg, err = db.Create(ctx, bkt.BucketID, pkgName, time.Now())
 	if err != nil {
 		return types.Package{}, fmt.Errorf("create package: %w", err)
 	}
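With createTime lifted into the signature, the caller decides which timestamp is recorded; the call sites touched in this commit simply pass time.Now(). A minimal sketch of a caller that supplies its own timestamp (the wrapper name below is illustrative, not from this commit):

// Illustrative wrapper: record a caller-chosen CreateTime instead of
// letting the DB layer default to time.Now().
func createPackageAt(tx db.SQLContext, pdb *db.PackageDB, bktID types.BucketID, name string, at time.Time) (types.Package, error) {
	return pdb.Create(tx, bktID, name, at)
}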


client/internal/downloader/iterator.go  +1 -1

@@ -29,7 +29,7 @@ type downloadSpaceInfo struct {
 }
 
 type DownloadContext struct {
-	Distlock *distlock.Service
+	PubLock *distlock.Service
 }
 type DownloadObjectIterator struct {
 	OnClosing func()


client/internal/http/server.go  +1 -6

@@ -54,12 +54,6 @@ func (s *Server) Start() *ServerEventChan {
 		logger.Infof("start serving http at: %s", s.cfg.Listen)
 
 		err := s.httpSrv.ListenAndServe()
-		if err != nil {
-			logger.Infof("http stopped with error: %s", err.Error())
-		} else {
-			logger.Infof("http stopped")
-		}
-
 		s.eventChan.Send(ExitEvent{Err: err})
 	}()
 	return s.eventChan
@@ -145,6 +139,7 @@ func (s *Server) routeV1(eg *gin.Engine, rt gin.IRoutes) {
 	v1.POST(cliapi.UserSpaceLoadPackagePath, awsAuth.Auth, s.UserSpace().LoadPackage)
 	v1.POST(cliapi.UserSpaceCreatePackagePath, awsAuth.Auth, s.UserSpace().CreatePackage)
 	v1.GET(cliapi.UserSpaceGetPath, awsAuth.Auth, s.UserSpace().Get)
+	rt.POST(cliapi.UserSpaceSpaceToSpacePath, s.UserSpace().SpaceToSpace)
 
 	// v1.POST(cdsapi.CacheMovePackagePath, awsAuth.Auth, s.Cache().MovePackage)




client/internal/http/user_space.go  +24 -3

@@ -50,8 +50,7 @@ func (s *UserSpaceService) CreatePackage(ctx *gin.Context) {
 		return
 	}
 
-	pkg, err := s.svc.UserSpaceSvc().UserSpaceCreatePackage(
-		req.BucketID, req.Name, req.UserSpaceID, req.Path, req.SpaceAffinity)
+	pkg, err := s.svc.Uploader.UserSpaceUpload(req.UserSpaceID, req.Path, req.BucketID, req.Name, req.SpaceAffinity)
 	if err != nil {
 		log.Warnf("userspace create package: %s", err.Error())
 		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("userspace create package: %v", err)))
@@ -59,7 +58,7 @@ func (s *UserSpaceService) CreatePackage(ctx *gin.Context) {
 	}
 
 	ctx.JSON(http.StatusOK, OK(cliapi.UserSpaceCreatePackageResp{
-		Package: pkg,
+		Package: *pkg,
 	}))
 }
 
@@ -84,3 +83,25 @@
 		UserSpace: info,
 	}))
 }
+
+func (s *UserSpaceService) SpaceToSpace(ctx *gin.Context) {
+	log := logger.WithField("HTTP", "UserSpace.SpaceToSpace")
+
+	var req cliapi.UserSpaceSpaceToSpace
+	if err := ctx.ShouldBindJSON(&req); err != nil {
+		log.Warnf("binding body: %s", err.Error())
+		ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
+		return
+	}
+
+	ret, err := s.svc.UserSpaceSvc().SpaceToSpace(req.SrcUserSpaceID, req.SrcPath, req.DstUserSpaceID, req.DstPath)
+	if err != nil {
+		log.Warnf("space2space: %s", err.Error())
+		ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "space2space failed"))
+		return
+	}
+
+	ctx.JSON(http.StatusOK, OK(cliapi.UserSpaceSpaceToSpaceResp{
+		SpaceToSpaceResult: ret,
+	}))
+}
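The new handler binds a JSON body with the four fields shown above and returns the SpaceToSpaceResult produced by the service layer. A hypothetical caller-side sketch (the server URL, route value, and IDs are placeholders; any SDK helper methods are outside this diff):

func callSpaceToSpace(serverURL, spaceToSpacePath string) error {
	// spaceToSpacePath stands in for the value of cliapi.UserSpaceSpaceToSpacePath.
	body, err := json.Marshal(cliapi.UserSpaceSpaceToSpace{
		SrcUserSpaceID: 3,
		SrcPath:        "data/input",
		DstUserSpaceID: 4,
		DstPath:        "backup/input",
	})
	if err != nil {
		return err
	}
	resp, err := http.Post(serverURL+spaceToSpacePath, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// On success the payload wraps a SpaceToSpaceResult with Success/Failed path lists.
	return nil
}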

client/internal/metacache/connectivity.go  +8 -10

@@ -1,12 +1,13 @@
 package metacache
 
 import (
+	"context"
 	"sync"
 	"time"
 
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
 	stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
-	coormq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/coordinator"
+	corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
 	cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
 )
 
@@ -60,16 +61,13 @@ func (c *Connectivity) ClearOutdated() {
 }
 
 func (c *Connectivity) load(hubID cortypes.HubID) {
-	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
-	if err != nil {
-		logger.Warnf("new coordinator client: %v", err)
-		return
-	}
-	defer stgglb.CoordinatorMQPool.Release(coorCli)
+	coorCli := stgglb.CoordinatorRPCPool.Get()
+	defer coorCli.Release()
 
-	get, err := coorCli.GetHubConnectivities(coormq.ReqGetHubConnectivities([]cortypes.HubID{hubID}))
-	if err != nil {
-		logger.Warnf("get hub connectivities: %v", err)
+	get, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]cortypes.HubID{hubID}))
+	if cerr != nil {
+		logger.Warnf("get hub connectivities: %v", cerr)
 		return
 	}
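The same substitution repeats in hubmeta.go and storagemeta.go below: the fallible MQ-pool Acquire/Release pair becomes an infallible RPCPool.Get plus a deferred Release, and each call now takes an explicit context and returns a dedicated RPC error (named cerr in these hunks, converted with ToError() elsewhere in this commit). The shared pattern, condensed from these hunks (hubIDs stands in for the per-loader arguments):

// Condensed pattern used by the metacache loaders after this change.
coorCli := stgglb.CoordinatorRPCPool.Get()
defer coorCli.Release()

get, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities(hubIDs))
if cerr != nil {
	logger.Warnf("get hub connectivities: %v", cerr)
	return
}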




client/internal/metacache/hubmeta.go  +7 -10

@@ -1,11 +1,12 @@
 package metacache
 
 import (
+	"context"
 	"time"
 
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
 	stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
-	coormq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/coordinator"
+	corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
 	cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
 )
 
@@ -51,16 +52,12 @@ func (h *HubMeta) load(keys []cortypes.HubID) ([]cortypes.Hub, []bool) {
 	vs := make([]cortypes.Hub, len(keys))
 	oks := make([]bool, len(keys))
 
-	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
-	if err != nil {
-		logger.Warnf("new coordinator client: %v", err)
-		return vs, oks
-	}
-	defer stgglb.CoordinatorMQPool.Release(coorCli)
+	coorCli := stgglb.CoordinatorRPCPool.Get()
+	defer coorCli.Release()
 
-	get, err := coorCli.GetHubs(coormq.NewGetHubs(keys))
-	if err != nil {
-		logger.Warnf("get hubs: %v", err)
+	get, cerr := coorCli.GetHubs(context.Background(), corrpc.NewGetHubs(keys))
+	if cerr != nil {
+		logger.Warnf("get hubs: %v", cerr)
 		return vs, oks
 	}




client/internal/metacache/storagemeta.go  +7 -10

@@ -1,12 +1,13 @@
 package metacache
 
 import (
+	"context"
 	"time"
 
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
 	"gitlink.org.cn/cloudream/jcs-pub/client/types"
 	stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
-	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/coordinator"
+	corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
 	cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
 )
 
@@ -61,21 +62,17 @@ func (s *UserSpaceMeta) load(keys []types.UserSpaceID) ([]types.UserSpaceDetail,
 		return vs, oks
 	}
 
-	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
-	if err != nil {
-		logger.Warnf("new coordinator client: %v", err)
-		return vs, oks
-	}
-	defer stgglb.CoordinatorMQPool.Release(coorCli)
+	coorCli := stgglb.CoordinatorRPCPool.Get()
+	defer coorCli.Release()
 
 	stgIDs := make([]cortypes.StorageID, len(spaces))
 	for i := range spaces {
 		stgIDs[i] = spaces[i].StorageID
 	}
 
-	getStgs, err := coorCli.GetStorageDetails(coordinator.ReqGetStorageDetails(stgIDs))
-	if err != nil {
-		logger.Warnf("get storage details: %v", err)
+	getStgs, cerr := coorCli.GetStorageDetails(context.Background(), corrpc.ReqGetStorageDetails(stgIDs))
+	if cerr != nil {
+		logger.Warnf("get storage details: %v", cerr)
 		return vs, oks
 	}




client/internal/mount/vfs/fuse_bucket.go  +1 -4

@@ -68,8 +68,6 @@ func (r *FuseBucket) Child(ctx context.Context, name string) (fuse.FsEntry, erro
 	ca := r.vfs.cache.Stat(childPathComps)
 
 	if ca == nil {
-		// TODO UserID
-
 		pkg, err := r.vfs.db.Package().GetByFullName(r.vfs.db.DefCtx(), r.bktName, name)
 		if err == nil {
 			dir := r.vfs.cache.LoadDir(childPathComps, &cache.CreateDirOption{
@@ -156,7 +154,6 @@ func (r *FuseBucket) NewDir(ctx context.Context, name string) (fuse.FsDir, error
 		return nil, fuse.ErrPermission
 	}
 
-	// TODO: user ID; a failure here could be logged
 	// TODO: emit a system event
 	// best-effort: we do not care whether the creation succeeds
 	r.vfs.db.DoTx(func(tx db.SQLContext) error {
@@ -166,7 +163,7 @@ func (r *FuseBucket) NewDir(ctx context.Context, name string) (fuse.FsDir, error
 			return fmt.Errorf("get bucket: %v", err)
 		}
 
-		_, err = db.Package().Create(tx, bkt.BucketID, name)
+		_, err = db.Package().Create(tx, bkt.BucketID, name, time.Now())
 		if err != nil {
 			return fmt.Errorf("create package: %v", err)
 		}


client/internal/mount/vfs/fuse_root.go  +0 -1

@@ -142,7 +142,6 @@ func (r *FuseRoot) NewDir(ctx context.Context, name string) (fuse.FsDir, error)
 		return nil, fuse.ErrPermission
 	}
 
-	// TODO: user ID; a failure here could be logged
 	// TODO: emit a system event
 	// best-effort: we do not care whether the creation succeeds
 	r.vfs.db.Bucket().Create(r.vfs.db.DefCtx(), name, cache.ModTime())


client/internal/repl/sysevent.go  +1 -1

@@ -31,7 +31,7 @@ func init() {
 }
 
 func watchSysEvent(outputJSON bool) {
-	host, err := sysevent.NewWatcherHost(sysevent.ConfigFromMQConfig(config.Cfg().RabbitMQ))
+	host, err := sysevent.NewWatcherHost(config.Cfg().SysEvent)
 	if err != nil {
 		fmt.Println(err)
 		return


client/internal/services/object.go  +0 -3

@@ -78,7 +78,6 @@ func (svc *ObjectService) GetByPath(req api.ObjectListByPath) (api.ObjectListByP
 func (svc *ObjectService) GetByIDs(objectIDs []types.ObjectID) ([]*types.Object, error) {
 	var ret []*types.Object
 	err := svc.DB.DoTx(func(tx db.SQLContext) error {
-		// TODO: should verify the user has permission on the Package of every Object
 		objs, err := svc.DB.Object().BatchGet(tx, objectIDs)
 		if err != nil {
 			return err
@@ -252,7 +251,6 @@ func (svc *ObjectService) Move(movings []api.MovingObject) ([]types.ObjectID, er
 }
 
 func (svc *ObjectService) Download(req downloader.DownloadReqeust) (*downloader.Downloading, error) {
-	// TODO: check the user ID
 	iter := svc.Downloader.DownloadObjects([]downloader.DownloadReqeust{req})
 
 	// initialize the download process
@@ -408,7 +406,6 @@ func (svc *ObjectService) Clone(clonings []api.CloningObject) ([]*types.Object,
 
 	var evt []*datamap.BodyNewOrUpdateObject
 
-	// TODO: should verify the user has permission on the Objects and Packages
 	cloningMap := make(map[types.PackageID]*PackageClonings)
 	for i, cloning := range clonings {
 		pkg, ok := cloningMap[cloning.NewPackageID]


client/internal/services/package.go  +3 -3

@@ -2,6 +2,7 @@ package services
 
 import (
 	"fmt"
+	"time"
 
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
 
@@ -34,7 +35,7 @@ func (svc *PackageService) GetBucketPackages(bucketID types.BucketID) ([]types.P
 }
 
 func (svc *PackageService) Create(bucketID types.BucketID, name string) (types.Package, error) {
-	pkg, err := svc.DB.Package().Create(svc.DB.DefCtx(), bucketID, name)
+	pkg, err := svc.DB.Package().Create(svc.DB.DefCtx(), bucketID, name, time.Now())
	if err != nil {
 		return types.Package{}, err
 	}
@@ -47,7 +48,6 @@ func (svc *PackageService) Create(bucketID types.BucketID, name string) (types.P
 }
 
 func (svc *PackageService) DownloadPackage(packageID types.PackageID) (downloader.DownloadIterator, error) {
-	// TODO: check the user ID
 	return svc.Downloader.DownloadPackage(packageID), nil
 }
 
@@ -72,7 +72,7 @@ func (svc *PackageService) Clone(packageID types.PackageID, bucketID types.Bucke
 	err := svc.DB.DoTx(func(tx db.SQLContext) error {
 		var err error
 
-		pkg, err = svc.DB.Package().Create(tx, bucketID, name)
+		pkg, err = svc.DB.Package().Create(tx, bucketID, name, time.Now())
 		if err != nil {
 			return fmt.Errorf("creating package: %w", err)
 		}


client/internal/services/service.go  +4 -4

@@ -1,7 +1,6 @@
 package services
 
 import (
-	"gitlink.org.cn/cloudream/common/pkgs/distlock"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/accessstat"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader"
@@ -9,12 +8,13 @@ import (
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/metacache"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/uploader"
+	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
 	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent"
 )
 
 // Service wraps the distributed lock service and the task management services.
 type Service struct {
-	DistLock   *distlock.Service
+	PubLock    *distlock.Service
 	Downloader *downloader.Downloader
 	AccessStat *accessstat.AccessStat
 	Uploader   *uploader.Uploader
@@ -26,7 +26,7 @@ type Service struct {
 }
 
 func NewService(
-	distlock *distlock.Service,
+	publock *distlock.Service,
 	downloader *downloader.Downloader,
 	accStat *accessstat.AccessStat,
 	uploder *uploader.Uploader,
@@ -37,7 +37,7 @@ func NewService(
 	mount *mount.Mount,
 ) *Service {
 	return &Service{
-		DistLock:   distlock,
+		PubLock:    publock,
 		Downloader: downloader,
 		AccessStat: accStat,
 		Uploader:   uploder,


client/internal/services/storage.go  +0 -156

@@ -1,156 +0,0 @@
package services

import (
"context"
"fmt"
"path"

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"

"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader/strategy"
"gitlink.org.cn/cloudream/jcs-pub/client/types"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
)

type UserSpaceService struct {
*Service
}

func (svc *Service) UserSpaceSvc() *UserSpaceService {
return &UserSpaceService{Service: svc}
}

func (svc *UserSpaceService) Get(userspaceID clitypes.UserSpaceID) (types.UserSpace, error) {
return svc.DB.UserSpace().GetByID(svc.DB.DefCtx(), userspaceID)
}

func (svc *UserSpaceService) GetByName(name string) (types.UserSpace, error) {
return svc.DB.UserSpace().GetByName(svc.DB.DefCtx(), name)
}

func (svc *UserSpaceService) LoadPackage(packageID clitypes.PackageID, userspaceID clitypes.UserSpaceID, rootPath string) error {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return fmt.Errorf("new coordinator client: %w", err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

destStg := svc.UserSpaceMeta.Get(userspaceID)
if destStg == nil {
return fmt.Errorf("userspace not found: %d", userspaceID)
}
if destStg.MasterHub == nil {
return fmt.Errorf("userspace %v has no master hub", userspaceID)
}

details, err := db.DoTx11(svc.DB, svc.DB.Object().GetPackageObjectDetails, packageID)
if err != nil {
return err
}

var pinned []clitypes.ObjectID
plans := exec.NewPlanBuilder()
for _, obj := range details {
strg, err := svc.StrategySelector.Select(strategy.Request{
Detail: obj,
DestHub: destStg.MasterHub.HubID,
})
if err != nil {
return fmt.Errorf("select download strategy: %w", err)
}

ft := ioswitch2.NewFromTo()
switch strg := strg.(type) {
case *strategy.DirectStrategy:
ft.AddFrom(ioswitch2.NewFromShardstore(strg.Detail.Object.FileHash, *strg.UserSpace.MasterHub, strg.UserSpace, ioswitch2.RawStream()))

case *strategy.ECReconstructStrategy:
for i, b := range strg.Blocks {
ft.AddFrom(ioswitch2.NewFromShardstore(b.FileHash, *strg.UserSpaces[i].MasterHub, strg.UserSpaces[i], ioswitch2.ECStream(b.Index)))
ft.ECParam = &strg.Redundancy
}
default:
return fmt.Errorf("unsupported download strategy: %T", strg)
}

ft.AddTo(ioswitch2.NewLoadToPublic(*destStg.MasterHub, *destStg, path.Join(rootPath, obj.Object.Path)))
// 顺便保存到同存储服务的分片存储中
if destStg.UserSpace.ShardStore != nil {
ft.AddTo(ioswitch2.NewToShardStore(*destStg.MasterHub, *destStg, ioswitch2.RawStream(), ""))
pinned = append(pinned, obj.Object.ObjectID)
}

err = parser.Parse(ft, plans)
if err != nil {
return fmt.Errorf("parse plan: %w", err)
}
}

// TODO2 加锁
// mutex, err := reqbuilder.NewBuilder().
// // 保护在userspace目录中下载的文件
// UserSpace().Buzy(userspaceID).
// // 保护下载文件时同时保存到IPFS的文件
// Shard().Buzy(userspaceID).
// MutexLock(svc.DistLock)
// if err != nil {
// return fmt.Errorf("acquire locks failed, err: %w", err)
// }
// defer mutex.Unlock()

// 记录访问统计
for _, obj := range details {
svc.AccessStat.AddAccessCounter(obj.Object.ObjectID, packageID, userspaceID, 1)
}

drv := plans.Execute(exec.NewExecContext())
_, err = drv.Wait(context.Background())
if err != nil {
return err
}

return nil
}

// 请求节点启动从UserSpace中上传文件的任务。会返回节点ID和任务ID
func (svc *UserSpaceService) UserSpaceCreatePackage(bucketID clitypes.BucketID, name string, userspaceID clitypes.UserSpaceID, path string, userspaceAffinity clitypes.UserSpaceID) (clitypes.Package, error) {
// coorCli, err := stgglb.CoordinatorMQPool.Acquire()
// if err != nil {
// return cdssdk.Package{}, fmt.Errorf("new coordinator client: %w", err)
// }
// defer stgglb.CoordinatorMQPool.Release(coorCli)

// stgResp, err := coorCli.GetUserSpaceDetails(coormq.ReqGetUserSpaceDetails([]cdssdk.UserSpaceID{userspaceID}))
// if err != nil {
// return cdssdk.Package{}, fmt.Errorf("getting userspace info: %w", err)
// }

// spaceDetail := svc.UserSpaceMeta.Get(userspaceID)
// if spaceDetail == nil {
// return cdssdk.Package{}, fmt.Errorf("userspace not found: %d", userspaceID)
// }

// if spaceDetail.UserSpace.ShardStore == nil {
// return cdssdk.Package{}, fmt.Errorf("shard userspace is not enabled")
// }

// hubCli, err := stgglb.HubMQPool.Acquire(spaceDetail.MasterHub.HubID)
// if err != nil {
// return cdssdk.Package{}, fmt.Errorf("new hub client: %w", err)
// }
// defer stgglb.HubMQPool.Release(hubCli)

// createResp, err := hubCli.UserSpaceCreatePackage(hubmq.ReqUserSpaceCreatePackage(bucketID, name, userspaceID, path, userspaceAffinity))
// if err != nil {
// return cdssdk.Package{}, err
// }

// return createResp.Package, nil

// TODO 待实现
return clitypes.Package{}, fmt.Errorf("not implemented")
}

client/internal/services/user_space.go  +269 -0

@@ -0,0 +1,269 @@
package services

import (
"context"
"fmt"
"path"
"strings"

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/trie"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"

"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader/strategy"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
)

type UserSpaceService struct {
*Service
}

func (svc *Service) UserSpaceSvc() *UserSpaceService {
return &UserSpaceService{Service: svc}
}

func (svc *UserSpaceService) Get(userspaceID clitypes.UserSpaceID) (clitypes.UserSpace, error) {
return svc.DB.UserSpace().GetByID(svc.DB.DefCtx(), userspaceID)
}

func (svc *UserSpaceService) GetByName(name string) (clitypes.UserSpace, error) {
return svc.DB.UserSpace().GetByName(svc.DB.DefCtx(), name)
}

func (svc *UserSpaceService) LoadPackage(packageID clitypes.PackageID, userspaceID clitypes.UserSpaceID, rootPath string) error {
coorCli := stgglb.CoordinatorRPCPool.Get()
defer coorCli.Release()

destStg := svc.UserSpaceMeta.Get(userspaceID)
if destStg == nil {
return fmt.Errorf("userspace not found: %d", userspaceID)
}
if destStg.MasterHub == nil {
return fmt.Errorf("userspace %v has no master hub", userspaceID)
}

details, err := db.DoTx11(svc.DB, svc.DB.Object().GetPackageObjectDetails, packageID)
if err != nil {
return err
}

var pinned []clitypes.ObjectID
plans := exec.NewPlanBuilder()
for _, obj := range details {
strg, err := svc.StrategySelector.Select(strategy.Request{
Detail: obj,
DestHub: destStg.MasterHub.HubID,
})
if err != nil {
return fmt.Errorf("select download strategy: %w", err)
}

ft := ioswitch2.NewFromTo()
switch strg := strg.(type) {
case *strategy.DirectStrategy:
ft.AddFrom(ioswitch2.NewFromShardstore(strg.Detail.Object.FileHash, *strg.UserSpace.MasterHub, strg.UserSpace, ioswitch2.RawStream()))

case *strategy.ECReconstructStrategy:
for i, b := range strg.Blocks {
ft.AddFrom(ioswitch2.NewFromShardstore(b.FileHash, *strg.UserSpaces[i].MasterHub, strg.UserSpaces[i], ioswitch2.ECStream(b.Index)))
ft.ECParam = &strg.Redundancy
}
default:
return fmt.Errorf("unsupported download strategy: %T", strg)
}

ft.AddTo(ioswitch2.NewToPublicStore(*destStg.MasterHub, *destStg, path.Join(rootPath, obj.Object.Path)))
// also save a copy into the shard store of the same storage service
if destStg.UserSpace.ShardStore != nil {
ft.AddTo(ioswitch2.NewToShardStore(*destStg.MasterHub, *destStg, ioswitch2.RawStream(), ""))
pinned = append(pinned, obj.Object.ObjectID)
}

err = parser.Parse(ft, plans)
if err != nil {
return fmt.Errorf("parse plan: %w", err)
}
}

mutex, err := reqbuilder.NewBuilder().
Shard().Buzy(userspaceID).
MutexLock(svc.PubLock)
if err != nil {
return fmt.Errorf("acquire locks failed, err: %w", err)
}
defer mutex.Unlock()

// record access statistics
for _, obj := range details {
svc.AccessStat.AddAccessCounter(obj.Object.ObjectID, packageID, userspaceID, 1)
}

drv := plans.Execute(exec.NewExecContext())
_, err = drv.Wait(context.Background())
if err != nil {
return err
}

return nil
}

func (svc *UserSpaceService) SpaceToSpace(srcSpaceID clitypes.UserSpaceID, srcPath string, dstSpaceID clitypes.UserSpaceID, dstPath string) (clitypes.SpaceToSpaceResult, error) {
srcSpace := svc.UserSpaceMeta.Get(srcSpaceID)
if srcSpace == nil {
return clitypes.SpaceToSpaceResult{}, fmt.Errorf("source userspace not found: %d", srcSpaceID)
}
if srcSpace.MasterHub == nil {
return clitypes.SpaceToSpaceResult{}, fmt.Errorf("source userspace %v has no master hub", srcSpaceID)
}

srcAddr, ok := srcSpace.MasterHub.Address.(*cortypes.GRPCAddressInfo)
if !ok {
return clitypes.SpaceToSpaceResult{}, fmt.Errorf("source userspace %v has no grpc address", srcSpaceID)
}
srcSpaceCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(srcSpace.MasterHub, srcAddr))
defer srcSpaceCli.Release()

dstSpace := svc.UserSpaceMeta.Get(dstSpaceID)
if dstSpace == nil {
return clitypes.SpaceToSpaceResult{}, fmt.Errorf("destination userspace not found: %d", dstSpaceID)
}
if dstSpace.MasterHub == nil {
return clitypes.SpaceToSpaceResult{}, fmt.Errorf("destination userspace %v has no master hub", dstSpaceID)
}
dstAddr, ok := dstSpace.MasterHub.Address.(*cortypes.GRPCAddressInfo)
if !ok {
return clitypes.SpaceToSpaceResult{}, fmt.Errorf("destination userspace %v has no grpc address", srcSpaceID)
}
dstSpaceCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(dstSpace.MasterHub, dstAddr))
defer dstSpaceCli.Release()

srcPath = strings.Trim(srcPath, cdssdk.ObjectPathSeparator)
dstPath = strings.Trim(dstPath, cdssdk.ObjectPathSeparator)

if srcPath == "" {
return clitypes.SpaceToSpaceResult{}, fmt.Errorf("source path is empty")
}

if dstPath == "" {
return clitypes.SpaceToSpaceResult{}, fmt.Errorf("destination path is empty")
}

listAllResp, cerr := srcSpaceCli.PublicStoreListAll(context.Background(), &hubrpc.PublicStoreListAll{
UserSpace: *srcSpace,
Path: srcPath,
})
if cerr != nil {
return clitypes.SpaceToSpaceResult{}, fmt.Errorf("list all from source userspace: %w", cerr.ToError())
}

srcPathComps := clitypes.SplitObjectPath(srcPath)
srcDirCompLen := len(srcPathComps) - 1

entryTree := trie.NewTrie[*types.PublicStoreEntry]()
for _, e := range listAllResp.Entries {
pa, ok := strings.CutSuffix(e.Path, clitypes.ObjectPathSeparator)
comps := clitypes.SplitObjectPath(pa)
e.Path = pa

e2 := e
// A trailing separator (cut above) also marks the entry as a directory
e2.IsDir = e2.IsDir || ok
entryTree.CreateWords(comps[srcDirCompLen:]).Value = &e2
}

entryTree.Iterate(func(path []string, node *trie.Node[*types.PublicStoreEntry], isWordNode bool) trie.VisitCtrl {
if node.Value == nil {
return trie.VisitContinue
}

if node.Value.IsDir && len(node.WordNexts) > 0 {
node.Value = nil
return trie.VisitContinue
}

if !node.Value.IsDir && len(node.WordNexts) == 0 {
node.WordNexts = nil
}

return trie.VisitContinue
})

var filePathes []string
var dirPathes []string
entryTree.Iterate(func(path []string, node *trie.Node[*types.PublicStoreEntry], isWordNode bool) trie.VisitCtrl {
if node.Value == nil {
return trie.VisitContinue
}

if node.Value.IsDir {
dirPathes = append(dirPathes, node.Value.Path)
} else {
filePathes = append(filePathes, node.Value.Path)
}

return trie.VisitContinue
})

var success []string
var failed []string

for _, f := range filePathes {
newPath := strings.Replace(f, srcPath, dstPath, 1)

ft := ioswitch2.NewFromTo()
ft.AddFrom(ioswitch2.NewFromPublicStore(*srcSpace.MasterHub, *srcSpace, f))
ft.AddTo(ioswitch2.NewToPublicStore(*dstSpace.MasterHub, *dstSpace, newPath))

plans := exec.NewPlanBuilder()
err := parser.Parse(ft, plans)
if err != nil {
failed = append(failed, f)
logger.Warnf("s2s: parse plan of file %v: %v", f, err)
continue
}

_, cerr := plans.Execute(exec.NewExecContext()).Wait(context.Background())
if cerr != nil {
failed = append(failed, f)
logger.Warnf("s2s: execute plan of file %v: %v", f, cerr)
continue
}

success = append(success, f)
}

newDirPathes := make([]string, 0, len(dirPathes))
for i := range dirPathes {
newDirPathes = append(newDirPathes, strings.Replace(dirPathes[i], srcPath, dstPath, 1))
}

mkdirResp, err := dstSpaceCli.PublicStoreMkdirs(context.Background(), &hubrpc.PublicStoreMkdirs{
UserSpace: *dstSpace,
Pathes: newDirPathes,
})
if err != nil {
failed = append(failed, dirPathes...)
logger.Warnf("s2s: mkdirs to destination userspace: %v", err)
} else {
for i := range dirPathes {
if mkdirResp.Successes[i] {
success = append(success, dirPathes[i])
} else {
failed = append(failed, dirPathes[i])
}
}
}

return clitypes.SpaceToSpaceResult{
Success: success,
Failed: failed,
}, nil
}
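For orientation, a minimal sketch of how a caller might drive the copy above; the service value, IDs, and paths are placeholders and the helper name is hypothetical.

// copyExample is a hypothetical caller of SpaceToSpace; the IDs and paths are illustrative only.
func copyExample(svc *UserSpaceService) error {
	res, err := svc.SpaceToSpace(clitypes.UserSpaceID(1), "data/2024", clitypes.UserSpaceID(2), "backup/2024")
	if err != nil {
		// A hard error means the copy could not start at all (space missing, no master hub, empty paths).
		return fmt.Errorf("space to space: %w", err)
	}
	// Individual entries can still fail; they are reported per path.
	for _, p := range res.Failed {
		logger.Warnf("copy failed for %v", p)
	}
	return nil
}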

+ 27
- 23
client/internal/ticktock/change_redundancy.go

@@ -9,6 +9,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
"gitlink.org.cn/cloudream/jcs-pub/common/models/datamap" "gitlink.org.cn/cloudream/jcs-pub/common/models/datamap"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/reqbuilder"
) )


const ( const (
@@ -26,9 +27,9 @@ func (j *ChangeRedundancy) Name() string {
func (j *ChangeRedundancy) Execute(t *TickTock) { func (j *ChangeRedundancy) Execute(t *TickTock) {
log := logger.WithType[ChangeRedundancy]("TickTock") log := logger.WithType[ChangeRedundancy]("TickTock")
startTime := time.Now() startTime := time.Now()
log.Debugf("job start")
log.Infof("job start")
defer func() { defer func() {
log.Debugf("job end, time: %v", time.Since(startTime))
log.Infof("job end, time: %v", time.Since(startTime))
}() }()


ctx := &changeRedundancyContext{ ctx := &changeRedundancyContext{
@@ -47,6 +48,9 @@ func (j *ChangeRedundancy) Execute(t *TickTock) {
if space == nil { if space == nil {
continue continue
} }
if space.MasterHub == nil {
continue
}


ctx.allUserSpaces[space.UserSpace.UserSpaceID] = &userSpaceLoadInfo{ ctx.allUserSpaces[space.UserSpace.UserSpaceID] = &userSpaceLoadInfo{
UserSpace: space, UserSpace: space,
@@ -119,37 +123,34 @@ func (j *ChangeRedundancy) changeOne(ctx *changeRedundancyContext, pkg clitypes.
} }
lastObjID = objs[len(objs)-1].Object.ObjectID lastObjID = objs[len(objs)-1].Object.ObjectID


reen := ctx.ticktock.pubLock.BeginReentrant()

var allUpdatings []db.UpdatingObjectRedundancy var allUpdatings []db.UpdatingObjectRedundancy
var allSysEvts []datamap.SysEventBody var allSysEvts []datamap.SysEventBody


ctx.mostBlockStgIDs = j.summaryRepObjectBlockUserSpaces(ctx, objs, 2) ctx.mostBlockStgIDs = j.summaryRepObjectBlockUserSpaces(ctx, objs, 2)


// // TODO 加锁
// builder := reqbuilder.NewBuilder()
// for _, storage := range newRepStgs {
// builder.Shard().Buzy(storage.Storage.Storage.StorageID)
// }
// for _, storage := range newECStgs {
// builder.Shard().Buzy(storage.Storage.Storage.StorageID)
// }
// mutex, err := builder.MutexLock(execCtx.Args.DistLock)
// if err != nil {
// log.Warnf("acquiring dist lock: %s", err.Error())
// return
// }
// defer mutex.Unlock()

var willShrinks []clitypes.ObjectDetail var willShrinks []clitypes.ObjectDetail


for _, obj := range objs { for _, obj := range objs {
newRed, selectedStorages := j.chooseRedundancy(ctx, obj)
newRed, selectedSpaces := j.chooseRedundancy(ctx, obj)
// 冗余策略不需要调整,就检查是否需要收缩 // 冗余策略不需要调整,就检查是否需要收缩
if newRed == nil { if newRed == nil {
willShrinks = append(willShrinks, obj) willShrinks = append(willShrinks, obj)
continue continue
} }


updating, evt, err := j.doChangeRedundancy(ctx, obj, newRed, selectedStorages)
reqBlder := reqbuilder.NewBuilder()
for _, space := range selectedSpaces {
reqBlder.Shard().Buzy(space.UserSpace.UserSpace.UserSpaceID)
}
err := reen.Lock(reqBlder.Build())
if err != nil {
log.WithField("ObjectID", obj.Object.ObjectID).Warnf("acquire lock: %s", err.Error())
continue
}

updating, evt, err := j.doChangeRedundancy(ctx, obj, newRed, selectedSpaces)
if updating != nil { if updating != nil {
allUpdatings = append(allUpdatings, *updating) allUpdatings = append(allUpdatings, *updating)
} }
@@ -158,24 +159,27 @@ func (j *ChangeRedundancy) changeOne(ctx *changeRedundancyContext, pkg clitypes.
} }
if err != nil { if err != nil {
log.WithField("ObjectID", obj.Object.ObjectID).Warnf("%s, its redundancy wont be changed", err.Error()) log.WithField("ObjectID", obj.Object.ObjectID).Warnf("%s, its redundancy wont be changed", err.Error())
continue
} }
} }


udpatings, sysEvts, err := j.doRedundancyShrink(ctx, pkg, willShrinks)
udpatings, sysEvts, err := j.doRedundancyShrink(ctx, pkg, willShrinks, reen)
if err != nil { if err != nil {
log.Warnf("redundancy shrink: %s", err.Error()) log.Warnf("redundancy shrink: %s", err.Error())
return err
} else {
allUpdatings = append(allUpdatings, udpatings...)
allSysEvts = append(allSysEvts, sysEvts...)
} }
allUpdatings = append(allUpdatings, udpatings...)
allSysEvts = append(allSysEvts, sysEvts...)


if len(allUpdatings) > 0 { if len(allUpdatings) > 0 {
err := db.DoTx10(db2, db2.Object().BatchUpdateRedundancy, allUpdatings) err := db.DoTx10(db2, db2.Object().BatchUpdateRedundancy, allUpdatings)
if err != nil { if err != nil {
reen.Unlock()
log.Warnf("update object redundancy: %s", err.Error()) log.Warnf("update object redundancy: %s", err.Error())
return err return err
} }
} }
reen.Unlock()


for _, e := range allSysEvts { for _, e := range allSysEvts {
ctx.ticktock.evtPub.Publish(e) ctx.ticktock.evtPub.Publish(e)


+ 18
- 11
client/internal/ticktock/check_shardstore.go

@@ -1,18 +1,19 @@
package ticktock package ticktock


import ( import (
"context"
"fmt" "fmt"
"time" "time"


"github.com/samber/lo" "github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/common/utils/reflect2" "gitlink.org.cn/cloudream/common/utils/reflect2"


"gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
hubmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/hub"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
) )


// CheckShardStore 代表一个用于处理代理缓存检查事件的结构体 // CheckShardStore 代表一个用于处理代理缓存检查事件的结构体
@@ -27,9 +28,9 @@ func (j *CheckShardStore) Name() string {
func (j *CheckShardStore) Execute(t *TickTock) { func (j *CheckShardStore) Execute(t *TickTock) {
log := logger.WithType[CheckShardStore]("TickTock") log := logger.WithType[CheckShardStore]("TickTock")
startTime := time.Now() startTime := time.Now()
log.Debugf("job start")
log.Infof("job start")
defer func() { defer func() {
log.Debugf("job end, time: %v", time.Since(startTime))
log.Infof("job end, time: %v", time.Since(startTime))
}() }()


db2 := t.db db2 := t.db
@@ -62,15 +63,21 @@ func (j *CheckShardStore) checkOne(t *TickTock, space *clitypes.UserSpaceDetail)
return nil return nil
} }


agtCli, err := stgglb.HubMQPool.Acquire(space.MasterHub.HubID)
if err != nil {
return fmt.Errorf("new hub mq client: %w", err)
addr, ok := space.MasterHub.Address.(*cortypes.GRPCAddressInfo)
if !ok {
return fmt.Errorf("master of user space %v has no grpc address", space.UserSpace)
} }
defer stgglb.HubMQPool.Release(agtCli)
agtCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(space.MasterHub, addr))
defer agtCli.Release()


checkResp, err := agtCli.CheckCache(hubmq.NewCheckCache(*space), mq.RequestOption{Timeout: time.Minute})
if err != nil {
return fmt.Errorf("request to check cache: %w", err)
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
defer cancel()

checkResp, cerr := agtCli.CheckCache(ctx, &hubrpc.CheckCache{
UserSpace: *space,
})
if cerr != nil {
return fmt.Errorf("request to check cache: %w", cerr.ToError())
} }


realFileHashes := lo.SliceToMap(checkResp.FileHashes, func(hash clitypes.FileHash) (clitypes.FileHash, bool) { return hash, true }) realFileHashes := lo.SliceToMap(checkResp.FileHashes, func(hash clitypes.FileHash) (clitypes.FileHash, bool) { return hash, true })


+ 13
- 13
client/internal/ticktock/redundancy_shrink.go

@@ -18,12 +18,14 @@ import (
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
"gitlink.org.cn/cloudream/jcs-pub/common/consts" "gitlink.org.cn/cloudream/jcs-pub/common/consts"
"gitlink.org.cn/cloudream/jcs-pub/common/models/datamap" "gitlink.org.cn/cloudream/jcs-pub/common/models/datamap"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
) )


func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext, pkg clitypes.PackageDetail, objs []clitypes.ObjectDetail) ([]db.UpdatingObjectRedundancy, []datamap.SysEventBody, error) {
func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext, pkg clitypes.PackageDetail, objs []clitypes.ObjectDetail, reen *distlock.Reentrant) ([]db.UpdatingObjectRedundancy, []datamap.SysEventBody, error) {
log := logger.WithType[ChangeRedundancy]("TickTock") log := logger.WithType[ChangeRedundancy]("TickTock")


var readerStgIDs []clitypes.UserSpaceID var readerStgIDs []clitypes.UserSpaceID
@@ -78,7 +80,7 @@ func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext,
sysEvents = append(sysEvents, t.generateSysEventForECObject(solu, obj)...) sysEvents = append(sysEvents, t.generateSysEventForECObject(solu, obj)...)
} }


ioSwRets, err := t.executePlans(execCtx, planBld, planningStgIDs)
ioSwRets, err := t.executePlans(execCtx, planBld, planningStgIDs, reen)
if err != nil { if err != nil {
log.Warn(err.Error()) log.Warn(err.Error())
return nil, nil, fmt.Errorf("execute plans: %w", err) return nil, nil, fmt.Errorf("execute plans: %w", err)
@@ -904,17 +906,15 @@ func (t *ChangeRedundancy) generateSysEventForECObject(solu annealingSolution, o
return []datamap.SysEventBody{transEvt, distEvt} return []datamap.SysEventBody{transEvt, distEvt}
} }


func (t *ChangeRedundancy) executePlans(ctx *changeRedundancyContext, planBld *exec.PlanBuilder, planningStgIDs map[clitypes.UserSpaceID]bool) (map[string]exec.VarValue, error) {
// TODO 统一加锁,有重复也没关系
// lockBld := reqbuilder.NewBuilder()
// for id := range planningStgIDs {
// lockBld.Shard().Buzy(id)
// }
// lock, err := lockBld.MutexLock(ctx.Args.DistLock)
// if err != nil {
// return nil, fmt.Errorf("acquiring distlock: %w", err)
// }
// defer lock.Unlock()
func (t *ChangeRedundancy) executePlans(ctx *changeRedundancyContext, planBld *exec.PlanBuilder, planningSpaceIDs map[clitypes.UserSpaceID]bool, reen *distlock.Reentrant) (map[string]exec.VarValue, error) {
reqBlder := reqbuilder.NewBuilder()
for id, _ := range planningSpaceIDs {
reqBlder.Shard().Buzy(id)
}
err := reen.Lock(reqBlder.Build())
if err != nil {
return nil, fmt.Errorf("locking shard resources: %w", err)
}


wg := sync.WaitGroup{} wg := sync.WaitGroup{}




+ 27
- 24
client/internal/ticktock/shardstore_gc.go

@@ -1,17 +1,19 @@
package ticktock package ticktock


import ( import (
"context"
"fmt" "fmt"
"time" "time"


"gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/common/utils/reflect2" "gitlink.org.cn/cloudream/common/utils/reflect2"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/types" "gitlink.org.cn/cloudream/jcs-pub/client/types"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"


hubmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/hub"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/reqbuilder"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
) )


type ShardStoreGC struct { type ShardStoreGC struct {
@@ -25,23 +27,11 @@ func (j *ShardStoreGC) Name() string {
func (j *ShardStoreGC) Execute(t *TickTock) { func (j *ShardStoreGC) Execute(t *TickTock) {
log := logger.WithType[ShardStoreGC]("Event") log := logger.WithType[ShardStoreGC]("Event")
startTime := time.Now() startTime := time.Now()
log.Debugf("job start")
log.Infof("job start")
defer func() { defer func() {
log.Debugf("job end, time: %v", time.Since(startTime))
log.Infof("job end, time: %v", time.Since(startTime))
}() }()


// TODO 加锁
// // 使用分布式锁进行资源锁定
// mutex, err := reqbuilder.NewBuilder().
// // 执行IPFS垃圾回收
// Shard().GC(j.StorageID).
// MutexLock(execCtx.Args.DistLock)
// if err != nil {
// log.Warnf("acquire locks failed, err: %s", err.Error())
// return
// }
// defer mutex.Unlock()

spaceIDs, err := t.db.UserSpace().GetAllIDs(t.db.DefCtx()) spaceIDs, err := t.db.UserSpace().GetAllIDs(t.db.DefCtx())
if err != nil { if err != nil {
log.Warnf("getting user space ids: %v", err) log.Warnf("getting user space ids: %v", err)
@@ -63,11 +53,17 @@ func (j *ShardStoreGC) Execute(t *TickTock) {
} }


func (j *ShardStoreGC) gcOne(t *TickTock, space *types.UserSpaceDetail) error { func (j *ShardStoreGC) gcOne(t *TickTock, space *types.UserSpaceDetail) error {
mutex, err := reqbuilder.NewBuilder().Shard().GC(space.UserSpace.UserSpaceID).MutexLock(t.pubLock)
if err != nil {
return fmt.Errorf("acquire lock: %w", err)
}
defer mutex.Unlock()

db2 := t.db db2 := t.db


// 收集需要进行垃圾回收的文件哈希值 // 收集需要进行垃圾回收的文件哈希值
var allFileHashes []types.FileHash var allFileHashes []types.FileHash
err := db2.DoTx(func(tx db.SQLContext) error {
err = db2.DoTx(func(tx db.SQLContext) error {
blocks, err := db2.ObjectBlock().GetByUserSpaceID(tx, space.UserSpace.UserSpaceID) blocks, err := db2.ObjectBlock().GetByUserSpaceID(tx, space.UserSpace.UserSpaceID)
if err != nil { if err != nil {
return fmt.Errorf("getting object blocks by hub id: %w", err) return fmt.Errorf("getting object blocks by hub id: %w", err)
@@ -91,16 +87,23 @@ func (j *ShardStoreGC) gcOne(t *TickTock, space *types.UserSpaceDetail) error {
} }


// 获取与节点通信的代理客户端 // 获取与节点通信的代理客户端
agtCli, err := stgglb.HubMQPool.Acquire(space.MasterHub.HubID)
if err != nil {
return fmt.Errorf("new hub mq client: %w", err)
addr, ok := space.MasterHub.Address.(*cortypes.GRPCAddressInfo)
if !ok {
return fmt.Errorf("master of user space %v has no grpc address", space.UserSpace)
} }
defer stgglb.HubMQPool.Release(agtCli)
agtCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(space.MasterHub, addr))
defer agtCli.Release()


// 向代理发送垃圾回收请求 // 向代理发送垃圾回收请求
_, err = agtCli.CacheGC(hubmq.ReqCacheGC(*space, allFileHashes), mq.RequestOption{Timeout: time.Minute})
if err != nil {
return fmt.Errorf("request to cache gc: %w", err)
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
defer cancel()

_, cerr := agtCli.CacheGC(ctx, &hubrpc.CacheGC{
UserSpace: *space,
Availables: allFileHashes,
})
if cerr != nil {
return fmt.Errorf("request to cache gc: %w", cerr.ToError())
} }
return nil return nil
} }

+ 4
- 1
client/internal/ticktock/ticktock.go

@@ -7,6 +7,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/metacache" "gitlink.org.cn/cloudream/jcs-pub/client/internal/metacache"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent"
) )
@@ -29,9 +30,10 @@ type TickTock struct {
spaceMeta *metacache.UserSpaceMeta spaceMeta *metacache.UserSpaceMeta
stgPool *pool.Pool stgPool *pool.Pool
evtPub *sysevent.Publisher evtPub *sysevent.Publisher
pubLock *distlock.Service
} }


func New(cfg Config, db *db.DB, spaceMeta *metacache.UserSpaceMeta, stgPool *pool.Pool, evtPub *sysevent.Publisher) *TickTock {
func New(cfg Config, db *db.DB, spaceMeta *metacache.UserSpaceMeta, stgPool *pool.Pool, evtPub *sysevent.Publisher, pubLock *distlock.Service) *TickTock {
sch, _ := gocron.NewScheduler() sch, _ := gocron.NewScheduler()
t := &TickTock{ t := &TickTock{
cfg: cfg, cfg: cfg,
@@ -41,6 +43,7 @@ func New(cfg Config, db *db.DB, spaceMeta *metacache.UserSpaceMeta, stgPool *poo
spaceMeta: spaceMeta, spaceMeta: spaceMeta,
stgPool: stgPool, stgPool: stgPool,
evtPub: evtPub, evtPub: evtPub,
pubLock: pubLock,
} }
t.initJobs() t.initJobs()
return t return t
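Since New now takes the distlock service, the construction site has to pass it through. A hypothetical wiring, with every argument assumed to be built elsewhere:

// newTickTockExample is illustrative only; all arguments are placeholders built by the caller.
func newTickTockExample(cfg ticktock.Config, database *db.DB, spaceMeta *metacache.UserSpaceMeta,
	stgPool *pool.Pool, evtPub *sysevent.Publisher, pubLock *distlock.Service) *ticktock.TickTock {
	// pubLock is threaded through so jobs such as ShardStoreGC and ChangeRedundancy can take Shard locks.
	return ticktock.New(cfg, database, spaceMeta, stgPool, evtPub, pubLock)
}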


+ 2
- 4
client/internal/ticktock/update_package_access_stat_amount.go

@@ -5,11 +5,9 @@ import (


"gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/utils/reflect2" "gitlink.org.cn/cloudream/common/utils/reflect2"
scevt "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/scanner/event"
) )


type UpdatePackageAccessStatAmount struct { type UpdatePackageAccessStatAmount struct {
*scevt.UpdatePackageAccessStatAmount
} }


func (j *UpdatePackageAccessStatAmount) Name() string { func (j *UpdatePackageAccessStatAmount) Name() string {
@@ -19,9 +17,9 @@ func (j *UpdatePackageAccessStatAmount) Name() string {
func (j *UpdatePackageAccessStatAmount) Execute(t *TickTock) { func (j *UpdatePackageAccessStatAmount) Execute(t *TickTock) {
log := logger.WithType[UpdatePackageAccessStatAmount]("TickTock") log := logger.WithType[UpdatePackageAccessStatAmount]("TickTock")
startTime := time.Now() startTime := time.Now()
log.Debugf("job start")
log.Infof("job start")
defer func() { defer func() {
log.Debugf("job end, time: %v", time.Since(startTime))
log.Infof("job end, time: %v", time.Since(startTime))
}() }()


err := t.db.PackageAccessStat().UpdateAllAmount(t.db.DefCtx(), t.cfg.AccessStatHistoryWeight) err := t.db.PackageAccessStat().UpdateAllAmount(t.db.DefCtx(), t.cfg.AccessStatHistoryWeight)


+ 10
- 8
client/internal/uploader/create_load.go

@@ -11,6 +11,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/types" "gitlink.org.cn/cloudream/jcs-pub/client/types"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
@@ -21,10 +22,10 @@ type CreateLoadUploader struct {
targetSpaces []types.UserSpaceDetail targetSpaces []types.UserSpaceDetail
loadRoots []string loadRoots []string
uploader *Uploader uploader *Uploader
// distlock *distlock.Mutex
successes []db.AddObjectEntry
lock sync.Mutex
commited bool
pubLock *distlock.Mutex
successes []db.AddObjectEntry
lock sync.Mutex
commited bool
} }


type CreateLoadResult struct { type CreateLoadResult struct {
@@ -49,7 +50,7 @@ func (u *CreateLoadUploader) Upload(pa string, stream io.Reader, opts ...UploadO
ft.AddFrom(fromExec) ft.AddFrom(fromExec)
for i, space := range u.targetSpaces { for i, space := range u.targetSpaces {
ft.AddTo(ioswitch2.NewToShardStore(*space.MasterHub, space, ioswitch2.RawStream(), "shardInfo")) ft.AddTo(ioswitch2.NewToShardStore(*space.MasterHub, space, ioswitch2.RawStream(), "shardInfo"))
ft.AddTo(ioswitch2.NewLoadToPublic(*space.MasterHub, space, path.Join(u.loadRoots[i], pa)))
ft.AddTo(ioswitch2.NewToPublicStore(*space.MasterHub, space, path.Join(u.loadRoots[i], pa)))
spaceIDs = append(spaceIDs, space.UserSpace.UserSpaceID) spaceIDs = append(spaceIDs, space.UserSpace.UserSpaceID)
} }


@@ -92,7 +93,7 @@ func (u *CreateLoadUploader) Commit() (CreateLoadResult, error) {
} }
u.commited = true u.commited = true


// defer u.distlock.Unlock()
defer u.pubLock.Unlock()


var addedObjs []types.Object var addedObjs []types.Object
err := u.uploader.db.DoTx(func(tx db.SQLContext) error { err := u.uploader.db.DoTx(func(tx db.SQLContext) error {
@@ -125,7 +126,8 @@ func (u *CreateLoadUploader) Abort() {
} }
u.commited = true u.commited = true


// u.distlock.Unlock()
u.pubLock.Unlock()


// TODO 可以考虑删除PackageID
db2 := u.uploader.db
db.DoTx10(db2, db2.Package().DeleteComplete, u.pkg.PackageID)
} }

+ 8
- 7
client/internal/uploader/update.go

@@ -12,16 +12,17 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/types" "gitlink.org.cn/cloudream/jcs-pub/client/types"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
) )


type UpdateUploader struct { type UpdateUploader struct {
uploader *Uploader
pkgID types.PackageID
targetSpace types.UserSpaceDetail
// distMutex *distlock.Mutex
uploader *Uploader
pkgID types.PackageID
targetSpace types.UserSpaceDetail
pubLock *distlock.Mutex
loadToSpaces []types.UserSpaceDetail loadToSpaces []types.UserSpaceDetail
loadToPath []string loadToPath []string
successes []db.AddObjectEntry successes []db.AddObjectEntry
@@ -60,7 +61,7 @@ func (w *UpdateUploader) Upload(pat string, stream io.Reader, opts ...UploadOpti
AddTo(ioswitch2.NewToShardStore(*w.targetSpace.MasterHub, w.targetSpace, ioswitch2.RawStream(), "shardInfo")) AddTo(ioswitch2.NewToShardStore(*w.targetSpace.MasterHub, w.targetSpace, ioswitch2.RawStream(), "shardInfo"))


for i, space := range w.loadToSpaces { for i, space := range w.loadToSpaces {
ft.AddTo(ioswitch2.NewLoadToPublic(*space.MasterHub, space, path.Join(w.loadToPath[i], pat)))
ft.AddTo(ioswitch2.NewToPublicStore(*space.MasterHub, space, path.Join(w.loadToPath[i], pat)))
} }


plans := exec.NewPlanBuilder() plans := exec.NewPlanBuilder()
@@ -125,7 +126,7 @@ func (w *UpdateUploader) Commit() (UpdateResult, error) {
} }
w.commited = true w.commited = true


// defer w.distMutex.Unlock()
defer w.pubLock.Unlock()


var addedObjs []types.Object var addedObjs []types.Object
err := w.uploader.db.DoTx(func(tx db.SQLContext) error { err := w.uploader.db.DoTx(func(tx db.SQLContext) error {
@@ -157,5 +158,5 @@ func (w *UpdateUploader) Abort() {
} }


w.commited = true w.commited = true
// w.distMutex.Unlock()
w.pubLock.Unlock()
} }

+ 27
- 32
client/internal/uploader/uploader.go

@@ -18,6 +18,7 @@ import (
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/connectivity" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
@@ -25,16 +26,16 @@ import (
) )


type Uploader struct { type Uploader struct {
distlock *distlock.Service
pubLock *distlock.Service
connectivity *connectivity.Collector connectivity *connectivity.Collector
stgPool *pool.Pool stgPool *pool.Pool
spaceMeta *metacache.UserSpaceMeta spaceMeta *metacache.UserSpaceMeta
db *db.DB db *db.DB
} }


func NewUploader(distlock *distlock.Service, connectivity *connectivity.Collector, stgPool *pool.Pool, spaceMeta *metacache.UserSpaceMeta, db *db.DB) *Uploader {
func NewUploader(pubLock *distlock.Service, connectivity *connectivity.Collector, stgPool *pool.Pool, spaceMeta *metacache.UserSpaceMeta, db *db.DB) *Uploader {
return &Uploader{ return &Uploader{
distlock: distlock,
pubLock: pubLock,
connectivity: connectivity, connectivity: connectivity,
stgPool: stgPool, stgPool: stgPool,
spaceMeta: spaceMeta, spaceMeta: spaceMeta,
@@ -93,20 +94,17 @@ func (u *Uploader) BeginUpdate(pkgID clitypes.PackageID, affinity clitypes.UserS


target := u.chooseUploadStorage(uploadSpaces, affinity) target := u.chooseUploadStorage(uploadSpaces, affinity)


// TODO2 加锁
// 给上传节点的IPFS加锁
// TODO 考虑加Object的Create锁
// 防止上传的副本被清除 // 防止上传的副本被清除
// distMutex, err := reqbuilder.NewBuilder().Shard().Buzy(target.Space.Storage.StorageID).MutexLock(u.distlock)
// if err != nil {
// return nil, fmt.Errorf("acquire distlock: %w", err)
// }
pubLock, err := reqbuilder.NewBuilder().Shard().Buzy(target.Space.UserSpace.UserSpaceID).MutexLock(u.pubLock)
if err != nil {
return nil, fmt.Errorf("acquire lock: %w", err)
}


return &UpdateUploader{ return &UpdateUploader{
uploader: u,
pkgID: pkgID,
targetSpace: target.Space,
// distMutex: distMutex,
uploader: u,
pkgID: pkgID,
targetSpace: target.Space,
pubLock: pubLock,
loadToSpaces: loadToSpaces, loadToSpaces: loadToSpaces,
loadToPath: loadToPath, loadToPath: loadToPath,
}, nil }, nil
@@ -152,29 +150,27 @@ func (u *Uploader) BeginCreateLoad(bktID clitypes.BucketID, pkgName string, load
return clitypes.Package{}, err return clitypes.Package{}, err
} }


return u.db.Package().Create(u.db.DefCtx(), bktID, pkgName)
return u.db.Package().Create(u.db.DefCtx(), bktID, pkgName, time.Now())
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("create package: %w", err) return nil, fmt.Errorf("create package: %w", err)
} }


// TODO2 加锁
// reqBld := reqbuilder.NewBuilder()
// for _, stg := range spacesStgs {
// reqBld.Shard().Buzy(stg.Storage.StorageID)
// reqBld.Storage().Buzy(stg.Storage.StorageID)
// }
// lock, err := reqBld.MutexLock(u.distlock)
// if err != nil {
// return nil, fmt.Errorf("acquire distlock: %w", err)
// }
reqBld := reqbuilder.NewBuilder()
for _, stg := range spacesStgs {
reqBld.Shard().Buzy(stg.UserSpace.UserSpaceID)
}
lock, err := reqBld.MutexLock(u.pubLock)
if err != nil {
return nil, fmt.Errorf("acquire lock: %w", err)
}


return &CreateLoadUploader{ return &CreateLoadUploader{
pkg: pkg, pkg: pkg,
targetSpaces: spacesStgs, targetSpaces: spacesStgs,
loadRoots: loadToPath, loadRoots: loadToPath,
uploader: u, uploader: u,
// distlock: lock,
pubLock: lock,
}, nil }, nil
} }


@@ -236,12 +232,11 @@ func (u *Uploader) UploadPart(objID clitypes.ObjectID, index int, stream io.Read
space = u.chooseUploadStorage(userStgs, 0).Space space = u.chooseUploadStorage(userStgs, 0).Space
} }


// TODO2 加锁
// lock, err := reqbuilder.NewBuilder().Shard().Buzy(space.Storage.StorageID).MutexLock(u.distlock)
// if err != nil {
// return fmt.Errorf("acquire distlock: %w", err)
// }
// defer lock.Unlock()
lock, err := reqbuilder.NewBuilder().Shard().Buzy(space.UserSpace.UserSpaceID).MutexLock(u.pubLock)
if err != nil {
return fmt.Errorf("acquire lock: %w", err)
}
defer lock.Unlock()


ft := ioswitch2.NewFromTo() ft := ioswitch2.NewFromTo()
fromDrv, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream()) fromDrv, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream())


+ 181
- 0
client/internal/uploader/user_space_upload.go

@@ -0,0 +1,181 @@
package uploader

import (
"context"
"fmt"
"math"
"strings"
"time"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
)

func (u *Uploader) UserSpaceUpload(userSpaceID clitypes.UserSpaceID, rootPath string, targetBktID clitypes.BucketID, newPkgName string, uploadAffinity clitypes.UserSpaceID) (*clitypes.Package, error) {
srcSpace := u.spaceMeta.Get(userSpaceID)
if srcSpace == nil {
return nil, fmt.Errorf("user space %d not found", userSpaceID)
}
if srcSpace.MasterHub == nil {
return nil, fmt.Errorf("master hub not found for user space %d", userSpaceID)
}

pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (clitypes.Package, error) {
_, err := u.db.Bucket().GetByID(tx, targetBktID)
if err != nil {
return clitypes.Package{}, err
}

return u.db.Package().Create(tx, targetBktID, newPkgName, time.Now())
})
if err != nil {
return nil, fmt.Errorf("creating package: %w", err)
}
delPkg := func() {
u.db.Package().Delete(u.db.DefCtx(), pkg.PackageID)
}

spaceIDs, err := u.db.UserSpace().GetAllIDs(u.db.DefCtx())
if err != nil {
delPkg()
return nil, fmt.Errorf("getting user space ids: %w", err)
}

spaceDetails := u.spaceMeta.GetMany(spaceIDs)
spaceDetails = lo.Filter(spaceDetails, func(e *clitypes.UserSpaceDetail, i int) bool {
return e != nil && e.MasterHub != nil && e.UserSpace.ShardStore != nil
})

coorCli := stgglb.CoordinatorRPCPool.Get()
defer coorCli.Release()

resp, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]cortypes.HubID{srcSpace.MasterHub.HubID}))
if cerr != nil {
delPkg()
return nil, fmt.Errorf("getting hub connectivities: %w", cerr.ToError())
}

cons := make(map[cortypes.HubID]cortypes.HubConnectivity)
for _, c := range resp.Connectivities {
cons[c.ToHubID] = c
}

var uploadSpaces []UploadSpaceInfo
for _, space := range spaceDetails {
if space.MasterHub == nil {
continue
}

latency := time.Duration(math.MaxInt64)

con, ok := cons[space.MasterHub.HubID]
if ok && con.Latency != nil {
latency = time.Duration(*con.Latency * float32(time.Millisecond))
}

uploadSpaces = append(uploadSpaces, UploadSpaceInfo{
Space: *space,
Delay: latency,
IsSameLocation: space.MasterHub.LocationID == srcSpace.MasterHub.LocationID,
})
}

if len(uploadSpaces) == 0 {
delPkg()
return nil, fmt.Errorf("user no available userspaces")
}

targetSpace := u.chooseUploadStorage(uploadSpaces, uploadAffinity)

addr, ok := srcSpace.MasterHub.Address.(*cortypes.GRPCAddressInfo)
if !ok {
delPkg()
return nil, fmt.Errorf("master of user space %v has no grpc address", srcSpace.UserSpace)
}
srcHubCli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(srcSpace.MasterHub, addr))
defer srcHubCli.Release()

listAllResp, cerr := srcHubCli.PublicStoreListAll(context.Background(), &hubrpc.PublicStoreListAll{
UserSpace: *srcSpace,
Path: rootPath,
})
if cerr != nil {
delPkg()
return nil, fmt.Errorf("listing public store: %w", cerr.ToError())
}

adds, err := u.uploadFromPublicStore(srcSpace, &targetSapce.Space, listAllResp.Entries, rootPath)
if err != nil {
delPkg()
return nil, fmt.Errorf("uploading from public store: %w", err)
}

_, err = db.DoTx21(u.db, u.db.Object().BatchAdd, pkg.PackageID, adds)
if err != nil {
delPkg()
return nil, fmt.Errorf("adding objects: %w", err)
}

return &pkg, nil
}

func (u *Uploader) uploadFromPublicStore(srcSpace *clitypes.UserSpaceDetail, targetSpace *clitypes.UserSpaceDetail, entries []types.PublicStoreEntry, rootPath string) ([]db.AddObjectEntry, error) {
ft := ioswitch2.FromTo{}

for _, e := range entries {
// A config option could be added to control whether empty directories are uploaded
if e.IsDir {
continue
}

ft.AddFrom(ioswitch2.NewFromPublicStore(*srcSpace.MasterHub, *srcSpace, e.Path))
ft.AddTo(ioswitch2.NewToShardStore(*targetSpace.MasterHub, *targetSpace, ioswitch2.RawStream(), e.Path))
}

plans := exec.NewPlanBuilder()
err := parser.Parse(ft, plans)
if err != nil {
return nil, fmt.Errorf("parsing plan: %w", err)
}

exeCtx := exec.NewExecContext()
exec.SetValueByType(exeCtx, u.stgPool)
ret, err := plans.Execute(exeCtx).Wait(context.Background())
if err != nil {
return nil, fmt.Errorf("executing plan: %w", err)
}

cleanRoot := strings.TrimSuffix(rootPath, clitypes.ObjectPathSeparator)

adds := make([]db.AddObjectEntry, 0, len(ret))
for _, e := range entries {
if e.IsDir {
continue
}
pat := strings.TrimPrefix(e.Path, cleanRoot+clitypes.ObjectPathSeparator)
if pat == cleanRoot {
pat = clitypes.BaseName(e.Path)
}

info := ret[e.Path].(*ops2.ShardInfoValue)
adds = append(adds, db.AddObjectEntry{
Path: pat,
Size: info.Size,
FileHash: info.Hash,
CreateTime: time.Now(),
UserSpaceIDs: []clitypes.UserSpaceID{targetSpace.UserSpace.UserSpaceID},
})
}

return adds, nil
}

+ 25
- 0
client/sdk/api/userspace.go

@@ -76,3 +76,28 @@ func (r *UserSpaceGetResp) ParseResponse(resp *http.Response) error {
func (c *Client) UserSpaceGet(req UserSpaceGet) (*UserSpaceGetResp, error) { func (c *Client) UserSpaceGet(req UserSpaceGet) (*UserSpaceGetResp, error) {
return JSONAPI(c.cfg, http.DefaultClient, &req, &UserSpaceGetResp{}) return JSONAPI(c.cfg, http.DefaultClient, &req, &UserSpaceGetResp{})
} }

const UserSpaceSpaceToSpacePath = "/v1/userspace/spaceToSpace"

type UserSpaceSpaceToSpace struct {
SrcUserSpaceID clitypes.UserSpaceID `json:"srcUserSpaceID" binding:"required"`
DstUserSpaceID clitypes.UserSpaceID `json:"dstUserSpaceID" binding:"required"`
SrcPath string `json:"srcPath" binding:"required"`
DstPath string `json:"dstPath" binding:"required"`
}

func (r *UserSpaceSpaceToSpace) MakeParam() *sdks.RequestParam {
return sdks.MakeJSONParam(http.MethodPost, UserSpaceSpaceToSpacePath, r)
}

type UserSpaceSpaceToSpaceResp struct {
clitypes.SpaceToSpaceResult
}

func (r *UserSpaceSpaceToSpaceResp) ParseResponse(resp *http.Response) error {
return sdks.ParseCodeDataJSONResponse(resp, r)
}

func (c *Client) UserSpaceSpaceToSpace(req UserSpaceSpaceToSpace) (*UserSpaceSpaceToSpaceResp, error) {
return JSONAPI(c.cfg, http.DefaultClient, &req, &UserSpaceSpaceToSpaceResp{})
}
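A hedged usage example for the new endpoint, assuming the SDK package is imported as api; only the request and response types come from the diff above, the client construction and values are placeholders.

// spaceToSpaceExample issues a space-to-space copy through the new SDK call (illustrative values).
func spaceToSpaceExample(cli *api.Client) error {
	resp, err := cli.UserSpaceSpaceToSpace(api.UserSpaceSpaceToSpace{
		SrcUserSpaceID: 1,
		DstUserSpaceID: 2,
		SrcPath:        "data/2024",
		DstPath:        "backup/2024",
	})
	if err != nil {
		return err
	}
	// Success and Failed come from the embedded SpaceToSpaceResult.
	fmt.Printf("copied %d entries, %d failed\n", len(resp.Success), len(resp.Failed))
	return nil
}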

+ 5
- 0
client/types/types.go

@@ -229,3 +229,8 @@ type PackageDetail struct {
ObjectCount int64 ObjectCount int64
TotalSize int64 TotalSize int64
} }

type SpaceToSpaceResult struct {
Success []string `json:"success"`
Failed []string `json:"failed"`
}

+ 0
- 1
common/README.md

@@ -12,7 +12,6 @@
- `pkgs`:一些相对独立的功能模块。 - `pkgs`:一些相对独立的功能模块。
- `cmd`:公用的业务逻辑,比如上传Package和下载Package。 - `cmd`:公用的业务逻辑,比如上传Package和下载Package。
- `db`:数据库的数据结构和操作函数。 - `db`:数据库的数据结构和操作函数。
- `distlock`:分布式锁服务,核心机制使用的是`common/pkgs/distlock`,增加了根据存储系统的业务需求设计的锁。
- `ec`:纠删码的库。 - `ec`:纠删码的库。
- `grpc`:存放proto文件,以及使用protogen工具生成的代码文件。 - `grpc`:存放proto文件,以及使用protogen工具生成的代码文件。
- `ioswitch`:IOSwitch模块。 - `ioswitch`:IOSwitch模块。


+ 7
- 15
common/assets/confs/client.config.json

@@ -5,8 +5,9 @@
"externalIP": "127.0.0.1", "externalIP": "127.0.0.1",
"locationID": 1 "locationID": 1
}, },
"hubGRPC": {
"port": 5010
"hubRPC": {},
"coordinatorRPC": {
"address": "127.0.0.1:5009"
}, },
"logger": { "logger": {
"output": "stdout", "output": "stdout",
@@ -18,23 +19,14 @@
"password": "123456", "password": "123456",
"databaseName": "cloudream" "databaseName": "cloudream"
}, },
"rabbitMQ": {
"sysEvent": {
"enabled": false,
"address": "127.0.0.1:5672", "address": "127.0.0.1:5672",
"account": "cloudream", "account": "cloudream",
"password": "123456", "password": "123456",
"vhost": "/", "vhost": "/",
"param": {
"retryNum": 5,
"retryInterval": 5000
}
},
"distlock": {
"etcdAddress": "127.0.0.1:2379",
"etcdUsername": "",
"etcdPassword": "",
"etcdLockLeaseTimeSec": 5,
"randomReleasingDelayMs": 3000,
"serviceDescription": "I am a client"
"exchange": "SysEvent",
"queue": "SysEvent"
}, },
"connectivity": { "connectivity": {
"testInterval": 300 "testInterval": 300


+ 3
- 10
common/assets/confs/coordinator.config.json

@@ -11,17 +11,10 @@
"password": "123456", "password": "123456",
"databaseName": "cloudream" "databaseName": "cloudream"
}, },
"rabbitMQ": {
"address": "127.0.0.1:5672",
"account": "cloudream",
"password": "123456",
"vhost": "/",
"param": {
"retryNum": 5,
"retryInterval": 5000
}
},
"tickTock": { "tickTock": {
"hubUnavailableTime": "20s" "hubUnavailableTime": "20s"
},
"rpc": {
"listen": "127.0.0.1:5009"
} }
} }

+ 14
- 18
common/assets/confs/hub.config.json

@@ -5,9 +5,14 @@
"externalIP": "127.0.0.1", "externalIP": "127.0.0.1",
"locationID": 1 "locationID": 1
}, },
"grpc": {
"ip": "127.0.0.1",
"port": 5010
"rpc": {
"listen": "127.0.0.1:5010"
},
"http": {
"listen": "127.0.0.1:5110"
},
"coordinatorRPC": {
"address": "127.0.0.1:5009"
}, },
"logger": { "logger": {
"output": "file", "output": "file",
@@ -15,25 +20,16 @@
"outputDirectory": "log", "outputDirectory": "log",
"level": "debug" "level": "debug"
}, },
"rabbitMQ": {
"sysEvent": {
"enabled": false,
"address": "127.0.0.1:5672", "address": "127.0.0.1:5672",
"account": "cloudream", "account": "cloudream",
"password": "123456", "password": "123456",
"vhost": "/", "vhost": "/",
"param": {
"retryNum": 5,
"retryInterval": 5000
}
},
"distlock": {
"etcdAddress": "127.0.0.1:2379",
"etcdUsername": "",
"etcdPassword": "",
"etcdLockLeaseTimeSec": 5,
"randomReleasingDelayMs": 3000,
"serviceDescription": "I am a hub"
"exchange": "SysEvent",
"queue": "SysEvent"
}, },
"connectivity": {
"testInterval": 300
"tickTock": {
"testHubConnectivitiesInterval": "5m"
} }
} }

+ 0
- 35
common/assets/confs/scanner.config.json

@@ -1,35 +0,0 @@
{
"accessStatHistoryAmount": 0.8,
"ecFileSizeThreshold": 104857600,
"hubUnavailableSeconds": 300,
"logger": {
"output": "file",
"outputFileName": "scanner",
"outputDirectory": "log",
"level": "debug"
},
"db": {
"address": "127.0.0.1:3306",
"account": "",
"password": "",
"databaseName": "cloudream"
},
"rabbitMQ": {
"address": "127.0.0.1:5672",
"account": "",
"password": "",
"vhost": "/",
"param": {
"retryNum": 5,
"retryInterval": 5000
}
},
"distlock": {
"etcdAddress": "127.0.0.1:2379",
"etcdUsername": "",
"etcdPassword": "",
"etcdLockLeaseTimeSec": 5,
"randomReleasingDelayMs": 3000,
"serviceDescription": "I am a scanner"
}
}

+ 11
- 30
common/globals/pools.go

@@ -1,38 +1,19 @@
package stgglb package stgglb


import ( import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/grpc/hub"
coormq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/coordinator"
hubmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/hub"
scmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/scanner"
corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
) )


var HubMQPool hubmq.Pool

var CoordinatorMQPool coormq.Pool

var ScannerMQPool scmq.Pool

// InitMQPool
//
// @Description: 初始化MQ连接池
// @param cfg
func InitMQPool(cfg mq.Config) {
HubMQPool = hubmq.NewPool(cfg)

CoordinatorMQPool = coormq.NewPool(cfg)

ScannerMQPool = scmq.NewPool(cfg)

}

var CoordinatorRPCPool *corrpc.Pool
var HubRPCPool *hubrpc.Pool var HubRPCPool *hubrpc.Pool


// InitHubRPCPool
//
// @Description: 初始化HubRPC连接池
// @param cfg
func InitHubRPCPool(cfg *hubrpc.PoolConfig) {
HubRPCPool = hubrpc.NewPool(cfg)
func InitPools(hubRPC *hubrpc.PoolConfig, corRPC *corrpc.PoolConfig) {
if hubRPC != nil {
HubRPCPool = hubrpc.NewPool(*hubRPC)
}

if corRPC != nil {
CoordinatorRPCPool = corrpc.NewPool(*corRPC)
}
} }
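A short sketch of calling the merged initializer at startup; the configs are placeholders for whatever the process reads from its config file, and either argument may be nil to skip that pool, as the diff above shows.

// initPoolsExample wires up both RPC pools; the configs shown here are placeholders.
func initPoolsExample(hubCfg *hubrpc.PoolConfig, coorCfg *corrpc.PoolConfig) {
	stgglb.InitPools(hubCfg, coorCfg) // pass nil for a pool the process does not need
}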

+ 1
- 1
common/globals/utils.go

@@ -3,7 +3,7 @@ package stgglb
import cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" import cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"


// 根据当前节点与目标地址的距离关系,选择合适的地址 // 根据当前节点与目标地址的距离关系,选择合适的地址
func SelectGRPCAddress(hub cortypes.Hub, addr cortypes.GRPCAddressInfo) (string, int) {
func SelectGRPCAddress(hub *cortypes.Hub, addr *cortypes.GRPCAddressInfo) (string, int) {
if Local != nil && Local.LocationID == hub.LocationID { if Local != nil && Local.LocationID == hub.LocationID {
return addr.LocalIP, addr.LocalGRPCPort return addr.LocalIP, addr.LocalGRPCPort
} }


+ 15
- 4
common/magefiles/main.go

@@ -3,15 +3,26 @@
package main package main


import ( import (
"io/fs"
"path/filepath" "path/filepath"


"github.com/magefile/mage/sh" "github.com/magefile/mage/sh"
) )


func Protos() error { func Protos() error {
return proto("pkgs/grpc/hub", "hub.proto")
}
var fileNames []string

filepath.WalkDir("pkgs/rpc", func(path string, d fs.DirEntry, err error) error {
if d.IsDir() {
return nil
}
if filepath.Ext(path) == ".proto" {
fileNames = append(fileNames, path)
}
return nil
})


func proto(dir string, fileName string) error {
return sh.Run("protoc", "--go_out="+dir, "--go-grpc_out="+dir, filepath.Join(dir, fileName))
args := []string{"--go_out=.", "--go_opt=paths=source_relative", "--go-grpc_out=.", "--go-grpc_opt=paths=source_relative"}
args = append(args, fileNames...)
return sh.Run("protoc", args...)
} }

+ 11
- 20
common/pkgs/connectivity/collector.go

@@ -1,13 +1,15 @@
package connectivity package connectivity


import ( import (
"context"
"math/rand" "math/rand"
"sync" "sync"
"time" "time"


"gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/logger"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
coormq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/coordinator"
corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
) )


@@ -119,14 +121,11 @@ func (r *Collector) testing() {
log := logger.WithType[Collector]("") log := logger.WithType[Collector]("")
log.Debug("do testing") log.Debug("do testing")


coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return
}
defer stgglb.CoordinatorMQPool.Release(coorCli)
coorCli := stgglb.CoordinatorRPCPool.Get()
defer coorCli.Release()


getHubResp, err := coorCli.GetHubs(coormq.NewGetHubs(nil))
if err != nil {
getHubResp, cerr := coorCli.GetHubs(context.Background(), corrpc.NewGetHubs(nil))
if cerr != nil {
return return
} }


@@ -184,19 +183,11 @@ func (r *Collector) ping(hub cortypes.Hub) Connectivity {
} }
} }


agtCli, err := stgglb.HubRPCPool.Acquire(ip, port)
if err != nil {
log.Warnf("new hub %v:%v rpc client: %w", ip, port, err)
return Connectivity{
ToHubID: hub.HubID,
Latency: nil,
TestTime: time.Now(),
}
}
defer stgglb.HubRPCPool.Release(agtCli)
agtCli := stgglb.HubRPCPool.Get(ip, port)
defer agtCli.Release()


// 第一次ping保证网络连接建立成功 // 第一次ping保证网络连接建立成功
err = agtCli.Ping()
_, err := agtCli.Ping(context.Background(), &hubrpc.Ping{})
if err != nil { if err != nil {
log.Warnf("pre ping: %v", err) log.Warnf("pre ping: %v", err)
return Connectivity{ return Connectivity{
@@ -210,7 +201,7 @@ func (r *Collector) ping(hub cortypes.Hub) Connectivity {
var avgLatency time.Duration var avgLatency time.Duration
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
start := time.Now() start := time.Now()
err = agtCli.Ping()
_, err := agtCli.Ping(context.Background(), &hubrpc.Ping{})
if err != nil { if err != nil {
log.Warnf("ping: %v", err) log.Warnf("ping: %v", err)
return Connectivity{ return Connectivity{


+ 14
- 0
common/pkgs/distlock/lockprovider/empty_target.go

@@ -0,0 +1,14 @@
package lockprovider

import "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"

type EmptyTarget struct{}

func NewEmptyTarget() *EmptyTarget {
return &EmptyTarget{}
}

func (e *EmptyTarget) Equals(other types.LockTarget) bool {
_, ok := other.(*EmptyTarget)
return ok
}

+ 5
- 5
common/pkgs/distlock/lockprovider/lock_compatibility_table.go

@@ -4,7 +4,7 @@ import (
"fmt" "fmt"


"github.com/samber/lo" "github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
) )


const ( const (
@@ -16,7 +16,7 @@ const (
type HasSuchLockFn = func() bool type HasSuchLockFn = func() bool


// LockCompatibilitySpecialFn 判断锁与指定的锁名是否兼容 // LockCompatibilitySpecialFn 判断锁与指定的锁名是否兼容
type LockCompatibilitySpecialFn func(lock distlock.Lock, testLockName string) bool
type LockCompatibilitySpecialFn func(lock types.Lock, testLockName string) bool


type LockCompatibilityType string type LockCompatibilityType string


@@ -95,7 +95,7 @@ func (t *LockCompatibilityTable) Row(comps ...LockCompatibility) error {
return nil return nil
} }


func (t *LockCompatibilityTable) Test(lock distlock.Lock) error {
func (t *LockCompatibilityTable) Test(lock types.Lock) error {
row, ok := lo.Find(t.rows, func(row LockCompatibilityTableRow) bool { return lock.Name == row.LockName }) row, ok := lo.Find(t.rows, func(row LockCompatibilityTableRow) bool { return lock.Name == row.LockName })
if !ok { if !ok {
return fmt.Errorf("unknow lock name %s", lock.Name) return fmt.Errorf("unknow lock name %s", lock.Name)
@@ -108,13 +108,13 @@ func (t *LockCompatibilityTable) Test(lock distlock.Lock) error {


if c.Type == LOCK_COMPATIBILITY_UNCOMPATIBLE { if c.Type == LOCK_COMPATIBILITY_UNCOMPATIBLE {
if t.rows[i].HasSuchLockFn() { if t.rows[i].HasSuchLockFn() {
return distlock.NewLockTargetBusyError(t.rows[i].LockName)
return types.NewLockTargetBusyError(t.rows[i].LockName)
} }
} }


if c.Type == LOCK_COMPATIBILITY_SPECIAL { if c.Type == LOCK_COMPATIBILITY_SPECIAL {
if !c.SpecialFn(lock, t.rows[i].LockName) { if !c.SpecialFn(lock, t.rows[i].LockName) {
return distlock.NewLockTargetBusyError(t.rows[i].LockName)
return types.NewLockTargetBusyError(t.rows[i].LockName)
} }
} }
} }


+ 5
- 5
common/pkgs/distlock/lockprovider/lock_compatibility_table_test.go

@@ -4,7 +4,7 @@ import (
"testing" "testing"


. "github.com/smartystreets/goconvey/convey" . "github.com/smartystreets/goconvey/convey"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
) )


func Test_LockCompatibilityTable(t *testing.T) { func Test_LockCompatibilityTable(t *testing.T) {
@@ -18,22 +18,22 @@ func Test_LockCompatibilityTable(t *testing.T) {


comp := LockCompatible() comp := LockCompatible()
uncp := LockUncompatible() uncp := LockUncompatible()
spcl := LockSpecial(func(lock distlock.Lock, testLockName string) bool { return true })
spcl := LockSpecial(func(lock types.Lock, testLockName string) bool { return true })
table.Row(comp, comp, comp) table.Row(comp, comp, comp)
table.Row(comp, uncp, comp) table.Row(comp, uncp, comp)
table.Row(comp, comp, spcl) table.Row(comp, comp, spcl)


err := table.Test(distlock.Lock{
err := table.Test(types.Lock{
Name: "l1", Name: "l1",
}) })
So(err, ShouldBeNil) So(err, ShouldBeNil)


err = table.Test(distlock.Lock{
err = table.Test(types.Lock{
Name: "l2", Name: "l2",
}) })
So(err, ShouldNotBeNil) So(err, ShouldNotBeNil)


err = table.Test(distlock.Lock{
err = table.Test(types.Lock{
Name: "l3", Name: "l3",
}) })
So(err, ShouldBeNil) So(err, ShouldBeNil)


+ 0
- 122
common/pkgs/distlock/lockprovider/metadata_lock.go

@@ -1,122 +0,0 @@
package lockprovider

import (
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/common/utils/lo2"
)

const (
MetadataLockPathPrefix = "Metadata"
MetadataCreateLock = "Create"
)

type metadataElementLock struct {
target StringLockTarget
requestIDs []string
}

type MetadataLock struct {
createReqIDs []*metadataElementLock

lockCompatibilityTable LockCompatibilityTable
}

func NewMetadataLock() *MetadataLock {

metadataLock := MetadataLock{
lockCompatibilityTable: LockCompatibilityTable{},
}

compTable := &metadataLock.lockCompatibilityTable

compTable.
Column(MetadataCreateLock, func() bool { return len(metadataLock.createReqIDs) > 0 })
trgt := LockSpecial(func(lock distlock.Lock, testLockName string) bool {
strTar := lock.Target.(StringLockTarget)
return lo.NoneBy(metadataLock.createReqIDs, func(other *metadataElementLock) bool { return strTar.IsConflict(&other.target) })
})

compTable.MustRow(trgt)

return &metadataLock
}

// CanLock 判断这个锁能否锁定成功
func (l *MetadataLock) CanLock(lock distlock.Lock) error {
return l.lockCompatibilityTable.Test(lock)
}

// 锁定
func (l *MetadataLock) Lock(reqID string, lock distlock.Lock) error {
switch lock.Name {
case MetadataCreateLock:
l.createReqIDs = l.addElementLock(lock, l.createReqIDs, reqID)

default:
return fmt.Errorf("unknow lock name: %s", lock.Name)
}

return nil
}

func (l *MetadataLock) addElementLock(lock distlock.Lock, locks []*metadataElementLock, reqID string) []*metadataElementLock {
strTarget := lock.Target.(StringLockTarget)
lck, ok := lo.Find(locks, func(l *metadataElementLock) bool { return strTarget.IsConflict(&l.target) })
if !ok {
lck = &metadataElementLock{
target: strTarget,
}
locks = append(locks, lck)
}

lck.requestIDs = append(lck.requestIDs, reqID)
return locks
}

// 解锁
func (l *MetadataLock) Unlock(reqID string, lock distlock.Lock) error {
switch lock.Name {
case MetadataCreateLock:
l.createReqIDs = l.removeElementLock(lock, l.createReqIDs, reqID)

default:
return fmt.Errorf("unknow lock name: %s", lock.Name)
}

return nil
}

func (l *MetadataLock) removeElementLock(lock distlock.Lock, locks []*metadataElementLock, reqID string) []*metadataElementLock {
strTarget := lock.Target.(StringLockTarget)
lck, index, ok := lo.FindIndexOf(locks, func(l *metadataElementLock) bool { return strTarget.IsConflict(&l.target) })
if !ok {
return locks
}

lck.requestIDs = lo2.Remove(lck.requestIDs, reqID)

if len(lck.requestIDs) == 0 {
locks = lo2.RemoveAt(locks, index)
}

return locks
}

// GetTargetString 将锁对象序列化为字符串,方便存储到ETCD
func (l *MetadataLock) GetTargetString(target any) (string, error) {
tar := target.(StringLockTarget)
return StringLockTargetToString(&tar)
}

// ParseTargetString 解析字符串格式的锁对象数据
func (l *MetadataLock) ParseTargetString(targetStr string) (any, error) {
return StringLockTargetFromString(targetStr)
}

// Clear 清除内部所有状态
func (l *MetadataLock) Clear() {
l.createReqIDs = nil
}

+ 9
- 20
common/pkgs/distlock/lockprovider/shard_store.go

@@ -3,8 +3,8 @@ package lockprovider
import ( import (
"fmt" "fmt"


"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/common/utils/lo2" "gitlink.org.cn/cloudream/common/utils/lo2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
) )


const ( const (
@@ -27,7 +27,7 @@ func NewShardStoreLock() *ShardStoreLock {
} }


// CanLock 判断这个锁能否锁定成功 // CanLock 判断这个锁能否锁定成功
func (l *ShardStoreLock) CanLock(lock distlock.Lock) error {
func (l *ShardStoreLock) CanLock(lock types.Lock) error {
nodeLock, ok := l.stgLocks[lock.Path[ShardStoreStorageIDPathIndex]] nodeLock, ok := l.stgLocks[lock.Path[ShardStoreStorageIDPathIndex]]
if !ok { if !ok {
// 不能直接返回nil,因为如果锁数据的格式不对,也不能获取锁。 // 不能直接返回nil,因为如果锁数据的格式不对,也不能获取锁。
@@ -39,7 +39,7 @@ func (l *ShardStoreLock) CanLock(lock distlock.Lock) error {
} }


// 锁定。在内部可以不用判断能否加锁,外部需要保证调用此函数前调用了CanLock进行检查 // 锁定。在内部可以不用判断能否加锁,外部需要保证调用此函数前调用了CanLock进行检查
func (l *ShardStoreLock) Lock(reqID string, lock distlock.Lock) error {
func (l *ShardStoreLock) Lock(reqID types.RequestID, lock types.Lock) error {
stgID := lock.Path[ShardStoreStorageIDPathIndex] stgID := lock.Path[ShardStoreStorageIDPathIndex]


nodeLock, ok := l.stgLocks[stgID] nodeLock, ok := l.stgLocks[stgID]
@@ -52,7 +52,7 @@ func (l *ShardStoreLock) Lock(reqID string, lock distlock.Lock) error {
} }


// 解锁 // 解锁
func (l *ShardStoreLock) Unlock(reqID string, lock distlock.Lock) error {
func (l *ShardStoreLock) Unlock(reqID types.RequestID, lock types.Lock) error {
stgID := lock.Path[ShardStoreStorageIDPathIndex] stgID := lock.Path[ShardStoreStorageIDPathIndex]


nodeLock, ok := l.stgLocks[stgID] nodeLock, ok := l.stgLocks[stgID]
@@ -63,25 +63,14 @@ func (l *ShardStoreLock) Unlock(reqID string, lock distlock.Lock) error {
return nodeLock.Unlock(reqID, lock) return nodeLock.Unlock(reqID, lock)
} }


// GetTargetString 将锁对象序列化为字符串,方便存储到ETCD
func (l *ShardStoreLock) GetTargetString(target any) (string, error) {
tar := target.(StringLockTarget)
return StringLockTargetToString(&tar)
}

// ParseTargetString 解析字符串格式的锁对象数据
func (l *ShardStoreLock) ParseTargetString(targetStr string) (any, error) {
return StringLockTargetFromString(targetStr)
}

// Clear 清除内部所有状态 // Clear 清除内部所有状态
func (l *ShardStoreLock) Clear() { func (l *ShardStoreLock) Clear() {
l.stgLocks = make(map[string]*ShardStoreStorageLock) l.stgLocks = make(map[string]*ShardStoreStorageLock)
} }


type ShardStoreStorageLock struct { type ShardStoreStorageLock struct {
buzyReqIDs []string
gcReqIDs []string
buzyReqIDs []types.RequestID
gcReqIDs []types.RequestID


lockCompatibilityTable *LockCompatibilityTable lockCompatibilityTable *LockCompatibilityTable
} }
@@ -107,12 +96,12 @@ func NewShardStoreStorageLock() *ShardStoreStorageLock {
} }


// CanLock 判断这个锁能否锁定成功 // CanLock 判断这个锁能否锁定成功
func (l *ShardStoreStorageLock) CanLock(lock distlock.Lock) error {
func (l *ShardStoreStorageLock) CanLock(lock types.Lock) error {
return l.lockCompatibilityTable.Test(lock) return l.lockCompatibilityTable.Test(lock)
} }


// 锁定 // 锁定
func (l *ShardStoreStorageLock) Lock(reqID string, lock distlock.Lock) error {
func (l *ShardStoreStorageLock) Lock(reqID types.RequestID, lock types.Lock) error {
switch lock.Name { switch lock.Name {
case ShardStoreBuzyLock: case ShardStoreBuzyLock:
l.buzyReqIDs = append(l.buzyReqIDs, reqID) l.buzyReqIDs = append(l.buzyReqIDs, reqID)
@@ -126,7 +115,7 @@ func (l *ShardStoreStorageLock) Lock(reqID string, lock distlock.Lock) error {
} }


// 解锁 // 解锁
func (l *ShardStoreStorageLock) Unlock(reqID string, lock distlock.Lock) error {
func (l *ShardStoreStorageLock) Unlock(reqID types.RequestID, lock types.Lock) error {
switch lock.Name { switch lock.Name {
case ShardStoreBuzyLock: case ShardStoreBuzyLock:
l.buzyReqIDs = lo2.Remove(l.buzyReqIDs, reqID) l.buzyReqIDs = lo2.Remove(l.buzyReqIDs, reqID)
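A minimal sketch (not part of this commit) of how the reworked ShardStoreLock provider is driven with the new types package; it assumes NewEmptyTarget from the newly added empty_target.go returns a types.LockTarget, and that Buzy and GC are incompatible on the same storage, as the compatibility table suggests.

package main

import (
	"fmt"

	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/lockprovider"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
)

func main() {
	prov := lockprovider.NewShardStoreLock()

	buzy := types.Lock{
		Path:   []string{lockprovider.ShardStoreLockPathPrefix, "1"},
		Name:   lockprovider.ShardStoreBuzyLock,
		Target: lockprovider.NewEmptyTarget(),
	}

	// CanLock must be called before Lock; Lock itself does not re-check conflicts.
	if err := prov.CanLock(buzy); err != nil {
		fmt.Println("cannot lock:", err)
		return
	}
	prov.Lock(types.RequestID("req1"), buzy)

	// A GC lock on the same storage should now conflict with the held Buzy lock.
	gc := buzy
	gc.Name = lockprovider.ShardStoreGCLock
	fmt.Println(prov.CanLock(gc)) // expected: non-nil error (target busy)

	prov.Unlock(types.RequestID("req1"), buzy)
}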


+13 -13 common/pkgs/distlock/lockprovider/shard_store_test.go

@@ -4,25 +4,25 @@ import (
"testing" "testing"


. "github.com/smartystreets/goconvey/convey" . "github.com/smartystreets/goconvey/convey"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
) )


func Test_ShardStoreLock(t *testing.T) { func Test_ShardStoreLock(t *testing.T) {
cases := []struct { cases := []struct {
title string title string
initLocks []distlock.Lock
doLock distlock.Lock
initLocks []types.Lock
doLock types.Lock
wantOK bool wantOK bool
}{ }{
{ {
title: "同节点,同一个Buzy锁", title: "同节点,同一个Buzy锁",
initLocks: []distlock.Lock{
initLocks: []types.Lock{
{ {
Path: []string{ShardStoreLockPathPrefix, "hub1"}, Path: []string{ShardStoreLockPathPrefix, "hub1"},
Name: ShardStoreBuzyLock, Name: ShardStoreBuzyLock,
}, },
}, },
doLock: distlock.Lock{
doLock: types.Lock{
Path: []string{ShardStoreLockPathPrefix, "hub1"}, Path: []string{ShardStoreLockPathPrefix, "hub1"},
Name: ShardStoreBuzyLock, Name: ShardStoreBuzyLock,
}, },
@@ -30,13 +30,13 @@ func Test_ShardStoreLock(t *testing.T) {
}, },
{ {
title: "同节点,同一个GC锁", title: "同节点,同一个GC锁",
initLocks: []distlock.Lock{
initLocks: []types.Lock{
{ {
Path: []string{ShardStoreLockPathPrefix, "hub1"}, Path: []string{ShardStoreLockPathPrefix, "hub1"},
Name: ShardStoreGCLock, Name: ShardStoreGCLock,
}, },
}, },
doLock: distlock.Lock{
doLock: types.Lock{
Path: []string{ShardStoreLockPathPrefix, "hub1"}, Path: []string{ShardStoreLockPathPrefix, "hub1"},
Name: ShardStoreGCLock, Name: ShardStoreGCLock,
}, },
@@ -44,17 +44,17 @@ func Test_ShardStoreLock(t *testing.T) {
}, },
{ {
title: "同时设置Buzy和GC", title: "同时设置Buzy和GC",
initLocks: []distlock.Lock{
initLocks: []types.Lock{
{ {
Path: []string{ShardStoreLockPathPrefix, "hub1"}, Path: []string{ShardStoreLockPathPrefix, "hub1"},
Name: ShardStoreBuzyLock, Name: ShardStoreBuzyLock,
Target: *NewStringLockTarget(),
Target: NewStringLockTarget(),
}, },
}, },
doLock: distlock.Lock{
doLock: types.Lock{
Path: []string{ShardStoreLockPathPrefix, "hub1"}, Path: []string{ShardStoreLockPathPrefix, "hub1"},
Name: ShardStoreGCLock, Name: ShardStoreGCLock,
Target: *NewStringLockTarget(),
Target: NewStringLockTarget(),
}, },
wantOK: false, wantOK: false,
}, },
@@ -80,7 +80,7 @@ func Test_ShardStoreLock(t *testing.T) {
Convey("解锁", t, func() { Convey("解锁", t, func() {
ipfsLock := NewShardStoreLock() ipfsLock := NewShardStoreLock()


lock := distlock.Lock{
lock := types.Lock{
Path: []string{ShardStoreLockPathPrefix, "hub1"}, Path: []string{ShardStoreLockPathPrefix, "hub1"},
Name: ShardStoreBuzyLock, Name: ShardStoreBuzyLock,
} }
@@ -92,7 +92,7 @@ func Test_ShardStoreLock(t *testing.T) {


ipfsLock.Unlock("req1", lock) ipfsLock.Unlock("req1", lock)


lock = distlock.Lock{
lock = types.Lock{
Path: []string{ShardStoreLockPathPrefix, "hub1"}, Path: []string{ShardStoreLockPathPrefix, "hub1"},
Name: ShardStoreGCLock, Name: ShardStoreGCLock,
} }


+0 -140 common/pkgs/distlock/lockprovider/storage_lock.go

@@ -1,140 +0,0 @@
package lockprovider

import (
"fmt"

"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/common/utils/lo2"
)

const (
StorageLockPathPrefix = "Storage"
StorageHubIDPathIndex = 1
StorageBuzyLock = "Buzy"
StorageGCLock = "GC"
)

type StorageLock struct {
nodeLocks map[string]*StorageNodeLock
dummyLock *StorageNodeLock
}

func NewStorageLock() *StorageLock {
return &StorageLock{
nodeLocks: make(map[string]*StorageNodeLock),
dummyLock: NewStorageNodeLock(),
}
}

// CanLock 判断这个锁能否锁定成功
func (l *StorageLock) CanLock(lock distlock.Lock) error {
nodeLock, ok := l.nodeLocks[lock.Path[StorageHubIDPathIndex]]
if !ok {
// 不能直接返回nil,因为如果锁数据的格式不对,也不能获取锁。
// 这里使用一个空Provider来进行检查。
return l.dummyLock.CanLock(lock)
}

return nodeLock.CanLock(lock)
}

// 锁定。在内部可以不用判断能否加锁,外部需要保证调用此函数前调用了CanLock进行检查
func (l *StorageLock) Lock(reqID string, lock distlock.Lock) error {
hubID := lock.Path[StorageHubIDPathIndex]

nodeLock, ok := l.nodeLocks[hubID]
if !ok {
nodeLock = NewStorageNodeLock()
l.nodeLocks[hubID] = nodeLock
}

return nodeLock.Lock(reqID, lock)
}

// 解锁
func (l *StorageLock) Unlock(reqID string, lock distlock.Lock) error {
hubID := lock.Path[StorageHubIDPathIndex]

nodeLock, ok := l.nodeLocks[hubID]
if !ok {
return nil
}

return nodeLock.Unlock(reqID, lock)
}

// GetTargetString 将锁对象序列化为字符串,方便存储到ETCD
func (l *StorageLock) GetTargetString(target any) (string, error) {
tar := target.(StringLockTarget)
return StringLockTargetToString(&tar)
}

// ParseTargetString 解析字符串格式的锁对象数据
func (l *StorageLock) ParseTargetString(targetStr string) (any, error) {
return StringLockTargetFromString(targetStr)
}

// Clear 清除内部所有状态
func (l *StorageLock) Clear() {
l.nodeLocks = make(map[string]*StorageNodeLock)
}

type StorageNodeLock struct {
buzyReqIDs []string
gcReqIDs []string

lockCompatibilityTable *LockCompatibilityTable
}

func NewStorageNodeLock() *StorageNodeLock {
compTable := &LockCompatibilityTable{}

StorageLock := StorageNodeLock{
lockCompatibilityTable: compTable,
}

compTable.
Column(StorageBuzyLock, func() bool { return len(StorageLock.buzyReqIDs) > 0 }).
Column(StorageGCLock, func() bool { return len(StorageLock.gcReqIDs) > 0 })

comp := LockCompatible()
uncp := LockUncompatible()

compTable.MustRow(comp, uncp)
compTable.MustRow(uncp, comp)

return &StorageLock
}

// CanLock 判断这个锁能否锁定成功
func (l *StorageNodeLock) CanLock(lock distlock.Lock) error {
return l.lockCompatibilityTable.Test(lock)
}

// 锁定
func (l *StorageNodeLock) Lock(reqID string, lock distlock.Lock) error {
switch lock.Name {
case StorageBuzyLock:
l.buzyReqIDs = append(l.buzyReqIDs, reqID)
case StorageGCLock:
l.gcReqIDs = append(l.gcReqIDs, reqID)
default:
return fmt.Errorf("unknow lock name: %s", lock.Name)
}

return nil
}

// 解锁
func (l *StorageNodeLock) Unlock(reqID string, lock distlock.Lock) error {
switch lock.Name {
case StorageBuzyLock:
l.buzyReqIDs = lo2.Remove(l.buzyReqIDs, reqID)
case StorageGCLock:
l.gcReqIDs = lo2.Remove(l.gcReqIDs, reqID)
default:
return fmt.Errorf("unknow lock name: %s", lock.Name)
}

return nil
}

+20 -0 common/pkgs/distlock/lockprovider/string_lock_target.go

@@ -5,6 +5,7 @@ import (


"github.com/samber/lo" "github.com/samber/lo"
"gitlink.org.cn/cloudream/common/utils/serder" "gitlink.org.cn/cloudream/common/utils/serder"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
) )


type StringLockTarget struct { type StringLockTarget struct {
@@ -43,6 +44,25 @@ func (t *StringLockTarget) IsConflict(other *StringLockTarget) bool {
return false return false
} }


func (t *StringLockTarget) Equals(other types.LockTarget) bool {
st, ok := other.(*StringLockTarget)
if !ok {
return false
}

if len(t.Components) != len(st.Components) {
return false
}

for i := 0; i < len(t.Components); i++ {
if !t.Components[i].IsEquals(&st.Components[i]) {
return false
}
}

return true
}

type StringLockTargetComponet struct { type StringLockTargetComponet struct {
Values []string `json:"values"` Values []string `json:"values"`
} }
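A sketch of the Equals semantics added above (not part of the commit; the chained Add call is assumed to be the pre-existing variadic builder on StringLockTarget): two targets are equal only when their component lists match pairwise.

package main

import (
	"fmt"

	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/lockprovider"
)

func main() {
	a := lockprovider.NewStringLockTarget().Add("pkg1", "a/b.txt")
	b := lockprovider.NewStringLockTarget().Add("pkg1", "a/b.txt")
	c := lockprovider.NewStringLockTarget().Add("pkg2", "a/b.txt")

	fmt.Println(a.Equals(b)) // true: same components, same order
	fmt.Println(a.Equals(c)) // false: component values differ
}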


+15 -0 common/pkgs/distlock/mutex.go

@@ -0,0 +1,15 @@
package distlock

import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
)

type Mutex struct {
svc *Service
lockReq types.LockRequest
lockReqID types.RequestID
}

func (m *Mutex) Unlock() {
m.svc.release(m.lockReqID, m.lockReq)
}
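The new Mutex is just a handle returned by Service.Acquire; Unlock forwards to the service's release. A typical caller pattern is to defer Unlock right after a successful acquire, as in this sketch (runLocked, svc and req are illustrative names, not part of the commit):

package example

import (
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
)

// runLocked acquires the given request, runs do, and always releases the lock.
func runLocked(svc *distlock.Service, req types.LockRequest, do func() error) error {
	mutex, err := svc.Acquire(req)
	if err != nil {
		return err
	}
	defer mutex.Unlock()
	return do()
}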

+53 -0 common/pkgs/distlock/reentrant.go

@@ -0,0 +1,53 @@
package distlock

import "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"

type Reentrant struct {
svc *Service
reqs []types.LockRequest
locked []*Mutex
}

func (r *Reentrant) Lock(req types.LockRequest, opt ...AcquireOptionFn) error {
var willLock []types.Lock

loop:
for _, lock := range req.Locks {
for _, req := range r.reqs {
for _, locked := range req.Locks {
if locked.Equals(lock) {
continue loop
}
}
}

willLock = append(willLock, lock)
}

if len(willLock) == 0 {
return nil
}

newReq := types.LockRequest{
Reason: req.Reason,
Locks: willLock,
}

m, err := r.svc.Acquire(newReq, opt...)
if err != nil {
return err
}

r.reqs = append(r.reqs, newReq)
r.locked = append(r.locked, m)

return nil
}

func (r *Reentrant) Unlock() {
for i := len(r.reqs) - 1; i >= 0; i-- {
r.locked[i].Unlock()
}
r.locked = nil
r.reqs = nil
}
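Reentrant, added above, skips any lock whose Path, Name and Target already appear in an earlier request on the same Reentrant, so nested code paths can re-request the same locks without blocking on themselves; Unlock then releases the accumulated requests in reverse order. A rough usage sketch (gcUserSpace, svc and spaceID are assumed caller-side names):

package example

import (
	clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/reqbuilder"
)

func gcUserSpace(svc *distlock.Service, spaceID clitypes.UserSpaceID) error {
	re := svc.BeginReentrant()
	defer re.Unlock()

	req := reqbuilder.NewBuilder().Shard().GC(spaceID).Build()
	if err := re.Lock(req); err != nil {
		return err
	}

	// Requesting the same GC lock again acquires nothing new and cannot
	// deadlock against the lock already held by this Reentrant.
	if err := re.Lock(req); err != nil {
		return err
	}

	return nil
}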

+11 -12 common/pkgs/distlock/reqbuilder/lock_request_builder.go

@@ -1,30 +1,29 @@
package reqbuilder package reqbuilder


import ( import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/common/utils/lo2" "gitlink.org.cn/cloudream/common/utils/lo2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
) )


type LockRequestBuilder struct { type LockRequestBuilder struct {
locks []distlock.Lock
locks []types.Lock
} }


func NewBuilder() *LockRequestBuilder { func NewBuilder() *LockRequestBuilder {
return &LockRequestBuilder{} return &LockRequestBuilder{}
} }


func (b *LockRequestBuilder) Build() distlock.LockRequest {
return distlock.LockRequest{
Locks: lo2.ArrayClone(b.locks),
}
func (b *LockRequestBuilder) IsEmpty() bool {
return len(b.locks) == 0
} }


func (b *LockRequestBuilder) MutexLock(svc *distlock.Service) (*distlock.Mutex, error) {
mutex := distlock.NewMutex(svc, b.Build())
err := mutex.Lock()
if err != nil {
return nil, err
func (b *LockRequestBuilder) Build() types.LockRequest {
return types.LockRequest{
Locks: lo2.ArrayClone(b.locks),
} }
}


return mutex, nil
func (b *LockRequestBuilder) MutexLock(svc *distlock.Service, opt ...distlock.AcquireOptionFn) (*distlock.Mutex, error) {
return svc.Acquire(b.Build(), opt...)
} }
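With the changes above, the builder no longer drives locking itself: MutexLock is a thin wrapper over Service.Acquire, and the new IsEmpty lets callers skip the acquire when nothing was added. A sketch under those assumptions (lockIfNeeded is an illustrative name):

package example

import (
	"time"

	clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/reqbuilder"
)

func lockIfNeeded(svc *distlock.Service, spaceID clitypes.UserSpaceID) (*distlock.Mutex, error) {
	b := reqbuilder.NewBuilder().Shard().Buzy(spaceID)
	if b.IsEmpty() {
		return nil, nil // nothing to lock
	}
	// Any AcquireOptionFn is forwarded straight to Service.Acquire.
	return b.MutexLock(svc, distlock.WithTimeout(5*time.Second))
}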

+0 -17 common/pkgs/distlock/reqbuilder/metadata.go

@@ -1,17 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/lockprovider"
)

type MetadataLockReqBuilder struct {
*LockRequestBuilder
}

func (b *LockRequestBuilder) Metadata() *MetadataLockReqBuilder {
return &MetadataLockReqBuilder{LockRequestBuilder: b}
}

func (b *MetadataLockReqBuilder) makePath(tableName string) []string {
return []string{lockprovider.MetadataLockPathPrefix, tableName}
}

+0 -24 common/pkgs/distlock/reqbuilder/metadata_object.go

@@ -1,24 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/lockprovider"
)

type MetadataObjectLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) Object() *MetadataObjectLockReqBuilder {
return &MetadataObjectLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataObjectLockReqBuilder) CreateOne(packageID clitypes.PackageID, objectPath string) *MetadataObjectLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("Object"),
Name: lockprovider.MetadataCreateLock,
Target: *lockprovider.NewStringLockTarget().Add(packageID, objectPath),
})
return b
}

+11 -11 common/pkgs/distlock/reqbuilder/shard_store.go

@@ -3,9 +3,9 @@ package reqbuilder
import ( import (
"strconv" "strconv"


"gitlink.org.cn/cloudream/common/pkgs/distlock"
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/lockprovider" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/lockprovider"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
) )


type ShardStoreLockReqBuilder struct { type ShardStoreLockReqBuilder struct {
@@ -15,24 +15,24 @@ type ShardStoreLockReqBuilder struct {
func (b *LockRequestBuilder) Shard() *ShardStoreLockReqBuilder { func (b *LockRequestBuilder) Shard() *ShardStoreLockReqBuilder {
return &ShardStoreLockReqBuilder{LockRequestBuilder: b} return &ShardStoreLockReqBuilder{LockRequestBuilder: b}
} }
func (b *ShardStoreLockReqBuilder) Buzy(stgID cortypes.StorageID) *ShardStoreLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(stgID),
func (b *ShardStoreLockReqBuilder) Buzy(spaceID clitypes.UserSpaceID) *ShardStoreLockReqBuilder {
b.locks = append(b.locks, types.Lock{
Path: b.makePath(spaceID),
Name: lockprovider.ShardStoreBuzyLock, Name: lockprovider.ShardStoreBuzyLock,
Target: *lockprovider.NewStringLockTarget(),
Target: lockprovider.NewEmptyTarget(),
}) })
return b return b
} }


func (b *ShardStoreLockReqBuilder) GC(stgID cortypes.StorageID) *ShardStoreLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(stgID),
func (b *ShardStoreLockReqBuilder) GC(spaceID clitypes.UserSpaceID) *ShardStoreLockReqBuilder {
b.locks = append(b.locks, types.Lock{
Path: b.makePath(spaceID),
Name: lockprovider.ShardStoreGCLock, Name: lockprovider.ShardStoreGCLock,
Target: *lockprovider.NewStringLockTarget(),
Target: lockprovider.NewEmptyTarget(),
}) })
return b return b
} }


func (b *ShardStoreLockReqBuilder) makePath(hubID cortypes.StorageID) []string {
func (b *ShardStoreLockReqBuilder) makePath(hubID clitypes.UserSpaceID) []string {
return []string{lockprovider.ShardStoreLockPathPrefix, strconv.FormatInt(int64(hubID), 10)} return []string{lockprovider.ShardStoreLockPathPrefix, strconv.FormatInt(int64(hubID), 10)}
} }
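The shard-store builder is now keyed by clitypes.UserSpaceID instead of a coordinator StorageID, and its locks carry the new EmptyTarget. A small sketch of building one request that marks several spaces as busy (buildBuzyRequest and spaceIDs are illustrative names, not part of the commit):

package example

import (
	clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/reqbuilder"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
)

// buildBuzyRequest collects one Buzy lock per space; each lock's path is
// derived from the space ID by the builder's makePath.
func buildBuzyRequest(spaceIDs []clitypes.UserSpaceID) types.LockRequest {
	b := reqbuilder.NewBuilder().Shard()
	for _, id := range spaceIDs {
		b.Buzy(id)
	}
	return b.Build()
}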

+0 -39 common/pkgs/distlock/reqbuilder/storage.go

@@ -1,39 +0,0 @@
package reqbuilder

import (
"strconv"

"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/lockprovider"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
)

type StorageLockReqBuilder struct {
*LockRequestBuilder
}

func (b *LockRequestBuilder) Storage() *StorageLockReqBuilder {
return &StorageLockReqBuilder{LockRequestBuilder: b}
}

func (b *StorageLockReqBuilder) Buzy(storageID cortypes.StorageID) *StorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(storageID),
Name: lockprovider.StorageBuzyLock,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

func (b *StorageLockReqBuilder) GC(storageID cortypes.StorageID) *StorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(storageID),
Name: lockprovider.StorageGCLock,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

func (b *StorageLockReqBuilder) makePath(storageID cortypes.StorageID) []string {
return []string{lockprovider.StorageLockPathPrefix, strconv.FormatInt(int64(storageID), 10)}
}

+165 -33 common/pkgs/distlock/service.go

@@ -1,62 +1,194 @@
package distlock package distlock


import ( import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"context"
"fmt"
"sync"
"time"

"gitlink.org.cn/cloudream/common/pkgs/future"
"gitlink.org.cn/cloudream/common/pkgs/trie" "gitlink.org.cn/cloudream/common/pkgs/trie"
"gitlink.org.cn/cloudream/common/utils/lo2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/lockprovider" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/lockprovider"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
) )


type Service = distlock.Service
type AcquireOption struct {
Timeout time.Duration
}

type AcquireOptionFn func(opt *AcquireOption)

func WithTimeout(timeout time.Duration) AcquireOptionFn {
return func(opt *AcquireOption) {
opt.Timeout = timeout
}
}

type Service struct {
lock *sync.Mutex
provdersTrie *trie.Trie[types.LockProvider]
acquirings []*acquireInfo
nextReqID int64
}

func NewService() *Service {
svc := &Service{
lock: &sync.Mutex{},
provdersTrie: trie.NewTrie[types.LockProvider](),
}


type Mutex = distlock.Mutex
svc.provdersTrie.Create([]any{lockprovider.ShardStoreLockPathPrefix, trie.WORD_ANY}).Value = lockprovider.NewShardStoreLock()
return svc
}


func NewService(cfg *distlock.Config) (*distlock.Service, error) {
srv, err := distlock.NewService(cfg, initProviders())
type acquireInfo struct {
Request types.LockRequest
Callback *future.SetValueFuture[types.RequestID]
LastErr error
}

func (svc *Service) Acquire(req types.LockRequest, opts ...AcquireOptionFn) (*Mutex, error) {
var opt = AcquireOption{
Timeout: time.Second * 10,
}
for _, fn := range opts {
fn(&opt)
}

ctx := context.Background()
if opt.Timeout != 0 {
var cancel func()
ctx, cancel = context.WithTimeout(ctx, opt.Timeout)
defer cancel()
}

// 就地检测锁是否可用
svc.lock.Lock()
defer svc.lock.Unlock()

reqID, err := svc.tryAcquireOne(req)
if err != nil { if err != nil {
return nil, err return nil, err
} }


return srv, nil
if reqID != "" {
return &Mutex{
svc: svc,
lockReq: req,
lockReqID: reqID,
}, nil
}

// 就地检测失败,那么就需要异步等待锁可用
info := &acquireInfo{
Request: req,
Callback: future.NewSetValue[types.RequestID](),
}
svc.acquirings = append(svc.acquirings, info)

// 等待的时候不加锁
svc.lock.Unlock()
reqID, err = info.Callback.Wait(ctx)
svc.lock.Lock()

if err == nil {
return &Mutex{
svc: svc,
lockReq: req,
lockReqID: reqID,
}, nil
}

if err != future.ErrCanceled {
lo2.Remove(svc.acquirings, info)
return nil, err
}

// 如果第一次等待是超时错误,那么在锁里再尝试获取一次结果
reqID, err = info.Callback.TryGetValue()
if err == nil {
return &Mutex{
svc: svc,
lockReq: req,
lockReqID: reqID,
}, nil
}

lo2.Remove(svc.acquirings, info)
return nil, err
}

func (s *Service) BeginReentrant() *Reentrant {
return &Reentrant{
svc: s,
}
} }


func initProviders() []distlock.PathProvider {
var provs []distlock.PathProvider
func (s *Service) release(reqID types.RequestID, req types.LockRequest) {
s.lock.Lock()
defer s.lock.Unlock()


provs = append(provs, initMetadataLockProviders()...)
s.releaseRequest(reqID, req)
s.tryAcquirings()
}

func (a *Service) tryAcquirings() {
for i := 0; i < len(a.acquirings); i++ {
req := a.acquirings[i]


provs = append(provs, initShardLockProviders()...)
reqID, err := a.tryAcquireOne(req.Request)
if err != nil {
req.LastErr = err
continue
}


provs = append(provs, initStorageLockProviders()...)
req.Callback.SetValue(reqID)
a.acquirings[i] = nil
}


return provs
a.acquirings = lo2.RemoveAllDefault(a.acquirings)
} }


func initMetadataLockProviders() []distlock.PathProvider {
return []distlock.PathProvider{
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Hub"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Storage"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "User"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "UserBucket"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "UserHub"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "UserStorage"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Bucket"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Object"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Package"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "ObjectRep"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "ObjectBlock"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Cache"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Location"),
func (s *Service) tryAcquireOne(req types.LockRequest) (types.RequestID, error) {
err := s.testOneRequest(req)
if err != nil {
return "", err
} }

reqID := types.RequestID(fmt.Sprintf("%d", s.nextReqID))
s.nextReqID++

s.applyRequest(reqID, req)
return reqID, nil
}

func (s *Service) testOneRequest(req types.LockRequest) error {
for _, lock := range req.Locks {
n, ok := s.provdersTrie.WalkEnd(lock.Path)
if !ok || n.Value == nil {
return fmt.Errorf("lock provider not found for path %v", lock.Path)
}

err := n.Value.CanLock(lock)
if err != nil {
return err
}
}

return nil
} }


func initShardLockProviders() []distlock.PathProvider {
return []distlock.PathProvider{
distlock.NewPathProvider(lockprovider.NewShardStoreLock(), lockprovider.ShardStoreLockPathPrefix, trie.WORD_ANY),
func (s *Service) applyRequest(reqID types.RequestID, req types.LockRequest) {
for _, lock := range req.Locks {
p, _ := s.provdersTrie.WalkEnd(lock.Path)
p.Value.Lock(reqID, lock)
} }
} }


func initStorageLockProviders() []distlock.PathProvider {
return []distlock.PathProvider{
distlock.NewPathProvider(lockprovider.NewStorageLock(), lockprovider.StorageLockPathPrefix, trie.WORD_ANY),
func (s *Service) releaseRequest(reqID types.RequestID, req types.LockRequest) {
for _, lock := range req.Locks {
p, _ := s.provdersTrie.WalkEnd(lock.Path)
p.Value.Unlock(reqID, lock)
} }
} }
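The Service above replaces the etcd-backed implementation from cloudream/common with a purely in-process one: providers live in a path trie (currently only the ShardStore prefix is registered, so locks under other paths fail with "lock provider not found"), Acquire blocks up to a configurable timeout (10s by default), and release retries the queued acquisitions. A wiring sketch (doWithShard and spaceID are illustrative names):

package example

import (
	"time"

	clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/reqbuilder"
)

func doWithShard(spaceID clitypes.UserSpaceID) error {
	svc := distlock.NewService() // no config: all state is held in memory

	req := reqbuilder.NewBuilder().Shard().Buzy(spaceID).Build()

	// Waits up to 3s for competing Buzy/GC holders to release, then gives up.
	mutex, err := svc.Acquire(req, distlock.WithTimeout(3*time.Second))
	if err != nil {
		return err
	}
	defer mutex.Unlock()

	// ... work on the space's shard store ...
	return nil
}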

+60 -0 common/pkgs/distlock/types/models.go

@@ -0,0 +1,60 @@
package types

import (
"fmt"

"gitlink.org.cn/cloudream/common/utils/lo2"
)

type RequestID string

type Lock struct {
Path []string // 锁路径,存储的是路径的每一部分
Name string // 锁名
Target LockTarget // 锁对象,由具体的Provider去解析
}

func (b *Lock) Equals(other Lock) bool {
return lo2.ArrayEquals(b.Path, other.Path) && b.Name == other.Name && b.Target.Equals(other.Target)
}

type LockTarget interface {
Equals(other LockTarget) bool
}

type LockRequest struct {
Reason string
Locks []Lock
}

func (b *LockRequest) Add(lock Lock) {
b.Locks = append(b.Locks, lock)
}

type LockProvider interface {
// CanLock 判断这个锁能否锁定成功
CanLock(lock Lock) error

// Lock 锁定。由于同一个锁请求内的锁不检查冲突,因此这个函数必须支持有冲突的锁进行锁定。
Lock(reqID RequestID, lock Lock) error

// 解锁
Unlock(reqID RequestID, lock Lock) error

// Clear 清除内部所有状态
Clear()
}

type LockTargetBusyError struct {
lockName string
}

func (e *LockTargetBusyError) Error() string {
return fmt.Sprintf("the lock object is locked by %s", e.lockName)
}

func NewLockTargetBusyError(lockName string) *LockTargetBusyError {
return &LockTargetBusyError{
lockName: lockName,
}
}
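These model types replace the ones previously imported from cloudream/common. Equality on Lock (path, name and target) is what Reentrant relies on for de-duplication, and providers signal contention with LockTargetBusyError. A quick sketch of both, assuming EmptyTarget's Equals treats any two empty targets as equal:

package main

import (
	"fmt"

	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/lockprovider"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock/types"
)

func main() {
	a := types.Lock{
		Path:   []string{lockprovider.ShardStoreLockPathPrefix, "1"},
		Name:   lockprovider.ShardStoreBuzyLock,
		Target: lockprovider.NewEmptyTarget(),
	}
	b := types.Lock{
		Path:   []string{lockprovider.ShardStoreLockPathPrefix, "1"},
		Name:   lockprovider.ShardStoreBuzyLock,
		Target: lockprovider.NewEmptyTarget(),
	}
	fmt.Println(a.Equals(b)) // true: same path, name and (empty) target

	err := types.NewLockTargetBusyError(lockprovider.ShardStoreGCLock)
	fmt.Println(err.Error()) // "the lock object is locked by <lock name>"
}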

+0 -12 common/pkgs/grpc/config.go

@@ -1,12 +0,0 @@
package grpc

import "fmt"

type Config struct {
IP string `json:"ip"`
Port int `json:"port"`
}

func (c *Config) MakeListenAddress() string {
return fmt.Sprintf("%s:%d", c.IP, c.Port)
}

+0 -206 common/pkgs/grpc/hub/client.go

@@ -1,206 +0,0 @@
package hub

import (
"context"
"fmt"
"io"

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/utils/serder"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)

type Client struct {
con *grpc.ClientConn
cli HubClient
}

func NewClient(addr string) (*Client, error) {
con, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
return nil, err
}

return &Client{
con: con,
cli: NewHubClient(con),
}, nil
}

func (c *Client) ExecuteIOPlan(ctx context.Context, plan exec.Plan) error {
data, err := serder.ObjectToJSONEx(plan)
if err != nil {
return err
}

_, err = c.cli.ExecuteIOPlan(ctx, &ExecuteIOPlanReq{
Plan: string(data),
})
return err
}

type grpcStreamReadCloser struct {
io.ReadCloser
stream Hub_GetStreamClient
cancelFn context.CancelFunc
readingData []byte
recvEOF bool
}

func (s *grpcStreamReadCloser) Read(p []byte) (int, error) {
if len(s.readingData) == 0 && !s.recvEOF {
resp, err := s.stream.Recv()
if err != nil {
return 0, err
}

if resp.Type == StreamDataPacketType_Data {
s.readingData = resp.Data

} else if resp.Type == StreamDataPacketType_EOF {
s.readingData = resp.Data
s.recvEOF = true

} else {
return 0, fmt.Errorf("unsupported packt type: %v", resp.Type)
}
}

cnt := copy(p, s.readingData)
s.readingData = s.readingData[cnt:]

if len(s.readingData) == 0 && s.recvEOF {
return cnt, io.EOF
}

return cnt, nil
}

func (s *grpcStreamReadCloser) Close() error {
s.cancelFn()

return nil
}

func (c *Client) SendStream(ctx context.Context, planID exec.PlanID, varID exec.VarID, str io.Reader) error {
sendCli, err := c.cli.SendStream(ctx)
if err != nil {
return err
}

err = sendCli.Send(&StreamDataPacket{
Type: StreamDataPacketType_SendArgs,
PlanID: string(planID),
VarID: int32(varID),
})
if err != nil {
return fmt.Errorf("sending first stream packet: %w", err)
}

buf := make([]byte, 1024*64)
for {
rd, err := str.Read(buf)
if err == io.EOF {
err := sendCli.Send(&StreamDataPacket{
Type: StreamDataPacketType_EOF,
Data: buf[:rd],
})
if err != nil {
return fmt.Errorf("sending EOF packet: %w", err)
}

_, err = sendCli.CloseAndRecv()
if err != nil {
return fmt.Errorf("receiving response: %w", err)
}

return nil
}

if err != nil {
return fmt.Errorf("reading stream data: %w", err)
}

err = sendCli.Send(&StreamDataPacket{
Type: StreamDataPacketType_Data,
Data: buf[:rd],
})
if err != nil {
return fmt.Errorf("sending data packet: %w", err)
}
}
}

func (c *Client) GetStream(ctx context.Context, planID exec.PlanID, varID exec.VarID, signalID exec.VarID, signal exec.VarValue) (io.ReadCloser, error) {
ctx, cancel := context.WithCancel(ctx)

sdata, err := serder.ObjectToJSONEx(signal)
if err != nil {
cancel()
return nil, err
}

stream, err := c.cli.GetStream(ctx, &GetStreamReq{
PlanID: string(planID),
VarID: int32(varID),
SignalID: int32(signalID),
Signal: string(sdata),
})
if err != nil {
cancel()
return nil, fmt.Errorf("request grpc failed, err: %w", err)
}

return &grpcStreamReadCloser{
stream: stream,
cancelFn: cancel,
}, nil
}

func (c *Client) SendVar(ctx context.Context, planID exec.PlanID, id exec.VarID, value exec.VarValue) error {
data, err := serder.ObjectToJSONEx(value)
if err != nil {
return err
}

_, err = c.cli.SendVar(ctx, &SendVarReq{
PlanID: string(planID),
VarID: int32(id),
VarValue: string(data),
})
return err
}

func (c *Client) GetVar(ctx context.Context, planID exec.PlanID, varID exec.VarID, signalID exec.VarID, signal exec.VarValue) (exec.VarValue, error) {
sdata, err := serder.ObjectToJSONEx(signal)
if err != nil {
return nil, err
}

resp, err := c.cli.GetVar(ctx, &GetVarReq{
PlanID: string(planID),
VarID: int32(varID),
SignalID: int32(signalID),
Signal: string(sdata),
})
if err != nil {
return nil, err
}

getVar, err := serder.JSONToObjectEx[exec.VarValue]([]byte(resp.Var))
if err != nil {
return nil, err
}

return getVar, nil
}

func (c *Client) Ping() error {
_, err := c.cli.Ping(context.Background(), &PingReq{})
return err
}

func (c *Client) Close() {
c.con.Close()
}

+0 -983 common/pkgs/grpc/hub/hub.pb.go

@@ -1,983 +0,0 @@
// 使用的语法版本

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.2
// protoc v4.22.3
// source: pkgs/grpc/hub/hub.proto

package hub

import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)

const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type StreamDataPacketType int32

const (
StreamDataPacketType_EOF StreamDataPacketType = 0
StreamDataPacketType_Data StreamDataPacketType = 1
StreamDataPacketType_SendArgs StreamDataPacketType = 2
)

// Enum value maps for StreamDataPacketType.
var (
StreamDataPacketType_name = map[int32]string{
0: "EOF",
1: "Data",
2: "SendArgs",
}
StreamDataPacketType_value = map[string]int32{
"EOF": 0,
"Data": 1,
"SendArgs": 2,
}
)

func (x StreamDataPacketType) Enum() *StreamDataPacketType {
p := new(StreamDataPacketType)
*p = x
return p
}

func (x StreamDataPacketType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (StreamDataPacketType) Descriptor() protoreflect.EnumDescriptor {
return file_pkgs_grpc_hub_hub_proto_enumTypes[0].Descriptor()
}

func (StreamDataPacketType) Type() protoreflect.EnumType {
return &file_pkgs_grpc_hub_hub_proto_enumTypes[0]
}

func (x StreamDataPacketType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}

// Deprecated: Use StreamDataPacketType.Descriptor instead.
func (StreamDataPacketType) EnumDescriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{0}
}

type ExecuteIOPlanReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Plan string `protobuf:"bytes,1,opt,name=Plan,proto3" json:"Plan,omitempty"`
}

func (x *ExecuteIOPlanReq) Reset() {
*x = ExecuteIOPlanReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *ExecuteIOPlanReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*ExecuteIOPlanReq) ProtoMessage() {}

func (x *ExecuteIOPlanReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use ExecuteIOPlanReq.ProtoReflect.Descriptor instead.
func (*ExecuteIOPlanReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{0}
}

func (x *ExecuteIOPlanReq) GetPlan() string {
if x != nil {
return x.Plan
}
return ""
}

type ExecuteIOPlanResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *ExecuteIOPlanResp) Reset() {
*x = ExecuteIOPlanResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *ExecuteIOPlanResp) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*ExecuteIOPlanResp) ProtoMessage() {}

func (x *ExecuteIOPlanResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use ExecuteIOPlanResp.ProtoReflect.Descriptor instead.
func (*ExecuteIOPlanResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{1}
}

// 文件数据。注意:只在Type为Data或EOF的时候,Data字段才能有数据
type FileDataPacket struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Type StreamDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=StreamDataPacketType" json:"Type,omitempty"`
Data []byte `protobuf:"bytes,2,opt,name=Data,proto3" json:"Data,omitempty"`
}

func (x *FileDataPacket) Reset() {
*x = FileDataPacket{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *FileDataPacket) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*FileDataPacket) ProtoMessage() {}

func (x *FileDataPacket) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use FileDataPacket.ProtoReflect.Descriptor instead.
func (*FileDataPacket) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{2}
}

func (x *FileDataPacket) GetType() StreamDataPacketType {
if x != nil {
return x.Type
}
return StreamDataPacketType_EOF
}

func (x *FileDataPacket) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}

// 注:EOF时data也可能有数据
type StreamDataPacket struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Type StreamDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=StreamDataPacketType" json:"Type,omitempty"`
PlanID string `protobuf:"bytes,2,opt,name=PlanID,proto3" json:"PlanID,omitempty"`
VarID int32 `protobuf:"varint,3,opt,name=VarID,proto3" json:"VarID,omitempty"`
Data []byte `protobuf:"bytes,4,opt,name=Data,proto3" json:"Data,omitempty"`
}

func (x *StreamDataPacket) Reset() {
*x = StreamDataPacket{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *StreamDataPacket) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*StreamDataPacket) ProtoMessage() {}

func (x *StreamDataPacket) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use StreamDataPacket.ProtoReflect.Descriptor instead.
func (*StreamDataPacket) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{3}
}

func (x *StreamDataPacket) GetType() StreamDataPacketType {
if x != nil {
return x.Type
}
return StreamDataPacketType_EOF
}

func (x *StreamDataPacket) GetPlanID() string {
if x != nil {
return x.PlanID
}
return ""
}

func (x *StreamDataPacket) GetVarID() int32 {
if x != nil {
return x.VarID
}
return 0
}

func (x *StreamDataPacket) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}

type SendStreamResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *SendStreamResp) Reset() {
*x = SendStreamResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *SendStreamResp) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*SendStreamResp) ProtoMessage() {}

func (x *SendStreamResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use SendStreamResp.ProtoReflect.Descriptor instead.
func (*SendStreamResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{4}
}

type GetStreamReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"`
VarID int32 `protobuf:"varint,2,opt,name=VarID,proto3" json:"VarID,omitempty"`
SignalID int32 `protobuf:"varint,3,opt,name=SignalID,proto3" json:"SignalID,omitempty"`
Signal string `protobuf:"bytes,4,opt,name=Signal,proto3" json:"Signal,omitempty"`
}

func (x *GetStreamReq) Reset() {
*x = GetStreamReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *GetStreamReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*GetStreamReq) ProtoMessage() {}

func (x *GetStreamReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use GetStreamReq.ProtoReflect.Descriptor instead.
func (*GetStreamReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{5}
}

func (x *GetStreamReq) GetPlanID() string {
if x != nil {
return x.PlanID
}
return ""
}

func (x *GetStreamReq) GetVarID() int32 {
if x != nil {
return x.VarID
}
return 0
}

func (x *GetStreamReq) GetSignalID() int32 {
if x != nil {
return x.SignalID
}
return 0
}

func (x *GetStreamReq) GetSignal() string {
if x != nil {
return x.Signal
}
return ""
}

type SendVarReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"`
VarID int32 `protobuf:"varint,2,opt,name=VarID,proto3" json:"VarID,omitempty"`
VarValue string `protobuf:"bytes,3,opt,name=VarValue,proto3" json:"VarValue,omitempty"`
}

func (x *SendVarReq) Reset() {
*x = SendVarReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *SendVarReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*SendVarReq) ProtoMessage() {}

func (x *SendVarReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use SendVarReq.ProtoReflect.Descriptor instead.
func (*SendVarReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{6}
}

func (x *SendVarReq) GetPlanID() string {
if x != nil {
return x.PlanID
}
return ""
}

func (x *SendVarReq) GetVarID() int32 {
if x != nil {
return x.VarID
}
return 0
}

func (x *SendVarReq) GetVarValue() string {
if x != nil {
return x.VarValue
}
return ""
}

type SendVarResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *SendVarResp) Reset() {
*x = SendVarResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *SendVarResp) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*SendVarResp) ProtoMessage() {}

func (x *SendVarResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use SendVarResp.ProtoReflect.Descriptor instead.
func (*SendVarResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{7}
}

type GetVarReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"`
VarID int32 `protobuf:"varint,2,opt,name=VarID,proto3" json:"VarID,omitempty"`
SignalID int32 `protobuf:"varint,3,opt,name=SignalID,proto3" json:"SignalID,omitempty"`
Signal string `protobuf:"bytes,4,opt,name=Signal,proto3" json:"Signal,omitempty"`
}

func (x *GetVarReq) Reset() {
*x = GetVarReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *GetVarReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*GetVarReq) ProtoMessage() {}

func (x *GetVarReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use GetVarReq.ProtoReflect.Descriptor instead.
func (*GetVarReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{8}
}

func (x *GetVarReq) GetPlanID() string {
if x != nil {
return x.PlanID
}
return ""
}

func (x *GetVarReq) GetVarID() int32 {
if x != nil {
return x.VarID
}
return 0
}

func (x *GetVarReq) GetSignalID() int32 {
if x != nil {
return x.SignalID
}
return 0
}

func (x *GetVarReq) GetSignal() string {
if x != nil {
return x.Signal
}
return ""
}

type GetVarResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Var string `protobuf:"bytes,1,opt,name=Var,proto3" json:"Var,omitempty"`
}

func (x *GetVarResp) Reset() {
*x = GetVarResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *GetVarResp) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*GetVarResp) ProtoMessage() {}

func (x *GetVarResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use GetVarResp.ProtoReflect.Descriptor instead.
func (*GetVarResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{9}
}

func (x *GetVarResp) GetVar() string {
if x != nil {
return x.Var
}
return ""
}

type PingReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *PingReq) Reset() {
*x = PingReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *PingReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*PingReq) ProtoMessage() {}

func (x *PingReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use PingReq.ProtoReflect.Descriptor instead.
func (*PingReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{10}
}

type PingResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *PingResp) Reset() {
*x = PingResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *PingResp) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*PingResp) ProtoMessage() {}

func (x *PingResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_hub_hub_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use PingResp.ProtoReflect.Descriptor instead.
func (*PingResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_hub_hub_proto_rawDescGZIP(), []int{11}
}

var File_pkgs_grpc_hub_hub_proto protoreflect.FileDescriptor

var file_pkgs_grpc_hub_hub_proto_rawDesc = []byte{
0x0a, 0x17, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x75, 0x62, 0x2f,
0x68, 0x75, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x26, 0x0a, 0x10, 0x45, 0x78, 0x65,
0x63, 0x75, 0x74, 0x65, 0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x12, 0x0a,
0x04, 0x50, 0x6c, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x6c, 0x61,
0x6e, 0x22, 0x13, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x49, 0x4f, 0x50, 0x6c,
0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x4f, 0x0a, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61,
0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44,
0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54,
0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x7f, 0x0a, 0x10, 0x53, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x54,
0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65,
0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x14,
0x0a, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x56,
0x61, 0x72, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x65, 0x6e, 0x64,
0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x22, 0x70, 0x0a, 0x0c, 0x47, 0x65,
0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c,
0x61, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e,
0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28,
0x05, 0x52, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x53, 0x69, 0x67, 0x6e,
0x61, 0x6c, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x53, 0x69, 0x67, 0x6e,
0x61, 0x6c, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x04,
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x22, 0x56, 0x0a, 0x0a,
0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52, 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c,
0x61, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e,
0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28,
0x05, 0x52, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x56, 0x61, 0x72, 0x56,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x56, 0x61, 0x72, 0x56,
0x61, 0x6c, 0x75, 0x65, 0x22, 0x0d, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52,
0x65, 0x73, 0x70, 0x22, 0x6d, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x56, 0x61, 0x72, 0x52, 0x65, 0x71,
0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x72, 0x49,
0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x12, 0x1a,
0x0a, 0x08, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
0x52, 0x08, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x69,
0x67, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x69, 0x67, 0x6e,
0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x61, 0x72, 0x52, 0x65, 0x73, 0x70,
0x12, 0x10, 0x0a, 0x03, 0x56, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x56,
0x61, 0x72, 0x22, 0x09, 0x0a, 0x07, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x22, 0x0a, 0x0a,
0x08, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x2a, 0x37, 0x0a, 0x14, 0x53, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70,
0x65, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x4f, 0x46, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x61,
0x74, 0x61, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x72, 0x67, 0x73,
0x10, 0x02, 0x32, 0x94, 0x02, 0x0a, 0x03, 0x48, 0x75, 0x62, 0x12, 0x38, 0x0a, 0x0d, 0x45, 0x78,
0x65, 0x63, 0x75, 0x74, 0x65, 0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x11, 0x2e, 0x45, 0x78,
0x65, 0x63, 0x75, 0x74, 0x65, 0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x12,
0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65,
0x73, 0x70, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x0a, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x12, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50,
0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x0f, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x12, 0x31, 0x0a, 0x09, 0x47, 0x65,
0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x0d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x1a, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44,
0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, 0x26, 0x0a,
0x07, 0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x12, 0x0b, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x56,
0x61, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x0c, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52,
0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x23, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x56, 0x61, 0x72, 0x12,
0x0a, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x47, 0x65,
0x74, 0x56, 0x61, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x1d, 0x0a, 0x04, 0x50, 0x69,
0x6e, 0x67, 0x12, 0x08, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x1a, 0x09, 0x2e, 0x50,
0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x42, 0x07, 0x5a, 0x05, 0x2e, 0x3b, 0x68,
0x75, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
file_pkgs_grpc_hub_hub_proto_rawDescOnce sync.Once
file_pkgs_grpc_hub_hub_proto_rawDescData = file_pkgs_grpc_hub_hub_proto_rawDesc
)

func file_pkgs_grpc_hub_hub_proto_rawDescGZIP() []byte {
file_pkgs_grpc_hub_hub_proto_rawDescOnce.Do(func() {
file_pkgs_grpc_hub_hub_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkgs_grpc_hub_hub_proto_rawDescData)
})
return file_pkgs_grpc_hub_hub_proto_rawDescData
}

var file_pkgs_grpc_hub_hub_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_pkgs_grpc_hub_hub_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
var file_pkgs_grpc_hub_hub_proto_goTypes = []any{
(StreamDataPacketType)(0), // 0: StreamDataPacketType
(*ExecuteIOPlanReq)(nil), // 1: ExecuteIOPlanReq
(*ExecuteIOPlanResp)(nil), // 2: ExecuteIOPlanResp
(*FileDataPacket)(nil), // 3: FileDataPacket
(*StreamDataPacket)(nil), // 4: StreamDataPacket
(*SendStreamResp)(nil), // 5: SendStreamResp
(*GetStreamReq)(nil), // 6: GetStreamReq
(*SendVarReq)(nil), // 7: SendVarReq
(*SendVarResp)(nil), // 8: SendVarResp
(*GetVarReq)(nil), // 9: GetVarReq
(*GetVarResp)(nil), // 10: GetVarResp
(*PingReq)(nil), // 11: PingReq
(*PingResp)(nil), // 12: PingResp
}
var file_pkgs_grpc_hub_hub_proto_depIdxs = []int32{
0, // 0: FileDataPacket.Type:type_name -> StreamDataPacketType
0, // 1: StreamDataPacket.Type:type_name -> StreamDataPacketType
1, // 2: Hub.ExecuteIOPlan:input_type -> ExecuteIOPlanReq
4, // 3: Hub.SendStream:input_type -> StreamDataPacket
6, // 4: Hub.GetStream:input_type -> GetStreamReq
7, // 5: Hub.SendVar:input_type -> SendVarReq
9, // 6: Hub.GetVar:input_type -> GetVarReq
11, // 7: Hub.Ping:input_type -> PingReq
2, // 8: Hub.ExecuteIOPlan:output_type -> ExecuteIOPlanResp
5, // 9: Hub.SendStream:output_type -> SendStreamResp
4, // 10: Hub.GetStream:output_type -> StreamDataPacket
8, // 11: Hub.SendVar:output_type -> SendVarResp
10, // 12: Hub.GetVar:output_type -> GetVarResp
12, // 13: Hub.Ping:output_type -> PingResp
8, // [8:14] is the sub-list for method output_type
2, // [2:8] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}

func init() { file_pkgs_grpc_hub_hub_proto_init() }
func file_pkgs_grpc_hub_hub_proto_init() {
if File_pkgs_grpc_hub_hub_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_pkgs_grpc_hub_hub_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*ExecuteIOPlanReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ExecuteIOPlanResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*FileDataPacket); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*StreamDataPacket); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*SendStreamResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*GetStreamReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*SendVarReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*SendVarResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*GetVarReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*GetVarResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*PingReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_hub_hub_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*PingResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pkgs_grpc_hub_hub_proto_rawDesc,
NumEnums: 1,
NumMessages: 12,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_pkgs_grpc_hub_hub_proto_goTypes,
DependencyIndexes: file_pkgs_grpc_hub_hub_proto_depIdxs,
EnumInfos: file_pkgs_grpc_hub_hub_proto_enumTypes,
MessageInfos: file_pkgs_grpc_hub_hub_proto_msgTypes,
}.Build()
File_pkgs_grpc_hub_hub_proto = out.File
file_pkgs_grpc_hub_hub_proto_rawDesc = nil
file_pkgs_grpc_hub_hub_proto_goTypes = nil
file_pkgs_grpc_hub_hub_proto_depIdxs = nil
}

+0 -75 common/pkgs/grpc/hub/hub.proto

@@ -1,75 +0,0 @@
// 使用的语法版本
syntax = "proto3";

// 生成的go文件包
option go_package = ".;hub";//grpc这里生效了



message ExecuteIOPlanReq {
string Plan = 1;
}

message ExecuteIOPlanResp {
}

enum StreamDataPacketType {
EOF = 0;
Data = 1;
SendArgs = 2;
}
// 文件数据。注意:只在Type为Data或EOF的时候,Data字段才能有数据
message FileDataPacket {
StreamDataPacketType Type = 1;
bytes Data = 2;
}

// 注:EOF时data也可能有数据
message StreamDataPacket {
StreamDataPacketType Type = 1;
string PlanID = 2;
int32 VarID = 3;
bytes Data = 4;
}

message SendStreamResp {}

message GetStreamReq {
string PlanID = 1;
int32 VarID = 2;
int32 SignalID = 3;
string Signal = 4;
}

message SendVarReq {
string PlanID = 1;
int32 VarID = 2;
string VarValue = 3;
}
message SendVarResp {}

message GetVarReq {
string PlanID = 1;
int32 VarID = 2;
int32 SignalID = 3;
string Signal = 4;
}
message GetVarResp {
string Var = 1;
}

message PingReq {}
message PingResp {}

service Hub {
rpc ExecuteIOPlan(ExecuteIOPlanReq) returns(ExecuteIOPlanResp){}

rpc SendStream(stream StreamDataPacket)returns(SendStreamResp){}
rpc GetStream(GetStreamReq)returns(stream StreamDataPacket){}

rpc SendVar(SendVarReq)returns(SendVarResp){}
rpc GetVar(GetVarReq)returns(GetVarResp){}

rpc Ping(PingReq) returns(PingResp){}
}


+0 -358 common/pkgs/grpc/hub/hub_grpc.pb.go

@@ -1,358 +0,0 @@
// 使用的语法版本

// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.22.3
// source: pkgs/grpc/hub/hub.proto

package hub

import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

const (
Hub_ExecuteIOPlan_FullMethodName = "/Hub/ExecuteIOPlan"
Hub_SendStream_FullMethodName = "/Hub/SendStream"
Hub_GetStream_FullMethodName = "/Hub/GetStream"
Hub_SendVar_FullMethodName = "/Hub/SendVar"
Hub_GetVar_FullMethodName = "/Hub/GetVar"
Hub_Ping_FullMethodName = "/Hub/Ping"
)

// HubClient is the client API for Hub service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type HubClient interface {
ExecuteIOPlan(ctx context.Context, in *ExecuteIOPlanReq, opts ...grpc.CallOption) (*ExecuteIOPlanResp, error)
SendStream(ctx context.Context, opts ...grpc.CallOption) (Hub_SendStreamClient, error)
GetStream(ctx context.Context, in *GetStreamReq, opts ...grpc.CallOption) (Hub_GetStreamClient, error)
SendVar(ctx context.Context, in *SendVarReq, opts ...grpc.CallOption) (*SendVarResp, error)
GetVar(ctx context.Context, in *GetVarReq, opts ...grpc.CallOption) (*GetVarResp, error)
Ping(ctx context.Context, in *PingReq, opts ...grpc.CallOption) (*PingResp, error)
}

type hubClient struct {
cc grpc.ClientConnInterface
}

func NewHubClient(cc grpc.ClientConnInterface) HubClient {
return &hubClient{cc}
}

func (c *hubClient) ExecuteIOPlan(ctx context.Context, in *ExecuteIOPlanReq, opts ...grpc.CallOption) (*ExecuteIOPlanResp, error) {
out := new(ExecuteIOPlanResp)
err := c.cc.Invoke(ctx, Hub_ExecuteIOPlan_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *hubClient) SendStream(ctx context.Context, opts ...grpc.CallOption) (Hub_SendStreamClient, error) {
stream, err := c.cc.NewStream(ctx, &Hub_ServiceDesc.Streams[0], Hub_SendStream_FullMethodName, opts...)
if err != nil {
return nil, err
}
x := &hubSendStreamClient{stream}
return x, nil
}

type Hub_SendStreamClient interface {
Send(*StreamDataPacket) error
CloseAndRecv() (*SendStreamResp, error)
grpc.ClientStream
}

type hubSendStreamClient struct {
grpc.ClientStream
}

func (x *hubSendStreamClient) Send(m *StreamDataPacket) error {
return x.ClientStream.SendMsg(m)
}

func (x *hubSendStreamClient) CloseAndRecv() (*SendStreamResp, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(SendStreamResp)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

func (c *hubClient) GetStream(ctx context.Context, in *GetStreamReq, opts ...grpc.CallOption) (Hub_GetStreamClient, error) {
stream, err := c.cc.NewStream(ctx, &Hub_ServiceDesc.Streams[1], Hub_GetStream_FullMethodName, opts...)
if err != nil {
return nil, err
}
x := &hubGetStreamClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}

type Hub_GetStreamClient interface {
Recv() (*StreamDataPacket, error)
grpc.ClientStream
}

type hubGetStreamClient struct {
grpc.ClientStream
}

func (x *hubGetStreamClient) Recv() (*StreamDataPacket, error) {
m := new(StreamDataPacket)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

func (c *hubClient) SendVar(ctx context.Context, in *SendVarReq, opts ...grpc.CallOption) (*SendVarResp, error) {
out := new(SendVarResp)
err := c.cc.Invoke(ctx, Hub_SendVar_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *hubClient) GetVar(ctx context.Context, in *GetVarReq, opts ...grpc.CallOption) (*GetVarResp, error) {
out := new(GetVarResp)
err := c.cc.Invoke(ctx, Hub_GetVar_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *hubClient) Ping(ctx context.Context, in *PingReq, opts ...grpc.CallOption) (*PingResp, error) {
out := new(PingResp)
err := c.cc.Invoke(ctx, Hub_Ping_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

// HubServer is the server API for Hub service.
// All implementations must embed UnimplementedHubServer
// for forward compatibility
type HubServer interface {
ExecuteIOPlan(context.Context, *ExecuteIOPlanReq) (*ExecuteIOPlanResp, error)
SendStream(Hub_SendStreamServer) error
GetStream(*GetStreamReq, Hub_GetStreamServer) error
SendVar(context.Context, *SendVarReq) (*SendVarResp, error)
GetVar(context.Context, *GetVarReq) (*GetVarResp, error)
Ping(context.Context, *PingReq) (*PingResp, error)
mustEmbedUnimplementedHubServer()
}

// UnimplementedHubServer must be embedded to have forward compatible implementations.
type UnimplementedHubServer struct {
}

func (UnimplementedHubServer) ExecuteIOPlan(context.Context, *ExecuteIOPlanReq) (*ExecuteIOPlanResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method ExecuteIOPlan not implemented")
}
func (UnimplementedHubServer) SendStream(Hub_SendStreamServer) error {
return status.Errorf(codes.Unimplemented, "method SendStream not implemented")
}
func (UnimplementedHubServer) GetStream(*GetStreamReq, Hub_GetStreamServer) error {
return status.Errorf(codes.Unimplemented, "method GetStream not implemented")
}
func (UnimplementedHubServer) SendVar(context.Context, *SendVarReq) (*SendVarResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method SendVar not implemented")
}
func (UnimplementedHubServer) GetVar(context.Context, *GetVarReq) (*GetVarResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetVar not implemented")
}
func (UnimplementedHubServer) Ping(context.Context, *PingReq) (*PingResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented")
}
func (UnimplementedHubServer) mustEmbedUnimplementedHubServer() {}

// UnsafeHubServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HubServer will
// result in compilation errors.
type UnsafeHubServer interface {
mustEmbedUnimplementedHubServer()
}

func RegisterHubServer(s grpc.ServiceRegistrar, srv HubServer) {
s.RegisterService(&Hub_ServiceDesc, srv)
}

func _Hub_ExecuteIOPlan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExecuteIOPlanReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HubServer).ExecuteIOPlan(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Hub_ExecuteIOPlan_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HubServer).ExecuteIOPlan(ctx, req.(*ExecuteIOPlanReq))
}
return interceptor(ctx, in, info, handler)
}

func _Hub_SendStream_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(HubServer).SendStream(&hubSendStreamServer{stream})
}

type Hub_SendStreamServer interface {
SendAndClose(*SendStreamResp) error
Recv() (*StreamDataPacket, error)
grpc.ServerStream
}

type hubSendStreamServer struct {
grpc.ServerStream
}

func (x *hubSendStreamServer) SendAndClose(m *SendStreamResp) error {
return x.ServerStream.SendMsg(m)
}

func (x *hubSendStreamServer) Recv() (*StreamDataPacket, error) {
m := new(StreamDataPacket)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

func _Hub_GetStream_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(GetStreamReq)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(HubServer).GetStream(m, &hubGetStreamServer{stream})
}

type Hub_GetStreamServer interface {
Send(*StreamDataPacket) error
grpc.ServerStream
}

type hubGetStreamServer struct {
grpc.ServerStream
}

func (x *hubGetStreamServer) Send(m *StreamDataPacket) error {
return x.ServerStream.SendMsg(m)
}

func _Hub_SendVar_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SendVarReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HubServer).SendVar(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Hub_SendVar_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HubServer).SendVar(ctx, req.(*SendVarReq))
}
return interceptor(ctx, in, info, handler)
}

func _Hub_GetVar_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetVarReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HubServer).GetVar(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Hub_GetVar_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HubServer).GetVar(ctx, req.(*GetVarReq))
}
return interceptor(ctx, in, info, handler)
}

func _Hub_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PingReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HubServer).Ping(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Hub_Ping_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HubServer).Ping(ctx, req.(*PingReq))
}
return interceptor(ctx, in, info, handler)
}

// Hub_ServiceDesc is the grpc.ServiceDesc for Hub service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Hub_ServiceDesc = grpc.ServiceDesc{
ServiceName: "Hub",
HandlerType: (*HubServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ExecuteIOPlan",
Handler: _Hub_ExecuteIOPlan_Handler,
},
{
MethodName: "SendVar",
Handler: _Hub_SendVar_Handler,
},
{
MethodName: "GetVar",
Handler: _Hub_GetVar_Handler,
},
{
MethodName: "Ping",
Handler: _Hub_Ping_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "SendStream",
Handler: _Hub_SendStream_Handler,
ClientStreams: true,
},
{
StreamName: "GetStream",
Handler: _Hub_GetStream_Handler,
ServerStreams: true,
},
},
Metadata: "pkgs/grpc/hub/hub.proto",
}
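
On the server side, an implementation embedded UnimplementedHubServer (required for forward compatibility) and was registered through RegisterHubServer. A minimal sketch with a placeholder listen address, overriding only Ping:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	hub "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/grpc/hub"
)

type hubSvc struct {
	hub.UnimplementedHubServer // other methods fall back to "not implemented"
}

func (s *hubSvc) Ping(ctx context.Context, req *hub.PingReq) (*hub.PingResp, error) {
	return &hub.PingResp{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":5010") // port is a placeholder
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	hub.RegisterHubServer(srv, &hubSvc{})
	log.Fatal(srv.Serve(lis))
}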

+ 0
- 60
common/pkgs/grpc/hub/pool.go

@@ -1,60 +0,0 @@
package hub

import (
"fmt"
sync "sync"
)

type PoolConfig struct {
}

type PoolClient struct {
*Client
owner *Pool
}

func (c *PoolClient) Close() {
c.owner.Release(c)
}

type Pool struct {
grpcCfg *PoolConfig
shareds map[string]*PoolClient
lock sync.Mutex
}

func NewPool(grpcCfg *PoolConfig) *Pool {
return &Pool{
grpcCfg: grpcCfg,
shareds: make(map[string]*PoolClient),
}
}

// Acquire a gRPC client. Since the gRPC configuration of every hub cannot be known in advance, the caller has to pass in the configuration needed to establish the connection,
// and the Pool decides whether a new client needs to be created.
func (p *Pool) Acquire(ip string, port int) (*PoolClient, error) {
addr := fmt.Sprintf("%s:%d", ip, port)

p.lock.Lock()
defer p.lock.Unlock()

cli, ok := p.shareds[addr]
if !ok {
c, err := NewClient(addr)
if err != nil {
return nil, err
}
cli = &PoolClient{
Client: c,
owner: p,
}
p.shareds[addr] = cli
}

return cli, nil

}

func (p *Pool) Release(cli *PoolClient) {
// TODO Release clients that have not been used for a long time
}
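
This pool is removed together with the generated gRPC client; the hub_worker.go diffs further down acquire clients from the new stgglb.HubRPCPool instead. A sketch of the replacement acquire/use/release pattern, inferred from those call sites (the new common/pkgs/rpc/hub API is not part of this diff, so anything beyond those calls is an assumption):

package example

import (
	"context"

	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
	stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
	hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
	cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
)

// executeOnHub shows the new pattern: Get no longer returns an error, and the
// client is released on the client itself instead of through the pool.
func executeOnHub(ctx context.Context, hub cortypes.Hub, addr cortypes.GRPCAddressInfo, plan exec.Plan) error {
	cli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(&hub, &addr))
	defer cli.Release() // replaces stgglb.HubRPCPool.Release(cli)

	_, err := cli.ExecuteIOPlan(ctx, &hubrpc.ExecuteIOPlan{Plan: plan})
	return err.ToError()
}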

+ 36
- 16
common/pkgs/ioswitch2/fromto.go

@@ -3,7 +3,7 @@ package ioswitch2
import ( import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/utils/math2" "gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/jcs-pub/client/types"
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
) )


@@ -69,9 +69,9 @@ type FromTos []FromTo


type FromTo struct { type FromTo struct {
// If the input or output uses EC-encoded streams, the EC parameters must be provided. // If the input or output uses EC-encoded streams, the EC parameters must be provided.
ECParam *types.ECRedundancy
ECParam *clitypes.ECRedundancy
// Same as above // Same as above
SegmentParam *types.SegmentRedundancy
SegmentParam *clitypes.SegmentRedundancy
Froms []From Froms []From
Toes []To Toes []To
} }
@@ -110,17 +110,17 @@ func (f *FromDriver) GetStreamIndex() StreamIndex {
} }


type FromShardstore struct { type FromShardstore struct {
FileHash types.FileHash
FileHash clitypes.FileHash
Hub cortypes.Hub Hub cortypes.Hub
Space types.UserSpaceDetail
UserSpace clitypes.UserSpaceDetail
StreamIndex StreamIndex StreamIndex StreamIndex
} }


func NewFromShardstore(fileHash types.FileHash, hub cortypes.Hub, space types.UserSpaceDetail, strIdx StreamIndex) *FromShardstore {
func NewFromShardstore(fileHash clitypes.FileHash, hub cortypes.Hub, space clitypes.UserSpaceDetail, strIdx StreamIndex) *FromShardstore {
return &FromShardstore{ return &FromShardstore{
FileHash: fileHash, FileHash: fileHash,
Hub: hub, Hub: hub,
Space: space,
UserSpace: space,
StreamIndex: strIdx, StreamIndex: strIdx,
} }
} }
@@ -129,6 +129,26 @@ func (f *FromShardstore) GetStreamIndex() StreamIndex {
return f.StreamIndex return f.StreamIndex
} }


type FromPublicStore struct {
Hub cortypes.Hub
UserSpace clitypes.UserSpaceDetail
Path string
}

func NewFromPublicStore(hub cortypes.Hub, space clitypes.UserSpaceDetail, path string) *FromPublicStore {
return &FromPublicStore{
Hub: hub,
UserSpace: space,
Path: path,
}
}

func (f *FromPublicStore) GetStreamIndex() StreamIndex {
return StreamIndex{
Type: StreamIndexRaw,
}
}

type ToDriver struct { type ToDriver struct {
Handle *exec.DriverReadStream Handle *exec.DriverReadStream
StreamIndex StreamIndex StreamIndex StreamIndex
@@ -162,13 +182,13 @@ func (t *ToDriver) GetRange() math2.Range {


type ToShardStore struct { type ToShardStore struct {
Hub cortypes.Hub Hub cortypes.Hub
Space types.UserSpaceDetail
Space clitypes.UserSpaceDetail
StreamIndex StreamIndex StreamIndex StreamIndex
Range math2.Range Range math2.Range
FileHashStoreKey string FileHashStoreKey string
} }


func NewToShardStore(hub cortypes.Hub, space types.UserSpaceDetail, strIdx StreamIndex, fileHashStoreKey string) *ToShardStore {
func NewToShardStore(hub cortypes.Hub, space clitypes.UserSpaceDetail, strIdx StreamIndex, fileHashStoreKey string) *ToShardStore {
return &ToShardStore{ return &ToShardStore{
Hub: hub, Hub: hub,
Space: space, Space: space,
@@ -177,7 +197,7 @@ func NewToShardStore(hub cortypes.Hub, space types.UserSpaceDetail, strIdx Strea
} }
} }


func NewToShardStoreWithRange(hub cortypes.Hub, space types.UserSpaceDetail, streamIndex StreamIndex, fileHashStoreKey string, rng math2.Range) *ToShardStore {
func NewToShardStoreWithRange(hub cortypes.Hub, space clitypes.UserSpaceDetail, streamIndex StreamIndex, fileHashStoreKey string, rng math2.Range) *ToShardStore {
return &ToShardStore{ return &ToShardStore{
Hub: hub, Hub: hub,
Space: space, Space: space,
@@ -195,26 +215,26 @@ func (t *ToShardStore) GetRange() math2.Range {
return t.Range return t.Range
} }


type LoadToPublic struct {
type ToPublicStore struct {
Hub cortypes.Hub Hub cortypes.Hub
Space types.UserSpaceDetail
Space clitypes.UserSpaceDetail
ObjectPath string ObjectPath string
} }


func NewLoadToPublic(hub cortypes.Hub, space types.UserSpaceDetail, objectPath string) *LoadToPublic {
return &LoadToPublic{
func NewToPublicStore(hub cortypes.Hub, space clitypes.UserSpaceDetail, objectPath string) *ToPublicStore {
return &ToPublicStore{
Hub: hub, Hub: hub,
Space: space, Space: space,
ObjectPath: objectPath, ObjectPath: objectPath,
} }
} }


func (t *LoadToPublic) GetStreamIndex() StreamIndex {
func (t *ToPublicStore) GetStreamIndex() StreamIndex {
return StreamIndex{ return StreamIndex{
Type: StreamIndexRaw, Type: StreamIndexRaw,
} }
} }


func (t *LoadToPublic) GetRange() math2.Range {
func (t *ToPublicStore) GetRange() math2.Range {
return math2.Range{} return math2.Range{}
} }
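
A minimal sketch of the new public-store endpoints added above, building a FromTo that streams a single object between two user spaces (the helper name and object paths are illustrative):

package example

import (
	clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
	cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
)

// copyBetweenPublicStores reads one object from the source public store and writes it
// to the destination public store as a raw stream.
func copyBetweenPublicStores(srcHub, dstHub cortypes.Hub, src, dst clitypes.UserSpaceDetail) ioswitch2.FromTo {
	return ioswitch2.FromTo{
		Froms: []ioswitch2.From{ioswitch2.NewFromPublicStore(srcHub, src, "data/input.bin")},
		Toes:  []ioswitch2.To{ioswitch2.NewToPublicStore(dstHub, dst, "data/output.bin")},
	}
}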

+ 32
- 19
common/pkgs/ioswitch2/hub_worker.go

@@ -9,7 +9,7 @@ import (
"gitlink.org.cn/cloudream/common/utils/io2" "gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/serder" "gitlink.org.cn/cloudream/common/utils/serder"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/grpc/hub"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
) )


@@ -24,11 +24,7 @@ type HubWorker struct {
} }


func (w *HubWorker) NewClient() (exec.WorkerClient, error) { func (w *HubWorker) NewClient() (exec.WorkerClient, error) {
cli, err := stgglb.HubRPCPool.Acquire(stgglb.SelectGRPCAddress(w.Hub, w.Address))
if err != nil {
return nil, err
}

cli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(&w.Hub, &w.Address))
return &HubWorkerClient{hubID: w.Hub.HubID, cli: cli}, nil return &HubWorkerClient{hubID: w.Hub.HubID, cli: cli}, nil
} }


@@ -47,38 +43,55 @@ func (w *HubWorker) Equals(worker exec.WorkerInfo) bool {


type HubWorkerClient struct { type HubWorkerClient struct {
hubID cortypes.HubID hubID cortypes.HubID
cli *hubrpc.PoolClient
cli *hubrpc.Client
} }


func (c *HubWorkerClient) ExecutePlan(ctx context.Context, plan exec.Plan) error { func (c *HubWorkerClient) ExecutePlan(ctx context.Context, plan exec.Plan) error {
return c.cli.ExecuteIOPlan(ctx, plan)
_, err := c.cli.ExecuteIOPlan(ctx, &hubrpc.ExecuteIOPlan{Plan: plan})
return err.ToError()
} }
func (c *HubWorkerClient) SendStream(ctx context.Context, planID exec.PlanID, id exec.VarID, stream io.ReadCloser) error { func (c *HubWorkerClient) SendStream(ctx context.Context, planID exec.PlanID, id exec.VarID, stream io.ReadCloser) error {
return c.cli.SendStream(ctx, planID, id, io2.CounterCloser(stream, func(cnt int64, err error) {
if stgglb.Stats.HubTransfer != nil {
stgglb.Stats.HubTransfer.RecordOutput(c.hubID, cnt, err == nil || err == io.EOF)
}
}))
_, err := c.cli.SendIOStream(ctx, &hubrpc.SendIOStream{
PlanID: planID,
VarID: id,
Stream: io2.CounterCloser(stream, func(cnt int64, err error) {
if stgglb.Stats.HubTransfer != nil {
stgglb.Stats.HubTransfer.RecordOutput(c.hubID, cnt, err == nil || err == io.EOF)
}
}),
})
return err.ToError()
} }
func (c *HubWorkerClient) SendVar(ctx context.Context, planID exec.PlanID, id exec.VarID, value exec.VarValue) error { func (c *HubWorkerClient) SendVar(ctx context.Context, planID exec.PlanID, id exec.VarID, value exec.VarValue) error {
return c.cli.SendVar(ctx, planID, id, value)
_, err := c.cli.SendIOVar(ctx, &hubrpc.SendIOVar{
PlanID: planID, VarID: id, Value: value,
})
return err.ToError()
} }
func (c *HubWorkerClient) GetStream(ctx context.Context, planID exec.PlanID, streamID exec.VarID, signalID exec.VarID, signal exec.VarValue) (io.ReadCloser, error) { func (c *HubWorkerClient) GetStream(ctx context.Context, planID exec.PlanID, streamID exec.VarID, signalID exec.VarID, signal exec.VarValue) (io.ReadCloser, error) {
str, err := c.cli.GetStream(ctx, planID, streamID, signalID, signal)
resp, err := c.cli.GetIOStream(ctx, &hubrpc.GetIOStream{
PlanID: planID, VarID: streamID, SignalID: signalID, Signal: signal,
})
if err != nil { if err != nil {
return nil, err
return nil, err.ToError()
} }


return io2.CounterCloser(str, func(cnt int64, err error) {
return io2.CounterCloser(resp.Stream, func(cnt int64, err error) {
if stgglb.Stats.HubTransfer != nil { if stgglb.Stats.HubTransfer != nil {
stgglb.Stats.HubTransfer.RecordInput(c.hubID, cnt, err == nil || err == io.EOF) stgglb.Stats.HubTransfer.RecordInput(c.hubID, cnt, err == nil || err == io.EOF)
} }
}), nil }), nil
} }
func (c *HubWorkerClient) GetVar(ctx context.Context, planID exec.PlanID, varID exec.VarID, signalID exec.VarID, signal exec.VarValue) (exec.VarValue, error) { func (c *HubWorkerClient) GetVar(ctx context.Context, planID exec.PlanID, varID exec.VarID, signalID exec.VarID, signal exec.VarValue) (exec.VarValue, error) {
return c.cli.GetVar(ctx, planID, varID, signalID, signal)
resp, err := c.cli.GetIOVar(ctx, &hubrpc.GetIOVar{
PlanID: planID, VarID: varID, SignalID: signalID, Signal: signal,
})
if err != nil {
return nil, err.ToError()
}
return resp.Value, nil
} }
func (c *HubWorkerClient) Close() error { func (c *HubWorkerClient) Close() error {
stgglb.HubRPCPool.Release(c.cli)
c.cli.Release()
return nil return nil
} }

+ 167
- 15
common/pkgs/ioswitch2/ops2/bypass.go

@@ -12,23 +12,26 @@ import (


func init() { func init() {
exec.UseOp[*BypassToShardStore]() exec.UseOp[*BypassToShardStore]()
exec.UseVarValue[*BypassUploadedFileValue]()
exec.UseOp[*BypassToPublicStore]()

exec.UseVarValue[*BypassedFileInfoValue]()
exec.UseVarValue[*BypassHandleResultValue]() exec.UseVarValue[*BypassHandleResultValue]()


exec.UseOp[*BypassFromShardStore]() exec.UseOp[*BypassFromShardStore]()
exec.UseOp[*BypassFromPublicStore]()
exec.UseVarValue[*BypassFilePathValue]() exec.UseVarValue[*BypassFilePathValue]()


exec.UseOp[*BypassFromShardStoreHTTP]() exec.UseOp[*BypassFromShardStoreHTTP]()
exec.UseVarValue[*HTTPRequestValue]() exec.UseVarValue[*HTTPRequestValue]()
} }


type BypassUploadedFileValue struct {
types.BypassUploadedFile
type BypassedFileInfoValue struct {
types.BypassedFileInfo
} }


func (v *BypassUploadedFileValue) Clone() exec.VarValue {
return &BypassUploadedFileValue{
BypassUploadedFile: v.BypassUploadedFile,
func (v *BypassedFileInfoValue) Clone() exec.VarValue {
return &BypassedFileInfoValue{
BypassedFileInfo: v.BypassedFileInfo,
} }
} }


@@ -46,7 +49,7 @@ type BypassToShardStore struct {
UserSpace clitypes.UserSpaceDetail UserSpace clitypes.UserSpaceDetail
BypassFileInfo exec.VarID BypassFileInfo exec.VarID
BypassCallback exec.VarID BypassCallback exec.VarID
FileHash exec.VarID
FileInfo exec.VarID
} }


func (o *BypassToShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error { func (o *BypassToShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
@@ -60,23 +63,23 @@ func (o *BypassToShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor) er
return err return err
} }


br, ok := shardStore.(types.BypassWrite)
br, ok := shardStore.(types.BypassShardWrite)
if !ok { if !ok {
return fmt.Errorf("shard store %v not support bypass write", o.UserSpace) return fmt.Errorf("shard store %v not support bypass write", o.UserSpace)
} }


fileInfo, err := exec.BindVar[*BypassUploadedFileValue](e, ctx.Context, o.BypassFileInfo)
fileInfo, err := exec.BindVar[*BypassedFileInfoValue](e, ctx.Context, o.BypassFileInfo)
if err != nil { if err != nil {
return err return err
} }


err = br.BypassUploaded(fileInfo.BypassUploadedFile)
err = br.BypassedShard(fileInfo.BypassedFileInfo)
if err != nil { if err != nil {
return err return err
} }


e.PutVar(o.BypassCallback, &BypassHandleResultValue{Commited: true}) e.PutVar(o.BypassCallback, &BypassHandleResultValue{Commited: true})
e.PutVar(o.FileHash, &ShardInfoValue{Hash: fileInfo.Hash, Size: fileInfo.Size})
e.PutVar(o.FileInfo, &ShardInfoValue{Hash: fileInfo.Hash, Size: fileInfo.Size})
return nil return nil
} }


@@ -84,6 +87,47 @@ func (o *BypassToShardStore) String() string {
return fmt.Sprintf("BypassToShardStore[UserSpace:%v] Info: %v, Callback: %v", o.UserSpace, o.BypassFileInfo, o.BypassCallback) return fmt.Sprintf("BypassToShardStore[UserSpace:%v] Info: %v, Callback: %v", o.UserSpace, o.BypassFileInfo, o.BypassCallback)
} }


type BypassToPublicStore struct {
UserSpace clitypes.UserSpaceDetail
BypassFileInfo exec.VarID
BypassCallback exec.VarID
DestPath string
}

func (o *BypassToPublicStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
stgPool, err := exec.GetValueByType[*pool.Pool](ctx)
if err != nil {
return err
}

store, err := stgPool.GetPublicStore(&o.UserSpace)
if err != nil {
return err
}

br, ok := store.(types.BypassPublicWrite)
if !ok {
return fmt.Errorf("public store %v not support bypass write", o.UserSpace)
}

fileInfo, err := exec.BindVar[*BypassedFileInfoValue](e, ctx.Context, o.BypassFileInfo)
if err != nil {
return err
}

err = br.BypassedPublic(fileInfo.BypassedFileInfo, o.DestPath)
if err != nil {
return err
}

e.PutVar(o.BypassCallback, &BypassHandleResultValue{Commited: true})
return nil
}

func (o *BypassToPublicStore) String() string {
return fmt.Sprintf("BypassToPublicStore[UserSpace:%v] Info: %v, Callback: %v", o.UserSpace, o.BypassFileInfo, o.BypassCallback)
}

type BypassFilePathValue struct { type BypassFilePathValue struct {
types.BypassFilePath types.BypassFilePath
} }
@@ -111,12 +155,12 @@ func (o *BypassFromShardStore) Execute(ctx *exec.ExecContext, e *exec.Executor)
return err return err
} }


br, ok := shardStore.(types.BypassRead)
br, ok := shardStore.(types.BypassShardRead)
if !ok { if !ok {
return fmt.Errorf("shard store %v not support bypass read", o.UserSpace) return fmt.Errorf("shard store %v not support bypass read", o.UserSpace)
} }


path, err := br.BypassRead(o.FileHash)
path, err := br.BypassShardRead(o.FileHash)
if err != nil { if err != nil {
return err return err
} }
@@ -129,6 +173,41 @@ func (o *BypassFromShardStore) String() string {
return fmt.Sprintf("BypassFromShardStore[UserSpace:%v] FileHash: %v, Output: %v", o.UserSpace, o.FileHash, o.Output) return fmt.Sprintf("BypassFromShardStore[UserSpace:%v] FileHash: %v, Output: %v", o.UserSpace, o.FileHash, o.Output)
} }


type BypassFromPublicStore struct {
UserSpace clitypes.UserSpaceDetail
Path string
Output exec.VarID
}

func (o *BypassFromPublicStore) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
stgPool, err := exec.GetValueByType[*pool.Pool](ctx)
if err != nil {
return err
}

store, err := stgPool.GetPublicStore(&o.UserSpace)
if err != nil {
return err
}

br, ok := store.(types.BypassPublicRead)
if !ok {
return fmt.Errorf("public store %v not support bypass read", o.UserSpace)
}

path, err := br.BypassPublicRead(o.Path)
if err != nil {
return err
}

e.PutVar(o.Output, &BypassFilePathValue{BypassFilePath: path})
return nil
}

func (o *BypassFromPublicStore) String() string {
return fmt.Sprintf("BypassFromPublicStore[UserSpace:%v] Path: %v, Output: %v", o.UserSpace, o.Path, o.Output)
}

// Bypass HTTP read // Bypass HTTP read
type BypassFromShardStoreHTTP struct { type BypassFromShardStoreHTTP struct {
UserSpace clitypes.UserSpaceDetail UserSpace clitypes.UserSpaceDetail
@@ -157,7 +236,7 @@ func (o *BypassFromShardStoreHTTP) Execute(ctx *exec.ExecContext, e *exec.Execut
return err return err
} }


br, ok := shardStore.(types.HTTPBypassRead)
br, ok := shardStore.(types.HTTPBypassShardRead)
if !ok { if !ok {
return fmt.Errorf("shard store %v not support bypass read", o.UserSpace) return fmt.Errorf("shard store %v not support bypass read", o.UserSpace)
} }
@@ -220,7 +299,48 @@ func (t *BypassToShardStoreNode) GenerateOp() (exec.Op, error) {
UserSpace: t.UserSpace, UserSpace: t.UserSpace,
BypassFileInfo: t.BypassFileInfoSlot().Var().VarID, BypassFileInfo: t.BypassFileInfoSlot().Var().VarID,
BypassCallback: t.BypassCallbackVar().Var().VarID, BypassCallback: t.BypassCallbackVar().Var().VarID,
FileHash: t.FileHashVar().Var().VarID,
FileInfo: t.FileHashVar().Var().VarID,
}, nil
}

type BypassToPublicStoreNode struct {
dag.NodeBase
UserSpace clitypes.UserSpaceDetail
DestPath string
}

func (b *GraphNodeBuilder) NewBypassToPublicStore(userSpace clitypes.UserSpaceDetail, dstPath string) *BypassToPublicStoreNode {
node := &BypassToPublicStoreNode{
UserSpace: userSpace,
DestPath: dstPath,
}
b.AddNode(node)

node.InputValues().Init(1)
node.OutputValues().Init(node, 1)
return node
}

func (n *BypassToPublicStoreNode) BypassFileInfoSlot() dag.ValueInputSlot {
return dag.ValueInputSlot{
Node: n,
Index: 0,
}
}

func (n *BypassToPublicStoreNode) BypassCallbackVar() dag.ValueOutputSlot {
return dag.ValueOutputSlot{
Node: n,
Index: 0,
}
}

func (t *BypassToPublicStoreNode) GenerateOp() (exec.Op, error) {
return &BypassToPublicStore{
UserSpace: t.UserSpace,
BypassFileInfo: t.BypassFileInfoSlot().Var().VarID,
BypassCallback: t.BypassCallbackVar().Var().VarID,
DestPath: t.DestPath,
}, nil }, nil
} }


@@ -257,6 +377,38 @@ func (n *BypassFromShardStoreNode) GenerateOp() (exec.Op, error) {
}, nil }, nil
} }


type BypassFromPublicStoreNode struct {
dag.NodeBase
UserSpace clitypes.UserSpaceDetail
Path string
}

func (b *GraphNodeBuilder) NewBypassFromPublicStore(userSpace clitypes.UserSpaceDetail, path string) *BypassFromPublicStoreNode {
node := &BypassFromPublicStoreNode{
UserSpace: userSpace,
Path: path,
}
b.AddNode(node)

node.OutputValues().Init(node, 1)
return node
}

func (n *BypassFromPublicStoreNode) FilePathVar() dag.ValueOutputSlot {
return dag.ValueOutputSlot{
Node: n,
Index: 0,
}
}

func (n *BypassFromPublicStoreNode) GenerateOp() (exec.Op, error) {
return &BypassFromPublicStore{
UserSpace: n.UserSpace,
Path: n.Path,
Output: n.FilePathVar().Var().VarID,
}, nil
}

// Bypass HTTP read // Bypass HTTP read
type BypassFromShardStoreHTTPNode struct { type BypassFromShardStoreHTTPNode struct {
dag.NodeBase dag.NodeBase
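
A small sketch of wiring the new BypassToPublicStoreNode behind a node that produces a BypassedFileInfoValue; an S2STransferNode (see ops2/s2s.go below) is used as the producer here, and the helper name and destination path are illustrative:

package example

import (
	clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
)

// wireBypassToPublic commits a bypass-uploaded file into a public store and feeds the
// commit result back to the producing node.
func wireBypassToPublic(b *ops2.GraphNodeBuilder, s2s *ops2.S2STransferNode, dst clitypes.UserSpaceDetail) {
	bw := b.NewBypassToPublicStore(dst, "objects/final.bin")
	s2s.BypassFileInfoVar().ToSlot(bw.BypassFileInfoSlot()) // transferred file info -> commit node
	bw.BypassCallbackVar().ToSlot(s2s.BypassCallbackSlot()) // commit result -> transfer node
}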


+ 3
- 3
common/pkgs/ioswitch2/ops2/ec.go

@@ -193,10 +193,10 @@ func (o *CallECMultiplier) Execute(ctx *exec.ExecContext, e *exec.Executor) erro
} }
defer ecMul.Abort() defer ecMul.Abort()


outputVals := make([]*BypassUploadedFileValue, 0, len(outputs))
outputVals := make([]*BypassedFileInfoValue, 0, len(outputs))
for _, output := range outputs { for _, output := range outputs {
outputVals = append(outputVals, &BypassUploadedFileValue{
BypassUploadedFile: output,
outputVals = append(outputVals, &BypassedFileInfoValue{
BypassedFileInfo: output,
}) })
} }
exec.PutArray(e, o.Outputs, outputVals) exec.PutArray(e, o.Outputs, outputVals)


+ 2
- 2
common/pkgs/ioswitch2/ops2/multipart.go

@@ -88,8 +88,8 @@ func (o *MultipartInitiator) Execute(ctx *exec.ExecContext, e *exec.Executor) er
} }


// Tell subsequent Ops the path of the temporary file // Tell subsequent Ops the path of the temporary file
e.PutVar(o.BypassFileOutput, &BypassUploadedFileValue{
BypassUploadedFile: fileInfo,
e.PutVar(o.BypassFileOutput, &BypassedFileInfoValue{
BypassedFileInfo: fileInfo,
}) })


// Wait for subsequent Ops to finish handling the temporary file // Wait for subsequent Ops to finish handling the temporary file


+ 101
- 15
common/pkgs/ioswitch2/ops2/public_store.go

@@ -2,30 +2,78 @@ package ops2


import ( import (
"fmt" "fmt"
"io"


"gitlink.org.cn/cloudream/common/pkgs/future"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/utils/io2"
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types" clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
) )


func init() { func init() {
exec.UseOp[*PublicLoad]()
exec.UseOp[*PublicWrite]()
exec.UseOp[*PublicRead]()
} }


type PublicLoad struct {
type PublicRead struct {
Output exec.VarID
UserSpace clitypes.UserSpaceDetail
ObjectPath string
}

func (o *PublicRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
logger.
WithField("Output", o.Output).
WithField("UserSpace", o.UserSpace).
WithField("ObjectPath", o.ObjectPath).
Debug("public read")
defer logger.Debug("public read end")

stgPool, err := exec.GetValueByType[*pool.Pool](ctx)
if err != nil {
return fmt.Errorf("getting storage pool: %w", err)
}

store, err := stgPool.GetPublicStore(&o.UserSpace)
if err != nil {
return fmt.Errorf("getting public store of storage %v: %w", o.UserSpace, err)
}

stream, err := store.Read(o.ObjectPath)
if err != nil {
return fmt.Errorf("reading object %v: %w", o.ObjectPath, err)
}

fut := future.NewSetVoid()
output := &exec.StreamValue{
Stream: io2.AfterReadClosed(stream, func(closer io.ReadCloser) {
fut.SetVoid()
}),
}

e.PutVar(o.Output, output)
return fut.Wait(ctx.Context)
}

func (o *PublicRead) String() string {
return fmt.Sprintf("PublicRead %v:%v -> %v", o.UserSpace, o.ObjectPath, o.Output)
}

type PublicWrite struct {
Input exec.VarID Input exec.VarID
UserSpace clitypes.UserSpaceDetail UserSpace clitypes.UserSpaceDetail
ObjectPath string ObjectPath string
} }


func (o *PublicLoad) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
func (o *PublicWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
logger. logger.
WithField("Input", o.Input). WithField("Input", o.Input).
Debugf("load file to public store")
defer logger.Debugf("load file to public store finished")
Debugf("write file to public store")
defer logger.Debugf("write file to public store finished")


stgPool, err := exec.GetValueByType[*pool.Pool](ctx) stgPool, err := exec.GetValueByType[*pool.Pool](ctx)
if err != nil { if err != nil {
@@ -46,19 +94,57 @@ func (o *PublicLoad) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
return store.Write(o.ObjectPath, input.Stream) return store.Write(o.ObjectPath, input.Stream)
} }


func (o *PublicLoad) String() string {
return fmt.Sprintf("PublicLoad %v -> %v:%v", o.Input, o.UserSpace, o.ObjectPath)
func (o *PublicWrite) String() string {
return fmt.Sprintf("PublicWrite %v -> %v:%v", o.Input, o.UserSpace, o.ObjectPath)
}

type PublicReadNode struct {
dag.NodeBase
From ioswitch2.From
UserSpace clitypes.UserSpaceDetail
ObjectPath string
}

func (b *GraphNodeBuilder) NewPublicRead(from ioswitch2.From, userSpace clitypes.UserSpaceDetail, objPath string) *PublicReadNode {
node := &PublicReadNode{
From: from,
UserSpace: userSpace,
ObjectPath: objPath,
}
b.AddNode(node)

node.OutputStreams().Init(node, 1)
return node
}

func (t *PublicReadNode) GetFrom() ioswitch2.From {
return t.From
}

func (t *PublicReadNode) Output() dag.StreamOutputSlot {
return dag.StreamOutputSlot{
Node: t,
Index: 0,
}
}

func (t *PublicReadNode) GenerateOp() (exec.Op, error) {
return &PublicRead{
Output: t.Output().Var().VarID,
UserSpace: t.UserSpace,
ObjectPath: t.ObjectPath,
}, nil
} }


type PublicLoadNode struct {
type PublicWriteNode struct {
dag.NodeBase dag.NodeBase
To ioswitch2.To To ioswitch2.To
UserSpace clitypes.UserSpaceDetail UserSpace clitypes.UserSpaceDetail
ObjectPath string ObjectPath string
} }


func (b *GraphNodeBuilder) NewPublicLoad(to ioswitch2.To, userSpace clitypes.UserSpaceDetail, objPath string) *PublicLoadNode {
node := &PublicLoadNode{
func (b *GraphNodeBuilder) NewPublicWrite(to ioswitch2.To, userSpace clitypes.UserSpaceDetail, objPath string) *PublicWriteNode {
node := &PublicWriteNode{
To: to, To: to,
UserSpace: userSpace, UserSpace: userSpace,
ObjectPath: objPath, ObjectPath: objPath,
@@ -69,23 +155,23 @@ func (b *GraphNodeBuilder) NewPublicLoad(to ioswitch2.To, userSpace clitypes.Use
return node return node
} }


func (t *PublicLoadNode) GetTo() ioswitch2.To {
func (t *PublicWriteNode) GetTo() ioswitch2.To {
return t.To return t.To
} }


func (t *PublicLoadNode) SetInput(input *dag.StreamVar) {
func (t *PublicWriteNode) SetInput(input *dag.StreamVar) {
input.To(t, 0) input.To(t, 0)
} }


func (t *PublicLoadNode) Input() dag.StreamInputSlot {
func (t *PublicWriteNode) Input() dag.StreamInputSlot {
return dag.StreamInputSlot{ return dag.StreamInputSlot{
Node: t, Node: t,
Index: 0, Index: 0,
} }
} }


func (t *PublicLoadNode) GenerateOp() (exec.Op, error) {
return &PublicLoad{
func (t *PublicWriteNode) GenerateOp() (exec.Op, error) {
return &PublicWrite{
Input: t.InputStreams().Get(0).VarID, Input: t.InputStreams().Get(0).VarID,
UserSpace: t.UserSpace, UserSpace: t.UserSpace,
ObjectPath: t.ObjectPath, ObjectPath: t.ObjectPath,
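
A sketch of the renamed read/write nodes working together, streaming an object from one public store into another (the helper name and object paths are illustrative):

package example

import (
	clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
)

// copyPublicObject connects a PublicReadNode's output stream directly to a
// PublicWriteNode's input.
func copyPublicObject(b *ops2.GraphNodeBuilder, from ioswitch2.From, to ioswitch2.To, src, dst clitypes.UserSpaceDetail) {
	rd := b.NewPublicRead(from, src, "photos/a.jpg")
	wr := b.NewPublicWrite(to, dst, "backup/a.jpg")
	wr.SetInput(rd.Output().Var())
}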


+ 8
- 4
common/pkgs/ioswitch2/ops2/s2s.go

@@ -20,6 +20,7 @@ type S2STransfer struct {
Dst clitypes.UserSpaceDetail Dst clitypes.UserSpaceDetail
Output exec.VarID Output exec.VarID
BypassCallback exec.VarID BypassCallback exec.VarID
S2SOption types.S2SOption
} }


func (o *S2STransfer) Execute(ctx *exec.ExecContext, e *exec.Executor) error { func (o *S2STransfer) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
@@ -39,14 +40,14 @@ func (o *S2STransfer) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
} }


// Transfer the file // Transfer the file
dstPath, err := s2s.Transfer(ctx.Context, &o.Src, srcPath.Path)
dstPath, err := s2s.Transfer(ctx.Context, &o.Src, srcPath.Path, o.S2SOption)
if err != nil { if err != nil {
return err return err
} }
defer s2s.Abort() defer s2s.Abort()


// Tell subsequent Ops to handle the temporary file // Tell subsequent Ops to handle the temporary file
e.PutVar(o.Output, &BypassUploadedFileValue{BypassUploadedFile: types.BypassUploadedFile{
e.PutVar(o.Output, &BypassedFileInfoValue{BypassedFileInfo: types.BypassedFileInfo{
Path: dstPath, Path: dstPath,
Hash: srcPath.Info.Hash, Hash: srcPath.Info.Hash,
Size: srcPath.Info.Size, Size: srcPath.Info.Size,
@@ -66,19 +67,21 @@ func (o *S2STransfer) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
} }


func (o *S2STransfer) String() string { func (o *S2STransfer) String() string {
return fmt.Sprintf("S2STransfer %v:%v -> %v:%v", o.Src.Storage.String(), o.SrcPath, o.Dst.Storage.String(), o.Output)
return fmt.Sprintf("S2STransfer %v:%v -> %v:%v, Callback: %v", o.Src.Storage.String(), o.SrcPath, o.Dst.Storage.String(), o.Output, o.BypassCallback)
} }


type S2STransferNode struct { type S2STransferNode struct {
dag.NodeBase dag.NodeBase
Src clitypes.UserSpaceDetail Src clitypes.UserSpaceDetail
Dst clitypes.UserSpaceDetail Dst clitypes.UserSpaceDetail
Opt types.S2SOption
} }


func (b *GraphNodeBuilder) NewS2STransfer(src, dst clitypes.UserSpaceDetail) *S2STransferNode {
func (b *GraphNodeBuilder) NewS2STransfer(src, dst clitypes.UserSpaceDetail, opt types.S2SOption) *S2STransferNode {
n := &S2STransferNode{ n := &S2STransferNode{
Src: src, Src: src,
Dst: dst, Dst: dst,
Opt: opt,
} }
b.AddNode(n) b.AddNode(n)


@@ -116,5 +119,6 @@ func (n *S2STransferNode) GenerateOp() (exec.Op, error) {
Dst: n.Dst, Dst: n.Dst,
Output: n.BypassFileInfoVar().Var().VarID, Output: n.BypassFileInfoVar().Var().VarID,
BypassCallback: n.BypassCallbackSlot().Var().VarID, BypassCallback: n.BypassCallbackSlot().Var().VarID,
S2SOption: n.Opt,
}, nil }, nil
} }
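
The extra S2SOption parameter lets the caller hint a destination path when a direct transfer lands in a public store (DestPathHint is the field used by parser/opt/s2s.go below); an empty option keeps the previous behaviour. Sketch, with an illustrative helper name and path:

package example

import (
	clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
)

// newS2SToPublic creates an S2S transfer node whose destination is a public store path.
func newS2SToPublic(b *ops2.GraphNodeBuilder, src, dst clitypes.UserSpaceDetail) *ops2.S2STransferNode {
	return b.NewS2STransfer(src, dst, types.S2SOption{
		DestPathHint: "objects/final.bin",
	})
}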

+ 21
- 3
common/pkgs/ioswitch2/parser/gen/generator.go

@@ -259,7 +259,7 @@ func buildFromNode(ctx *state.GenerateState, f ioswitch2.From) (ops2.FromNode, e


switch f := f.(type) { switch f := f.(type) {
case *ioswitch2.FromShardstore: case *ioswitch2.FromShardstore:
t := ctx.DAG.NewShardRead(f, f.Space, types.NewOpen(f.FileHash))
t := ctx.DAG.NewShardRead(f, f.UserSpace, types.NewOpen(f.FileHash))


if f.StreamIndex.IsRaw() { if f.StreamIndex.IsRaw() {
t.Open.WithNullableLength(repRange.Offset, repRange.Length) t.Open.WithNullableLength(repRange.Offset, repRange.Length)
@@ -336,6 +336,24 @@ func buildFromNode(ctx *state.GenerateState, f ioswitch2.From) (ops2.FromNode, e


return n, nil return n, nil


case *ioswitch2.FromPublicStore:
// TODO Consider supporting a configurable read range
n := ctx.DAG.NewPublicRead(f, f.UserSpace, f.Path)
switch addr := f.Hub.Address.(type) {
case *cortypes.HttpAddressInfo:
n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: f.Hub})
n.Env().Pinned = true

case *cortypes.GRPCAddressInfo:
n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: f.Hub, Address: *addr})
n.Env().Pinned = true

default:
return nil, fmt.Errorf("unsupported node address type %T", addr)
}

return n, nil

default: default:
return nil, fmt.Errorf("unsupported from type %T", f) return nil, fmt.Errorf("unsupported from type %T", f)
} }
@@ -361,8 +379,8 @@ func buildToNode(ctx *state.GenerateState, t ioswitch2.To) (ops2.ToNode, error)


return n, nil return n, nil


case *ioswitch2.LoadToPublic:
n := ctx.DAG.NewPublicLoad(t, t.Space, t.ObjectPath)
case *ioswitch2.ToPublicStore:
n := ctx.DAG.NewPublicWrite(t, t.Space, t.ObjectPath)


if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil { if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil {
return nil, err return nil, err


+ 1
- 1
common/pkgs/ioswitch2/parser/opt/ec.go

@@ -88,7 +88,7 @@ func UseECMultiplier(ctx *state.GenerateState) {
return true return true
} }


if !factory.GetBuilder(&srNode.From.Space).FeatureDesc().HasBypassHTTPRead {
if !factory.GetBuilder(&srNode.From.UserSpace).FeatureDesc().HasBypassHTTPRead {
return true return true
} }




+ 178
- 91
common/pkgs/ioswitch2/parser/opt/s2s.go

@@ -5,6 +5,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser/state" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser/state"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
) )


// Replace a direct transfer from one storage service to another with an S2S transfer // Replace a direct transfer from one storage service to another with an S2S transfer
@@ -15,116 +16,202 @@ func UseS2STransfer(ctx *state.GenerateState) {
} }


for fr, frNode := range ctx.FromNodes { for fr, frNode := range ctx.FromNodes {
fromShard, ok := fr.(*ioswitch2.FromShardstore)
if !ok {
continue
switch fr := fr.(type) {
case *ioswitch2.FromShardstore:
s2sFromShardStore(ctx, fr, frNode)
case *ioswitch2.FromPublicStore:
s2sFromPublicStore(ctx, fr, frNode)
} }
}
}


fromStgBld := factory.GetBuilder(&fromShard.Space)
if !fromStgBld.FeatureDesc().HasBypassRead {
continue
}
func s2sFromShardStore(ctx *state.GenerateState, fromShard *ioswitch2.FromShardstore, frNode ops2.FromNode) {
fromStgBld := factory.GetBuilder(&fromShard.UserSpace)
if !fromStgBld.FeatureDesc().HasBypassShardRead {
return
}


s2s, err := fromStgBld.CreateS2STransfer()
if err != nil {
continue
}
s2s, err := fromStgBld.CreateS2STransfer()
if err != nil {
return
}


// Every destination of this output stream must support S2S transfer
outVar := frNode.Output().Var()
if outVar.Dst.Len() == 0 {
continue
}
// Every destination of this output stream must support S2S transfer
outVar := frNode.Output().Var()
if outVar.Dst.Len() == 0 {
return
}

failed := false
var toShards []*ops2.ShardWriteNode
var toPublics []*ops2.PublicWriteNode


failed := false
var toShards []*ops2.ShardWriteNode
// var toShareds []*ops2.SharedLoadNode

loop:
for i := 0; i < outVar.Dst.Len(); i++ {
dstNode := outVar.Dst.Get(i)

switch dstNode := dstNode.(type) {
case *ops2.ShardWriteNode:
dstStgBld := factory.GetBuilder(&dstNode.UserSpace)
if !dstStgBld.FeatureDesc().HasBypassWrite {
failed = true
break
}

if !s2s.CanTransfer(&dstNode.UserSpace) {
failed = true
break
}

toShards = append(toShards, dstNode)

/* TODO Shared storage services are not supported yet
case *ops2.SharedLoadNode:
if !s2s.CanTransfer(to.Storage) {
failed = true
break
}
toShareds = append(toShareds, to)
*/
default:
loop:
for i := 0; i < outVar.Dst.Len(); i++ {
dstNode := outVar.Dst.Get(i)

switch dstNode := dstNode.(type) {
case *ops2.ShardWriteNode:
dstStgBld := factory.GetBuilder(&dstNode.UserSpace)
if !dstStgBld.FeatureDesc().HasBypassShardWrite {
failed = true failed = true
break loop
break
} }
}
if failed {
continue
}


for _, toShard := range toShards {
s2sNode := ctx.DAG.NewS2STransfer(fromShard.Space, toShard.UserSpace)
// The direct-transfer instruction is executed on the destination Hub
s2sNode.Env().CopyFrom(toShard.Env())
if !s2s.CanTransfer(&dstNode.UserSpace) {
failed = true
break
}

toShards = append(toShards, dstNode)


// First get the file path and feed it to the S2S node
brNode := ctx.DAG.NewBypassFromShardStore(fromShard.Space, fromShard.FileHash)
brNode.Env().CopyFrom(frNode.Env())
brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot())
case *ops2.PublicWriteNode:
dstStgBld := factory.GetBuilder(&dstNode.UserSpace)
if !dstStgBld.FeatureDesc().HasBypassPublicWrite {
failed = true
break
}


// Notify the destination node of the transfer result
bwNode := ctx.DAG.NewBypassToShardStore(toShard.UserSpace, toShard.To.FileHashStoreKey)
bwNode.Env().CopyFrom(toShard.Env())
if !s2s.CanTransfer(&dstNode.UserSpace) {
failed = true
break
}


s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot())
bwNode.BypassCallbackVar().ToSlot(s2sNode.BypassCallbackSlot())
toPublics = append(toPublics, dstNode)


// Remove the destination node from the plan
ctx.DAG.RemoveNode(toShard)
delete(ctx.ToNodes, toShard.To)
default:
failed = true
break loop
} }
}
if failed {
return
}

for _, toShard := range toShards {
s2sNode := ctx.DAG.NewS2STransfer(fromShard.UserSpace, toShard.UserSpace, types.S2SOption{})
// The direct-transfer instruction is executed on the destination Hub
s2sNode.Env().CopyFrom(toShard.Env())

// First get the file path and feed it to the S2S node
brNode := ctx.DAG.NewBypassFromShardStore(fromShard.UserSpace, fromShard.FileHash)
brNode.Env().CopyFrom(frNode.Env())
brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot())

// Notify the destination node of the transfer result
bwNode := ctx.DAG.NewBypassToShardStore(toShard.UserSpace, toShard.To.FileHashStoreKey)
bwNode.Env().CopyFrom(toShard.Env())

s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot())
bwNode.BypassCallbackVar().ToSlot(s2sNode.BypassCallbackSlot())

// Remove the destination node from the plan
ctx.DAG.RemoveNode(toShard)
delete(ctx.ToNodes, toShard.To)
}


/*
for _, toShared := range toShareds {
s2sNode := ctx.DAG.NewS2STransfer(fromShard.Storage, toShared.Storage)
// The direct-transfer instruction is executed on the destination Hub
s2sNode.Env().CopyFrom(toShared.Env())
for _, toPub := range toPublics {
s2sNode := ctx.DAG.NewS2STransfer(fromShard.UserSpace, toPub.UserSpace, types.S2SOption{
DestPathHint: toPub.ObjectPath,
})
// The direct-transfer instruction is executed on the destination Hub
s2sNode.Env().CopyFrom(toPub.Env())


// First get the file path and feed it to the S2S node
brNode := ctx.DAG.NewBypassFromShardStore(fromShard.Storage.Storage.StorageID, fromShard.FileHash)
brNode.Env().CopyFrom(toShared.Env())
brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot())
// First get the file path and feed it to the S2S node
brNode := ctx.DAG.NewBypassFromShardStore(fromShard.UserSpace, fromShard.FileHash)
brNode.Env().CopyFrom(toPub.Env())
brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot())


// Notify the destination node of the transfer result
to := toShared.To.(*ioswitch2.LoadToShared)
bwNode := ctx.DAG.NewBypassToShardStore(toShard.Storage.Storage.StorageID, to.FileHashStoreKey)
bwNode.Env().CopyFrom(toShard.Env())
// Notify the destination node of the transfer result
bwNode := ctx.DAG.NewBypassToPublicStore(toPub.UserSpace, toPub.ObjectPath)
bwNode.Env().CopyFrom(toPub.Env())


s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot())
bwNode.BypassCallbackVar().ToSlot(s2sNode.BypassCallbackSlot())
s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot())
bwNode.BypassCallbackVar().ToSlot(s2sNode.BypassCallbackSlot())


// Remove the destination node from the plan
ctx.DAG.RemoveNode(toShared)
delete(ctx.ToNodes, toShared.To)
// Remove the destination node from the plan
ctx.DAG.RemoveNode(toPub)
delete(ctx.ToNodes, toPub.To)
}

// Remove the source node from the plan
ctx.DAG.RemoveNode(frNode)
delete(ctx.FromNodes, frNode.GetFrom())
}

func s2sFromPublicStore(ctx *state.GenerateState, fromPub *ioswitch2.FromPublicStore, frNode ops2.FromNode) {
fromStgBld := factory.GetBuilder(&fromPub.UserSpace)
if !fromStgBld.FeatureDesc().HasBypassPublicRead {
return
}

s2s, err := fromStgBld.CreateS2STransfer()
if err != nil {
return
}

// Every destination of this output stream must support S2S transfer
outVar := frNode.Output().Var()
if outVar.Dst.Len() == 0 {
return
}

failed := false
var toPublics []*ops2.PublicWriteNode

loop:
for i := 0; i < outVar.Dst.Len(); i++ {
dstNode := outVar.Dst.Get(i)

switch dstNode := dstNode.(type) {
case *ops2.PublicWriteNode:
dstStgBld := factory.GetBuilder(&dstNode.UserSpace)
if !dstStgBld.FeatureDesc().HasBypassPublicWrite {
failed = true
break
}

if !s2s.CanTransfer(&dstNode.UserSpace) {
failed = true
break
} }
*/


// Remove the source node from the plan
ctx.DAG.RemoveNode(frNode)
delete(ctx.FromNodes, fr)
toPublics = append(toPublics, dstNode)

default:
failed = true
break loop
}
}
if failed {
return
} }

for _, toPub := range toPublics {
s2sNode := ctx.DAG.NewS2STransfer(fromPub.UserSpace, toPub.UserSpace, types.S2SOption{
DestPathHint: toPub.ObjectPath,
})
// The direct-transfer instruction is executed on the destination Hub
s2sNode.Env().CopyFrom(toPub.Env())

// First get the file path and feed it to the S2S node
brNode := ctx.DAG.NewBypassFromPublicStore(fromPub.UserSpace, fromPub.Path)
brNode.Env().CopyFrom(toPub.Env())
brNode.FilePathVar().ToSlot(s2sNode.SrcPathSlot())

// Notify the destination node of the transfer result
bwNode := ctx.DAG.NewBypassToPublicStore(toPub.UserSpace, toPub.ObjectPath)
bwNode.Env().CopyFrom(toPub.Env())

s2sNode.BypassFileInfoVar().ToSlot(bwNode.BypassFileInfoSlot())
bwNode.BypassCallbackVar().ToSlot(s2sNode.BypassCallbackSlot())

// Remove the destination node from the plan
ctx.DAG.RemoveNode(toPub)
delete(ctx.ToNodes, toPub.To)
}

// Remove the source node from the plan
ctx.DAG.RemoveNode(frNode)
delete(ctx.FromNodes, frNode.GetFrom())
} }

+ 31
- 13
common/pkgs/ioswitchlrc/hub_worker.go

@@ -6,7 +6,7 @@ import (


"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec" "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/grpc/hub"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types" cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
) )


@@ -20,11 +20,7 @@ type HubWorker struct {
} }


func (w *HubWorker) NewClient() (exec.WorkerClient, error) { func (w *HubWorker) NewClient() (exec.WorkerClient, error) {
cli, err := stgglb.HubRPCPool.Acquire(stgglb.SelectGRPCAddress(w.Hub, w.Address))
if err != nil {
return nil, err
}

cli := stgglb.HubRPCPool.Get(stgglb.SelectGRPCAddress(&w.Hub, &w.Address))
return &HubWorkerClient{cli: cli}, nil return &HubWorkerClient{cli: cli}, nil
} }


@@ -42,25 +38,47 @@ func (w *HubWorker) Equals(worker exec.WorkerInfo) bool {
} }


type HubWorkerClient struct { type HubWorkerClient struct {
cli *hubrpc.PoolClient
cli *hubrpc.Client
} }


func (c *HubWorkerClient) ExecutePlan(ctx context.Context, plan exec.Plan) error { func (c *HubWorkerClient) ExecutePlan(ctx context.Context, plan exec.Plan) error {
return c.cli.ExecuteIOPlan(ctx, plan)
_, err := c.cli.ExecuteIOPlan(ctx, &hubrpc.ExecuteIOPlan{Plan: plan})
return err.ToError()
} }
func (c *HubWorkerClient) SendStream(ctx context.Context, planID exec.PlanID, id exec.VarID, stream io.ReadCloser) error { func (c *HubWorkerClient) SendStream(ctx context.Context, planID exec.PlanID, id exec.VarID, stream io.ReadCloser) error {
return c.cli.SendStream(ctx, planID, id, stream)
_, err := c.cli.SendIOStream(ctx, &hubrpc.SendIOStream{
PlanID: planID,
VarID: id,
Stream: stream,
})
return err.ToError()
} }
func (c *HubWorkerClient) SendVar(ctx context.Context, planID exec.PlanID, id exec.VarID, value exec.VarValue) error { func (c *HubWorkerClient) SendVar(ctx context.Context, planID exec.PlanID, id exec.VarID, value exec.VarValue) error {
return c.cli.SendVar(ctx, planID, id, value)
_, err := c.cli.SendIOVar(ctx, &hubrpc.SendIOVar{
PlanID: planID, VarID: id, Value: value,
})
return err.ToError()
} }
func (c *HubWorkerClient) GetStream(ctx context.Context, planID exec.PlanID, streamID exec.VarID, signalID exec.VarID, signal exec.VarValue) (io.ReadCloser, error) { func (c *HubWorkerClient) GetStream(ctx context.Context, planID exec.PlanID, streamID exec.VarID, signalID exec.VarID, signal exec.VarValue) (io.ReadCloser, error) {
return c.cli.GetStream(ctx, planID, streamID, signalID, signal)
resp, err := c.cli.GetIOStream(ctx, &hubrpc.GetIOStream{
PlanID: planID, VarID: streamID, SignalID: signalID, Signal: signal,
})
if err != nil {
return nil, err.ToError()
}

return resp.Stream, nil
} }
func (c *HubWorkerClient) GetVar(ctx context.Context, planID exec.PlanID, varID exec.VarID, signalID exec.VarID, signal exec.VarValue) (exec.VarValue, error) { func (c *HubWorkerClient) GetVar(ctx context.Context, planID exec.PlanID, varID exec.VarID, signalID exec.VarID, signal exec.VarValue) (exec.VarValue, error) {
return c.cli.GetVar(ctx, planID, varID, signalID, signal)
resp, err := c.cli.GetIOVar(ctx, &hubrpc.GetIOVar{
PlanID: planID, VarID: varID, SignalID: signalID, Signal: signal,
})
if err != nil {
return nil, err.ToError()
}
return resp.Value, nil
} }
func (c *HubWorkerClient) Close() error { func (c *HubWorkerClient) Close() error {
stgglb.HubRPCPool.Release(c.cli)
c.cli.Release()
return nil return nil
} }

+ 0
- 13
common/pkgs/mq/consts.go

@@ -1,13 +0,0 @@
package mq

import "fmt"

const (
COORDINATOR_QUEUE_NAME = "Coordinator"
SCANNER_QUEUE_NAME = "Scanner"
DATAMAP_QUEUE_NAME = "DataMap"
)

func MakeHubQueueName(id int64) string {
return fmt.Sprintf("Hub@%d", id)
}

+ 0
- 60
common/pkgs/mq/coordinator/client.go

@@ -1,60 +0,0 @@
package coordinator

import (
"sync"

"gitlink.org.cn/cloudream/common/pkgs/mq"
stgmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq"
)

type Client struct {
rabbitCli *mq.RabbitMQTransport
}

func NewClient(cfg mq.Config) (*Client, error) {
rabbitCli, err := mq.NewRabbitMQTransport(cfg, stgmq.COORDINATOR_QUEUE_NAME, "")
if err != nil {
return nil, err
}

return &Client{
rabbitCli: rabbitCli,
}, nil
}

func (c *Client) Close() {
c.rabbitCli.Close()
}

type Pool interface {
Acquire() (*Client, error)
Release(cli *Client)
}

type pool struct {
mqcfg mq.Config
shared *Client
lock sync.Mutex
}

func NewPool(mqcfg mq.Config) Pool {
return &pool{
mqcfg: mqcfg,
}
}
func (p *pool) Acquire() (*Client, error) {
p.lock.Lock()
defer p.lock.Unlock()
if p.shared == nil {
var err error
p.shared, err = NewClient(p.mqcfg)
if err != nil {
return nil, err
}
}

return p.shared, nil
}

func (p *pool) Release(cli *Client) {
}

+ 0
- 15
common/pkgs/mq/coordinator/coordinator_test.go

@@ -1,15 +0,0 @@
package coordinator

import (
"testing"

. "github.com/smartystreets/goconvey/convey"
)

func TestSerder(t *testing.T) {
Convey("输出注册的Handler", t, func() {
for k, _ := range msgDispatcher.Handlers {
t.Logf("(%s)", k)
}
})
}

+ 0
- 100
common/pkgs/mq/coordinator/hub.go

@@ -1,100 +0,0 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
)

type HubService interface {
GetHubConfig(msg *GetHubConfig) (*GetHubConfigResp, *mq.CodeMessage)

GetHubs(msg *GetHubs) (*GetHubsResp, *mq.CodeMessage)

GetHubConnectivities(msg *GetHubConnectivities) (*GetHubConnectivitiesResp, *mq.CodeMessage)
}

var _ = Register(Service.GetHubConfig)

type GetHubConfig struct {
mq.MessageBodyBase
HubID cortypes.HubID `json:"hubID"`
}
type GetHubConfigResp struct {
mq.MessageBodyBase
Hub cortypes.Hub `json:"hub"`
}

func ReqGetHubConfig(hubID cortypes.HubID) *GetHubConfig {
return &GetHubConfig{
HubID: hubID,
}
}
func RespGetHubConfig(hub cortypes.Hub) *GetHubConfigResp {
return &GetHubConfigResp{
Hub: hub,
}
}
func (client *Client) GetHubConfig(msg *GetHubConfig) (*GetHubConfigResp, error) {
return mq.Request(Service.GetHubConfig, client.rabbitCli, msg)
}

// Get the information of the specified hubs. If HubIDs is nil, all Hubs are returned
var _ = Register(Service.GetHubs)

type GetHubs struct {
mq.MessageBodyBase
HubIDs []cortypes.HubID `json:"hubIDs"`
}
type GetHubsResp struct {
mq.MessageBodyBase
Hubs []*cortypes.Hub `json:"hubs"`
}

func NewGetHubs(hubIDs []cortypes.HubID) *GetHubs {
return &GetHubs{
HubIDs: hubIDs,
}
}
func NewGetHubsResp(hubs []*cortypes.Hub) *GetHubsResp {
return &GetHubsResp{
Hubs: hubs,
}
}
func (r *GetHubsResp) GetHub(id cortypes.HubID) *cortypes.Hub {
for _, n := range r.Hubs {
if n.HubID == id {
return n
}
}

return nil
}
func (client *Client) GetHubs(msg *GetHubs) (*GetHubsResp, error) {
return mq.Request(Service.GetHubs, client.rabbitCli, msg)
}

// Get hub connectivity information
var _ = Register(Service.GetHubConnectivities)

type GetHubConnectivities struct {
mq.MessageBodyBase
HubIDs []cortypes.HubID `json:"hubIDs"`
}
type GetHubConnectivitiesResp struct {
mq.MessageBodyBase
Connectivities []cortypes.HubConnectivity `json:"hubs"`
}

func ReqGetHubConnectivities(hubIDs []cortypes.HubID) *GetHubConnectivities {
return &GetHubConnectivities{
HubIDs: hubIDs,
}
}
func RespGetHubConnectivities(cons []cortypes.HubConnectivity) *GetHubConnectivitiesResp {
return &GetHubConnectivitiesResp{
Connectivities: cons,
}
}
func (client *Client) GetHubConnectivities(msg *GetHubConnectivities) (*GetHubConnectivitiesResp, error) {
return mq.Request(Service.GetHubConnectivities, client.rabbitCli, msg)
}

+ 0
- 72
common/pkgs/mq/coordinator/server.go

@@ -1,72 +0,0 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/common/utils/sync2"
mymq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq"
)

// Service is the coordinator-side interface
type Service interface {
HubService

StorageService
}

type Server struct {
service Service
rabbitSvr mq.RabbitMQServer
}

func NewServer(svc Service, cfg mq.Config) (*Server, error) {
srv := &Server{
service: svc,
}

rabbitSvr, err := mq.NewRabbitMQServer(
cfg,
mymq.COORDINATOR_QUEUE_NAME,
func(msg *mq.Message) (*mq.Message, error) {
return msgDispatcher.Handle(srv.service, msg)
},
)
if err != nil {
return nil, err
}

srv.rabbitSvr = *rabbitSvr

return srv, nil
}
func (s *Server) Stop() {
s.rabbitSvr.Close()
}

func (s *Server) Start(cfg mq.Config) *sync2.UnboundChannel[mq.RabbitMQServerEvent] {
return s.rabbitSvr.Start()
}

func (s *Server) OnError(callback func(error)) {
s.rabbitSvr.OnError = callback
}

var msgDispatcher mq.MessageDispatcher = mq.NewMessageDispatcher()

// Register registers one of Service's interface functions as the handler for the corresponding message type, and also registers the request and response message types
// TODO Needs a constraint: Service must implement the TSvc interface
func Register[TReq mq.MessageBody, TResp mq.MessageBody](svcFn func(svc Service, msg TReq) (TResp, *mq.CodeMessage)) any {
mq.AddServiceFn(&msgDispatcher, svcFn)
mq.RegisterMessage[TReq]()
mq.RegisterMessage[TResp]()

return nil
}

// RegisterNoReply registers one of Service's interface functions *that has no return value* as the handler for the corresponding message type, and also registers the request message type
// TODO Needs a constraint: Service must implement the TSvc interface
func RegisterNoReply[TReq mq.MessageBody](svcFn func(svc Service, msg TReq)) any {
mq.AddNoRespServiceFn(&msgDispatcher, svcFn)
mq.RegisterMessage[TReq]()

return nil
}
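
As a rough sketch (not part of the original code), wiring an existing Service implementation to this server could look like the following; handling of the returned event channel is omitted because the sync2.UnboundChannel API is not shown in this diff:

package example

import (
	"log"

	"gitlink.org.cn/cloudream/common/pkgs/mq"
	coormq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/coordinator"
)

// runCoordinatorMQ starts consuming the coordinator queue on behalf of svc.
// Message dispatch goes through msgDispatcher, which the package-level
// Register calls populated at init time.
func runCoordinatorMQ(svc coormq.Service, cfg mq.Config) (*coormq.Server, error) {
	srv, err := coormq.NewServer(svc, cfg)
	if err != nil {
		return nil, err
	}

	srv.OnError(func(err error) {
		log.Printf("coordinator mq error: %v", err)
	})

	// Start returns an event channel; its handling is omitted here.
	_ = srv.Start(cfg)
	return srv, nil
}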

+ 0
- 36
common/pkgs/mq/coordinator/storage.go View File

@@ -1,36 +0,0 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
)

type StorageService interface {
GetStorageDetails(msg *GetStorageDetails) (*GetStorageDetailsResp, *mq.CodeMessage)
}

// Gets Storage information
var _ = Register(Service.GetStorageDetails)

type GetStorageDetails struct {
mq.MessageBodyBase
StorageIDs []cortypes.StorageID `json:"storageIDs"`
}
type GetStorageDetailsResp struct {
mq.MessageBodyBase
Storage []*cortypes.StorageDetail `json:"storages"`
}

func ReqGetStorageDetails(storageIDs []cortypes.StorageID) *GetStorageDetails {
return &GetStorageDetails{
StorageIDs: storageIDs,
}
}
func RespGetStorageDetails(stgs []*cortypes.StorageDetail) *GetStorageDetailsResp {
return &GetStorageDetailsResp{
Storage: stgs,
}
}
func (client *Client) GetStorageDetails(msg *GetStorageDetails) (*GetStorageDetailsResp, error) {
return mq.Request(Service.GetStorageDetails, client.rabbitCli, msg)
}

+ 0
- 61
common/pkgs/mq/hub/cache.go View File

@@ -1,61 +0,0 @@
package hub

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
)

type CacheService interface {
CheckCache(msg *CheckCache) (*CheckCacheResp, *mq.CodeMessage)

CacheGC(msg *CacheGC) (*CacheGCResp, *mq.CodeMessage)
}

// Checks which files are cached in the user space on the node
var _ = Register(Service.CheckCache)

type CheckCache struct {
mq.MessageBodyBase
UserSpace clitypes.UserSpaceDetail `json:"userSpace"`
}
type CheckCacheResp struct {
mq.MessageBodyBase
FileHashes []clitypes.FileHash `json:"fileHashes"`
}

func NewCheckCache(space clitypes.UserSpaceDetail) *CheckCache {
return &CheckCache{UserSpace: space}
}
func NewCheckCacheResp(fileHashes []clitypes.FileHash) *CheckCacheResp {
return &CheckCacheResp{
FileHashes: fileHashes,
}
}
func (client *Client) CheckCache(msg *CheckCache, opts ...mq.RequestOption) (*CheckCacheResp, error) {
return mq.Request(Service.CheckCache, client.rabbitCli, msg, opts...)
}

// Cleans up unused files in the cache
var _ = Register(Service.CacheGC)

type CacheGC struct {
mq.MessageBodyBase
UserSpace clitypes.UserSpaceDetail `json:"userSpace"`
Avaiables []clitypes.FileHash `json:"avaiables"`
}
type CacheGCResp struct {
mq.MessageBodyBase
}

func ReqCacheGC(space clitypes.UserSpaceDetail, avaiables []clitypes.FileHash) *CacheGC {
return &CacheGC{
UserSpace: space,
Avaiables: avaiables,
}
}
func RespCacheGC() *CacheGCResp {
return &CacheGCResp{}
}
func (client *Client) CacheGC(msg *CacheGC, opts ...mq.RequestOption) (*CacheGCResp, error) {
return mq.Request(Service.CacheGC, client.rabbitCli, msg, opts...)
}
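
A minimal sketch of how the two messages could be combined into a GC pass, assuming the hub client and the set of hashes to keep are produced elsewhere:

package example

import (
	clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
	hubmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/hub"
)

// gcUserSpace first asks the hub which file hashes it actually holds for
// the user space, then tells it to GC everything not listed in keep.
func gcUserSpace(cli *hubmq.Client, space clitypes.UserSpaceDetail, keep []clitypes.FileHash) error {
	// 1. What is currently cached?
	chk, err := cli.CheckCache(hubmq.NewCheckCache(space))
	if err != nil {
		return err
	}
	_ = chk.FileHashes // e.g. reconcile these against metadata here

	// 2. GC: only the hashes passed as Avaiables survive.
	_, err = cli.CacheGC(hubmq.ReqCacheGC(space, keep))
	return err
}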

+ 0
- 68
common/pkgs/mq/hub/client.go View File

@@ -1,68 +0,0 @@
package hub

import (
"sync"

"gitlink.org.cn/cloudream/common/pkgs/mq"
stgmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
)

type Client struct {
rabbitCli *mq.RabbitMQTransport
id cortypes.HubID
}

func NewClient(id cortypes.HubID, cfg mq.Config) (*Client, error) {
rabbitCli, err := mq.NewRabbitMQTransport(cfg, stgmq.MakeHubQueueName(int64(id)), "")
if err != nil {
return nil, err
}

return &Client{
rabbitCli: rabbitCli,
id: id,
}, nil
}

func (c *Client) Close() {
c.rabbitCli.Close()
}

type Pool interface {
Acquire(id cortypes.HubID) (*Client, error)
Release(cli *Client)
}

type pool struct {
mqcfg mq.Config
shareds map[cortypes.HubID]*Client
lock sync.Mutex
}

func NewPool(mqcfg mq.Config) Pool {
return &pool{
mqcfg: mqcfg,
shareds: make(map[cortypes.HubID]*Client),
}
}
func (p *pool) Acquire(id cortypes.HubID) (*Client, error) {
p.lock.Lock()
defer p.lock.Unlock()

cli, ok := p.shareds[id]
if !ok {
var err error
cli, err = NewClient(id, p.mqcfg)
if err != nil {
return nil, err
}
p.shareds[id] = cli
}

return cli, nil
}

func (p *pool) Release(cli *Client) {
// TODO Close idle clients on a timer
}
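
For illustration, acquiring a per-hub client through this pool and probing the hub could look like the sketch below (the GetState message is defined in hub.go further down in this diff):

package example

import (
	"gitlink.org.cn/cloudream/common/pkgs/mq"
	hubmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/hub"
	cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
)

// probeHub acquires (or lazily creates) the shared client for one hub and
// sends it a GetState request. Release is currently a no-op, but calling it
// keeps callers correct if the pool later starts closing idle clients.
func probeHub(cfg mq.Config, id cortypes.HubID) error {
	pool := hubmq.NewPool(cfg)

	cli, err := pool.Acquire(id)
	if err != nil {
		return err
	}
	defer pool.Release(cli)

	_, err = cli.GetState(hubmq.NewGetState())
	return err
}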

+ 0
- 29
common/pkgs/mq/hub/hub.go View File

@@ -1,29 +0,0 @@
package hub

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
)

type HubService interface {
GetState(msg *GetState) (*GetStateResp, *mq.CodeMessage)
}

// Gets the hub's state
var _ = Register(Service.GetState)

type GetState struct {
mq.MessageBodyBase
}
type GetStateResp struct {
mq.MessageBodyBase
}

func NewGetState() *GetState {
return &GetState{}
}
func NewGetStateResp() *GetStateResp {
return &GetStateResp{}
}
func (client *Client) GetState(msg *GetState, opts ...mq.RequestOption) (*GetStateResp, error) {
return mq.Request(Service.GetState, client.rabbitCli, msg, opts...)
}

+ 0
- 75
common/pkgs/mq/hub/server.go View File

@@ -1,75 +0,0 @@
package hub

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/common/utils/sync2"
mymq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
)

type Service interface {
// UserSpaceService

CacheService

HubService
}

type Server struct {
service Service
rabbitSvr mq.RabbitMQServer
}

func NewServer(svc Service, id cortypes.HubID, cfg mq.Config) (*Server, error) {
srv := &Server{
service: svc,
}

rabbitSvr, err := mq.NewRabbitMQServer(
cfg,
mymq.MakeHubQueueName(int64(id)),
func(msg *mq.Message) (*mq.Message, error) {
return msgDispatcher.Handle(srv.service, msg)
},
)
if err != nil {
return nil, err
}

srv.rabbitSvr = *rabbitSvr

return srv, nil
}

func (s *Server) Stop() {
s.rabbitSvr.Close()
}

func (s *Server) Start() *sync2.UnboundChannel[mq.RabbitMQServerEvent] {
return s.rabbitSvr.Start()
}

func (s *Server) OnError(callback func(error)) {
s.rabbitSvr.OnError = callback
}

var msgDispatcher mq.MessageDispatcher = mq.NewMessageDispatcher()

// Register registers one of Service's interface functions as the handler for the corresponding message type, and also registers the request and response message types
// TODO Needs a constraint: Service must implement the TSvc interface
func Register[TReq mq.MessageBody, TResp mq.MessageBody](svcFn func(svc Service, msg TReq) (TResp, *mq.CodeMessage)) any {
mq.AddServiceFn(&msgDispatcher, svcFn)
mq.RegisterMessage[TReq]()
mq.RegisterMessage[TResp]()

return nil
}

// RegisterNoReply registers one of Service's interface functions *that has no return value* as the handler for the corresponding message type, and also registers the request message type
// TODO Needs a constraint: Service must implement the TSvc interface
func RegisterNoReply[TReq mq.MessageBody](svcFn func(svc Service, msg TReq)) any {
mq.AddNoRespServiceFn(&msgDispatcher, svcFn)
mq.RegisterMessage[TReq]()

return nil
}

+ 0
- 48
common/pkgs/mq/hub/storage.go View File

@@ -1,48 +0,0 @@
package hub

/*
import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
)

type UserSpaceService interface {
UserSpaceCreatePackage(msg *UserSpaceCreatePackage) (*UserSpaceCreatePackageResp, *mq.CodeMessage)
}

// Starts a task that uploads a Package from a UserSpace
var _ = Register(Service.UserSpaceCreatePackage)

type UserSpaceCreatePackage struct {
mq.MessageBodyBase
UserID cdssdk.UserID `json:"userID"`
BucketID cdssdk.BucketID `json:"bucketID"`
Name string `json:"name"`
UserSpaceID cdssdk.UserSpaceID `json:"userspaceID"`
Path string `json:"path"`
UserSpaceAffinity cdssdk.UserSpaceID `json:"userspaceAffinity"`
}
type UserSpaceCreatePackageResp struct {
mq.MessageBodyBase
Package cdssdk.Package `json:"package"`
}

func ReqUserSpaceCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, userspaceID cdssdk.UserSpaceID, path string, stgAffinity cdssdk.UserSpaceID) *UserSpaceCreatePackage {
return &UserSpaceCreatePackage{
UserID: userID,
BucketID: bucketID,
Name: name,
UserSpaceID: userspaceID,
Path: path,
UserSpaceAffinity: stgAffinity,
}
}
func RespUserSpaceCreatePackage(pkg cdssdk.Package) *UserSpaceCreatePackageResp {
return &UserSpaceCreatePackageResp{
Package: pkg,
}
}
func (client *Client) UserSpaceCreatePackage(msg *UserSpaceCreatePackage, opts ...mq.RequestOption) (*UserSpaceCreatePackageResp, error) {
return mq.Request(Service.UserSpaceCreatePackage, client.rabbitCli, msg, opts...)
}
*/

+ 0
- 60
common/pkgs/mq/scanner/client.go View File

@@ -1,60 +0,0 @@
package scanner

import (
"sync"

"gitlink.org.cn/cloudream/common/pkgs/mq"
stgmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq"
)

type Client struct {
rabbitCli *mq.RabbitMQTransport
}

func NewClient(cfg mq.Config) (*Client, error) {
rabbitCli, err := mq.NewRabbitMQTransport(cfg, stgmq.SCANNER_QUEUE_NAME, "")
if err != nil {
return nil, err
}

return &Client{
rabbitCli: rabbitCli,
}, nil
}

func (c *Client) Close() {
c.rabbitCli.Close()
}

type Pool interface {
Acquire() (*Client, error)
Release(cli *Client)
}

type pool struct {
mqcfg mq.Config
shared *Client
lock sync.Mutex
}

func NewPool(mqcfg mq.Config) Pool {
return &pool{
mqcfg: mqcfg,
}
}
func (p *pool) Acquire() (*Client, error) {
p.lock.Lock()
defer p.lock.Unlock()
if p.shared == nil {
var err error
p.shared, err = NewClient(p.mqcfg)
if err != nil {
return nil, err
}
}

return p.shared, nil
}

func (p *pool) Release(cli *Client) {
}
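
Unlike the hub pool, this one holds a single shared client; a short usage sketch, with the configuration assumed to come from the caller:

package example

import (
	"gitlink.org.cn/cloudream/common/pkgs/mq"
	scmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/scanner"
)

// withScannerClient acquires the lazily created shared client and hands it
// to fn. Release is a no-op today but is still called for symmetry.
func withScannerClient(cfg mq.Config, fn func(cli *scmq.Client) error) error {
	pool := scmq.NewPool(cfg)

	cli, err := pool.Acquire()
	if err != nil {
		return err
	}
	defer pool.Release(cli)

	return fn(cli)
}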

+ 0
- 31
common/pkgs/mq/scanner/event.go View File

@@ -1,31 +0,0 @@
package scanner

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
scevt "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/scanner/event"
)

type EventService interface {
PostEvent(event *PostEvent)
}

// Posts an Event
var _ = RegisterNoReply(Service.PostEvent)

type PostEvent struct {
mq.MessageBodyBase
Event scevt.Event `json:"event"`
IsEmergency bool `json:"isEmergency"` // important message, handle it with priority
DontMerge bool `json:"dontMerge"` // this message must not be merged with others
}

func NewPostEvent(event scevt.Event, isEmergency bool, dontMerge bool) *PostEvent {
return &PostEvent{
Event: event,
IsEmergency: isEmergency,
DontMerge: dontMerge,
}
}
func (client *Client) PostEvent(msg *PostEvent) error {
return mq.Send(Service.PostEvent, client.rabbitCli, msg)
}
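
A sketch of posting one of the event types defined below, assuming a client obtained from the scanner pool; PostEvent is fire-and-forget, so there is no response message:

package example

import (
	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
	scmq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/scanner"
	scevt "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq/scanner/event"
)

// reportPackage asks the scanner to re-check a package.
func reportPackage(cli *scmq.Client, id cdssdk.PackageID) error {
	evt := scevt.NewCheckPackage([]cdssdk.PackageID{id})

	// isEmergency=true: handle ahead of the queue;
	// dontMerge=false: identical pending events may be merged.
	return cli.PostEvent(scmq.NewPostEvent(evt, true, false))
}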

+ 0
- 18
common/pkgs/mq/scanner/event/agent_check_shardstore.go View File

@@ -1,18 +0,0 @@
package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type HubCheckShardStore struct {
EventBase
StorageID cdssdk.StorageID `json:"storageID"`
}

func NewHubCheckShardStore(stgID cdssdk.StorageID) *HubCheckShardStore {
return &HubCheckShardStore{
StorageID: stgID,
}
}

func init() {
Register[*HubCheckShardStore]()
}

+ 0
- 18
common/pkgs/mq/scanner/event/agent_check_state.go View File

@@ -1,18 +0,0 @@
package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type HubCheckState struct {
EventBase
HubID cdssdk.HubID `json:"hubID"`
}

func NewHubCheckState(hubID cdssdk.HubID) *HubCheckState {
return &HubCheckState{
HubID: hubID,
}
}

func init() {
Register[*HubCheckState]()
}

+ 0
- 18
common/pkgs/mq/scanner/event/agent_check_storage.go View File

@@ -1,18 +0,0 @@
package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type HubCheckStorage struct {
EventBase
StorageID cdssdk.StorageID `json:"storageID"`
}

func NewHubCheckStorage(storageID cdssdk.StorageID) *HubCheckStorage {
return &HubCheckStorage{
StorageID: storageID,
}
}

func init() {
Register[*HubCheckStorage]()
}

+ 0
- 18
common/pkgs/mq/scanner/event/agent_shardstore_gc.go View File

@@ -1,18 +0,0 @@
package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type HubShardStoreGC struct {
EventBase
StorageID cdssdk.StorageID `json:"storageID"`
}

func NewHubShardStoreGC(stgID cdssdk.StorageID) *HubShardStoreGC {
return &HubShardStoreGC{
StorageID: stgID,
}
}

func init() {
Register[*HubShardStoreGC]()
}

+ 0
- 18
common/pkgs/mq/scanner/event/agent_storage_gc.go View File

@@ -1,18 +0,0 @@
package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type HubStorageGC struct {
EventBase
StorageID cdssdk.StorageID `json:"storageID"`
}

func NewHubStorageGC(storageID cdssdk.StorageID) *HubStorageGC {
return &HubStorageGC{
StorageID: storageID,
}
}

func init() {
Register[*HubStorageGC]()
}

+ 0
- 18
common/pkgs/mq/scanner/event/check_package.go View File

@@ -1,18 +0,0 @@
package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type CheckPackage struct {
EventBase
PackageIDs []cdssdk.PackageID `json:"packageIDs"`
}

func NewCheckPackage(packageIDs []cdssdk.PackageID) *CheckPackage {
return &CheckPackage{
PackageIDs: packageIDs,
}
}

func init() {
Register[*CheckPackage]()
}

+ 0
- 18
common/pkgs/mq/scanner/event/check_package_redundancy.go View File

@@ -1,18 +0,0 @@
package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type CheckPackageRedundancy struct {
EventBase
PackageID cdssdk.PackageID `json:"packageIDs"`
}

func NewCheckPackageRedundancy(packageID cdssdk.PackageID) *CheckPackageRedundancy {
return &CheckPackageRedundancy{
PackageID: packageID,
}
}

func init() {
Register[*CheckPackageRedundancy]()
}

+ 0
- 18
common/pkgs/mq/scanner/event/clean_pinned.go View File

@@ -1,18 +0,0 @@
package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type CleanPinned struct {
EventBase
PackageID cdssdk.PackageID `json:"hubID"`
}

func NewCleanPinned(packageID cdssdk.PackageID) *CleanPinned {
return &CleanPinned{
PackageID: packageID,
}
}

func init() {
Register[*CleanPinned]()
}

+ 0
- 23
common/pkgs/mq/scanner/event/event.go View File

@@ -1,23 +0,0 @@
package event

import (
"gitlink.org.cn/cloudream/common/pkgs/types"
"gitlink.org.cn/cloudream/common/utils/reflect2"
"gitlink.org.cn/cloudream/common/utils/serder"
)

type Event interface {
Noop()
}

var EventTypeUnino = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[Event]()))

type EventBase struct{}

func (e *EventBase) Noop() {}

// May only be called from init functions, because package-level variable initialization runs before init functions are called
func Register[T Event]() any {
EventTypeUnino.Add(reflect2.TypeOf[T]())
return nil
}
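
To make the registration pattern concrete, a hypothetical new event type (not part of this diff) would follow the same shape as the event files above and below:

package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

// HubCheckSomething is a hypothetical event, shown only to illustrate the pattern.
type HubCheckSomething struct {
	EventBase
	StorageID cdssdk.StorageID `json:"storageID"`
}

func init() {
	// Registration must happen in init: by then the package-level
	// EventTypeUnino union has already been initialized.
	Register[*HubCheckSomething]()
}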

+ 0
- 18
common/pkgs/mq/scanner/event/update_package_access_stat_amount.go View File

@@ -1,18 +0,0 @@
package event

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type UpdatePackageAccessStatAmount struct {
EventBase
PackageIDs []cdssdk.PackageID `json:"packageIDs"`
}

func NewUpdatePackageAccessStatAmount(packageIDs []cdssdk.PackageID) *UpdatePackageAccessStatAmount {
return &UpdatePackageAccessStatAmount{
PackageIDs: packageIDs,
}
}

func init() {
Register[*UpdatePackageAccessStatAmount]()
}

+ 0
- 70
common/pkgs/mq/scanner/server.go View File

@@ -1,70 +0,0 @@
package scanner

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/common/utils/sync2"
mymq "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/mq"
)

// Service is the scanner-side interface
type Service interface {
EventService
}
type Server struct {
service Service
rabbitSvr mq.RabbitMQServer
}

func NewServer(svc Service, cfg mq.Config) (*Server, error) {
srv := &Server{
service: svc,
}

rabbitSvr, err := mq.NewRabbitMQServer(
cfg,
mymq.SCANNER_QUEUE_NAME,
func(msg *mq.Message) (*mq.Message, error) {
return msgDispatcher.Handle(srv.service, msg)
},
)
if err != nil {
return nil, err
}

srv.rabbitSvr = *rabbitSvr

return srv, nil
}

func (s *Server) Stop() {
s.rabbitSvr.Close()
}

func (s *Server) Start() *sync2.UnboundChannel[mq.RabbitMQServerEvent] {
return s.rabbitSvr.Start()
}

func (s *Server) OnError(callback func(error)) {
s.rabbitSvr.OnError = callback
}

var msgDispatcher mq.MessageDispatcher = mq.NewMessageDispatcher()

// Register registers one of Service's interface functions as the handler for the corresponding message type, and also registers the request and response message types
// TODO Needs a constraint: Service must implement the TSvc interface
func Register[TReq mq.MessageBody, TResp mq.MessageBody](svcFn func(svc Service, msg TReq) (TResp, *mq.CodeMessage)) any {
mq.AddServiceFn(&msgDispatcher, svcFn)
mq.RegisterMessage[TReq]()
mq.RegisterMessage[TResp]()

return nil
}

// RegisterNoReply registers one of Service's interface functions *that has no return value* as the handler for the corresponding message type, and also registers the request message type
// TODO Needs a constraint: Service must implement the TSvc interface
func RegisterNoReply[TReq mq.MessageBody](svcFn func(svc Service, msg TReq)) any {
mq.AddNoRespServiceFn(&msgDispatcher, svcFn)
mq.RegisterMessage[TReq]()

return nil
}

Some files were not shown because too many files changed in this diff
