Browse Source

Merge remote-tracking branch 'origin/mv_common'

gitlink
Sydonian 2 years ago
parent
commit
34318344ce
100 changed files with 8366 additions and 0 deletions
  1. +2
    -0
      common/README.md
  2. +39
    -0
      common/assets/confs/agent.config.json
  3. +31
    -0
      common/assets/confs/client.config.json
  4. +20
    -0
      common/assets/confs/coordinator.config.json
  5. +31
    -0
      common/assets/confs/scanner.config.json
  6. +63
    -0
      common/assets/confs/sysSetting.xml
  7. +168
    -0
      common/assets/scripts/create_database.sql
  8. +27
    -0
      common/consts/consts.go
  9. +11
    -0
      common/globals/globals.go
  10. +36
    -0
      common/globals/pools.go
  11. +77
    -0
      common/go.mod
  12. +200
    -0
      common/go.sum
  13. +17
    -0
      common/magefiles/main.go
  14. +93
    -0
      common/models/models.go
  15. +3
    -0
      common/pkgs/cmd/cmd.go
  16. +348
    -0
      common/pkgs/cmd/create_ec_package.go
  17. +285
    -0
      common/pkgs/cmd/create_rep_package.go
  18. +151
    -0
      common/pkgs/cmd/download_package.go
  19. +120
    -0
      common/pkgs/cmd/update_ec_package.go
  20. +128
    -0
      common/pkgs/cmd/update_rep_package.go
  21. +120
    -0
      common/pkgs/db/bucket.go
  22. +117
    -0
      common/pkgs/db/cache.go
  23. +21
    -0
      common/pkgs/db/config/config.go
  24. +61
    -0
      common/pkgs/db/db.go
  25. +30
    -0
      common/pkgs/db/ec.go
  26. +32
    -0
      common/pkgs/db/location.go
  27. +110
    -0
      common/pkgs/db/model/model.go
  28. +41
    -0
      common/pkgs/db/node.go
  29. +280
    -0
      common/pkgs/db/object.go
  30. +131
    -0
      common/pkgs/db/object_block.go
  31. +132
    -0
      common/pkgs/db/object_rep.go
  32. +170
    -0
      common/pkgs/db/package.go
  33. +64
    -0
      common/pkgs/db/storage.go
  34. +116
    -0
      common/pkgs/db/storage_package.go
  35. +14
    -0
      common/pkgs/db/user_bucket.go
  36. +217
    -0
      common/pkgs/distlock/lockprovider/ipfs_lock.go
  37. +113
    -0
      common/pkgs/distlock/lockprovider/ipfs_lock_test.go
  38. +123
    -0
      common/pkgs/distlock/lockprovider/lock_compatibility_table.go
  39. +41
    -0
      common/pkgs/distlock/lockprovider/lock_compatibility_table_test.go
  40. +184
    -0
      common/pkgs/distlock/lockprovider/metadata_lock.go
  41. +226
    -0
      common/pkgs/distlock/lockprovider/storage_lock.go
  42. +78
    -0
      common/pkgs/distlock/lockprovider/string_lock_target.go
  43. +60
    -0
      common/pkgs/distlock/lockprovider/string_lock_target_test.go
  44. +64
    -0
      common/pkgs/distlock/reqbuilder/ipfs.go
  45. +31
    -0
      common/pkgs/distlock/reqbuilder/lock_request_builder.go
  46. +17
    -0
      common/pkgs/distlock/reqbuilder/metadata.go
  47. +63
    -0
      common/pkgs/distlock/reqbuilder/metadata_bucket.go
  48. +63
    -0
      common/pkgs/distlock/reqbuilder/metadata_cache.go
  49. +63
    -0
      common/pkgs/distlock/reqbuilder/metadata_node.go
  50. +65
    -0
      common/pkgs/distlock/reqbuilder/metadata_object.go
  51. +63
    -0
      common/pkgs/distlock/reqbuilder/metadata_object_block.go
  52. +63
    -0
      common/pkgs/distlock/reqbuilder/metadata_object_rep.go
  53. +63
    -0
      common/pkgs/distlock/reqbuilder/metadata_package.go
  54. +63
    -0
      common/pkgs/distlock/reqbuilder/metadata_storage_package.go
  55. +63
    -0
      common/pkgs/distlock/reqbuilder/metadata_user_bucket.go
  56. +63
    -0
      common/pkgs/distlock/reqbuilder/metadata_user_storage.go
  57. +74
    -0
      common/pkgs/distlock/reqbuilder/storage.go
  58. +62
    -0
      common/pkgs/distlock/service.go
  59. +38
    -0
      common/pkgs/ec/rs.go
  60. +345
    -0
      common/pkgs/grpc/agent/agent.pb.go
  61. +30
    -0
      common/pkgs/grpc/agent/agent.proto
  62. +209
    -0
      common/pkgs/grpc/agent/agent_grpc.pb.go
  63. +131
    -0
      common/pkgs/grpc/agent/client.go
  64. +43
    -0
      common/pkgs/grpc/agent/pool.go
  65. +12
    -0
      common/pkgs/grpc/config.go
  66. +227
    -0
      common/pkgs/iterator/ec_object_iterator.go
  67. +45
    -0
      common/pkgs/iterator/http_uploading_iterator.go
  68. +12
    -0
      common/pkgs/iterator/iterator.go
  69. +63
    -0
      common/pkgs/iterator/local_uploading_iterator.go
  70. +211
    -0
      common/pkgs/iterator/rep_object_iterator.go
  71. +30
    -0
      common/pkgs/mq/agent/agent.go
  72. +108
    -0
      common/pkgs/mq/agent/cache.go
  73. +61
    -0
      common/pkgs/mq/agent/client.go
  74. +60
    -0
      common/pkgs/mq/agent/object.go
  75. +73
    -0
      common/pkgs/mq/agent/server.go
  76. +188
    -0
      common/pkgs/mq/agent/storage.go
  77. +14
    -0
      common/pkgs/mq/config.go
  78. +12
    -0
      common/pkgs/mq/consts.go
  79. +51
    -0
      common/pkgs/mq/coordinator/agent.go
  80. +114
    -0
      common/pkgs/mq/coordinator/bucket.go
  81. +33
    -0
      common/pkgs/mq/coordinator/cache.go
  82. +59
    -0
      common/pkgs/mq/coordinator/client.go
  83. +60
    -0
      common/pkgs/mq/coordinator/common.go
  84. +15
    -0
      common/pkgs/mq/coordinator/coordinator_test.go
  85. +60
    -0
      common/pkgs/mq/coordinator/node.go
  86. +60
    -0
      common/pkgs/mq/coordinator/object.go
  87. +273
    -0
      common/pkgs/mq/coordinator/package.go
  88. +81
    -0
      common/pkgs/mq/coordinator/server.go
  89. +68
    -0
      common/pkgs/mq/coordinator/storage.go
  90. +59
    -0
      common/pkgs/mq/scanner/client.go
  91. +50
    -0
      common/pkgs/mq/scanner/event.go
  92. +17
    -0
      common/pkgs/mq/scanner/event/agent_check_cache.go
  93. +15
    -0
      common/pkgs/mq/scanner/event/agent_check_state.go
  94. +17
    -0
      common/pkgs/mq/scanner/event/agent_check_storage.go
  95. +15
    -0
      common/pkgs/mq/scanner/event/check_cache.go
  96. +15
    -0
      common/pkgs/mq/scanner/event/check_package.go
  97. +15
    -0
      common/pkgs/mq/scanner/event/check_rep_count.go
  98. +25
    -0
      common/pkgs/mq/scanner/event/event.go
  99. +67
    -0
      common/pkgs/mq/scanner/server.go
  100. +74
    -0
      common/utils/config.go

+ 2
- 0
common/README.md View File

@@ -0,0 +1,2 @@
# storage-common


+ 39
- 0
common/assets/confs/agent.config.json View File

@@ -0,0 +1,39 @@
{
"id": 1,
"local": {
"nodeID": 1,
"localIP": "127.0.0.1",
"externalIP": "127.0.0.1"
},
"grpc": {
"ip": "127.0.0.1",
"port": 5010
},
"ecPacketSize": 10,
"storageBaseDir": ".",
"tempFileLifetime": 3600,
"logger": {
"output": "file",
"outputFileName": "agent",
"outputDirectory": "log",
"level": "debug"
},
"rabbitMQ": {
"address": "127.0.0.1:5672",
"account": "cloudream",
"password": "123456",
"vhost": "/"
},
"ipfs": {
"port": 5001
},
"distlock": {
"etcdAddress": "127.0.0.1:2379",
"etcdUsername": "",
"etcdPassword": "",
"etcdLockAcquireTimeoutMs": 5000,
"etcdLockLeaseTimeSec": 5,
"lockRequestLeaseTimeSec": 5,
"submitLockRequestWithoutLease": true
}
}

+ 31
- 0
common/assets/confs/client.config.json View File

@@ -0,0 +1,31 @@
{
"local": {
"localIP": "127.0.0.1",
"externalIP": "127.0.0.1"
},
"agentGRPC": {
"port": 5010
},
"ecPacketSize": 10,
"maxRepCount": 10,
"logger": {
"output": "stdout",
"level": "debug"
},
"rabbitMQ": {
"address": "127.0.0.1:5672",
"account": "cloudream",
"password": "123456",
"vhost": "/"
},
"ipfs": null,
"distlock": {
"etcdAddress": "127.0.0.1:2379",
"etcdUsername": "",
"etcdPassword": "",
"etcdLockAcquireTimeoutMs": 5000,
"etcdLockLeaseTimeSec": 5,
"lockRequestLeaseTimeSec": 5,
"submitLockRequestWithoutLease": true
}
}

+ 20
- 0
common/assets/confs/coordinator.config.json View File

@@ -0,0 +1,20 @@
{
"logger": {
"output": "file",
"outputFileName": "coordinator",
"outputDirectory": "log",
"level": "debug"
},
"db": {
"address": "127.0.0.1:3306",
"account": "root",
"password": "123456",
"databaseName": "cloudream"
},
"rabbitMQ": {
"address": "127.0.0.1:5672",
"account": "cloudream",
"password": "123456",
"vhost": "/"
}
}

+ 31
- 0
common/assets/confs/scanner.config.json View File

@@ -0,0 +1,31 @@
{
"minAvailableRepProportion": 0.8,
"nodeUnavailableSeconds": 300,
"logger": {
"output": "file",
"outputFileName": "scanner",
"outputDirectory": "log",
"level": "debug"
},
"db": {
"address": "127.0.0.1:3306",
"account": "root",
"password": "123456",
"databaseName": "cloudream"
},
"rabbitMQ": {
"address": "127.0.0.1:5672",
"account": "cloudream",
"password": "123456",
"vhost": "/"
},
"distlock": {
"etcdAddress": "127.0.0.1:2379",
"etcdUsername": "",
"etcdPassword": "",
"etcdLockAcquireTimeoutMs": 5000,
"etcdLockLeaseTimeSec": 5,
"lockRequestLeaseTimeSec": 5,
"submitLockRequestWithoutLease": true
}
}

+ 63
- 0
common/assets/confs/sysSetting.xml View File

@@ -0,0 +1,63 @@
<setting>
<attribute>
<name>local.addr</name>
<value>101.201.215.165</value>
</attribute>
<attribute>
<name>controller.addr</name>
<value>101.201.215.196</value>
</attribute>
<attribute>
<name>agents.addr</name>
<value>/hw-sh/123.60.146.162</value>
<value>/hw-bj/120.46.183.86</value>
<value>/ali/101.201.215.165</value>
</attribute>
<attribute>
<name>agents.location</name>
<value>ali</value>
<value>hw-sh</value>
<value>hw-bj</value>
</attribute>
<attribute>
<name>oec.controller.thread.num</name>
<value>4</value>
</attribute>
<attribute>
<name>oec.agent.thread.num</name>
<value>2</value>
</attribute>
<attribute>
<name>oec.cmddist.thread.num</name>
<value>2</value>
</attribute>
<attribute>
<name>packet.size</name>
<value>131072</value>
</attribute>
<attribute>
<name>ec.concurrent.num</name>
<value>2</value>
</attribute>
<attribute>
<name>ec.policy</name>
<value><ecid>rs_9_6</ecid><class>RS96</class><n>9</n><k>6</k><w>1</w><opt>-1</opt></value>
<value><ecid>rs_3_2</ecid><class>RS96</class><n>3</n><k>2</k><w>1</w><opt>-1</opt></value><!-- NOTE(review): class RS96 paired with ecid rs_3_2 breaks the naming pattern (cf. edu_3_2 -> EDU32); looks like a copy-paste slip for RS32 - confirm before changing -->
<value><ecid>edu_9_6</ecid><class>EDU96</class><n>9</n><k>6</k><w>1</w><opt>-1</opt></value>
<value><ecid>edu_3_2</ecid><class>EDU32</class><n>3</n><k>2</k><w>1</w><opt>-1</opt></value>
<value><ecid>dfc_9_4</ecid><class>DFC</class><n>9</n><k>4</k><w>1</w><opt>-1</opt><param>3,2</param></value>
</attribute>
<attribute>
<name>inter.inner.addr</name>
<inner>
<dc><ip>172.23.85.69</ip><ip>172.23.85.71</ip><ip>172.23.85.70</ip></dc>
<dc><ip>192.168.0.69</ip></dc>
<dc><ip>192.168.0.76</ip></dc>
</inner>
<inter>
<dc><ip>101.201.215.196</ip><ip>101.201.215.165</ip><ip>101.201.214.111</ip></dc>
<dc><ip>123.60.146.162</ip></dc>
<dc><ip>120.46.183.86</ip></dc>
</inter>
</attribute>
</setting>

+ 168
- 0
common/assets/scripts/create_database.sql View File

@@ -0,0 +1,168 @@
-- Rebuilds the cloudream schema from scratch and seeds a few bootstrap rows
-- (one local node, one storage service, one bucket, and default EC params).
-- WARNING: destructive - drops any existing cloudream database first.
drop database if exists cloudream;

create database cloudream;

use cloudream;

-- Nodes participating in the system. State/LastReportTime are nullable and
-- presumably filled in at runtime - verify against the db layer.
create table Node (
NodeID int not null auto_increment primary key comment '节点ID',
Name varchar(128) not null comment '节点名称',
LocalIP varchar(128) not null comment '节点的内网IP',
ExternalIP varchar(128) not null comment '节点的外网IP',
LocationID int not null comment '节点的地域',
State varchar(128) comment '节点的状态',
LastReportTime timestamp comment '节点上次上报时间'
) comment = '节点表';

-- NOTE(review): NodeID is auto_increment; under MySQL's default sql_mode,
-- inserting 0 produces the next auto value (i.e. 1), not 0, unless
-- NO_AUTO_VALUE_ON_ZERO is set. The seeds below (Storage, UserNode)
-- reference NodeID 1, which only lines up if that substitution happens -
-- confirm the intended ID.
-- NOTE(review): State is varchar but seeded with numeric 1 here, while other
-- code uses string states like 'Normal' - verify the intended value.
insert into
Node (
NodeID,
Name,
LocalIP,
ExternalIP,
LocationID,
State
)
values
(0, "LocalNode", "localhost", "localhost", 0, 1);

-- Storage services, each hosted on a node (NodeID) under a base directory.
create table Storage (
StorageID int not null auto_increment primary key comment '存储服务ID',
Name varchar(100) not null comment '存储服务名称',
NodeID int not null comment '存储服务所在节点的ID',
Directory varchar(4096) not null comment '存储服务所在节点的目录',
State varchar(100) comment '状态'
) comment = "存储服务表";

insert into
Storage (StorageID, Name, NodeID, Directory, State)
values
(1, "HuaWei-Cloud", 1, "/", "Online");

-- Pairwise measured latency between nodes, in milliseconds.
create table NodeDelay (
SourceNodeID int not null comment '发起检测的节点ID',
DestinationNodeID int not null comment '被检测节点的ID',
DelayInMs int not null comment '发起节点与被检测节点间延迟(毫秒)',
primary key(SourceNodeID, DestinationNodeID)
) comment = '节点延迟表';

-- NOTE(review): Password is stored as plain varchar here; whether it is
-- hashed before insert is not visible from this script - verify.
create table User (
UserID int not null primary key comment '用户ID',
Password varchar(100) not null comment '用户密码'
) comment = '用户密码表';

-- Grants: which buckets a user may access.
create table UserBucket (
UserID int not null comment '用户ID',
BucketID int not null comment '用户可访问的桶ID',
primary key(UserID, BucketID)
) comment = '用户桶权限表';

insert into
UserBucket (UserID, BucketID)
values
(0, 1);

-- Grants: which nodes a user may use.
create table UserNode (
UserID int not null comment '用户ID',
NodeID int not null comment '用户可使用的节点ID',
primary key(UserID, NodeID)
) comment = '用户节点权限表';

insert into
UserNode (UserID, NodeID)
values
(0, 1);

-- Grants: which storage services a user may use.
create table UserStorage (
UserID int not null comment "用户ID",
StorageID int not null comment "存储服务ID",
primary key(UserID, StorageID)
);

insert into
UserStorage (UserID, StorageID)
values
(0, 1);

create table Bucket (
BucketID int not null auto_increment primary key comment '桶ID',
Name varchar(100) not null comment '桶名',
CreatorID int not null comment '创建者ID'
) comment = '桶表';

-- NOTE(review): same auto_increment-with-0 caveat as the Node seed above;
-- this row likely ends up as BucketID 1, which matches the UserBucket grant.
insert into
Bucket (BucketID, Name, CreatorID)
values
(0, "bucket01", 0);

-- Packages group objects inside a bucket; Redundancy stores the policy as JSON.
create table Package (
PackageID int not null auto_increment primary key comment '包ID',
Name varchar(100) not null comment '对象名',
BucketID int not null comment '桶ID',
State varchar(100) not null comment '状态',
Redundancy JSON not null comment '冗余策略'
);

-- Objects within a package; Path is unique per package.
create table Object (
ObjectID int not null auto_increment primary key comment '对象ID',
PackageID int not null comment '包ID',
Path varchar(500) not null comment '对象路径',
Size bigint not null comment '对象大小(Byte)',
UNIQUE KEY PackagePath (PackageID, Path)
) comment = '对象表';

-- Replica redundancy: one file hash per object.
create table ObjectRep (
ObjectID int not null primary key comment '对象ID',
FileHash varchar(100) not null comment '副本哈希值'
) comment = '对象副本表';

-- Erasure-coding redundancy: one row per coded block of an object.
-- `Index` is backquoted because it is a reserved word in MySQL.
create table ObjectBlock (
ObjectID int not null comment '对象ID',
`Index` int not null comment '编码块在条带内的排序',
FileHash varchar(100) not null comment '编码块哈希值',
primary key(ObjectID, `Index`)
) comment = '对象编码块表';

-- Which node caches which file (by hash), with pin state and priority.
create table Cache (
FileHash varchar(100) not null comment '编码块块ID',
NodeID int not null comment '节点ID',
State varchar(100) not null comment '状态',
CacheTime timestamp not null comment '缓存时间',
Priority int not null comment '编码块优先级',
primary key(FileHash, NodeID)
) comment = '缓存表';

-- A package scheduled onto a storage service by a user.
create table StoragePackage (
PackageID int not null comment '包ID',
StorageID int not null comment '存储服务ID',
UserID int not null comment '调度了此文件的用户ID',
State varchar(100) not null comment '包状态',
primary key(PackageID, StorageID, UserID)
);

create table Location (
LocationID int not null auto_increment primary key comment 'ID',
Name varchar(128) not null comment '名称'
) comment = '地域表';

insert into
Location (LocationID, Name)
values
(1, "Local");

-- Erasure-code parameter sets (k data blocks out of n total).
-- NOTE(review): EcID has no primary key or unique constraint - intended?
create table Ec (
EcID int not null comment '纠删码ID',
Name varchar(128) not null comment '纠删码名称',
EcK int not null comment 'ecK',
EcN int not null comment 'ecN'
) comment = '纠删码表';

insert into
Ec (EcID, Name, EcK, EcN)
values
(1, "rs_9_6", 6, 9);

insert into
Ec (EcID, Name, EcK, EcN)
values
(2, "rs_5_3", 3, 5);

+ 27
- 0
common/consts/consts.go View File

@@ -0,0 +1,27 @@
package consts

// Availability states for a node's IPFS daemon, its storage directory, and
// the node itself. Presumably persisted in the State columns created by
// assets/scripts/create_database.sql - verify against the db layer.
const (
	IPFSStateOK          = "OK"
	IPFSStateUnavailable = "Unavailable"

	StorageDirectoryStateOK = "OK"

	NodeStateNormal      = "Normal"
	NodeStateUnavailable = "Unavailable"
)

// Lifecycle states of a Package record.
const (
	PackageStateNormal  = "Normal"
	PackageStateDeleted = "Deleted"
)

// Lifecycle states of a StoragePackage record (a package scheduled onto a
// storage service).
const (
	StoragePackageStateNormal   = "Normal"
	StoragePackageStateDeleted  = "Deleted"
	StoragePackageStateOutdated = "Outdated"
)

// States of a Cache entry: pinned (kept) vs. temporary.
const (
	CacheStatePinned = "Pinned"
	CacheStateTemp   = "Temp"
)

+ 11
- 0
common/globals/globals.go View File

@@ -0,0 +1,11 @@
package globals

import (
	stgmodels "gitlink.org.cn/cloudream/storage-common/models"
)

// Local describes the machine the current process is running on.
// It is nil until InitLocal is called.
var Local *stgmodels.LocalMachineInfo

// InitLocal stores info as the package-wide local machine information.
// NOTE(review): no synchronization - presumably called once during startup
// before any goroutine reads Local; confirm against callers.
func InitLocal(info *stgmodels.LocalMachineInfo) {
	Local = info
}

+ 36
- 0
common/globals/pools.go View File

@@ -0,0 +1,36 @@
package globals

import (
	"gitlink.org.cn/cloudream/common/pkgs/ipfs"
	agtrpc "gitlink.org.cn/cloudream/storage-common/pkgs/grpc/agent"
	stgmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq"
	agtmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/agent"
	coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator"
	scmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner"
)

// AgentMQPool is the shared pool of agent message-queue clients.
// It is nil until InitMQPool is called.
var AgentMQPool *agtmq.Pool

// CoordinatorMQPool is the shared pool of coordinator message-queue clients.
// It is nil until InitMQPool is called.
var CoordinatorMQPool *coormq.Pool

// ScannerMQPool is the shared pool of scanner message-queue clients.
// It is nil until InitMQPool is called.
var ScannerMQPool *scmq.Pool

// InitMQPool builds all three MQ client pools from the same MQ config.
// NOTE(review): no synchronization on the package vars - presumably called
// once during startup before any pool is used; confirm against callers.
func InitMQPool(cfg *stgmq.Config) {
	AgentMQPool = agtmq.NewPool(cfg)

	CoordinatorMQPool = coormq.NewPool(cfg)

	ScannerMQPool = scmq.NewPool(cfg)
}

// AgentRPCPool is the shared pool of agent gRPC clients.
// It is nil until InitAgentRPCPool is called.
var AgentRPCPool *agtrpc.Pool

// InitAgentRPCPool builds the agent gRPC client pool from cfg.
func InitAgentRPCPool(cfg *agtrpc.PoolConfig) {
	AgentRPCPool = agtrpc.NewPool(cfg)
}

// IPFSPool is the shared pool of IPFS API clients.
// It is nil until InitIPFSPool is called.
var IPFSPool *ipfs.Pool

// InitIPFSPool builds the IPFS client pool from cfg.
func InitIPFSPool(cfg *ipfs.Config) {
	IPFSPool = ipfs.NewPool(cfg)
}

+ 77
- 0
common/go.mod View File

@@ -0,0 +1,77 @@
module gitlink.org.cn/cloudream/storage-common

require (
github.com/baohan10/reedsolomon v0.0.0-20230406042632-43574cac9fa7
github.com/beevik/etree v1.2.0
github.com/go-ping/ping v1.1.0
github.com/go-sql-driver/mysql v1.7.1
github.com/jmoiron/sqlx v1.3.5
github.com/magefile/mage v1.15.0
github.com/samber/lo v1.36.0
github.com/smartystreets/goconvey v1.8.0
gitlink.org.cn/cloudream/common v0.0.0
google.golang.org/grpc v1.54.0
google.golang.org/protobuf v1.30.0
)

require (
github.com/antonfisher/nested-logrus-formatter v1.3.1 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/ipfs/boxo v0.8.0 // indirect
github.com/ipfs/go-cid v0.4.0 // indirect
github.com/ipfs/go-ipfs-api v0.6.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p v0.26.3 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.8.0 // indirect
github.com/multiformats/go-multibase v0.1.1 // indirect
github.com/multiformats/go-multicodec v0.8.1 // indirect
github.com/multiformats/go-multihash v0.2.1 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/sirupsen/logrus v1.9.2 // indirect
github.com/smartystreets/assertions v1.13.1 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/streadway/amqp v1.1.0 // indirect
github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c // indirect
github.com/zyedidia/generic v1.2.1 // indirect
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd // indirect
lukechampine.com/blake3 v1.1.7 // indirect
)

go 1.20

replace gitlink.org.cn/cloudream/common v0.0.0 => ../../common

+ 200
- 0
common/go.sum View File

@@ -0,0 +1,200 @@
github.com/antonfisher/nested-logrus-formatter v1.3.1 h1:NFJIr+pzwv5QLHTPyKz9UMEoHck02Q9L0FP13b/xSbQ=
github.com/antonfisher/nested-logrus-formatter v1.3.1/go.mod h1:6WTfyWFkBc9+zyBaKIqRrg/KwMqBbodBjgbHjDz7zjA=
github.com/baohan10/reedsolomon v0.0.0-20230406042632-43574cac9fa7 h1:wcvD6enR///dFvb9cRodx5SGbPH4G4jPjw+aVIWkAKE=
github.com/baohan10/reedsolomon v0.0.0-20230406042632-43574cac9fa7/go.mod h1:rAxMF6pVaFK/s6T4gGczvloccNbtwzuYaP2Y7W6flE8=
github.com/beevik/etree v1.2.0 h1:l7WETslUG/T+xOPs47dtd6jov2Ii/8/OjCldk5fYfQw=
github.com/beevik/etree v1.2.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg=
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
github.com/go-ping/ping v1.1.0 h1:3MCGhVX4fyEUuhsfwPrsEdQw6xspHkv5zHsiSoDFZYw=
github.com/go-ping/ping v1.1.0/go.mod h1:xIFjORFzTxqIV/tDVGO4eDy/bLuSyawEeojSm3GfRGk=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/ipfs/boxo v0.8.0 h1:UdjAJmHzQHo/j3g3b1bAcAXCj/GM6iTwvSlBDvPBNBs=
github.com/ipfs/boxo v0.8.0/go.mod h1:RIsi4CnTyQ7AUsNn5gXljJYZlQrHBMnJp94p73liFiA=
github.com/ipfs/go-cid v0.4.0 h1:a4pdZq0sx6ZSxbCizebnKiMCx/xI/aBBFlB73IgH4rA=
github.com/ipfs/go-cid v0.4.0/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/ipfs/go-ipfs-api v0.6.0 h1:JARgG0VTbjyVhO5ZfesnbXv9wTcMvoKRBLF1SzJqzmg=
github.com/ipfs/go-ipfs-api v0.6.0/go.mod h1:iDC2VMwN9LUpQV/GzEeZ2zNqd8NUdRmWcFM+K/6odf0=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
github.com/libp2p/go-libp2p v0.26.3 h1:6g/psubqwdaBqNNoidbRKSTBEYgaOuKBhHl8Q5tO+PM=
github.com/libp2p/go-libp2p v0.26.3/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8=
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8=
github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo=
github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/samber/lo v1.36.0 h1:4LaOxH1mHnbDGhTVE0i1z8v/lWaQW8AIfOD3HU4mSaw=
github.com/samber/lo v1.36.0/go.mod h1:HLeWcJRRyLKp3+/XBJvOrerCQn9mhdKMHyd7IRlgeQ8=
github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y=
github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/streadway/amqp v1.1.0 h1:py12iX8XSyI7aN/3dUT8DFIDJazNJsVJdxNVEpnQTZM=
github.com/streadway/amqp v1.1.0/go.mod h1:WYSrTEYHOXHd0nwFeUXAe2G2hRnQT+deZJJf88uS9Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M=
github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c h1:GGsyl0dZ2jJgVT+VvWBf/cNijrHRhkrTjkmp5wg7li0=
github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c/go.mod h1:xxcJeBb7SIUl/Wzkz1eVKJE/CB34YNrqX2TQI6jY9zs=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XDc=
github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 h1:3xJIFvzUFbu4ls0BTBYcgbCGhA63eAOEMxIHugyXJqA=
golang.org/x/exp v0.0.0-20230519143937-03e91628a987/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd h1:sLpv7bNL1AsX3fdnWh9WVh7ejIzXdOc1RRHGeAmeStU=
google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=

+ 17
- 0
common/magefiles/main.go View File

@@ -0,0 +1,17 @@
//go:build mage

package main

import (
"path/filepath"

"github.com/magefile/mage/sh"
)

// Protos regenerates the Go gRPC sources for the agent service from its
// .proto definition.
func Protos() error {
	return proto("pkgs/grpc/agent", "agent.proto")
}

// proto runs protoc on dir/fileName, emitting both the protobuf message code
// (--go_out) and the gRPC service code (--go-grpc_out) into dir.
func proto(dir string, fileName string) error {
	return sh.Run("protoc", "--go_out="+dir, "--go-grpc_out="+dir, filepath.Join(dir, fileName))
}

+ 93
- 0
common/models/models.go View File

@@ -0,0 +1,93 @@
package models

import "gitlink.org.cn/cloudream/storage-common/pkgs/db/model"

/// TODO: Gather the shared struct definitions scattered across the codebase into this file.

// RedundancyData is the untyped form of a package's redundancy description;
// the concrete kinds declared in this file are RepRedundancyData and
// ECRedundancyData.
type RedundancyData interface{}

// RedundancyDataConst is a type constraint covering the known redundancy
// data kinds.
type RedundancyDataConst interface {
	RepRedundancyData | ECRedundancyData | RedundancyData
}

// RepRedundancyData describes replica redundancy by the file hash that all
// replicas share.
type RepRedundancyData struct {
	FileHash string `json:"fileHash"`
}

// NewRedundancyRepData builds a RepRedundancyData holding fileHash.
func NewRedundancyRepData(fileHash string) RepRedundancyData {
	return RepRedundancyData{FileHash: fileHash}
}

// ECRedundancyData describes erasure-coding redundancy: the EC scheme in use
// plus the coded blocks of the object.
type ECRedundancyData struct {
	Ec     EC                `json:"ec"`
	Blocks []ObjectBlockData `json:"blocks"`
}

// NewRedundancyEcData builds an ECRedundancyData from the scheme and its
// block list.
func NewRedundancyEcData(ec EC, blocks []ObjectBlockData) ECRedundancyData {
	return ECRedundancyData{Ec: ec, Blocks: blocks}
}

// EC identifies an erasure-coding scheme: EcK data blocks expanded into EcN
// total blocks.
type EC struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
	EcK  int    `json:"ecK"`
	EcN  int    `json:"ecN"`
}

// ObjectBlockData locates one coded block: its index within the object, its
// file hash, and the IDs of the nodes that hold it.
type ObjectBlockData struct {
	Index    int     `json:"index"`
	FileHash string  `json:"fileHash"`
	NodeIDs  []int64 `json:"nodeIDs"`
}

// NewObjectBlockData builds an ObjectBlockData from its fields.
func NewObjectBlockData(index int, fileHash string, nodeIDs []int64) ObjectBlockData {
	return ObjectBlockData{Index: index, FileHash: fileHash, NodeIDs: nodeIDs}
}

// NewEc builds an EC descriptor from its fields.
func NewEc(id int, name string, ecK int, ecN int) EC {
	return EC{ID: id, Name: name, EcK: ecK, EcN: ecN}
}

// ObjectRepData pairs an object with the hash of its replicated file and the
// IDs of the nodes that hold a replica.
type ObjectRepData struct {
	Object   model.Object `json:"object"`
	FileHash string       `json:"fileHash"`
	NodeIDs  []int64      `json:"nodeIDs"`
}

// NewObjectRepData builds an ObjectRepData from its fields.
func NewObjectRepData(object model.Object, fileHash string, nodeIDs []int64) ObjectRepData {
	return ObjectRepData{Object: object, FileHash: fileHash, NodeIDs: nodeIDs}
}

// ObjectECData pairs an object with the descriptions of its erasure-coded
// blocks.
type ObjectECData struct {
	Object model.Object      `json:"object"`
	Blocks []ObjectBlockData `json:"blocks"`
}

// NewObjectECData builds an ObjectECData from its fields.
func NewObjectECData(object model.Object, blocks []ObjectBlockData) ObjectECData {
	return ObjectECData{Object: object, Blocks: blocks}
}

// LocalMachineInfo describes the machine this process runs on. NodeID is nil
// when the local machine is not itself a storage-system node (callers in
// pkgs/cmd use that nil check to decide whether local IPFS needs locking).
type LocalMachineInfo struct {
	NodeID     *int64 `json:"nodeID"`
	ExternalIP string `json:"externalIP"`
	LocalIP    string `json:"localIP"`
}

+ 3
- 0
common/pkgs/cmd/cmd.go View File

@@ -0,0 +1,3 @@
package cmd

// This package mainly holds shared business-logic code.

+ 348
- 0
common/pkgs/cmd/create_ec_package.go View File

@@ -0,0 +1,348 @@
package cmd

import (
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"sync"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/models"

"gitlink.org.cn/cloudream/storage-common/globals"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage-common/pkgs/ec"
"gitlink.org.cn/cloudream/storage-common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator"
)

// CreateECPackage is the command that creates a new package with
// erasure-coding redundancy in a bucket and uploads objects into it.
type CreateECPackage struct {
	userID     int64
	bucketID   int64
	name       string
	objectIter iterator.UploadingObjectIterator
	redundancy models.ECRedundancyInfo
}

// CreateECPackageResult reports the ID of the created package and the
// per-object upload outcomes.
type CreateECPackageResult struct {
	PackageID     int64
	ObjectResults []ECObjectUploadResult
}

// ECObjectUploadResult records the outcome of uploading one object.
type ECObjectUploadResult struct {
	Info  *iterator.IterUploadingObject
	Error error
	// NOTE(review): ObjectID is never assigned within this file —
	// presumably filled in by a caller; confirm before relying on it.
	ObjectID int64
}

// NewCreateECPackage builds the command from its inputs; Execute performs
// the actual work.
func NewCreateECPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, redundancy models.ECRedundancyInfo) *CreateECPackage {
	return &CreateECPackage{
		userID:     userID,
		bucketID:   bucketID,
		name:       name,
		objectIter: objIter,
		redundancy: redundancy,
	}
}

// Execute creates a new EC-redundancy package in the bucket and uploads
// every object from the iterator into it: it takes the metadata locks
// required for package creation, creates the package record at the
// coordinator, resolves the candidate upload nodes and the EC configuration,
// locks the nodes' IPFS instances, then encodes and uploads the objects.
func (t *CreateECPackage) Execute(ctx *UpdatePackageContext) (*CreateECPackageResult, error) {
	defer t.objectIter.Close()

	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	// Fix: return the pooled client when done; the original acquired it and
	// never released it (DownloadPackage.Execute releases it the same way).
	defer coorCli.Close()

	mutex, err := reqbuilder.NewBuilder().
		Metadata().
		// To check whether the user may access the bucket.
		UserBucket().ReadOne(t.userID, t.bucketID).
		// To query the available upload nodes.
		Node().ReadAny().
		// To create the package record.
		Package().CreateOne(t.bucketID, t.name).
		// To create the records of the files in the package.
		Object().CreateAny().
		// To store the EC configuration.
		ObjectBlock().CreateAny().
		// To create Cache records.
		Cache().CreateAny().
		MutexLock(ctx.Distlock)
	if err != nil {
		return nil, fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer mutex.Unlock()

	createPkgResp, err := coorCli.CreatePackage(coormq.NewCreatePackage(t.userID, t.bucketID, t.name,
		models.NewTypedRedundancyInfo(t.redundancy)))
	if err != nil {
		return nil, fmt.Errorf("creating package: %w", err)
	}

	getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID))
	if err != nil {
		return nil, fmt.Errorf("getting user nodes: %w", err)
	}

	findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(globals.Local.ExternalIP))
	if err != nil {
		return nil, fmt.Errorf("finding client location: %w", err)
	}

	uploadNodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo {
		return UploadNodeInfo{
			Node:           node,
			IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID,
		}
	})

	getECResp, err := coorCli.GetECConfig(coormq.NewGetECConfig(t.redundancy.ECName))
	if err != nil {
		return nil, fmt.Errorf("getting ec: %w", err)
	}

	// Lock the IPFS instance of every upload node.
	ipfsReqBlder := reqbuilder.NewBuilder()
	// If the local IPFS daemon is itself a storage node, uploading from it
	// also requires the lock.
	if globals.Local.NodeID != nil {
		ipfsReqBlder.IPFS().CreateAnyRep(*globals.Local.NodeID)
	}
	for _, node := range uploadNodeInfos {
		if globals.Local.NodeID != nil && node.Node.NodeID == *globals.Local.NodeID {
			continue
		}

		ipfsReqBlder.IPFS().CreateAnyRep(node.Node.NodeID)
	}
	// Keeps freshly uploaded replicas from being purged mid-upload.
	ipfsMutex, err := ipfsReqBlder.MutexLock(ctx.Distlock)
	if err != nil {
		return nil, fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer ipfsMutex.Unlock()

	rets, err := uploadAndUpdateECPackage(createPkgResp.PackageID, t.objectIter, uploadNodeInfos, t.redundancy, getECResp.Config)
	if err != nil {
		return nil, err
	}

	return &CreateECPackageResult{
		PackageID:     createPkgResp.PackageID,
		ObjectResults: rets,
	}, nil
}

// uploadAndUpdateECPackage EC-encodes and uploads every object produced by
// objectIter, then registers the uploaded objects with the coordinator in a
// single UpdateECPackage call. It returns the per-object upload results.
func uploadAndUpdateECPackage(packageID int64, objectIter iterator.UploadingObjectIterator, uploadNodes []UploadNodeInfo, ecInfo models.ECRedundancyInfo, ec model.Ec) ([]ECObjectUploadResult, error) {
	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	// Fix: release the pooled client; the original acquired it and never
	// closed it.
	defer coorCli.Close()

	var uploadRets []ECObjectUploadResult
	var adds []coormq.AddECObjectInfo
	for {
		objInfo, err := objectIter.MoveNext()
		if err == iterator.ErrNoMoreItem {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("reading object: %w", err)
		}

		fileHashes, uploadedNodeIDs, err := uploadECObject(objInfo, uploadNodes, ecInfo, ec)
		uploadRets = append(uploadRets, ECObjectUploadResult{
			Info:  objInfo,
			Error: err,
		})
		if err != nil {
			return nil, fmt.Errorf("uploading object: %w", err)
		}

		adds = append(adds, coormq.NewAddECObjectInfo(objInfo.Path, objInfo.Size, fileHashes, uploadedNodeIDs))
	}

	_, err = coorCli.UpdateECPackage(coormq.NewUpdateECPackage(packageID, adds, nil))
	if err != nil {
		return nil, fmt.Errorf("updating package: %w", err)
	}

	return uploadRets, nil
}

// uploadECObject EC-encodes a single object and uploads its ec.EcN coded
// streams to nodes picked round-robin from uploadNodes, starting at a random
// offset. It returns the per-stream file hashes together with the matching
// node IDs.
func uploadECObject(obj *iterator.IterUploadingObject, uploadNodes []UploadNodeInfo, ecInfo models.ECRedundancyInfo, ec model.Ec) ([]string, []int64, error) {
	total := len(uploadNodes)
	start := rand.Intn(total)

	// Walk the candidate ring from a random starting point so blocks spread
	// across the available nodes.
	targets := make([]UploadNodeInfo, ec.EcN)
	targetIDs := make([]int64, ec.EcN)
	for i := range targets {
		targets[i] = uploadNodes[(start+i)%total]
		targetIDs[i] = targets[i].Node.NodeID
	}

	hashs, err := ecWrite(obj.File, obj.Size, ecInfo.PacketSize, ec.EcK, ec.EcN, targets)
	if err != nil {
		return nil, nil, fmt.Errorf("EcWrite failed, err: %w", err)
	}

	return hashs, targetIDs, nil
}

// chooseUploadNode picks a node to upload files to:
//  1. a random node among those in the same location as the client, if any;
//  2. otherwise a random node from the whole list.
func (t *CreateECPackage) chooseUploadNode(nodes []UploadNodeInfo) UploadNodeInfo {
	candidates := lo.Filter(nodes, func(e UploadNodeInfo, i int) bool { return e.IsSameLocation })
	if len(candidates) == 0 {
		candidates = nodes
	}

	return candidates[rand.Intn(len(candidates))]
}

// ecWrite splits the file into ecK data streams, encodes them into ecN
// streams via the encode pipeline, and uploads each encoded stream to the
// corresponding entry of nodes. It returns one file hash per encoded stream.
//
// TODO: adopt the error handling of the RepWrite code path — errors from the
// load goroutine and from uploadFile are currently ignored.
func ecWrite(file io.ReadCloser, fileSize int64, packetSize int64, ecK int, ecN int, nodes []UploadNodeInfo) ([]string, error) {
	// NOTE(review): hard-coded coefficients; per the original comment the "2"
	// should be ecK and the "3" should be ecN, so this table only matches
	// ecK=2/ecN=3 — confirm. (encode currently does not use coefs at all.)
	var coefs = [][]int64{{1, 1, 1}, {1, 2, 3}}
	// Number of packet groups per stream (ceiling division over ecK*packetSize).
	numPacket := (fileSize + int64(ecK)*packetSize - 1) / (int64(ecK) * packetSize)
	// Channels wiring the loader -> encoder -> uploader pipeline.
	loadBufs := make([]chan []byte, ecN)
	encodeBufs := make([]chan []byte, ecN)
	for i := 0; i < ecN; i++ {
		loadBufs[i] = make(chan []byte)
	}
	for i := 0; i < ecN; i++ {
		encodeBufs[i] = make(chan []byte)
	}
	hashs := make([]string, ecN)
	// Start the pipeline.
	go load(file, loadBufs[:ecN], ecK, numPacket*int64(ecK), packetSize) // load data from the local file system
	go encode(loadBufs[:ecN], encodeBufs[:ecN], ecK, coefs, numPacket)

	var wg sync.WaitGroup
	wg.Add(ecN)

	// One uploader goroutine per encoded stream.
	for idx := 0; idx < ecN; idx++ {
		i := idx // per-iteration copy captured by the goroutine
		reader := channelBytesReader{
			channel:     encodeBufs[idx],
			packetCount: numPacket,
		}
		go func() {
			// TODO: handle the upload error instead of discarding it.
			fileHash, _ := uploadFile(&reader, nodes[i])
			hashs[i] = fileHash
			wg.Done()
		}()
	}
	wg.Wait()

	return hashs, nil

}

func load(file io.ReadCloser, loadBufs []chan []byte, ecK int, totalNumPacket int64, ecPacketSize int64) error {

for i := 0; int64(i) < totalNumPacket; i++ {

buf := make([]byte, ecPacketSize)
idx := i % ecK
_, err := file.Read(buf)
if err != nil {
return fmt.Errorf("read file falied, err:%w", err)
}
loadBufs[idx] <- buf

if idx == ecK-1 {
for j := ecK; j < len(loadBufs); j++ {
zeroPkt := make([]byte, ecPacketSize)
loadBufs[j] <- zeroPkt
}
}
if err != nil && err != io.EOF {
return fmt.Errorf("load file to buf failed, err:%w", err)
}
}
for i := 0; i < len(loadBufs); i++ {

close(loadBufs[i])
}
file.Close()
return nil
}

// encode pulls one packet per lane from inBufs, runs the encoder over the
// group in place, and forwards the encoded packets to outBufs. After
// numPacket rounds every output channel is closed.
//
// NOTE: coefs is accepted but currently unused; the coding is done entirely
// by ec.NewRsEnc.
func encode(inBufs []chan []byte, outBufs []chan []byte, ecK int, coefs [][]int64, numPacket int64) {
	enc := ec.NewRsEnc(ecK, len(outBufs))
	group := make([][]byte, len(outBufs))

	for round := int64(0); round < numPacket; round++ {
		for lane := range outBufs {
			group[lane] = <-inBufs[lane]
		}

		enc.Encode(group)

		for lane := range outBufs {
			outBufs[lane] <- group[lane]
		}
	}

	for _, ch := range outBufs {
		close(ch)
	}
}

// channelBytesReader adapts a channel of packets into an io.Reader. It
// expects exactly packetCount packets to arrive on channel and reports
// io.EOF once they have all been consumed.
type channelBytesReader struct {
	channel     chan []byte
	packetCount int64
	readingData []byte // unread remainder of the current packet
}

// Read copies buffered packet bytes into buf, pulling the next packet from
// the channel when the current one is exhausted.
func (r *channelBytesReader) Read(buf []byte) (int, error) {
	if len(r.readingData) == 0 {
		if r.packetCount == 0 {
			return 0, io.EOF
		}

		r.readingData = <-r.channel
		r.packetCount--
	}

	n := copy(buf, r.readingData)
	// Fix: drop the consumed prefix. The original kept it
	// (readingData[:n]), which re-delivered the same bytes forever and
	// never drained the packet. Also renamed the local that shadowed the
	// builtin len.
	r.readingData = r.readingData[n:]

	return n, nil
}

// persist drains numPacket rounds from each channel of inBuf and writes the
// packets, interleaved channel by channel, to localFilePath inside the
// "assets" directory next to the executable. It always signals wg when it
// returns.
func persist(inBuf []chan []byte, numPacket int64, localFilePath string, wg *sync.WaitGroup) {
	// Fix: always signal completion; the original returned on the
	// os.Create error path without calling Done, deadlocking the waiter.
	defer wg.Done()

	fDir, err := os.Executable()
	if err != nil {
		panic(err)
	}

	fURL := filepath.Join(filepath.Dir(fDir), "assets")
	if _, err := os.Stat(fURL); os.IsNotExist(err) {
		os.MkdirAll(fURL, os.ModePerm)
	}

	file, err := os.Create(filepath.Join(fURL, localFilePath))
	if err != nil {
		// NOTE(review): the producers feeding inBuf are left blocked here,
		// as in the original — consider draining the channels; confirm with
		// the callers.
		return
	}
	defer file.Close()

	for i := int64(0); i < numPacket; i++ {
		for j := 0; j < len(inBuf); j++ {
			// Fix: removed the debug fmt.Println that dumped raw packet
			// bytes to stdout.
			file.Write(<-inBuf[j])
		}
	}
}

+ 285
- 0
common/pkgs/cmd/create_rep_package.go View File

@@ -0,0 +1,285 @@
package cmd

import (
"fmt"
"io"
"math/rand"
"time"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/models"
distsvc "gitlink.org.cn/cloudream/common/pkgs/distlock/service"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"

"gitlink.org.cn/cloudream/storage-common/globals"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/iterator"
agtmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator"
)

// UploadNodeInfo describes a candidate upload node and whether it is in the
// same location as this client (used to prefer intranet transfers).
type UploadNodeInfo struct {
	Node           model.Node
	IsSameLocation bool
}

// CreateRepPackage is the command that creates a new package with replica
// redundancy in a bucket and uploads objects into it.
type CreateRepPackage struct {
	userID     int64
	bucketID   int64
	name       string
	objectIter iterator.UploadingObjectIterator
	redundancy models.RepRedundancyInfo
}

// UpdatePackageContext carries the dependencies shared by the package
// create/update commands; currently just the distributed lock service.
type UpdatePackageContext struct {
	Distlock *distsvc.Service
}

// CreateRepPackageResult reports the ID of the created package and the
// per-object upload outcomes.
type CreateRepPackageResult struct {
	PackageID     int64
	ObjectResults []RepObjectUploadResult
}

// RepObjectUploadResult records the outcome of uploading one object.
type RepObjectUploadResult struct {
	Info     *iterator.IterUploadingObject
	Error    error
	FileHash string
	// NOTE(review): ObjectID is never assigned within this file —
	// presumably filled in by a caller; confirm before relying on it.
	ObjectID int64
}

// NewCreateRepPackage builds the command from its inputs; Execute performs
// the actual work.
func NewCreateRepPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, redundancy models.RepRedundancyInfo) *CreateRepPackage {
	return &CreateRepPackage{
		userID:     userID,
		bucketID:   bucketID,
		name:       name,
		objectIter: objIter,
		redundancy: redundancy,
	}
}

// Execute creates a new replica-redundancy package in the bucket and uploads
// every object from the iterator to a single chosen node: it takes the
// metadata locks, creates the package record at the coordinator, picks an
// upload node (preferring one co-located with the client), locks that node's
// IPFS instance, then uploads the objects.
func (t *CreateRepPackage) Execute(ctx *UpdatePackageContext) (*CreateRepPackageResult, error) {
	defer t.objectIter.Close()

	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	// Fix: return the pooled client when done; the original acquired it and
	// never released it (DownloadPackage.Execute releases it the same way).
	defer coorCli.Close()

	reqBlder := reqbuilder.NewBuilder()
	// If the local IPFS daemon is itself a storage node, uploading from it
	// also requires the lock.
	if globals.Local.NodeID != nil {
		reqBlder.IPFS().CreateAnyRep(*globals.Local.NodeID)
	}
	mutex, err := reqBlder.
		Metadata().
		// To check whether the user may access the bucket.
		UserBucket().ReadOne(t.userID, t.bucketID).
		// To query the available upload nodes.
		Node().ReadAny().
		// To create the package record.
		Package().CreateOne(t.bucketID, t.name).
		// To create the records of the files in the package.
		Object().CreateAny().
		// To store the EC configuration.
		ObjectBlock().CreateAny().
		// To create Cache records.
		Cache().CreateAny().
		MutexLock(ctx.Distlock)
	if err != nil {
		return nil, fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer mutex.Unlock()

	createPkgResp, err := coorCli.CreatePackage(coormq.NewCreatePackage(t.userID, t.bucketID, t.name,
		models.NewTypedRedundancyInfo(t.redundancy)))
	if err != nil {
		return nil, fmt.Errorf("creating package: %w", err)
	}

	getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID))
	if err != nil {
		return nil, fmt.Errorf("getting user nodes: %w", err)
	}

	findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(globals.Local.ExternalIP))
	if err != nil {
		return nil, fmt.Errorf("finding client location: %w", err)
	}

	nodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo {
		return UploadNodeInfo{
			Node:           node,
			IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID,
		}
	})
	uploadNode := t.chooseUploadNode(nodeInfos)

	// Keeps the freshly uploaded replica from being purged mid-upload.
	ipfsMutex, err := reqbuilder.NewBuilder().
		IPFS().CreateAnyRep(uploadNode.Node.NodeID).
		MutexLock(ctx.Distlock)
	if err != nil {
		return nil, fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer ipfsMutex.Unlock()

	rets, err := uploadAndUpdateRepPackage(createPkgResp.PackageID, t.objectIter, uploadNode)
	if err != nil {
		return nil, err
	}

	return &CreateRepPackageResult{
		PackageID:     createPkgResp.PackageID,
		ObjectResults: rets,
	}, nil
}

// uploadAndUpdateRepPackage uploads every object produced by objectIter to
// uploadNode, then registers the uploaded objects with the coordinator in a
// single UpdateRepPackage call. It returns the per-object upload results.
func uploadAndUpdateRepPackage(packageID int64, objectIter iterator.UploadingObjectIterator, uploadNode UploadNodeInfo) ([]RepObjectUploadResult, error) {
	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	// Fix: release the pooled client; the original acquired it and never
	// closed it.
	defer coorCli.Close()

	var uploadRets []RepObjectUploadResult
	var adds []coormq.AddRepObjectInfo
	for {
		objInfo, err := objectIter.MoveNext()
		if err == iterator.ErrNoMoreItem {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("reading object: %w", err)
		}

		fileHash, err := uploadFile(objInfo.File, uploadNode)
		uploadRets = append(uploadRets, RepObjectUploadResult{
			Info:     objInfo,
			Error:    err,
			FileHash: fileHash,
		})
		if err != nil {
			return nil, fmt.Errorf("uploading object: %w", err)
		}

		adds = append(adds, coormq.NewAddRepObjectInfo(objInfo.Path, objInfo.Size, fileHash, []int64{uploadNode.Node.NodeID}))
	}

	_, err = coorCli.UpdateRepPackage(coormq.NewUpdateRepPackage(packageID, adds, nil))
	if err != nil {
		return nil, fmt.Errorf("updating package: %w", err)
	}

	return uploadRets, nil
}

// uploadFile stores a file and returns its hash. It prefers the local IPFS
// daemon when one is configured, falling back to streaming the file to the
// chosen node's agent.
func uploadFile(file io.Reader, uploadNode UploadNodeInfo) (string, error) {
	// Local IPFS daemon available: try it first.
	if globals.IPFSPool != nil {
		logger.Infof("try to use local IPFS to upload file")

		// Pinning is only required when the local IPFS daemon is not itself
		// a node of the storage system.
		fileHash, err := uploadToLocalIPFS(file, uploadNode.Node.NodeID, globals.Local.NodeID == nil)
		if err == nil {
			return fileHash, nil
		}
		logger.Warnf("upload to local IPFS failed, so try to upload to node %d, err: %s", uploadNode.Node.NodeID, err.Error())
	}

	// Fall back to the agent. When client and node share a location, use
	// the node's internal address.
	nodeIP := uploadNode.Node.ExternalIP
	if uploadNode.IsSameLocation {
		nodeIP = uploadNode.Node.LocalIP

		logger.Infof("client and node %d are at the same location, use local ip\n", uploadNode.Node.NodeID)
	}

	fileHash, err := uploadToNode(file, nodeIP)
	if err != nil {
		return "", fmt.Errorf("upload to node %s failed, err: %w", nodeIP, err)
	}

	return fileHash, nil
}

// chooseUploadNode picks a node to upload files to:
//  1. a random node among those in the same location as the client, if any;
//  2. otherwise a random node from the whole list.
func (t *CreateRepPackage) chooseUploadNode(nodes []UploadNodeInfo) UploadNodeInfo {
	candidates := lo.Filter(nodes, func(e UploadNodeInfo, i int) bool { return e.IsSameLocation })
	if len(candidates) == 0 {
		candidates = nodes
	}

	return candidates[rand.Intn(len(candidates))]
}

// uploadToNode streams the file to the agent at nodeIP over RPC and returns
// the file hash reported by the agent.
func uploadToNode(file io.Reader, nodeIP string) (string, error) {
	cli, err := globals.AgentRPCPool.Acquire(nodeIP)
	if err != nil {
		return "", fmt.Errorf("new agent rpc client: %w", err)
	}
	defer cli.Close()

	return cli.SendIPFSFile(file)
}

// uploadToLocalIPFS adds the file through the local IPFS daemon and, when
// shouldPin is set, asks the agent on nodeID to pin the result. It returns
// the file hash.
func uploadToLocalIPFS(file io.Reader, nodeID int64, shouldPin bool) (string, error) {
	cli, err := globals.IPFSPool.Acquire()
	if err != nil {
		return "", fmt.Errorf("new ipfs client: %w", err)
	}
	defer cli.Close()

	// Add the file via the local daemon.
	fileHash, err := cli.CreateFile(file)
	if err != nil {
		return "", fmt.Errorf("creating ipfs file: %w", err)
	}

	if shouldPin {
		if err := pinIPFSFile(nodeID, fileHash); err != nil {
			return "", err
		}
	}

	return fileHash, nil
}

// pinIPFSFile asks the agent on nodeID to pin fileHash and polls (5-second
// waits) until the pinning task completes, returning any error the agent
// reports.
func pinIPFSFile(nodeID int64, fileHash string) error {
	agtCli, err := globals.AgentMQPool.Acquire(nodeID)
	if err != nil {
		return fmt.Errorf("new agent client: %w", err)
	}
	defer agtCli.Close()

	// Kick off the pinning task on the agent.
	startResp, err := agtCli.StartPinningObject(agtmq.NewStartPinningObject(fileHash))
	if err != nil {
		return fmt.Errorf("start pinning object: %w", err)
	}

	for {
		waitResp, err := agtCli.WaitPinningObject(agtmq.NewWaitPinningObject(startResp.TaskID, int64(time.Second)*5))
		if err != nil {
			return fmt.Errorf("waitting pinning object: %w", err)
		}

		if !waitResp.IsComplete {
			continue
		}

		if waitResp.Error != "" {
			return fmt.Errorf("agent pinning object: %s", waitResp.Error)
		}

		return nil
	}
}

+ 151
- 0
common/pkgs/cmd/download_package.go View File

@@ -0,0 +1,151 @@
package cmd

import (
"fmt"
"io"
"os"
"path/filepath"

"gitlink.org.cn/cloudream/common/models"
distsvc "gitlink.org.cn/cloudream/common/pkgs/distlock/service"
"gitlink.org.cn/cloudream/storage-common/globals"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator"
)

// DownloadPackage is the command that downloads a whole package into a local
// directory.
type DownloadPackage struct {
	userID    int64
	packageID int64
	// outputPath is the local directory the objects are written under,
	// preserving each object's in-package relative path.
	outputPath string
}

// DownloadPackageContext carries the dependencies of the download command;
// currently just the distributed lock service.
type DownloadPackageContext struct {
	Distlock *distsvc.Service
}

// NewDownloadPackage builds the command from its inputs; Execute performs
// the actual work.
func NewDownloadPackage(userID int64, packageID int64, outputPath string) *DownloadPackage {
	return &DownloadPackage{
		userID:     userID,
		packageID:  packageID,
		outputPath: outputPath,
	}
}

// Execute downloads every object of the package to t.outputPath, choosing
// the replica or EC download path according to the package's redundancy
// type.
func (t *DownloadPackage) Execute(ctx *DownloadPackageContext) error {
	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return fmt.Errorf("new coordinator client: %w", err)
	}
	defer coorCli.Close()

	getPkgResp, err := coorCli.GetPackage(coormq.NewGetPackage(t.userID, t.packageID))
	if err != nil {
		return fmt.Errorf("getting package: %w", err)
	}

	// Pick the iterator implementation that matches the redundancy type.
	var objIter iterator.DownloadingObjectIterator
	if getPkgResp.Redundancy.IsRepInfo() {
		objIter, err = t.downloadRep(ctx)
	} else {
		objIter, err = t.downloadEC(ctx, getPkgResp.Package)
	}
	if err != nil {
		return err
	}
	defer objIter.Close()

	return t.writeObject(objIter)
}

// downloadRep builds the object iterator for a replica-redundancy package by
// fetching the object list and the per-object replica data from the
// coordinator.
func (t *DownloadPackage) downloadRep(ctx *DownloadPackageContext) (iterator.DownloadingObjectIterator, error) {
	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer coorCli.Close()

	objsResp, err := coorCli.GetPackageObjects(coormq.NewGetPackageObjects(t.userID, t.packageID))
	if err != nil {
		return nil, fmt.Errorf("getting package objects: %w", err)
	}

	repDataResp, err := coorCli.GetPackageObjectRepData(coormq.NewGetPackageObjectRepData(t.packageID))
	if err != nil {
		return nil, fmt.Errorf("getting package object rep data: %w", err)
	}

	return iterator.NewRepObjectIterator(objsResp.Objects, repDataResp.Data, &iterator.DownloadContext{
		Distlock: ctx.Distlock,
	}), nil
}

// downloadEC builds the object iterator for an EC-redundancy package by
// fetching the object list, the per-object block data, and the package's EC
// configuration from the coordinator.
func (t *DownloadPackage) downloadEC(ctx *DownloadPackageContext, pkg model.Package) (iterator.DownloadingObjectIterator, error) {
	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer coorCli.Close()

	objsResp, err := coorCli.GetPackageObjects(coormq.NewGetPackageObjects(t.userID, t.packageID))
	if err != nil {
		return nil, fmt.Errorf("getting package objects: %w", err)
	}

	ecDataResp, err := coorCli.GetPackageObjectECData(coormq.NewGetPackageObjectECData(t.packageID))
	if err != nil {
		return nil, fmt.Errorf("getting package object ec data: %w", err)
	}

	ecInfo, err := pkg.Redundancy.ToECInfo()
	if err != nil {
		return nil, fmt.Errorf("get ec redundancy info: %w", err)
	}

	ecResp, err := coorCli.GetECConfig(coormq.NewGetECConfig(ecInfo.ECName))
	if err != nil {
		return nil, fmt.Errorf("getting ec: %w", err)
	}

	return iterator.NewECObjectIterator(objsResp.Objects, ecDataResp.Data, ecInfo, ecResp.Config, &iterator.DownloadContext{
		Distlock: ctx.Distlock,
	}), nil
}

// writeObject drains objIter, writing every downloaded object under
// t.outputPath while preserving its in-package relative path.
func (t *DownloadPackage) writeObject(objIter iterator.DownloadingObjectIterator) error {
	for {
		objInfo, err := objIter.MoveNext()
		if err == iterator.ErrNoMoreItem {
			break
		}
		if err != nil {
			return err
		}

		// Fix: the original deferred both Closes inside the loop, so every
		// download stream and output file stayed open until the whole
		// package finished. The closure scopes the defers to one object.
		err = func() error {
			defer objInfo.File.Close()

			fullPath := filepath.Join(t.outputPath, objInfo.Object.Path)

			if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
				return fmt.Errorf("creating object dir: %w", err)
			}

			outputFile, err := os.Create(fullPath)
			if err != nil {
				return fmt.Errorf("creating object file: %w", err)
			}
			defer outputFile.Close()

			if _, err := io.Copy(outputFile, objInfo.File); err != nil {
				return fmt.Errorf("copy object data to local file failed, err: %w", err)
			}

			return nil
		}()
		if err != nil {
			return err
		}
	}

	return nil
}

+ 120
- 0
common/pkgs/cmd/update_ec_package.go View File

@@ -0,0 +1,120 @@
package cmd

import (
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/models"

"gitlink.org.cn/cloudream/storage-common/globals"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage-common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator"
)

// UpdateECPackage is the command that uploads a new batch of objects into an
// existing EC-redundancy package.
type UpdateECPackage struct {
	userID     int64
	packageID  int64
	objectIter iterator.UploadingObjectIterator
}

// UpdateECPackageResult carries the per-object upload outcomes.
type UpdateECPackageResult struct {
	ObjectResults []ECObjectUploadResult
}

// NewUpdateECPackage builds the command from its inputs; Execute performs
// the actual work.
func NewUpdateECPackage(userID int64, packageID int64, objIter iterator.UploadingObjectIterator) *UpdateECPackage {
	return &UpdateECPackage{
		userID:     userID,
		packageID:  packageID,
		objectIter: objIter,
	}
}

// Execute uploads a new batch of objects into an existing EC-redundancy
// package: it locks the package metadata, resolves the package's EC
// configuration, locks the upload nodes' IPFS instances, then encodes and
// uploads the objects.
func (t *UpdateECPackage) Execute(ctx *UpdatePackageContext) (*UpdateECPackageResult, error) {
	defer t.objectIter.Close()

	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	// Fix: return the pooled client when done; the original acquired it and
	// never released it (DownloadPackage.Execute releases it the same way).
	defer coorCli.Close()

	mutex, err := reqbuilder.NewBuilder().
		Metadata().
		// To query the available upload nodes.
		Node().ReadAny().
		// To update the package record.
		Package().WriteOne(t.packageID).
		// To create the records of the files in the package.
		Object().CreateAny().
		// To store the EC configuration.
		ObjectBlock().CreateAny().
		// To create Cache records.
		Cache().CreateAny().
		MutexLock(ctx.Distlock)
	if err != nil {
		return nil, fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer mutex.Unlock()

	getPkgResp, err := coorCli.GetPackage(coormq.NewGetPackage(t.userID, t.packageID))
	if err != nil {
		return nil, fmt.Errorf("getting package: %w", err)
	}

	getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID))
	if err != nil {
		return nil, fmt.Errorf("getting user nodes: %w", err)
	}

	findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(globals.Local.ExternalIP))
	if err != nil {
		return nil, fmt.Errorf("finding client location: %w", err)
	}

	nodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo {
		return UploadNodeInfo{
			Node:           node,
			IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID,
		}
	})

	var ecInfo models.ECRedundancyInfo
	if ecInfo, err = getPkgResp.Package.Redundancy.ToECInfo(); err != nil {
		return nil, fmt.Errorf("get ec redundancy info: %w", err)
	}

	getECResp, err := coorCli.GetECConfig(coormq.NewGetECConfig(ecInfo.ECName))
	if err != nil {
		return nil, fmt.Errorf("getting ec: %w", err)
	}

	// Lock the IPFS instance of every upload node.
	ipfsReqBlder := reqbuilder.NewBuilder()
	// If the local IPFS daemon is itself a storage node, uploading from it
	// also requires the lock.
	if globals.Local.NodeID != nil {
		ipfsReqBlder.IPFS().CreateAnyRep(*globals.Local.NodeID)
	}
	for _, node := range nodeInfos {
		if globals.Local.NodeID != nil && node.Node.NodeID == *globals.Local.NodeID {
			continue
		}

		ipfsReqBlder.IPFS().CreateAnyRep(node.Node.NodeID)
	}
	// Keeps freshly uploaded blocks from being purged mid-upload.
	ipfsMutex, err := ipfsReqBlder.MutexLock(ctx.Distlock)
	if err != nil {
		return nil, fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer ipfsMutex.Unlock()

	rets, err := uploadAndUpdateECPackage(t.packageID, t.objectIter, nodeInfos, ecInfo, getECResp.Config)
	if err != nil {
		return nil, err
	}

	return &UpdateECPackageResult{
		ObjectResults: rets,
	}, nil
}

+ 128
- 0
common/pkgs/cmd/update_rep_package.go View File

@@ -0,0 +1,128 @@
package cmd

import (
"fmt"

"github.com/samber/lo"
mysort "gitlink.org.cn/cloudream/common/utils/sort"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"

"gitlink.org.cn/cloudream/storage-common/globals"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator"
)

// UpdateRepPackage is the command that re-uploads objects into an existing
// replica-redundancy package.
type UpdateRepPackage struct {
	userID     int64                            // owner of the package
	packageID  int64                            // package being updated
	objectIter iterator.UploadingObjectIterator // source of the files to upload
}

// UpdateNodeInfo decorates an upload candidate with whether it already holds
// the old copy of the object (used by chooseUploadNode to rank targets).
type UpdateNodeInfo struct {
	UploadNodeInfo
	HasOldObject bool
}

// UpdateRepPackageResult carries the per-object upload results.
type UpdateRepPackageResult struct {
	ObjectResults []RepObjectUploadResult
}

// NewUpdateRepPackage builds an UpdateRepPackage command for the given user,
// target package, and object source.
func NewUpdateRepPackage(userID int64, packageID int64, objectIter iterator.UploadingObjectIterator) *UpdateRepPackage {
	cmd := UpdateRepPackage{
		userID:     userID,
		packageID:  packageID,
		objectIter: objectIter,
	}
	return &cmd
}

// Execute runs the update: it acquires the metadata locks, picks a single
// upload node, pins that node's IPFS, and uploads every object from the
// iterator into the package.
func (t *UpdateRepPackage) Execute(ctx *UpdatePackageContext) (*UpdateRepPackageResult, error) {
	defer t.objectIter.Close()

	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	// NOTE(review): coorCli is never returned to CoordinatorMQPool here —
	// confirm whether the pool requires an explicit release.

	reqBlder := reqbuilder.NewBuilder()
	// If the local IPFS daemon is itself one of the storage nodes, uploading
	// from the local machine also needs its IPFS lock.
	if globals.Local.NodeID != nil {
		reqBlder.IPFS().CreateAnyRep(*globals.Local.NodeID)
	}
	mutex, err := reqBlder.
		Metadata().
		// for querying the available upload nodes
		Node().ReadAny().
		// for updating the package info
		Package().WriteOne(t.packageID).
		// for creating the object records of the package
		Object().CreateAny().
		// for setting the EC config
		ObjectBlock().CreateAny().
		// for creating Cache records
		Cache().CreateAny().
		MutexLock(ctx.Distlock)
	if err != nil {
		return nil, fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer mutex.Unlock()
	getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID))
	if err != nil {
		return nil, fmt.Errorf("getting user nodes: %w", err)
	}

	findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(globals.Local.ExternalIP))
	if err != nil {
		return nil, fmt.Errorf("finding client location: %w", err)
	}

	// NOTE(review): HasOldObject is never populated below, so in practice
	// only the location criterion influences node choice — confirm intended.
	nodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UpdateNodeInfo {
		return UpdateNodeInfo{
			UploadNodeInfo: UploadNodeInfo{
				Node:           node,
				IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID,
			},
		}
	})
	// Upload-node preference, highest first:
	// 1. the local IPFS daemon
	// 2. nodes holding the old file in the same location as the client
	// 3. nodes holding the old file in another location
	// 4. nodes in the same location
	// TODO the rules need rethinking for the multi-file case
	uploadNode := t.chooseUploadNode(nodeInfos)

	// Pin the uploaded replica so it is not garbage-collected mid-update.
	ipfsMutex, err := reqbuilder.NewBuilder().
		IPFS().CreateAnyRep(uploadNode.Node.NodeID).
		MutexLock(ctx.Distlock)
	if err != nil {
		return nil, fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer ipfsMutex.Unlock()

	rets, err := uploadAndUpdateRepPackage(t.packageID, t.objectIter, uploadNode.UploadNodeInfo)
	if err != nil {
		return nil, err
	}

	return &UpdateRepPackageResult{
		ObjectResults: rets,
	}, nil
}

// chooseUploadNode picks the node to upload files to.
// Candidates are sorted by descending priority: first whether the node
// already holds the old object, then whether it shares the client's
// location; the best-ranked node is returned. The previous comment claiming
// a random choice did not match the code — the selection is deterministic.
// NOTE(review): nodes must be non-empty, otherwise nodes[0] panics.
func (t *UpdateRepPackage) chooseUploadNode(nodes []UpdateNodeInfo) UpdateNodeInfo {
	mysort.Sort(nodes, func(left, right UpdateNodeInfo) int {
		// negated so that true sorts before false
		v := -mysort.CmpBool(left.HasOldObject, right.HasOldObject)
		if v != 0 {
			return v
		}

		return -mysort.CmpBool(left.IsSameLocation, right.IsSameLocation)
	})

	return nodes[0]
}

+ 120
- 0
common/pkgs/db/bucket.go View File

@@ -0,0 +1,120 @@
package db

import (
"database/sql"
"errors"
"fmt"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// BucketDB groups the bucket-related queries; it shares the connection pool
// of the embedded DB.
type BucketDB struct {
	*DB
}

// Bucket returns the bucket-query facade of this DB.
func (db *DB) Bucket() *BucketDB {
	return &BucketDB{DB: db}
}

// GetIDByName looks up a bucket's ID by its name. The raw sql error
// (e.g. sql.ErrNoRows when no bucket matches) is returned unchanged.
func (db *BucketDB) GetIDByName(bucketName string) (int64, error) {
	var bucketID int64

	// Select only the needed column; the previous version also fetched
	// BucketName into an unused field and shadowed the database/sql package
	// with a local variable named `sql`.
	const query = "select BucketID from Bucket where BucketName=? "
	if err := db.d.Get(&bucketID, query, bucketName); err != nil {
		return 0, err
	}

	return bucketID, nil
}

// IsAvailable reports whether the user has access to the given bucket.
func (db *BucketDB) IsAvailable(ctx SQLContext, bucketID int64, userID int64) (bool, error) {
	_, err := db.GetUserBucket(ctx, userID, bucketID)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		// no UserBucket row: not an error, just no permission
		return false, nil
	case err != nil:
		return false, fmt.Errorf("find bucket failed, err: %w", err)
	}

	return true, nil
}

// GetUserBucket fetches the bucket only if the user owns it (joined through
// the UserBucket ownership table).
func (*BucketDB) GetUserBucket(ctx SQLContext, userID int64, bucketID int64) (model.Bucket, error) {
	const query = "select Bucket.* from UserBucket, Bucket where UserID = ? and UserBucket.BucketID = Bucket.BucketID and Bucket.BucketID = ?"
	var bucket model.Bucket
	err := sqlx.Get(ctx, &bucket, query, userID, bucketID)
	return bucket, err
}

// GetUserBuckets lists every bucket the user owns.
func (*BucketDB) GetUserBuckets(ctx SQLContext, userID int64) ([]model.Bucket, error) {
	const query = "select Bucket.* from UserBucket, Bucket where UserID = ? and UserBucket.BucketID = Bucket.BucketID"
	var buckets []model.Bucket
	err := sqlx.Select(ctx, &buckets, query, userID)
	return buckets, err
}

// Create makes a bucket named bucketName owned by userID and returns its new
// BucketID. It fails if the user already has a bucket with that name.
// NOTE: the duplicate check and the two inserts are not atomic — call this
// inside a transaction (DoTx) to avoid races.
func (db *BucketDB) Create(ctx SQLContext, userID int64, bucketName string) (int64, error) {
	var bucketID int64
	err := sqlx.Get(ctx, &bucketID, "select Bucket.BucketID from UserBucket, Bucket where UserBucket.UserID = ? and UserBucket.BucketID = Bucket.BucketID and Bucket.Name = ?", userID, bucketName)
	if err == nil {
		// a row came back, so the name is already taken
		return 0, fmt.Errorf("bucket name exists")
	}
	if !errors.Is(err, sql.ErrNoRows) {
		return 0, err
	}

	ret, err := ctx.Exec("insert into Bucket(Name,CreatorID) values(?,?)", bucketName, userID)
	if err != nil {
		return 0, fmt.Errorf("insert bucket failed, err: %w", err)
	}

	bucketID, err = ret.LastInsertId()
	if err != nil {
		return 0, fmt.Errorf("get inserted bucket id failed, err: %w", err)
	}

	// grant ownership to the creating user
	if _, err = ctx.Exec("insert into UserBucket(UserID,BucketID) values(?,?)", userID, bucketID); err != nil {
		return 0, fmt.Errorf("insert into user bucket failed, err: %w", err)
	}

	return bucketID, nil
}

// Delete removes the bucket and its ownership records, then soft-deletes
// every package that belonged to it.
// NOTE: run inside a transaction; the statements are not individually atomic.
func (db *BucketDB) Delete(ctx SQLContext, bucketID int64) error {
	if _, err := ctx.Exec("delete from UserBucket where BucketID = ?", bucketID); err != nil {
		return fmt.Errorf("delete user bucket failed, err: %w", err)
	}

	if _, err := ctx.Exec("delete from Bucket where BucketID = ?", bucketID); err != nil {
		return fmt.Errorf("delete bucket failed, err: %w", err)
	}

	// soft-delete the packages inside the bucket (these are package IDs,
	// not object IDs as the previous variable name suggested)
	var packageIDs []int64
	if err := sqlx.Select(ctx, &packageIDs, "select PackageID from Package where BucketID = ?", bucketID); err != nil {
		return fmt.Errorf("query package failed, err: %w", err)
	}

	for _, packageID := range packageIDs {
		// TODO not every error necessarily needs to abort the loop
		if err := db.Package().SoftDelete(ctx, packageID); err != nil {
			return fmt.Errorf("set package deleted failed, err: %w", err)
		}
	}
	return nil
}

+ 117
- 0
common/pkgs/db/cache.go View File

@@ -0,0 +1,117 @@
package db

import (
"time"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/storage-common/consts"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// CacheDB groups queries on the Cache table, which records which node holds
// which file (by hash) together with a pin state and a cache timestamp.
type CacheDB struct {
	*DB
}

// Cache returns the cache-query facade of this DB.
func (db *DB) Cache() *CacheDB {
	return &CacheDB{DB: db}
}

// Get fetches the cache record keyed by (fileHash, nodeID).
func (*CacheDB) Get(ctx SQLContext, fileHash string, nodeID int64) (model.Cache, error) {
	var cache model.Cache
	err := sqlx.Get(ctx, &cache, "select * from Cache where FileHash = ? and NodeID = ?", fileHash, nodeID)
	return cache, err
}

// BatchGetAllFileHashes pages through the distinct file hashes in the table,
// returning count hashes starting at offset start.
func (*CacheDB) BatchGetAllFileHashes(ctx SQLContext, start int, count int) ([]string, error) {
	var hashes []string
	err := sqlx.Select(ctx, &hashes, "select distinct FileHash from Cache limit ?, ?", start, count)
	return hashes, err
}

// GetNodeCaches lists every cache record held by the given node.
func (*CacheDB) GetNodeCaches(ctx SQLContext, nodeID int64) ([]model.Cache, error) {
	var caches []model.Cache
	err := sqlx.Select(ctx, &caches, "select * from Cache where NodeID = ?", nodeID)
	return caches, err
}

// CreateNew inserts a fresh pinned cache record for (fileHash, nodeID).
// The column list is explicit: the Cache table also carries a Priority
// column (see model.Cache and BatchCreatePinned), so a positional 4-value
// insert would not match the table's column count.
func (*CacheDB) CreateNew(ctx SQLContext, fileHash string, nodeID int64) error {
	_, err := ctx.Exec("insert into Cache(FileHash,NodeID,State,CacheTime) values(?,?,?,?)", fileHash, nodeID, consts.CacheStatePinned, time.Now())
	if err != nil {
		return err
	}

	return nil
}

// CreatePinned creates a pinned cache record, replacing any existing row for
// the same key (REPLACE INTO). Columns are named explicitly so the statement
// does not silently depend on the table's column order.
func (*CacheDB) CreatePinned(ctx SQLContext, fileHash string, nodeID int64, priority int) error {
	_, err := ctx.Exec("replace into Cache(FileHash,NodeID,State,CacheTime,Priority) values(?,?,?,?,?)", fileHash, nodeID, consts.CacheStatePinned, time.Now(), priority)
	return err
}

// BatchCreatePinned upserts a pinned cache record on one node for every hash
// in fileHashes, sharing a single timestamp. An empty hash list is a no-op
// (NamedExec would otherwise fail on an empty slice).
func (*CacheDB) BatchCreatePinned(ctx SQLContext, fileHashes []string, nodeID int64, priority int) error {
	if len(fileHashes) == 0 {
		return nil
	}

	nowTime := time.Now()
	caches := make([]model.Cache, 0, len(fileHashes))
	for _, hash := range fileHashes {
		caches = append(caches, model.Cache{
			FileHash:  hash,
			NodeID:    nodeID,
			State:     consts.CacheStatePinned,
			CacheTime: nowTime,
			Priority:  priority,
		})
	}

	_, err := sqlx.NamedExec(ctx, "insert into Cache(FileHash,NodeID,State,CacheTime,Priority) values(:FileHash,:NodeID,:State,:CacheTime,:Priority)"+
		" on duplicate key update State=values(State), CacheTime=values(CacheTime), Priority=values(Priority)",
		caches,
	)
	return err
}

// CreateTemp inserts a Temp-state cache record; it is a no-op when a row for
// the key already exists (INSERT IGNORE). Columns are explicit because the
// Cache table also has a Priority column (see model.Cache).
func (*CacheDB) CreateTemp(ctx SQLContext, fileHash string, nodeID int64) error {
	_, err := ctx.Exec("insert ignore into Cache(FileHash,NodeID,State,CacheTime) values(?,?,?,?)", fileHash, nodeID, consts.CacheStateTemp, time.Now())
	return err
}

// GetCachingFileNodes finds the nodes that have a cache record for the file.
func (*CacheDB) GetCachingFileNodes(ctx SQLContext, fileHash string) ([]model.Node, error) {
	const query = "select Node.* from Cache, Node where Cache.FileHash=? and Cache.NodeID = Node.NodeID"
	var nodes []model.Node
	err := sqlx.Select(ctx, &nodes, query, fileHash)
	return nodes, err
}

// DeleteTemp removes the record for (fileHash, nodeID) only if it is in the
// Temp state; pinned records are left untouched.
func (*CacheDB) DeleteTemp(ctx SQLContext, fileHash string, nodeID int64) error {
	_, err := ctx.Exec("delete from Cache where FileHash = ? and NodeID = ? and State = ?", fileHash, nodeID, consts.CacheStateTemp)
	return err
}

// DeleteNodeAll wipes every cache record belonging to the given node.
func (*CacheDB) DeleteNodeAll(ctx SQLContext, nodeID int64) error {
	_, err := ctx.Exec("delete from Cache where NodeID = ?", nodeID)
	return err
}

// FindCachingFileUserNodes finds the nodes that cache the file AND are
// accessible by the given user (joined through UserNode).
func (*CacheDB) FindCachingFileUserNodes(ctx SQLContext, userID int64, fileHash string) ([]model.Node, error) {
	const query = "select Node.* from Cache, UserNode, Node where Cache.FileHash=? and Cache.NodeID = UserNode.NodeID and UserNode.UserID = ? and UserNode.NodeID = Node.NodeID"
	var nodes []model.Node
	err := sqlx.Select(ctx, &nodes, query, fileHash, userID)
	return nodes, err
}

// SetTemp downgrades the record for (fileHash, nodeID) to the Temp state and
// refreshes its CacheTime.
func (*CacheDB) SetTemp(ctx SQLContext, fileHash string, nodeID int64) error {
	now := time.Now()
	_, err := ctx.Exec("update Cache set State = ?, CacheTime = ? where FileHash = ? and NodeID = ?", consts.CacheStateTemp, now, fileHash, nodeID)
	return err
}

+ 21
- 0
common/pkgs/db/config/config.go View File

@@ -0,0 +1,21 @@
package config

import "fmt"

// Config holds the MySQL connection settings.
type Config struct {
	Address      string `json:"address"`
	Account      string `json:"account"`
	Password     string `json:"password"`
	DatabaseName string `json:"databaseName"`
}

// MakeSourceString renders the config as a go-sql-driver/mysql DSN of the
// form account:password@tcp(address)/db?charset=...&parseTime=true&loc=...;
// the loc value is URL-escaped, hence %2F for '/'.
func (cfg *Config) MakeSourceString() string {
	const options = "charset=utf8mb4&parseTime=true&loc=Asia%2FShanghai"
	return fmt.Sprintf("%s:%s@tcp(%s)/%s?%s",
		cfg.Account, cfg.Password, cfg.Address, cfg.DatabaseName, options)
}

+ 61
- 0
common/pkgs/db/db.go View File

@@ -0,0 +1,61 @@
package db

import (
"context"
"database/sql"
"fmt"

_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/config"
)

// DB wraps the sqlx connection pool and is the root of all query facades
// (Bucket(), Cache(), Object(), ...).
type DB struct {
	d *sqlx.DB
}

// SQLContext is the common query interface satisfied by both *sqlx.DB and
// *sqlx.Tx, so every query method can run standalone or inside a
// transaction. Note: sqlx.Ext already embeds Queryer and Execer; they are
// listed separately only for emphasis.
type SQLContext interface {
	sqlx.Queryer
	sqlx.Execer
	sqlx.Ext
}

// NewDB opens a MySQL connection pool from cfg and verifies it with a ping.
// The pool is closed again when the ping fails, so no connection is leaked.
func NewDB(cfg *config.Config) (*DB, error) {
	db, err := sqlx.Open("mysql", cfg.MakeSourceString())
	if err != nil {
		return nil, fmt.Errorf("open database connection failed, err: %w", err)
	}

	// Ping once so that a bad configuration is reported here rather than at
	// the first query.
	if err := db.Ping(); err != nil {
		db.Close()
		return nil, err
	}

	return &DB{
		d: db,
	}, nil
}

// DoTx runs fn inside a transaction with the given isolation level, rolling
// back when fn fails and committing otherwise.
func (db *DB) DoTx(isolation sql.IsolationLevel, fn func(tx *sqlx.Tx) error) error {
	tx, err := db.d.BeginTxx(context.Background(), &sql.TxOptions{Isolation: isolation})
	if err != nil {
		return err
	}

	if err := fn(tx); err != nil {
		// best-effort rollback; fn's error is the one worth reporting
		tx.Rollback()
		return err
	}

	// After Commit returns (even with an error) the transaction is finished
	// and all further operations fail with ErrTxDone, so the previous
	// Rollback-after-failed-Commit was a no-op and has been removed.
	return tx.Commit()
}

// SQLCtx returns the pool itself as a SQLContext, for running queries
// outside of any transaction.
func (db *DB) SQLCtx() SQLContext {
	return db.d
}

+ 30
- 0
common/pkgs/db/ec.go View File

@@ -0,0 +1,30 @@
package db

import (
//"database/sql"

"github.com/jmoiron/sqlx"
//"gitlink.org.cn/cloudream/common/consts"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// EcDB groups the queries on the Ec (erasure-coding parameter) table.
type EcDB struct {
	*DB
}

// Ec returns the erasure-coding query facade of this DB.
func (db *DB) Ec() *EcDB {
	return &EcDB{DB: db}
}

// GetEc looks up the erasure-coding parameters registered under ecName.
func (db *EcDB) GetEc(ctx SQLContext, ecName string) (model.Ec, error) {
	var ec model.Ec
	err := sqlx.Get(ctx, &ec, "select * from Ec where Name = ?", ecName)
	return ec, err
}

// GetEcName returns the Redundancy value of the object with the given ID.
// The previous version had a `?` placeholder but never passed objectID as an
// argument, so the query could not succeed.
func (db *EcDB) GetEcName(ctx SQLContext, objectID int) (string, error) {
	var ret string
	err := sqlx.Get(ctx, &ret, "select Redundancy from Object where ObjectID = ?", objectID)
	return ret, err
}

+ 32
- 0
common/pkgs/db/location.go View File

@@ -0,0 +1,32 @@
package db

import (
"fmt"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// LocationDB groups the queries on the Location (geographic region) table.
type LocationDB struct {
	*DB
}

// Location returns the location-query facade of this DB.
func (db *DB) Location() *LocationDB {
	return &LocationDB{DB: db}
}

// GetByID fetches the location row with the given ID.
func (*LocationDB) GetByID(ctx SQLContext, id int64) (model.Location, error) {
	var location model.Location
	err := sqlx.Get(ctx, &location, "select * from Location where LocationID = ?", id)
	return location, err
}

// FindLocationByExternalIP resolves the location of the node whose external
// IP matches ip: it first finds that node's LocationID, then loads the row.
func (db *LocationDB) FindLocationByExternalIP(ctx SQLContext, ip string) (model.Location, error) {
	var locationID int64
	if err := sqlx.Get(ctx, &locationID, "select LocationID from Node where ExternalIP = ?", ip); err != nil {
		return model.Location{}, fmt.Errorf("find node by external ip: %w", err)
	}

	return db.GetByID(ctx, locationID)
}

+ 110
- 0
common/pkgs/db/model/model.go View File

@@ -0,0 +1,110 @@
package model

import (
"time"

"gitlink.org.cn/cloudream/common/models"
)

// Node is one storage node of the system.
type Node struct {
	NodeID     int64  `db:"NodeID" json:"nodeID"`
	Name       string `db:"Name" json:"name"`
	LocalIP    string `db:"LocalIP" json:"localIP"`
	ExternalIP string `db:"ExternalIP" json:"externalIP"`
	LocationID int64  `db:"LocationID" json:"locationID"`
	State      string `db:"State" json:"state"`
	// nil until the node has reported at least once (see NodeDB.UpdateState)
	LastReportTime *time.Time `db:"LastReportTime" json:"lastReportTime"`
}

// Storage is a storage directory exported by a node.
type Storage struct {
	StorageID int64  `db:"StorageID" json:"storageID"`
	Name      string `db:"Name" json:"name"`
	NodeID    int64  `db:"NodeID" json:"nodeID"`
	Directory string `db:"Directory" json:"directory"`
	State     string `db:"State" json:"state"`
}

// NodeDelay records the measured latency between two nodes.
type NodeDelay struct {
	SourceNodeID      int64 `db:"SourceNodeID"`
	DestinationNodeID int64 `db:"DestinationNodeID"`
	DelayInMs         int   `db:"DelayInMs"`
}

// User is an account of the system.
type User struct {
	UserID   int64  `db:"UserID" json:"userID"`
	Password string `db:"PassWord" json:"password"`
}

// UserBucket links a user to a bucket they own.
type UserBucket struct {
	UserID   int64 `db:"UserID" json:"userID"`
	BucketID int64 `db:"BucketID" json:"bucketID"`
}

// UserNode links a user to a node they may use.
type UserNode struct {
	UserID int64 `db:"UserID" json:"userID"`
	NodeID int64 `db:"NodeID" json:"nodeID"`
}

// UserStorage links a user to a storage they may use.
type UserStorage struct {
	UserID    int64 `db:"UserID" json:"userID"`
	StorageID int64 `db:"StorageID" json:"storageID"`
}

// Bucket is a named container of packages.
type Bucket struct {
	BucketID  int64  `db:"BucketID" json:"bucketID"`
	Name      string `db:"Name" json:"name"`
	CreatorID int64  `db:"CreatorID" json:"creatorID"`
}

// Package is a group of objects stored with a common redundancy policy.
type Package struct {
	PackageID int64  `db:"PackageID" json:"packageID"`
	Name      string `db:"Name" json:"name"`
	BucketID  int64  `db:"BucketID" json:"bucketID"`
	State     string `db:"State" json:"state"`
	// redundancy policy (rep or ec) serialized into a single column
	Redundancy models.TypedRedundancyInfo `db:"Redundancy" json:"redundancy"`
}

// Object is one file inside a package.
type Object struct {
	ObjectID  int64  `db:"ObjectID" json:"objectID"`
	PackageID int64  `db:"PackageID" json:"packageID"`
	Path      string `db:"Path" json:"path"`
	// serialized as a JSON string to avoid precision loss in clients
	Size int64 `db:"Size" json:"size,string"`
}

// ObjectRep is the replica record of an object (one file hash per object).
type ObjectRep struct {
	ObjectID int64  `db:"ObjectID" json:"objectID"`
	FileHash string `db:"FileHash" json:"fileHash"`
}

// ObjectBlock is one erasure-coded block of an object.
type ObjectBlock struct {
	ObjectID int64  `db:"ObjectID" json:"objectID"`
	Index    int    `db:"Index" json:"index"`
	FileHash string `db:"FileHash" json:"fileHash"`
}

// Cache records that a node holds a copy of a file, with its pin state.
type Cache struct {
	FileHash  string    `db:"FileHash" json:"fileHash"`
	NodeID    int64     `db:"NodeID" json:"nodeID"`
	State     string    `db:"State" json:"state"`
	CacheTime time.Time `db:"CacheTime" json:"cacheTime"`
	Priority  int       `db:"Priority" json:"priority"`
}

// StoragePackage records that a user loaded a package onto a storage.
type StoragePackage struct {
	PackageID int64  `db:"PackageID" json:"packageID"`
	StorageID int64  `db:"StorageID" json:"storageID"`
	UserID    int64  `db:"UserID" json:"userID"`
	State     string `db:"State" json:"state"`
}

// Location is a geographic region that nodes belong to.
type Location struct {
	LocationID int64  `db:"LocationID" json:"locationID"`
	Name       string `db:"Name" json:"name"`
}

// Ec is a named erasure-coding parameter set (K data / N total blocks).
type Ec struct {
	EcID int    `db:"EcID" json:"ecID"`
	Name string `db:"Name" json:"name"`
	EcK  int    `db:"EcK" json:"ecK"`
	EcN  int    `db:"EcN" json:"ecN"`
}

+ 41
- 0
common/pkgs/db/node.go View File

@@ -0,0 +1,41 @@
package db

import (
"time"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// NodeDB groups the queries on the Node table.
type NodeDB struct {
	*DB
}

// Node returns the node-query facade of this DB.
func (db *DB) Node() *NodeDB {
	return &NodeDB{DB: db}
}

// GetByID fetches the node row with the given ID.
func (db *NodeDB) GetByID(ctx SQLContext, nodeID int64) (model.Node, error) {
	var node model.Node
	err := sqlx.Get(ctx, &node, "select * from Node where NodeID = ?", nodeID)
	return node, err
}

// GetAllNodes lists every node known to the system.
func (db *NodeDB) GetAllNodes(ctx SQLContext) ([]model.Node, error) {
	var nodes []model.Node
	err := sqlx.Select(ctx, &nodes, "select * from Node")
	return nodes, err
}

// GetUserNodes lists the nodes the given user is allowed to use.
func (db *NodeDB) GetUserNodes(ctx SQLContext, userID int64) ([]model.Node, error) {
	const query = "select Node.* from UserNode, Node where UserNode.NodeID = Node.NodeID and UserNode.UserID=?"
	var nodes []model.Node
	err := sqlx.Select(ctx, &nodes, query, userID)
	return nodes, err
}

// UpdateState sets the node's state and stamps LastReportTime with now.
func (db *NodeDB) UpdateState(ctx SQLContext, nodeID int64, state string) error {
	now := time.Now()
	_, err := ctx.Exec("update Node set State = ?, LastReportTime = ? where NodeID = ?", state, now, nodeID)
	return err
}

+ 280
- 0
common/pkgs/db/object.go View File

@@ -0,0 +1,280 @@
package db

import (
"fmt"

"github.com/jmoiron/sqlx"
"github.com/samber/lo"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator"
)

// ObjectDB groups the queries on the Object table.
type ObjectDB struct {
	*DB
}

// Object returns the object-query facade of this DB.
func (db *DB) Object() *ObjectDB {
	return &ObjectDB{DB: db}
}

// GetByID fetches the object row with the given ID.
func (db *ObjectDB) GetByID(ctx SQLContext, objectID int64) (model.Object, error) {
	var obj model.Object
	err := sqlx.Get(ctx, &obj, "select * from Object where ObjectID = ?", objectID)
	return obj, err
}

// Create inserts a new object row and returns its generated ObjectID.
func (db *ObjectDB) Create(ctx SQLContext, packageID int64, path string, size int64) (int64, error) {
	const query = "insert into Object(PackageID, Path, Size) values(?,?,?)"
	ret, err := ctx.Exec(query, packageID, path, size)
	if err != nil {
		return 0, fmt.Errorf("insert object failed, err: %w", err)
	}

	objectID, err := ret.LastInsertId()
	if err != nil {
		return 0, fmt.Errorf("get id of inserted object failed, err: %w", err)
	}

	return objectID, nil
}

// CreateOrUpdate inserts the object or, when a row with the same key already
// exists, updates its Size. The bool result is true for an insert, false for
// an update.
// NOTE(review): relies on a unique key covering (PackageID, Path) — confirm
// against create_database.sql.
func (db *ObjectDB) CreateOrUpdate(ctx SQLContext, packageID int64, path string, size int64) (int64, bool, error) {
	sql := "insert into Object(PackageID, Path, Size) values(?,?,?) on duplicate key update Size = ?"
	ret, err := ctx.Exec(sql, packageID, path, size, size)
	if err != nil {
		return 0, false, fmt.Errorf("insert object failed, err: %w", err)
	}

	affs, err := ret.RowsAffected()
	if err != nil {
		return 0, false, fmt.Errorf("getting affected rows: %w", err)
	}

	// MySQL reports 1 affected row for a plain insert, 2 for an
	// on-duplicate-key update (and 0 when the update changed nothing)
	if affs == 1 {
		objectID, err := ret.LastInsertId()
		if err != nil {
			return 0, false, fmt.Errorf("get id of inserted object failed, err: %w", err)
		}
		return objectID, true, nil
	}

	// updated (or unchanged): look the existing ID up by its key
	var objID int64
	if err = sqlx.Get(ctx, &objID, "select ObjectID from Object where PackageID = ? and Path = ?", packageID, path); err != nil {
		return 0, false, fmt.Errorf("getting object id: %w", err)
	}

	return objID, false, nil
}

// UpdateRepObject updates a replica object's size, repoints its ObjectRep at
// the new file hash if it changed, and refreshes the Cache rows on nodeIDs.
// (The accidental duplicated nested err check has been collapsed to one.)
func (db *ObjectDB) UpdateRepObject(ctx SQLContext, objectID int64, fileSize int64, nodeIDs []int64, fileHash string) error {
	if _, err := db.UpdateFileInfo(ctx, objectID, fileSize); err != nil {
		return fmt.Errorf("update rep object failed, err: %w", err)
	}

	objRep, err := db.ObjectRep().GetByID(ctx, objectID)
	if err != nil {
		return fmt.Errorf("get object rep failed, err: %w", err)
	}

	// If the new file's hash differs from the old one, repoint the FileHash
	// and insert fresh Cache records.
	if objRep.FileHash != fileHash {
		if _, err := db.ObjectRep().Update(ctx, objectID, fileHash); err != nil {
			return fmt.Errorf("update rep object file hash failed, err: %w", err)
		}

		for _, nodeID := range nodeIDs {
			if err := db.Cache().CreatePinned(ctx, fileHash, nodeID, 0); err != nil { // priority = 0
				return fmt.Errorf("create cache failed, err: %w", err)
			}
		}
		return nil
	}

	// Same hash: only add Cache rows for nodes that lack one.
	cachedNodes, err := db.Cache().GetCachingFileNodes(ctx, fileHash)
	if err != nil {
		return fmt.Errorf("find caching file nodes failed, err: %w", err)
	}

	// keep only the IDs not present in cachedNodes
	newNodeIDs := lo.Filter(nodeIDs, func(id int64, index int) bool {
		return lo.NoneBy(cachedNodes, func(node model.Node) bool {
			return node.NodeID == id
		})
	})
	for _, nodeID := range newNodeIDs {
		if err := db.Cache().CreatePinned(ctx, fileHash, nodeID, 0); err != nil { // priority = 0
			return fmt.Errorf("create cache failed, err: %w", err)
		}
	}

	return nil
}

// BatchGetAllEcObjectIDs pages through the IDs of all non-replica (EC)
// objects. The table name is spelled "Object" to match every other query in
// this package — MySQL table names are case-sensitive on Linux hosts.
func (*ObjectDB) BatchGetAllEcObjectIDs(ctx SQLContext, start int, count int) ([]int64, error) {
	var ret []int64
	rep := "rep"
	err := sqlx.Select(ctx, &ret, "select ObjectID from Object where Redundancy != ? limit ?, ?", rep, start, count)
	return ret, err
}

// UpdateFileInfo sets the size of an object and reports whether a row was
// changed. The column is Size — matching Create's insert and model.Object's
// db tag — not FileSize as the previous statement had it.
func (*ObjectDB) UpdateFileInfo(ctx SQLContext, objectID int64, fileSize int64) (bool, error) {
	ret, err := ctx.Exec("update Object set Size = ? where ObjectID = ?", fileSize, objectID)
	if err != nil {
		return false, err
	}

	cnt, err := ret.RowsAffected()
	if err != nil {
		return false, fmt.Errorf("get affected rows failed, err: %w", err)
	}

	return cnt > 0, nil
}

// GetPackageObjects lists a package's objects in ascending ObjectID order.
func (*ObjectDB) GetPackageObjects(ctx SQLContext, packageID int64) ([]model.Object, error) {
	const query = "select * from Object where PackageID = ? order by ObjectID asc"
	var objs []model.Object
	err := sqlx.Select(ctx, &objs, query, packageID)
	return objs, err
}

// BatchAddRep creates or updates a replica object record for every entry in
// objs and returns the object IDs in input order.
func (db *ObjectDB) BatchAddRep(ctx SQLContext, packageID int64, objs []coormq.AddRepObjectInfo) ([]int64, error) {
	var objIDs []int64
	for _, obj := range objs {
		objID, isNew, err := db.CreateOrUpdate(ctx, packageID, obj.Path, obj.Size)
		if err != nil {
			return nil, fmt.Errorf("creating object: %w", err)
		}
		objIDs = append(objIDs, objID)

		// a brand-new object gets fresh rep/cache rows; an existing one is
		// reconciled against its previous state
		if isNew {
			err = db.createRep(ctx, objID, obj)
		} else {
			err = db.updateRep(ctx, objID, obj)
		}
		if err != nil {
			return nil, err
		}
	}

	return objIDs, nil
}

// createRep inserts the ObjectRep row and pinned Cache rows for a brand-new
// replica object.
func (db *ObjectDB) createRep(ctx SQLContext, objID int64, obj coormq.AddRepObjectInfo) error {
	if err := db.ObjectRep().Create(ctx, objID, obj.FileHash); err != nil {
		return fmt.Errorf("creating object rep: %w", err)
	}

	const priority = 0 // priority is fixed at 0 for now
	for _, nodeID := range obj.NodeIDs {
		if err := db.Cache().CreatePinned(ctx, obj.FileHash, nodeID, priority); err != nil {
			return fmt.Errorf("creating cache: %w", err)
		}
	}

	return nil
}
// updateRep reconciles an existing replica object's ObjectRep and Cache rows
// with the newly uploaded file described by obj.
func (db *ObjectDB) updateRep(ctx SQLContext, objID int64, obj coormq.AddRepObjectInfo) error {
	objRep, err := db.ObjectRep().GetByID(ctx, objID)
	if err != nil {
		return fmt.Errorf("getting object rep: %w", err)
	}

	// If the new file's hash differs from the old one, repoint the FileHash
	// and insert fresh Cache records.
	if objRep.FileHash != obj.FileHash {
		_, err := db.ObjectRep().Update(ctx, objID, obj.FileHash)
		if err != nil {
			return fmt.Errorf("updating rep object file hash: %w", err)
		}

		for _, nodeID := range obj.NodeIDs {
			if err := db.Cache().CreatePinned(ctx, obj.FileHash, nodeID, 0); err != nil {
				return fmt.Errorf("creating cache: %w", err)
			}
		}

	} else {
		// Same hash: only add Cache rows that do not exist yet.
		cachedNodes, err := db.Cache().GetCachingFileNodes(ctx, obj.FileHash)
		if err != nil {
			return fmt.Errorf("finding caching file nodes: %w", err)
		}

		// keep only the IDs not present in cachedNodes
		newNodeIDs := lo.Filter(obj.NodeIDs, func(id int64, index int) bool {
			return lo.NoneBy(cachedNodes, func(node model.Node) bool {
				return node.NodeID == id
			})
		})
		for _, nodeID := range newNodeIDs {
			if err := db.Cache().CreatePinned(ctx, obj.FileHash, nodeID, 0); err != nil {
				return fmt.Errorf("creating cache: %w", err)
			}
		}
	}

	return nil
}

// BatchAddEC creates or updates an erasure-coded object record for each
// entry in objs and returns the object IDs in input order. For an existing
// object, all of its old block rows are dropped and recreated.
func (db *ObjectDB) BatchAddEC(ctx SQLContext, packageID int64, objs []coormq.AddECObjectInfo) ([]int64, error) {
	objIDs := make([]int64, 0, len(objs))
	for _, obj := range objs {
		// create (or update the size of) the object record itself
		objID, isCreate, err := db.CreateOrUpdate(ctx, packageID, obj.Path, obj.Size)
		if err != nil {
			return nil, fmt.Errorf("creating object: %w", err)
		}

		objIDs = append(objIDs, objID)

		if !isCreate {
			// drop all previous block rows and re-add them below
			if err = db.ObjectBlock().DeleteObjectAll(ctx, objID); err != nil {
				return nil, fmt.Errorf("deleting all object block: %w", err)
			}

		}

		// one block row per coded block, indexed 0..len-1
		for i := 0; i < len(obj.FileHashes); i++ {
			err := db.ObjectBlock().Create(ctx, objID, i, obj.FileHashes[i])
			if err != nil {
				return nil, fmt.Errorf("creating object block: %w", err)
			}
		}

		// pin each block on its node
		// NOTE(review): assumes obj.NodeIDs is index-aligned with
		// obj.FileHashes — confirm with the coordinator message definition.
		priority := 0 // priority is fixed at 0 for now
		for i, nodeID := range obj.NodeIDs {
			err = db.Cache().CreatePinned(ctx, obj.FileHashes[i], nodeID, priority)
			if err != nil {
				return nil, fmt.Errorf("creating cache: %w", err)
			}
		}
	}

	return objIDs, nil
}

// BatchDelete removes every object whose ID is in ids. An empty list is a
// no-op. The IN clause is expanded with sqlx.In — a bare "in (?)" would bind
// the whole slice to a single placeholder and delete at most one row.
func (*ObjectDB) BatchDelete(ctx SQLContext, ids []int64) error {
	if len(ids) == 0 {
		return nil
	}

	query, args, err := sqlx.In("delete from Object where ObjectID in (?)", ids)
	if err != nil {
		return err
	}

	_, err = ctx.Exec(query, args...)
	return err
}

// DeleteInPackage removes every object belonging to the package.
func (*ObjectDB) DeleteInPackage(ctx SQLContext, packageID int64) error {
	_, err := ctx.Exec("delete from Object where PackageID = ?", packageID)
	return err
}

+ 131
- 0
common/pkgs/db/object_block.go View File

@@ -0,0 +1,131 @@
package db

import (
"database/sql"
"fmt"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/storage-common/consts"
"gitlink.org.cn/cloudream/storage-common/models"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// ObjectBlockDB groups the queries on the ObjectBlock (EC block) table.
type ObjectBlockDB struct {
	*DB
}

// ObjectBlock returns the block-query facade of this DB.
func (db *DB) ObjectBlock() *ObjectBlockDB {
	return &ObjectBlockDB{DB: db}
}

// Create inserts one coded-block row. Index is a MySQL reserved word and
// must be backtick-quoted when it appears unqualified.
func (db *ObjectBlockDB) Create(ctx SQLContext, objectID int64, index int, fileHash string) error {
	_, err := ctx.Exec("insert into ObjectBlock(ObjectID, `Index`, FileHash) values(?,?,?)", objectID, index, fileHash)
	return err
}

// DeleteObjectAll removes every block row belonging to the object.
func (db *ObjectBlockDB) DeleteObjectAll(ctx SQLContext, objectID int64) error {
	_, err := ctx.Exec("delete from ObjectBlock where ObjectID = ?", objectID)
	return err
}

// DeleteInPackage removes the block rows of every object in the package
// (multi-table delete joined through Object).
func (db *ObjectBlockDB) DeleteInPackage(ctx SQLContext, packageID int64) error {
	const query = "delete ObjectBlock from ObjectBlock inner join Object on ObjectBlock.ObjectID = Object.ObjectID where PackageID = ?"
	_, err := ctx.Exec(query, packageID)
	return err
}

// CountBlockWithHash counts the blocks with the given hash that belong to
// packages in the normal state; a no-rows result counts as zero.
func (db *ObjectBlockDB) CountBlockWithHash(ctx SQLContext, fileHash string) (int, error) {
	const query = "select count(FileHash) from ObjectBlock, Object, Package where FileHash = ? and" +
		" ObjectBlock.ObjectID = Object.ObjectID and" +
		" Object.PackageID = Package.PackageID and" +
		" Package.State = ?"

	var cnt int
	err := sqlx.Get(ctx, &cnt, query, fileHash, consts.PackageStateNormal)
	if err == sql.ErrNoRows {
		return 0, nil
	}

	return cnt, err
}

// GetBatchObjectBlocks returns, for each object ID, that object's block file
// hashes positioned by block index. The query now runs on ctx (the parameter
// was previously ignored in favor of the raw pool), and the first error
// aborts the batch instead of being overwritten by later iterations.
// NOTE(review): each hash is stored at position row.Index, so a sparse or
// out-of-range index would panic — assumes indexes are dense 0..len-1.
func (db *ObjectBlockDB) GetBatchObjectBlocks(ctx SQLContext, objectIDs []int64) ([][]string, error) {
	blocks := make([][]string, len(objectIDs))
	for i, objectID := range objectIDs {
		var rows []model.ObjectBlock
		if err := sqlx.Select(ctx, &rows, "select * from ObjectBlock where ObjectID=?", objectID); err != nil {
			return nil, err
		}

		hashes := make([]string, len(rows))
		for ii := 0; ii < len(rows); ii++ {
			hashes[rows[ii].Index] = rows[ii].FileHash
		}
		blocks[i] = hashes
	}
	return blocks, nil
}

// GetBatchBlocksNodes returns, for each file's list of block hashes, the IDs
// of the nodes that hold a pinned cache of each block. The first query error
// aborts immediately; previously errors were overwritten by later iterations
// and partial results could be returned alongside a stale error.
func (db *ObjectBlockDB) GetBatchBlocksNodes(ctx SQLContext, hashs [][]string) ([][][]int64, error) {
	nodes := make([][][]int64, len(hashs))
	for i, hs := range hashs {
		fileNodes := make([][]int64, len(hs))
		for j, h := range hs {
			var cachingNodes []model.Node
			err := sqlx.Select(ctx, &cachingNodes,
				"select Node.* from Cache, Node where"+
					" Cache.FileHash=? and Cache.NodeID = Node.NodeID and Cache.State=?", h, consts.CacheStatePinned)
			if err != nil {
				return nil, err
			}

			ids := make([]int64, len(cachingNodes))
			for k := range cachingNodes {
				ids[k] = cachingNodes[k].NodeID
			}
			fileNodes[j] = ids
		}
		nodes[i] = fileNodes
	}
	return nodes, nil
}

// GetWithNodeIDInPackage returns, for every object in the package, its
// coded blocks together with the IDs of the nodes caching each block.
// Node IDs come back from group_concat as a comma-separated string; the
// column is NULL (nil pointer) when no Cache row matches the block's hash.
func (db *ObjectBlockDB) GetWithNodeIDInPackage(ctx SQLContext, packageID int64) ([]models.ObjectECData, error) {
	var objs []model.Object
	err := sqlx.Select(ctx, &objs, "select * from Object where PackageID = ? order by ObjectID asc", packageID)
	if err != nil {
		return nil, fmt.Errorf("query objectIDs: %w", err)
	}

	rets := make([]models.ObjectECData, 0, len(objs))

	for _, obj := range objs {
		var tmpRets []struct {
			Index    int     `db:"Index"`
			FileHash string  `db:"FileHash"`
			NodeIDs  *string `db:"NodeIDs"` // nil when the block is cached nowhere
		}

		// one row per (Index, FileHash); the left join keeps blocks that
		// have no cache records
		err := sqlx.Select(ctx,
			&tmpRets,
			"select ObjectBlock.Index, ObjectBlock.FileHash, group_concat(NodeID) as NodeIDs from ObjectBlock"+
				" left join Cache on ObjectBlock.FileHash = Cache.FileHash"+
				" where ObjectID = ? group by ObjectBlock.Index, ObjectBlock.FileHash",
			obj.ObjectID,
		)
		if err != nil {
			return nil, err
		}

		blocks := make([]models.ObjectBlockData, 0, len(tmpRets))
		for _, tmp := range tmpRets {
			var block models.ObjectBlockData
			block.Index = tmp.Index
			block.FileHash = tmp.FileHash

			if tmp.NodeIDs != nil {
				block.NodeIDs = splitIDStringUnsafe(*tmp.NodeIDs)
			}

			blocks = append(blocks, block)
		}

		rets = append(rets, models.NewObjectECData(obj, blocks))
	}

	return rets, nil
}

+ 132
- 0
common/pkgs/db/object_rep.go View File

@@ -0,0 +1,132 @@
package db

import (
"database/sql"
"fmt"
"strconv"
"strings"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/storage-common/consts"
"gitlink.org.cn/cloudream/storage-common/models"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// ObjectRepDB groups the queries on the ObjectRep (replica) table.
type ObjectRepDB struct {
	*DB
}

// ObjectRep returns the replica-query facade of this DB.
func (db *DB) ObjectRep() *ObjectRepDB {
	return &ObjectRepDB{DB: db}
}

// GetByID fetches the replica record of the object with the given ID.
func (db *ObjectRepDB) GetByID(ctx SQLContext, objectID int64) (model.ObjectRep, error) {
	var rep model.ObjectRep
	err := sqlx.Get(ctx, &rep, "select * from ObjectRep where ObjectID = ?", objectID)
	return rep, err
}

// Create inserts the replica record linking the object to its file hash.
func (db *ObjectRepDB) Create(ctx SQLContext, objectID int64, fileHash string) error {
	_, err := ctx.Exec("insert into ObjectRep(ObjectID, FileHash) values(?,?)", objectID, fileHash)
	return err
}

// Update repoints the object's replica record at fileHash and returns the
// number of affected rows (0 when the object has no replica record).
func (db *ObjectRepDB) Update(ctx SQLContext, objectID int64, fileHash string) (int64, error) {
	ret, err := ctx.Exec("update ObjectRep set FileHash = ? where ObjectID = ?", fileHash, objectID)
	if err != nil {
		return 0, err
	}

	affected, err := ret.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("get affected rows failed, err: %w", err)
	}

	return affected, nil
}

// Delete removes the object's replica record.
func (db *ObjectRepDB) Delete(ctx SQLContext, objectID int64) error {
	_, err := ctx.Exec("delete from ObjectRep where ObjectID = ?", objectID)
	return err
}

// DeleteInPackage removes the replica rows of every object in the package
// (multi-table delete joined through Object).
func (db *ObjectRepDB) DeleteInPackage(ctx SQLContext, packageID int64) error {
	const query = "delete ObjectRep from ObjectRep inner join Object on ObjectRep.ObjectID = Object.ObjectID where PackageID = ?"
	_, err := ctx.Exec(query, packageID)
	return err
}

// GetFileMaxRepCount returns the repCount configured for the file's package
// (extracted from the Redundancy JSON column), or 0 when there is no
// matching row or the JSON path yields NULL.
// The scan target is a *int behind another pointer so that a SQL NULL from
// json_extract scans as nil instead of failing.
func (db *ObjectRepDB) GetFileMaxRepCount(ctx SQLContext, fileHash string) (int, error) {
	var maxRepCnt *int
	err := sqlx.Get(ctx, &maxRepCnt,
		"select json_extract(Redundancy, '$.info.repCount') from ObjectRep, Object, Package where FileHash = ? and"+
			" ObjectRep.ObjectID = Object.ObjectID and"+
			" Object.PackageID = Package.PackageID and"+
			" Package.State = ?", fileHash, consts.PackageStateNormal)

	if err == sql.ErrNoRows {
		return 0, nil
	}

	if err != nil {
		return 0, err
	}

	// NULL json_extract result: treat as "no rep count configured"
	if maxRepCnt == nil {
		return 0, nil
	}

	return *maxRepCnt, err
}

// GetWithNodeIDInPackage returns every object in the package together with
// its replica file hash and the IDs of the nodes caching it.
// FileHash and NodeIDs come from left joins, so both are nil pointers when
// the object has no replica record or no cache rows; NodeIDs is a
// comma-separated group_concat string when present.
func (db *ObjectRepDB) GetWithNodeIDInPackage(ctx SQLContext, packageID int64) ([]models.ObjectRepData, error) {
	var tmpRets []struct {
		model.Object
		FileHash *string `db:"FileHash"` // nil when there is no ObjectRep row
		NodeIDs  *string `db:"NodeIDs"`  // nil when no node caches the file
	}

	err := sqlx.Select(ctx,
		&tmpRets,
		"select Object.*, ObjectRep.FileHash, group_concat(NodeID) as NodeIDs from Object"+
			" left join ObjectRep on Object.ObjectID = ObjectRep.ObjectID"+
			" left join Cache on ObjectRep.FileHash = Cache.FileHash"+
			" where PackageID = ? group by Object.ObjectID order by Object.ObjectID asc",
		packageID,
	)
	if err != nil {
		return nil, err
	}
	rets := make([]models.ObjectRepData, 0, len(tmpRets))
	for _, tmp := range tmpRets {
		var repData models.ObjectRepData
		repData.Object = tmp.Object

		if tmp.FileHash != nil {
			repData.FileHash = *tmp.FileHash
		}

		if tmp.NodeIDs != nil {
			repData.NodeIDs = splitIDStringUnsafe(*tmp.NodeIDs)
		}

		rets = append(rets, repData)
	}

	return rets, nil
}

// splitIDStringUnsafe splits a comma-separated string and parses each piece
// as a base-10 int64 ID.
// The caller must guarantee every piece is a valid decimal number: parse
// errors are deliberately ignored and yield 0 for that position.
func splitIDStringUnsafe(idStr string) []int64 {
	parts := strings.Split(idStr, ",")
	ids := make([]int64, len(parts))

	for i, part := range parts {
		// input is assumed to be well-formed, so the error is dropped
		id, _ := strconv.ParseInt(part, 10, 64)
		ids[i] = id
	}

	return ids
}

+ 170
- 0
common/pkgs/db/package.go View File

@@ -0,0 +1,170 @@
package db

import (
"database/sql"
"errors"
"fmt"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/common/models"
"gitlink.org.cn/cloudream/common/utils/serder"
"gitlink.org.cn/cloudream/storage-common/consts"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// PackageDB groups the Package-table queries; it shares the connection of the
// embedded *DB.
type PackageDB struct {
	*DB
}

// Package returns a Package-table accessor bound to this DB.
func (db *DB) Package() *PackageDB {
	return &PackageDB{DB: db}
}

// GetByID fetches a single Package row by its primary key.
func (db *PackageDB) GetByID(ctx SQLContext, packageID int64) (model.Package, error) {
	const query = "select * from Package where PackageID = ?"
	var pkg model.Package
	if err := sqlx.Get(ctx, &pkg, query, packageID); err != nil {
		return pkg, err
	}
	return pkg, nil
}

// GetByName fetches the package with the given name inside a bucket.
func (db *PackageDB) GetByName(ctx SQLContext, bucketID int64, name string) (model.Package, error) {
	const query = "select * from Package where BucketID = ? and Name = ?"
	var pkg model.Package
	if err := sqlx.Get(ctx, &pkg, query, bucketID, name); err != nil {
		return pkg, err
	}
	return pkg, nil
}

// BatchGetAllPackageIDs pages through all PackageIDs, returning up to count IDs
// starting at offset start.
func (*PackageDB) BatchGetAllPackageIDs(ctx SQLContext, start int, count int) ([]int64, error) {
	const query = "select PackageID from Package limit ?, ?"
	var ids []int64
	err := sqlx.Select(ctx, &ids, query, start, count)
	return ids, err
}

// GetBucketPackages lists the packages of a bucket, restricted to buckets the
// user owns (join through UserBucket).
func (db *PackageDB) GetBucketPackages(ctx SQLContext, userID int64, bucketID int64) ([]model.Package, error) {
	const query = "select Package.* from UserBucket, Package where UserID = ? and UserBucket.BucketID = ? and UserBucket.BucketID = Package.BucketID"
	var pkgs []model.Package
	err := sqlx.Select(ctx, &pkgs, query, userID, bucketID)
	return pkgs, err
}

// IsAvailable reports whether the user can access the given package, i.e.
// whether the package's bucket is owned by the user.
func (db *PackageDB) IsAvailable(ctx SQLContext, userID int64, packageID int64) (bool, error) {
	// Find the package, then check that its bucket belongs to the user.
	var pkgID int64
	err := sqlx.Get(ctx, &pkgID,
		"select Package.PackageID from Package, UserBucket where "+
			"Package.PackageID = ? and "+
			"Package.BucketID = UserBucket.BucketID and "+
			"UserBucket.UserID = ?",
		packageID, userID)

	// No row simply means "not available", which is not an error.
	// errors.Is also matches a wrapped sentinel, unlike ==.
	if errors.Is(err, sql.ErrNoRows) {
		return false, nil
	}
	if err != nil {
		return false, fmt.Errorf("find package failed, err: %w", err)
	}

	return true, nil
}

// GetUserPackage fetches a package only when the user owns its bucket;
// otherwise sql.ErrNoRows is returned.
func (db *PackageDB) GetUserPackage(ctx SQLContext, userID int64, packageID int64) (model.Package, error) {
	var pkg model.Package
	err := sqlx.Get(ctx, &pkg,
		"select Package.* from Package, UserBucket where"+
			" Package.PackageID = ? and"+
			" Package.BucketID = UserBucket.BucketID and"+
			" UserBucket.UserID = ?",
		packageID, userID)
	if err != nil {
		return pkg, err
	}
	return pkg, nil
}

// Create inserts a new Package in Normal state and returns its generated ID.
// It fails when a package with the same Name already exists in the bucket.
func (db *PackageDB) Create(ctx SQLContext, bucketID int64, name string, redundancy models.TypedRedundancyInfo) (int64, error) {
	// Uniqueness check: a hit means the (Name, BucketID) pair is already taken.
	var packageID int64
	err := sqlx.Get(ctx, &packageID, "select PackageID from Package where Name = ? AND BucketID = ?", name, bucketID)
	if err == nil {
		return 0, fmt.Errorf("package with given Name and BucketID already exists")
	}
	// Any error other than "no rows" is a real query failure.
	if !errors.Is(err, sql.ErrNoRows) {
		return 0, fmt.Errorf("query Package by PackageName and BucketID failed, err: %w", err)
	}

	redundancyJSON, err := serder.ObjectToJSON(redundancy)
	if err != nil {
		return 0, fmt.Errorf("redundancy to json: %w", err)
	}

	// Named insSQL (not "sql") so the imported database/sql package is not shadowed.
	insSQL := "insert into Package(Name, BucketID, State, Redundancy) values(?,?,?,?)"
	r, err := ctx.Exec(insSQL, name, bucketID, consts.PackageStateNormal, redundancyJSON)
	if err != nil {
		return 0, fmt.Errorf("insert package failed, err: %w", err)
	}

	packageID, err = r.LastInsertId()
	if err != nil {
		return 0, fmt.Errorf("get id of inserted package failed, err: %w", err)
	}

	return packageID, nil
}

// SoftDelete marks a package Deleted and cleans up its dependent rows: the
// ObjectRep or ObjectBlock records (depending on the redundancy type), the
// objects themselves, and the StoragePackage records. Packages that are not in
// Normal state are left untouched.
func (db *PackageDB) SoftDelete(ctx SQLContext, packageID int64) error {
	obj, err := db.GetByID(ctx, packageID)
	if err != nil {
		return fmt.Errorf("get package failed, err: %w", err)
	}

	// Only Normal packages are deleted.
	// TODO other states may appear in the future.
	if obj.State != consts.PackageStateNormal {
		return nil
	}

	err = db.ChangeState(ctx, packageID, consts.PackageStateDeleted)
	if err != nil {
		return fmt.Errorf("change package state failed, err: %w", err)
	}

	if obj.Redundancy.IsRepInfo() {
		err = db.ObjectRep().DeleteInPackage(ctx, packageID)
		if err != nil {
			return fmt.Errorf("delete from object rep failed, err: %w", err)
		}
	} else {
		err = db.ObjectBlock().DeleteInPackage(ctx, packageID)
		if err != nil {
			// fixed: this branch previously reported "object rep" by copy-paste.
			return fmt.Errorf("delete from object block failed, err: %w", err)
		}
	}

	if err := db.Object().DeleteInPackage(ctx, packageID); err != nil {
		return fmt.Errorf("deleting objects in package: %w", err)
	}

	_, err = db.StoragePackage().SetAllPackageDeleted(ctx, packageID)
	if err != nil {
		return fmt.Errorf("set storage package deleted failed, err: %w", err)
	}

	return nil
}

// DeleteUnused physically removes a package that is already in Deleted state
// and is no longer referenced anywhere. Currently the only possible reference
// is a StoragePackage row.
// Pointer receiver for consistency with every other PackageDB method.
func (*PackageDB) DeleteUnused(ctx SQLContext, packageID int64) error {
	_, err := ctx.Exec("delete from Package where PackageID = ? and State = ? and "+
		"not exists(select StorageID from StoragePackage where PackageID = ?)",
		packageID,
		consts.PackageStateDeleted,
		packageID,
	)

	return err
}

// ChangeState updates the State column of the given package.
func (*PackageDB) ChangeState(ctx SQLContext, packageID int64, state string) error {
	const query = "update Package set State = ? where PackageID = ?"
	if _, err := ctx.Exec(query, state, packageID); err != nil {
		return err
	}
	return nil
}

+ 64
- 0
common/pkgs/db/storage.go View File

@@ -0,0 +1,64 @@
package db

import (
"database/sql"
"fmt"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// StorageDB groups the Storage-table queries; it shares the connection of the
// embedded *DB.
type StorageDB struct {
	*DB
}

// Storage returns a Storage-table accessor bound to this DB.
func (db *DB) Storage() *StorageDB {
	return &StorageDB{DB: db}
}

// GetByID fetches a single Storage row by its primary key.
func (db *StorageDB) GetByID(ctx SQLContext, stgID int64) (model.Storage, error) {
	const query = "select * from Storage where StorageID = ?"
	var storage model.Storage
	if err := sqlx.Get(ctx, &storage, query, stgID); err != nil {
		return storage, err
	}
	return storage, nil
}

// BatchGetAllStorageIDs pages through all StorageIDs, returning up to count IDs
// starting at offset start.
func (db *StorageDB) BatchGetAllStorageIDs(ctx SQLContext, start int, count int) ([]int64, error) {
	const query = "select StorageID from Storage limit ?, ?"
	var ids []int64
	err := sqlx.Select(ctx, &ids, query, start, count)
	return ids, err
}

// IsAvailable reports whether the user is allowed to use the given storage
// (a matching UserStorage row exists).
func (db *StorageDB) IsAvailable(ctx SQLContext, userID int64, storageID int64) (bool, error) {
	var foundID int64
	err := sqlx.Get(ctx, &foundID,
		"select Storage.StorageID from Storage, UserStorage where"+
			" Storage.StorageID = ? and"+
			" Storage.StorageID = UserStorage.StorageID and"+
			" UserStorage.UserID = ?",
		storageID, userID)

	switch {
	case err == sql.ErrNoRows:
		// No row simply means "not available", which is not an error.
		return false, nil
	case err != nil:
		return false, fmt.Errorf("find storage failed, err: %w", err)
	default:
		return true, nil
	}
}

// GetUserStorage fetches a storage only when the user is authorized for it;
// otherwise sql.ErrNoRows is returned.
func (db *StorageDB) GetUserStorage(ctx SQLContext, userID int64, storageID int64) (model.Storage, error) {
	const query = "select Storage.* from UserStorage, Storage where UserID = ? and UserStorage.StorageID = ? and UserStorage.StorageID = Storage.StorageID"
	var storage model.Storage
	if err := sqlx.Get(ctx, &storage, query, userID, storageID); err != nil {
		return storage, err
	}
	return storage, nil
}

// ChangeState updates the State column of the given storage.
func (db *StorageDB) ChangeState(ctx SQLContext, storageID int64, state string) error {
	const query = "update Storage set State = ? where StorageID = ?"
	if _, err := ctx.Exec(query, state, storageID); err != nil {
		return err
	}
	return nil
}

+ 116
- 0
common/pkgs/db/storage_package.go View File

@@ -0,0 +1,116 @@
package db

import (
"fmt"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/storage-common/consts"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// StoragePackageDB groups the StoragePackage-relation queries (which packages
// are loaded onto which storages, per user); it shares the embedded *DB.
type StoragePackageDB struct {
	*DB
}

// StoragePackage returns a StoragePackage-table accessor bound to this DB.
func (db *DB) StoragePackage() *StoragePackageDB {
	return &StoragePackageDB{DB: db}
}

// Get fetches the StoragePackage row identified by (storage, package, user).
func (*StoragePackageDB) Get(ctx SQLContext, storageID int64, packageID int64, userID int64) (model.StoragePackage, error) {
	const query = "select * from StoragePackage where StorageID = ? and PackageID = ? and UserID = ?"
	var sp model.StoragePackage
	if err := sqlx.Get(ctx, &sp, query, storageID, packageID, userID); err != nil {
		return sp, err
	}
	return sp, nil
}

// GetAllByStorageAndPackageID lists the records of one package on one storage
// (one row per user).
func (*StoragePackageDB) GetAllByStorageAndPackageID(ctx SQLContext, storageID int64, packageID int64) ([]model.StoragePackage, error) {
	const query = "select * from StoragePackage where StorageID = ? and PackageID = ?"
	var sps []model.StoragePackage
	err := sqlx.Select(ctx, &sps, query, storageID, packageID)
	return sps, err
}

// GetAllByStorageID lists every StoragePackage record of one storage.
func (*StoragePackageDB) GetAllByStorageID(ctx SQLContext, storageID int64) ([]model.StoragePackage, error) {
	const query = "select * from StoragePackage where StorageID = ?"
	var sps []model.StoragePackage
	err := sqlx.Select(ctx, &sps, query, storageID)
	return sps, err
}

// LoadPackage records that a package has been loaded onto a storage by a user,
// with initial state Normal.
// NOTE(review): the INSERT relies on positional columns, so the argument order
// must match the table definition (presumably PackageID, StorageID, UserID,
// State) — verify against create_database.sql before changing either side.
func (*StoragePackageDB) LoadPackage(ctx SQLContext, packageID int64, storageID int64, userID int64) error {
	_, err := ctx.Exec("insert into StoragePackage values(?,?,?,?)", packageID, storageID, userID, consts.StoragePackageStateNormal)
	return err
}

// ChangeState updates the state of one (storage, package, user) record.
func (*StoragePackageDB) ChangeState(ctx SQLContext, storageID int64, packageID int64, userID int64, state string) error {
	const query = "update StoragePackage set State = ? where StorageID = ? and PackageID = ? and UserID = ?"
	if _, err := ctx.Exec(query, state, storageID, packageID, userID); err != nil {
		return err
	}
	return nil
}

// SetStateNormal sets the record's state to Normal, except when the record is
// already Deleted (deleted records must stay deleted).
func (*StoragePackageDB) SetStateNormal(ctx SQLContext, storageID int64, packageID int64, userID int64) error {
	const query = "update StoragePackage set State = ? where StorageID = ? and PackageID = ? and UserID = ? and State <> ?"
	if _, err := ctx.Exec(query,
		consts.StoragePackageStateNormal,
		storageID,
		packageID,
		userID,
		consts.StoragePackageStateDeleted,
	); err != nil {
		return err
	}
	return nil
}

// SetAllPackageState sets the state of every StoragePackage row of the package
// and returns how many rows were changed.
func (*StoragePackageDB) SetAllPackageState(ctx SQLContext, packageID int64, state string) (int64, error) {
	res, err := ctx.Exec(
		"update StoragePackage set State = ? where PackageID = ?",
		state,
		packageID,
	)
	if err != nil {
		return 0, err
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("get affected rows failed, err: %w", err)
	}
	return affected, nil
}

// SetAllPackageOutdated marks the package as outdated on every storage.
// Only rows currently in Normal state are changed; returns the number changed.
func (*StoragePackageDB) SetAllPackageOutdated(ctx SQLContext, packageID int64) (int64, error) {
	res, err := ctx.Exec(
		"update StoragePackage set State = ? where State = ? and PackageID = ?",
		consts.StoragePackageStateOutdated,
		consts.StoragePackageStateNormal,
		packageID,
	)
	if err != nil {
		return 0, err
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("get affected rows failed, err: %w", err)
	}
	return affected, nil
}

// SetAllPackageDeleted marks every StoragePackage row of the package Deleted
// and returns the number of rows changed.
func (db *StoragePackageDB) SetAllPackageDeleted(ctx SQLContext, packageID int64) (int64, error) {
	cnt, err := db.SetAllPackageState(ctx, packageID, consts.StoragePackageStateDeleted)
	return cnt, err
}

// Delete removes one (storage, package, user) record.
func (*StoragePackageDB) Delete(ctx SQLContext, storageID int64, packageID int64, userID int64) error {
	const query = "delete from StoragePackage where StorageID = ? and PackageID = ? and UserID = ?"
	if _, err := ctx.Exec(query, storageID, packageID, userID); err != nil {
		return err
	}
	return nil
}

// FindPackageStorages lists the storages onto which the package has been loaded.
func (*StoragePackageDB) FindPackageStorages(ctx SQLContext, packageID int64) ([]model.Storage, error) {
	var storages []model.Storage
	err := sqlx.Select(ctx, &storages,
		"select Storage.* from StoragePackage, Storage where PackageID = ? and"+
			" StoragePackage.StorageID = Storage.StorageID",
		packageID,
	)
	return storages, err
}

+ 14
- 0
common/pkgs/db/user_bucket.go View File

@@ -0,0 +1,14 @@
package db

// UserBucketDB manipulates the UserBucket relation (which user owns which
// bucket); it shares the connection of the embedded *DB.
type UserBucketDB struct {
	*DB
}

// UserBucket returns a UserBucket-table accessor bound to this DB.
func (db *DB) UserBucket() *UserBucketDB {
	return &UserBucketDB{DB: db}
}

// Create grants the user access to the bucket by inserting a UserBucket row.
func (*UserBucketDB) Create(ctx SQLContext, userID int64, bucketID int64) error {
	const query = "insert into UserBucket(UserID,BucketID) values(?,?)"
	if _, err := ctx.Exec(query, userID, bucketID); err != nil {
		return err
	}
	return nil
}

+ 217
- 0
common/pkgs/distlock/lockprovider/ipfs_lock.go View File

@@ -0,0 +1,217 @@
package lockprovider

import (
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
mylo "gitlink.org.cn/cloudream/common/utils/lo"
)

const (
IPFSLockPathPrefix = "IPFS"

IPFS_SET_READ_LOCK = "SetRead"
IPFS_SET_WRITE_LOCK = "SetWrite"
IPFS_SET_CREATE_LOCK = "SetCreate"

IPFS_ELEMENT_READ_LOCK = "ElementRead"
IPFS_ELEMENT_WRITE_LOCK = "ElementWrite"

IPFS_NODE_ID_PATH_INDEX = 1
)

// IPFSLock dispatches IPFS lock requests to per-node sub-locks, keyed by the
// node ID found in the lock path.
type IPFSLock struct {
	nodeLocks map[string]*IPFSNodeLock
	dummyLock *IPFSNodeLock // used to validate locks for nodes with no state yet
}

// NewIPFSLock creates an empty IPFSLock.
func NewIPFSLock() *IPFSLock {
	return &IPFSLock{
		nodeLocks: make(map[string]*IPFSNodeLock),
		dummyLock: NewIPFSNodeLock(),
	}
}

// CanLock reports (via error) whether the lock could currently be acquired.
func (l *IPFSLock) CanLock(lock distlock.Lock) error {
	if nodeLock, ok := l.nodeLocks[lock.Path[IPFS_NODE_ID_PATH_INDEX]]; ok {
		return nodeLock.CanLock(lock)
	}
	// No state for this node yet; still validate against an empty node lock so
	// malformed lock data is rejected.
	return l.dummyLock.CanLock(lock)
}

// Lock acquires the lock. Callers must have verified with CanLock first; no
// compatibility check happens here.
func (l *IPFSLock) Lock(reqID string, lock distlock.Lock) error {
	nodeID := lock.Path[IPFS_NODE_ID_PATH_INDEX]

	nodeLock := l.nodeLocks[nodeID]
	if nodeLock == nil {
		nodeLock = NewIPFSNodeLock()
		l.nodeLocks[nodeID] = nodeLock
	}

	return nodeLock.Lock(reqID, lock)
}

// Unlock releases a previously acquired lock; unknown nodes are a no-op.
func (l *IPFSLock) Unlock(reqID string, lock distlock.Lock) error {
	nodeLock := l.nodeLocks[lock.Path[IPFS_NODE_ID_PATH_INDEX]]
	if nodeLock == nil {
		return nil
	}
	return nodeLock.Unlock(reqID, lock)
}

// GetTargetString serializes a lock target (a StringLockTarget) so it can be
// stored in etcd.
func (l *IPFSLock) GetTargetString(target any) (string, error) {
	// Checked assertion: a wrong target type now yields an error instead of a panic.
	tar, ok := target.(StringLockTarget)
	if !ok {
		return "", fmt.Errorf("unsupported target type: %T", target)
	}
	return StringLockTargetToString(&tar)
}

// ParseTargetString parses a serialized lock target produced by GetTargetString.
func (l *IPFSLock) ParseTargetString(targetStr string) (any, error) {
	target, err := StringLockTargetFromString(targetStr)
	return target, err
}

// Clear drops all in-memory per-node lock state.
func (l *IPFSLock) Clear() {
	l.nodeLocks = map[string]*IPFSNodeLock{}
}

// ipfsElementLock tracks one locked element target and the requests holding it.
type ipfsElementLock struct {
	target     StringLockTarget
	requestIDs []string // request IDs currently holding a lock on this target
}

// IPFSNodeLock holds the lock bookkeeping of a single IPFS node: request IDs
// per set-level lock, element-level locks grouped by target, and the
// compatibility table that decides which combinations may coexist.
type IPFSNodeLock struct {
	setReadReqIDs   []string
	setWriteReqIDs  []string
	setCreateReqIDs []string

	elementReadLocks  []*ipfsElementLock
	elementWriteLocks []*ipfsElementLock

	lockCompatibilityTable *LockCompatibilityTable
}

// NewIPFSNodeLock builds a node lock together with its compatibility table.
// The Column calls and the MustRow calls must list the lock kinds in the same
// order (ElementRead, ElementWrite, SetRead, SetWrite, SetCreate); symmetry of
// the table is checked as rows are added.
func NewIPFSNodeLock() *IPFSNodeLock {
	compTable := &LockCompatibilityTable{}

	ipfsLock := IPFSNodeLock{
		lockCompatibilityTable: compTable,
	}

	// The closures capture ipfsLock, so they observe its state at check time.
	compTable.
		Column(IPFS_ELEMENT_READ_LOCK, func() bool { return len(ipfsLock.elementReadLocks) > 0 }).
		Column(IPFS_ELEMENT_WRITE_LOCK, func() bool { return len(ipfsLock.elementWriteLocks) > 0 }).
		Column(IPFS_SET_READ_LOCK, func() bool { return len(ipfsLock.setReadReqIDs) > 0 }).
		Column(IPFS_SET_WRITE_LOCK, func() bool { return len(ipfsLock.setWriteReqIDs) > 0 }).
		Column(IPFS_SET_CREATE_LOCK, func() bool { return len(ipfsLock.setCreateReqIDs) > 0 })

	comp := LockCompatible()
	uncp := LockUncompatible()
	trgt := LockSpecial(func(lock distlock.Lock, testLockName string) bool {
		strTar := lock.Target.(StringLockTarget)
		if testLockName == IPFS_ELEMENT_READ_LOCK {
			// Grantable iff no held element lock targets a conflicting object.
			return lo.NoneBy(ipfsLock.elementReadLocks, func(other *ipfsElementLock) bool { return strTar.IsConflict(&other.target) })
		}

		return lo.NoneBy(ipfsLock.elementWriteLocks, func(other *ipfsElementLock) bool { return strTar.IsConflict(&other.target) })
	})

	compTable.MustRow(comp, trgt, comp, uncp, comp)
	compTable.MustRow(trgt, trgt, uncp, uncp, uncp)
	compTable.MustRow(comp, uncp, comp, uncp, uncp)
	compTable.MustRow(uncp, uncp, uncp, uncp, uncp)
	compTable.MustRow(comp, uncp, uncp, uncp, comp)

	return &ipfsLock
}

// CanLock tests the lock against the node's compatibility table.
func (l *IPFSNodeLock) CanLock(lock distlock.Lock) error {
	return l.lockCompatibilityTable.Test(lock)
}

// Lock records reqID as a holder of the given lock kind. Unknown lock names
// are an error.
func (l *IPFSNodeLock) Lock(reqID string, lock distlock.Lock) error {
	switch lock.Name {
	case IPFS_ELEMENT_READ_LOCK:
		l.elementReadLocks = l.addElementLock(lock, l.elementReadLocks, reqID)
	case IPFS_ELEMENT_WRITE_LOCK:
		l.elementWriteLocks = l.addElementLock(lock, l.elementWriteLocks, reqID)

	case IPFS_SET_READ_LOCK:
		l.setReadReqIDs = append(l.setReadReqIDs, reqID)
	case IPFS_SET_WRITE_LOCK:
		l.setWriteReqIDs = append(l.setWriteReqIDs, reqID)
	case IPFS_SET_CREATE_LOCK:
		l.setCreateReqIDs = append(l.setCreateReqIDs, reqID)

	default:
		return fmt.Errorf("unknow lock name: %s", lock.Name)
	}

	return nil
}

// addElementLock appends reqID to the element lock whose target conflicts with
// the request's target, creating a new entry when none matches.
func (l *IPFSNodeLock) addElementLock(lock distlock.Lock, locks []*ipfsElementLock, reqID string) []*ipfsElementLock {
	strTarget := lock.Target.(StringLockTarget)

	existing, found := lo.Find(locks, func(el *ipfsElementLock) bool { return strTarget.IsConflict(&el.target) })
	if found {
		existing.requestIDs = append(existing.requestIDs, reqID)
		return locks
	}

	fresh := &ipfsElementLock{target: strTarget, requestIDs: []string{reqID}}
	return append(locks, fresh)
}

// Unlock removes reqID from the holders of the given lock kind. Unknown lock
// names are an error.
func (l *IPFSNodeLock) Unlock(reqID string, lock distlock.Lock) error {
	switch lock.Name {
	case IPFS_ELEMENT_READ_LOCK:
		l.elementReadLocks = l.removeElementLock(lock, l.elementReadLocks, reqID)
	case IPFS_ELEMENT_WRITE_LOCK:
		l.elementWriteLocks = l.removeElementLock(lock, l.elementWriteLocks, reqID)

	case IPFS_SET_READ_LOCK:
		l.setReadReqIDs = mylo.Remove(l.setReadReqIDs, reqID)
	case IPFS_SET_WRITE_LOCK:
		l.setWriteReqIDs = mylo.Remove(l.setWriteReqIDs, reqID)
	case IPFS_SET_CREATE_LOCK:
		l.setCreateReqIDs = mylo.Remove(l.setCreateReqIDs, reqID)

	default:
		return fmt.Errorf("unknow lock name: %s", lock.Name)
	}

	return nil
}

// removeElementLock drops reqID from the element lock matching the target and
// removes the whole entry once no request holds it anymore.
func (l *IPFSNodeLock) removeElementLock(lock distlock.Lock, locks []*ipfsElementLock, reqID string) []*ipfsElementLock {
	strTarget := lock.Target.(StringLockTarget)

	entry, index, found := lo.FindIndexOf(locks, func(el *ipfsElementLock) bool { return strTarget.IsConflict(&el.target) })
	if !found {
		return locks
	}

	entry.requestIDs = mylo.Remove(entry.requestIDs, reqID)
	if len(entry.requestIDs) > 0 {
		return locks
	}

	return mylo.RemoveAt(locks, index)
}

+ 113
- 0
common/pkgs/distlock/lockprovider/ipfs_lock_test.go View File

@@ -0,0 +1,113 @@
package lockprovider

import (
"testing"

. "github.com/smartystreets/goconvey/convey"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
)

// Test_IPFSLock covers set-level read/write compatibility per node, isolation
// between nodes, element-level target conflicts, and unlock behavior.
func Test_IPFSLock(t *testing.T) {
	cases := []struct {
		title     string
		initLocks []distlock.Lock // locks acquired before the check
		doLock    distlock.Lock   // lock whose acquirability is tested
		wantOK    bool
	}{
		{
			title: "同节点,同一个Read锁",
			initLocks: []distlock.Lock{
				{
					Path: []string{IPFSLockPathPrefix, "node1"},
					Name: IPFS_SET_READ_LOCK,
				},
			},
			doLock: distlock.Lock{
				Path: []string{IPFSLockPathPrefix, "node1"},
				Name: IPFS_SET_READ_LOCK,
			},
			wantOK: true,
		},
		{
			title: "同节点,同一个Write锁",
			initLocks: []distlock.Lock{
				{
					Path: []string{IPFSLockPathPrefix, "node1"},
					Name: IPFS_SET_WRITE_LOCK,
				},
			},
			doLock: distlock.Lock{
				Path: []string{IPFSLockPathPrefix, "node1"},
				Name: IPFS_SET_WRITE_LOCK,
			},
			wantOK: false,
		},
		{
			title: "不同节点,同一个Write锁",
			initLocks: []distlock.Lock{
				{
					Path: []string{IPFSLockPathPrefix, "node1"},
					Name: IPFS_SET_WRITE_LOCK,
				},
			},
			doLock: distlock.Lock{
				Path: []string{IPFSLockPathPrefix, "node2"},
				Name: IPFS_SET_WRITE_LOCK,
			},
			wantOK: true,
		},
		{
			title: "相同对象的Read、Write锁",
			initLocks: []distlock.Lock{
				{
					Path:   []string{IPFSLockPathPrefix, "node1"},
					Name:   IPFS_ELEMENT_WRITE_LOCK,
					Target: *NewStringLockTarget(),
				},
			},
			doLock: distlock.Lock{
				Path:   []string{IPFSLockPathPrefix, "node1"},
				Name:   IPFS_ELEMENT_WRITE_LOCK,
				Target: *NewStringLockTarget(),
			},
			wantOK: false,
		},
	}

	for _, ca := range cases {
		Convey(ca.title, t, func() {
			ipfsLock := NewIPFSLock()

			for _, l := range ca.initLocks {
				ipfsLock.Lock("req1", l)
			}

			err := ipfsLock.CanLock(ca.doLock)
			if ca.wantOK {
				So(err, ShouldBeNil)
			} else {
				So(err, ShouldNotBeNil)
			}
		})
	}

	// Unlocking a held write lock must make the same lock acquirable again.
	Convey("解锁", t, func() {
		ipfsLock := NewIPFSLock()

		lock := distlock.Lock{
			Path: []string{IPFSLockPathPrefix, "node1"},
			Name: IPFS_SET_WRITE_LOCK,
		}

		ipfsLock.Lock("req1", lock)

		err := ipfsLock.CanLock(lock)
		So(err, ShouldNotBeNil)

		ipfsLock.Unlock("req1", lock)

		err = ipfsLock.CanLock(lock)
		So(err, ShouldBeNil)
	})

}

+ 123
- 0
common/pkgs/distlock/lockprovider/lock_compatibility_table.go View File

@@ -0,0 +1,123 @@
package lockprovider

import (
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
)

const (
	// The three kinds of cells a LockCompatibilityTable may contain.
	LOCK_COMPATIBILITY_COMPATIBLE   LockCompatibilityType = "Compatible"
	LOCK_COMPATIBILITY_UNCOMPATIBLE LockCompatibilityType = "Uncompatible"
	LOCK_COMPATIBILITY_SPECIAL      LockCompatibilityType = "Special"
)

// HasSuchLockFn reports whether any lock of a given kind is currently held.
type HasSuchLockFn = func() bool

// LockCompatibilitySpecialFn decides whether the lock is compatible with the
// lock kind named testLockName.
type LockCompatibilitySpecialFn func(lock distlock.Lock, testLockName string) bool

type LockCompatibilityType string

// LockCompatibility is one table cell; SpecialFn is only set for Special cells.
type LockCompatibility struct {
	Type      LockCompatibilityType
	SpecialFn LockCompatibilitySpecialFn
}

// LockCompatible builds a Compatible cell.
func LockCompatible() LockCompatibility {
	return LockCompatibility{
		Type: LOCK_COMPATIBILITY_COMPATIBLE,
	}
}

// LockUncompatible builds an Uncompatible cell.
func LockUncompatible() LockCompatibility {
	return LockCompatibility{
		Type: LOCK_COMPATIBILITY_UNCOMPATIBLE,
	}
}

// LockSpecial builds a Special cell whose compatibility is decided by specialFn.
func LockSpecial(specialFn LockCompatibilitySpecialFn) LockCompatibility {
	return LockCompatibility{
		Type:      LOCK_COMPATIBILITY_SPECIAL,
		SpecialFn: specialFn,
	}
}

// LockCompatibilityTableRow pairs a lock kind with its presence predicate and
// its compatibility against every column.
type LockCompatibilityTableRow struct {
	LockName        string
	HasSuchLockFn   HasSuchLockFn
	Compatibilities []LockCompatibility
}

// LockCompatibilityTable is a square matrix: rows are declared via Column and
// then filled one by one via Row/MustRow; rowIndex tracks the next row to fill.
type LockCompatibilityTable struct {
	rows     []LockCompatibilityTableRow
	rowIndex int
}

// Column registers one lock kind plus a predicate telling whether any lock of
// that kind is currently held. Call order defines the row/column order.
func (t *LockCompatibilityTable) Column(lockName string, hasSuchLock HasSuchLockFn) *LockCompatibilityTable {
	row := LockCompatibilityTableRow{
		LockName:      lockName,
		HasSuchLockFn: hasSuchLock,
	}
	t.rows = append(t.rows, row)
	return t
}
// MustRow is Row but panics on error; intended for static table construction.
func (t *LockCompatibilityTable) MustRow(comps ...LockCompatibility) {
	if err := t.Row(comps...); err != nil {
		panic(fmt.Sprintf("build lock compatibility table failed, err: %s", err.Error()))
	}
}

// Row fills in the next row of the table. The number of entries must equal the
// number of declared columns, and the new row must be symmetric with every row
// added before it (table[r][i] and table[i][r] must have the same type).
func (t *LockCompatibilityTable) Row(comps ...LockCompatibility) error {
	if t.rowIndex >= len(t.rows) {
		return fmt.Errorf("there should be no more rows in the table")
	}

	// fixed: was len(comps) < len(t.rows), which silently accepted extra columns.
	if len(comps) != len(t.rows) {
		return fmt.Errorf("the columns should equals the rows")
	}

	t.rows[t.rowIndex].Compatibilities = comps

	// Symmetry check against every previously filled row.
	// fixed: the loop previously stopped at rowIndex-2, never comparing against
	// the row immediately above this one.
	for i := 0; i < t.rowIndex; i++ {
		chkRowCell := t.rows[t.rowIndex].Compatibilities[i]
		chkColCell := t.rows[i].Compatibilities[t.rowIndex]

		if chkRowCell.Type != chkColCell.Type {
			return fmt.Errorf("value at %d, %d is not equals to at %d, %d", t.rowIndex, i, i, t.rowIndex)
		}
	}

	t.rowIndex++

	return nil
}

// Test checks whether the given lock is compatible with all currently held
// locks, using the row registered for the lock's name.
func (t *LockCompatibilityTable) Test(lock distlock.Lock) error {
	row, found := lo.Find(t.rows, func(r LockCompatibilityTableRow) bool { return lock.Name == r.LockName })
	if !found {
		return fmt.Errorf("unknow lock name %s", lock.Name)
	}

	for i, comp := range row.Compatibilities {
		switch comp.Type {
		case LOCK_COMPATIBILITY_COMPATIBLE:
			// Always allowed alongside this lock kind.

		case LOCK_COMPATIBILITY_UNCOMPATIBLE:
			if t.rows[i].HasSuchLockFn() {
				return distlock.NewLockTargetBusyError(t.rows[i].LockName)
			}

		case LOCK_COMPATIBILITY_SPECIAL:
			if !comp.SpecialFn(lock, t.rows[i].LockName) {
				return distlock.NewLockTargetBusyError(t.rows[i].LockName)
			}
		}
	}

	return nil
}

+ 41
- 0
common/pkgs/distlock/lockprovider/lock_compatibility_table_test.go View File

@@ -0,0 +1,41 @@
package lockprovider

import (
"testing"

. "github.com/smartystreets/goconvey/convey"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
)

// Test_LockCompatibilityTable covers compatible, uncompatible and special cells.
func Test_LockCompatibilityTable(t *testing.T) {
	Convey("兼容,互斥,特殊比较", t, func() {
		table := LockCompatibilityTable{}

		table.
			Column("l1", func() bool { return true }).
			Column("l2", func() bool { return true }).
			Column("l3", func() bool { return false })

		comp := LockCompatible()
		uncp := LockUncompatible()
		spcl := LockSpecial(func(lock distlock.Lock, testLockName string) bool { return true })

		// fixed: Row errors were silently discarded; a malformed table would
		// have made the assertions below meaningless.
		So(table.Row(comp, comp, comp), ShouldBeNil)
		So(table.Row(comp, uncp, comp), ShouldBeNil)
		So(table.Row(comp, comp, spcl), ShouldBeNil)

		err := table.Test(distlock.Lock{
			Name: "l1",
		})
		So(err, ShouldBeNil)

		err = table.Test(distlock.Lock{
			Name: "l2",
		})
		So(err, ShouldNotBeNil)

		err = table.Test(distlock.Lock{
			Name: "l3",
		})
		So(err, ShouldBeNil)
	})
}

+ 184
- 0
common/pkgs/distlock/lockprovider/metadata_lock.go View File

@@ -0,0 +1,184 @@
package lockprovider

import (
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
mylo "gitlink.org.cn/cloudream/common/utils/lo"
)

const (
MetadataLockPathPrefix = "Metadata"

METADATA_SET_READ_LOCK = "SetRead"
METADATA_SET_WRITE_LOCK = "SetWrite"
METADATA_SET_CREATE_LOCK = "SetCreate"

METADATA_ELEMENT_READ_LOCK = "ElementRead"
METADATA_ELEMENT_WRITE_LOCK = "ElementWrite"
METADATA_ELEMENT_CREATE_LOCK = "ElementCreate"
)

// metadataElementLock tracks one locked element target and the requests holding it.
type metadataElementLock struct {
	target     StringLockTarget
	requestIDs []string // request IDs currently holding a lock on this target
}

// MetadataLock implements metadata locking: request IDs per set-level lock,
// element-level locks grouped by target, and the compatibility table that
// decides which combinations may coexist.
type MetadataLock struct {
	setReadReqIDs   []string
	setWriteReqIDs  []string
	setCreateReqIDs []string

	elementReadLocks   []*metadataElementLock
	elementWriteLocks  []*metadataElementLock
	elementCreateLocks []*metadataElementLock

	lockCompatibilityTable LockCompatibilityTable
}

// NewMetadataLock builds a metadata lock together with its compatibility table.
// The Column calls and the MustRow calls must list the lock kinds in the same
// order (ElementRead, ElementWrite, ElementCreate, SetRead, SetWrite,
// SetCreate); symmetry of the table is checked as rows are added.
func NewMetadataLock() *MetadataLock {

	metadataLock := MetadataLock{
		lockCompatibilityTable: LockCompatibilityTable{},
	}

	compTable := &metadataLock.lockCompatibilityTable

	// The closures capture metadataLock, so they observe its state at check time.
	compTable.
		Column(METADATA_ELEMENT_READ_LOCK, func() bool { return len(metadataLock.elementReadLocks) > 0 }).
		Column(METADATA_ELEMENT_WRITE_LOCK, func() bool { return len(metadataLock.elementWriteLocks) > 0 }).
		Column(METADATA_ELEMENT_CREATE_LOCK, func() bool { return len(metadataLock.elementCreateLocks) > 0 }).
		Column(METADATA_SET_READ_LOCK, func() bool { return len(metadataLock.setReadReqIDs) > 0 }).
		Column(METADATA_SET_WRITE_LOCK, func() bool { return len(metadataLock.setWriteReqIDs) > 0 }).
		Column(METADATA_SET_CREATE_LOCK, func() bool { return len(metadataLock.setCreateReqIDs) > 0 })

	comp := LockCompatible()
	uncp := LockUncompatible()
	trgt := LockSpecial(func(lock distlock.Lock, testLockName string) bool {
		strTar := lock.Target.(StringLockTarget)
		if testLockName == METADATA_ELEMENT_READ_LOCK {
			// Grantable iff no held element lock targets a conflicting object.
			return lo.NoneBy(metadataLock.elementReadLocks, func(other *metadataElementLock) bool { return strTar.IsConflict(&other.target) })
		}

		if testLockName == METADATA_ELEMENT_WRITE_LOCK {
			return lo.NoneBy(metadataLock.elementWriteLocks, func(other *metadataElementLock) bool { return strTar.IsConflict(&other.target) })
		}

		return lo.NoneBy(metadataLock.elementCreateLocks, func(other *metadataElementLock) bool { return strTar.IsConflict(&other.target) })
	})

	compTable.MustRow(comp, trgt, comp, comp, uncp, comp)
	compTable.MustRow(trgt, trgt, comp, uncp, uncp, comp)
	compTable.MustRow(comp, comp, trgt, uncp, uncp, uncp)
	compTable.MustRow(comp, uncp, uncp, comp, uncp, uncp)
	compTable.MustRow(uncp, uncp, uncp, uncp, uncp, uncp)
	compTable.MustRow(comp, comp, uncp, uncp, uncp, uncp)

	return &metadataLock
}

// CanLock tests the lock against the compatibility table.
func (l *MetadataLock) CanLock(lock distlock.Lock) error {
	return l.lockCompatibilityTable.Test(lock)
}

// Lock records reqID as a holder of the given lock kind. Unknown lock names
// are an error.
func (l *MetadataLock) Lock(reqID string, lock distlock.Lock) error {
	switch lock.Name {
	case METADATA_ELEMENT_READ_LOCK:
		l.elementReadLocks = l.addElementLock(lock, l.elementReadLocks, reqID)
	case METADATA_ELEMENT_WRITE_LOCK:
		l.elementWriteLocks = l.addElementLock(lock, l.elementWriteLocks, reqID)
	case METADATA_ELEMENT_CREATE_LOCK:
		l.elementCreateLocks = l.addElementLock(lock, l.elementCreateLocks, reqID)

	case METADATA_SET_READ_LOCK:
		l.setReadReqIDs = append(l.setReadReqIDs, reqID)
	case METADATA_SET_WRITE_LOCK:
		l.setWriteReqIDs = append(l.setWriteReqIDs, reqID)
	case METADATA_SET_CREATE_LOCK:
		l.setCreateReqIDs = append(l.setCreateReqIDs, reqID)

	default:
		return fmt.Errorf("unknow lock name: %s", lock.Name)
	}

	return nil
}

// addElementLock appends reqID to the element lock whose target conflicts with
// the request's target, creating a new entry when none matches.
func (l *MetadataLock) addElementLock(lock distlock.Lock, locks []*metadataElementLock, reqID string) []*metadataElementLock {
	strTarget := lock.Target.(StringLockTarget)

	existing, found := lo.Find(locks, func(el *metadataElementLock) bool { return strTarget.IsConflict(&el.target) })
	if found {
		existing.requestIDs = append(existing.requestIDs, reqID)
		return locks
	}

	fresh := &metadataElementLock{target: strTarget, requestIDs: []string{reqID}}
	return append(locks, fresh)
}

// Unlock removes reqID from the holders of the given lock kind. Unknown lock
// names are an error.
func (l *MetadataLock) Unlock(reqID string, lock distlock.Lock) error {
	switch lock.Name {
	case METADATA_ELEMENT_READ_LOCK:
		l.elementReadLocks = l.removeElementLock(lock, l.elementReadLocks, reqID)
	case METADATA_ELEMENT_WRITE_LOCK:
		l.elementWriteLocks = l.removeElementLock(lock, l.elementWriteLocks, reqID)
	case METADATA_ELEMENT_CREATE_LOCK:
		l.elementCreateLocks = l.removeElementLock(lock, l.elementCreateLocks, reqID)

	case METADATA_SET_READ_LOCK:
		l.setReadReqIDs = mylo.Remove(l.setReadReqIDs, reqID)
	case METADATA_SET_WRITE_LOCK:
		l.setWriteReqIDs = mylo.Remove(l.setWriteReqIDs, reqID)
	case METADATA_SET_CREATE_LOCK:
		l.setCreateReqIDs = mylo.Remove(l.setCreateReqIDs, reqID)

	default:
		return fmt.Errorf("unknow lock name: %s", lock.Name)
	}

	return nil
}

// removeElementLock drops reqID from the element lock matching the target and
// removes the whole entry once no request holds it anymore.
func (l *MetadataLock) removeElementLock(lock distlock.Lock, locks []*metadataElementLock, reqID string) []*metadataElementLock {
	strTarget := lock.Target.(StringLockTarget)

	entry, index, found := lo.FindIndexOf(locks, func(el *metadataElementLock) bool { return strTarget.IsConflict(&el.target) })
	if !found {
		return locks
	}

	entry.requestIDs = mylo.Remove(entry.requestIDs, reqID)
	if len(entry.requestIDs) > 0 {
		return locks
	}

	return mylo.RemoveAt(locks, index)
}

// GetTargetString serializes a lock target (a StringLockTarget) so it can be
// stored in etcd.
func (l *MetadataLock) GetTargetString(target any) (string, error) {
	// Checked assertion: a wrong target type now yields an error instead of a panic.
	tar, ok := target.(StringLockTarget)
	if !ok {
		return "", fmt.Errorf("unsupported target type: %T", target)
	}
	return StringLockTargetToString(&tar)
}

// ParseTargetString parses a serialized lock target produced by GetTargetString.
func (l *MetadataLock) ParseTargetString(targetStr string) (any, error) {
	target, err := StringLockTargetFromString(targetStr)
	return target, err
}

// Clear drops all in-memory lock state (held request IDs and element locks);
// the compatibility table itself is kept.
func (l *MetadataLock) Clear() {
	l.setReadReqIDs = nil
	l.setWriteReqIDs = nil
	l.setCreateReqIDs = nil
	l.elementReadLocks = nil
	l.elementWriteLocks = nil
	l.elementCreateLocks = nil
}

+ 226
- 0
common/pkgs/distlock/lockprovider/storage_lock.go View File

@@ -0,0 +1,226 @@
package lockprovider

import (
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/distlock"
mylo "gitlink.org.cn/cloudream/common/utils/lo"
)

const (
StorageLockPathPrefix = "Storage"

STORAGE_SET_READ_LOCK = "SetRead"
STORAGE_SET_WRITE_LOCK = "SetWrite"
STORAGE_SET_CREATE_LOCK = "SetCreate"

STORAGE_ELEMENT_READ_LOCK = "ElementRead"
STORAGE_ELEMENT_WRITE_LOCK = "ElementWrite"
STORAGE_ELEMENT_CREATE_LOCK = "ElementCreate"

STORAGE_STORAGE_ID_PATH_INDEX = 1
)

// StorageLock dispatches Storage lock requests to per-storage sub-locks, keyed
// by the storage ID found in the lock path.
type StorageLock struct {
	nodeLocks map[string]*StorageNodeLock
	dummyLock *StorageNodeLock // used to validate locks for storages with no state yet
}

// NewStorageLock creates an empty StorageLock.
func NewStorageLock() *StorageLock {
	return &StorageLock{
		nodeLocks: make(map[string]*StorageNodeLock),
		dummyLock: NewStorageNodeLock(),
	}
}

// CanLock reports (via error) whether the lock could currently be acquired.
func (l *StorageLock) CanLock(lock distlock.Lock) error {
	if nodeLock, ok := l.nodeLocks[lock.Path[STORAGE_STORAGE_ID_PATH_INDEX]]; ok {
		return nodeLock.CanLock(lock)
	}
	// No state for this storage yet; still validate against an empty node lock
	// so malformed lock data is rejected.
	return l.dummyLock.CanLock(lock)
}

// Lock acquires the lock. Callers must have verified with CanLock first; no
// compatibility check happens here.
func (l *StorageLock) Lock(reqID string, lock distlock.Lock) error {
	nodeID := lock.Path[STORAGE_STORAGE_ID_PATH_INDEX]

	nodeLock := l.nodeLocks[nodeID]
	if nodeLock == nil {
		nodeLock = NewStorageNodeLock()
		l.nodeLocks[nodeID] = nodeLock
	}

	return nodeLock.Lock(reqID, lock)
}

// Unlock releases a previously acquired lock; unknown storages are a no-op.
func (l *StorageLock) Unlock(reqID string, lock distlock.Lock) error {
	nodeLock := l.nodeLocks[lock.Path[STORAGE_STORAGE_ID_PATH_INDEX]]
	if nodeLock == nil {
		return nil
	}
	return nodeLock.Unlock(reqID, lock)
}

// GetTargetString serializes a lock target (a StringLockTarget) so it can be
// stored in etcd.
func (l *StorageLock) GetTargetString(target any) (string, error) {
	// Checked assertion: a wrong target type now yields an error instead of a panic.
	tar, ok := target.(StringLockTarget)
	if !ok {
		return "", fmt.Errorf("unsupported target type: %T", target)
	}
	return StringLockTargetToString(&tar)
}

// ParseTargetString parses a serialized lock target produced by GetTargetString.
func (l *StorageLock) ParseTargetString(targetStr string) (any, error) {
	target, err := StringLockTargetFromString(targetStr)
	return target, err
}

// Clear drops all in-memory per-storage lock state.
func (l *StorageLock) Clear() {
	l.nodeLocks = map[string]*StorageNodeLock{}
}

// storageElementLock tracks one locked element target and the requests holding it.
type storageElementLock struct {
	target     StringLockTarget
	requestIDs []string // request IDs currently holding a lock on this target
}

// StorageNodeLock holds the lock bookkeeping of a single storage: request IDs
// per set-level lock, element-level locks grouped by target, and the
// compatibility table that decides which combinations may coexist.
type StorageNodeLock struct {
	setReadReqIDs   []string
	setWriteReqIDs  []string
	setCreateReqIDs []string

	elementReadLocks   []*storageElementLock
	elementWriteLocks  []*storageElementLock
	elementCreateLocks []*storageElementLock

	lockCompatibilityTable LockCompatibilityTable
}

// NewStorageNodeLock builds a storage node lock with its compatibility table.
// The Column calls and the MustRow calls must list the lock kinds in the same
// order (ElementRead, ElementWrite, ElementCreate, SetRead, SetWrite,
// SetCreate); symmetry of the table is checked as rows are added.
func NewStorageNodeLock() *StorageNodeLock {

	storageLock := StorageNodeLock{
		lockCompatibilityTable: LockCompatibilityTable{},
	}

	compTable := &storageLock.lockCompatibilityTable

	// The closures capture storageLock, so they observe its state at check time.
	compTable.
		Column(STORAGE_ELEMENT_READ_LOCK, func() bool { return len(storageLock.elementReadLocks) > 0 }).
		Column(STORAGE_ELEMENT_WRITE_LOCK, func() bool { return len(storageLock.elementWriteLocks) > 0 }).
		Column(STORAGE_ELEMENT_CREATE_LOCK, func() bool { return len(storageLock.elementCreateLocks) > 0 }).
		Column(STORAGE_SET_READ_LOCK, func() bool { return len(storageLock.setReadReqIDs) > 0 }).
		Column(STORAGE_SET_WRITE_LOCK, func() bool { return len(storageLock.setWriteReqIDs) > 0 }).
		Column(STORAGE_SET_CREATE_LOCK, func() bool { return len(storageLock.setCreateReqIDs) > 0 })

	comp := LockCompatible()
	uncp := LockUncompatible()
	trgt := LockSpecial(func(lock distlock.Lock, testLockName string) bool {
		strTar := lock.Target.(StringLockTarget)
		if testLockName == STORAGE_ELEMENT_READ_LOCK {
			// Grantable iff no held element lock targets a conflicting object.
			return lo.NoneBy(storageLock.elementReadLocks, func(other *storageElementLock) bool { return strTar.IsConflict(&other.target) })
		}

		if testLockName == STORAGE_ELEMENT_WRITE_LOCK {
			return lo.NoneBy(storageLock.elementWriteLocks, func(other *storageElementLock) bool { return strTar.IsConflict(&other.target) })
		}

		return lo.NoneBy(storageLock.elementCreateLocks, func(other *storageElementLock) bool { return strTar.IsConflict(&other.target) })
	})

	compTable.MustRow(comp, trgt, comp, comp, uncp, comp)
	compTable.MustRow(trgt, trgt, comp, uncp, uncp, comp)
	compTable.MustRow(comp, comp, trgt, uncp, uncp, uncp)
	compTable.MustRow(comp, uncp, uncp, comp, uncp, uncp)
	compTable.MustRow(uncp, uncp, uncp, uncp, uncp, uncp)
	compTable.MustRow(comp, comp, uncp, uncp, uncp, uncp)

	return &storageLock
}

// CanLock reports whether the given lock could currently be acquired on this
// node, by testing it against the lock-compatibility table. A nil error means
// the lock may be taken.
func (l *StorageNodeLock) CanLock(lock distlock.Lock) error {
	return l.lockCompatibilityTable.Test(lock)
}

// Lock records that reqID now holds the given lock on this node.
// It returns an error only for lock names this provider does not know.
func (l *StorageNodeLock) Lock(reqID string, lock distlock.Lock) error {
	switch lock.Name {
	case STORAGE_SET_READ_LOCK:
		l.setReadReqIDs = append(l.setReadReqIDs, reqID)
	case STORAGE_SET_WRITE_LOCK:
		l.setWriteReqIDs = append(l.setWriteReqIDs, reqID)
	case STORAGE_SET_CREATE_LOCK:
		l.setCreateReqIDs = append(l.setCreateReqIDs, reqID)

	case STORAGE_ELEMENT_READ_LOCK:
		l.elementReadLocks = l.addElementLock(lock, l.elementReadLocks, reqID)
	case STORAGE_ELEMENT_WRITE_LOCK:
		l.elementWriteLocks = l.addElementLock(lock, l.elementWriteLocks, reqID)
	case STORAGE_ELEMENT_CREATE_LOCK:
		// Fix: this case was missing. The compatibility table tracks
		// elementCreateLocks, so CanLock could approve an element-create
		// lock that Lock then rejected as unknown and never recorded.
		l.elementCreateLocks = l.addElementLock(lock, l.elementCreateLocks, reqID)

	default:
		// Fix: error message typo "unknow" -> "unknown".
		return fmt.Errorf("unknown lock name: %s", lock.Name)
	}

	return nil
}

// addElementLock adds reqID to the element-lock group whose target conflicts
// with the requested lock's target, creating a new group when none matches.
// It returns the (possibly grown) slice of groups.
func (l *StorageNodeLock) addElementLock(lock distlock.Lock, locks []*storageElementLock, reqID string) []*storageElementLock {
	target := lock.Target.(StringLockTarget)

	var group *storageElementLock
	for _, cand := range locks {
		if target.IsConflict(&cand.target) {
			group = cand
			break
		}
	}

	if group == nil {
		group = &storageElementLock{target: target}
		locks = append(locks, group)
	}

	group.requestIDs = append(group.requestIDs, reqID)
	return locks
}

// Unlock releases a lock previously recorded for reqID on this node.
// It returns an error only for lock names this provider does not know.
func (l *StorageNodeLock) Unlock(reqID string, lock distlock.Lock) error {
	switch lock.Name {
	case STORAGE_SET_READ_LOCK:
		l.setReadReqIDs = mylo.Remove(l.setReadReqIDs, reqID)
	case STORAGE_SET_WRITE_LOCK:
		l.setWriteReqIDs = mylo.Remove(l.setWriteReqIDs, reqID)
	case STORAGE_SET_CREATE_LOCK:
		l.setCreateReqIDs = mylo.Remove(l.setCreateReqIDs, reqID)

	case STORAGE_ELEMENT_READ_LOCK:
		l.elementReadLocks = l.removeElementLock(lock, l.elementReadLocks, reqID)
	case STORAGE_ELEMENT_WRITE_LOCK:
		l.elementWriteLocks = l.removeElementLock(lock, l.elementWriteLocks, reqID)
	case STORAGE_ELEMENT_CREATE_LOCK:
		// Fix: this case was missing. The struct tracks elementCreateLocks
		// and the compatibility table references them, but element-create
		// locks could never be released here.
		l.elementCreateLocks = l.removeElementLock(lock, l.elementCreateLocks, reqID)

	default:
		// Fix: error message typo "unknow" -> "unknown".
		return fmt.Errorf("unknown lock name: %s", lock.Name)
	}

	return nil
}

// removeElementLock removes reqID from the first element-lock group whose
// target conflicts with the requested lock's target, dropping the group
// entirely once no request holds it. It returns the updated slice of groups.
func (l *StorageNodeLock) removeElementLock(lock distlock.Lock, locks []*storageElementLock, reqID string) []*storageElementLock {
	target := lock.Target.(StringLockTarget)

	for i, group := range locks {
		if !target.IsConflict(&group.target) {
			continue
		}

		group.requestIDs = mylo.Remove(group.requestIDs, reqID)
		if len(group.requestIDs) == 0 {
			locks = mylo.RemoveAt(locks, i)
		}
		break
	}

	return locks
}

+ 78
- 0
common/pkgs/distlock/lockprovider/string_lock_target.go View File

@@ -0,0 +1,78 @@
package lockprovider

import (
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/utils/serder"
)

// StringLockTarget describes what a lock applies to, as an ordered list of
// components.
// NOTE(review): "Componet" is a misspelling of "Component" in the exported
// element type; renaming would break callers, so it is only flagged here.
type StringLockTarget struct {
	Components []StringLockTargetComponet `json:"components"`
}

// NewStringLockTarget creates an empty lock target with no components.
func NewStringLockTarget() *StringLockTarget {
	return &StringLockTarget{}
}

// Add appends one component whose values are the string forms of compValues,
// and returns the target itself for chaining.
func (t *StringLockTarget) Add(compValues ...any) *StringLockTarget {
	values := lo.Map(compValues, func(val any, _ int) string {
		return fmt.Sprintf("%v", val)
	})

	t.Components = append(t.Components, StringLockTargetComponet{Values: values})
	return t
}

// IsConflict reports whether two lock targets conflict. It is only meaningful
// for targets that share the same structure.
func (t *StringLockTarget) IsConflict(other *StringLockTarget) bool {
	// Targets with different shapes never conflict.
	if len(t.Components) != len(other.Components) {
		return false
	}

	// Two targets with no components at all always conflict.
	if len(t.Components) == 0 {
		return true
	}

	// A single pair of equal components is enough to conflict.
	for i := range t.Components {
		if t.Components[i].IsEquals(&other.Components[i]) {
			return true
		}
	}

	return false
}

// StringLockTargetComponet is one segment of a StringLockTarget, holding an
// ordered list of string values.
type StringLockTargetComponet struct {
	Values []string `json:"values"`
}

// IsEquals reports whether two components hold exactly the same values in the
// same order. It is only meaningful for components of the same structure.
func (t *StringLockTargetComponet) IsEquals(other *StringLockTargetComponet) bool {
	if len(t.Values) != len(other.Values) {
		return false
	}

	for i, v := range t.Values {
		if v != other.Values[i] {
			return false
		}
	}

	return true
}

// StringLockTargetToString serializes a lock target to its JSON string form.
func StringLockTargetToString(target *StringLockTarget) (string, error) {
	jsonData, err := serder.ObjectToJSON(target)
	if err != nil {
		return "", err
	}

	return string(jsonData), nil
}

// StringLockTargetFromString parses the JSON string form produced by
// StringLockTargetToString back into a StringLockTarget.
func StringLockTargetFromString(str string) (StringLockTarget, error) {
	var target StringLockTarget
	err := serder.JSONToObject([]byte(str), &target)
	return target, err
}

+ 60
- 0
common/pkgs/distlock/lockprovider/string_lock_target_test.go View File

@@ -0,0 +1,60 @@
package lockprovider

import (
"testing"

. "github.com/smartystreets/goconvey/convey"
)

// Test_StringLockTarget exercises the pairwise conflict rules of
// StringLockTarget.IsConflict via table-driven cases.
// Case titles are runtime strings and are left in Chinese; translations are
// given in the comments above each case.
func Test_StringLockTarget(t *testing.T) {
	cases := []struct {
		title          string
		target1        *StringLockTarget
		target2        *StringLockTarget
		wantIsConflict bool
	}{
		{
			// Targets with no components at all conflict.
			title:          "没有任何段算冲突",
			target1:        NewStringLockTarget(),
			target2:        NewStringLockTarget(),
			wantIsConflict: true,
		},
		{
			// Components exist but are empty: still a conflict.
			title:          "有段,但段内为空,算冲突",
			target1:        NewStringLockTarget().Add(),
			target2:        NewStringLockTarget().Add(),
			wantIsConflict: true,
		},
		{
			// Only when every component differs is there no conflict.
			title:          "每一段不同才不冲突",
			target1:        NewStringLockTarget().Add("a").Add("b"),
			target2:        NewStringLockTarget().Add("b").Add("c"),
			wantIsConflict: false,
		},
		{
			// A single matching component is enough to conflict.
			title:          "只要有一段相同就冲突",
			target1:        NewStringLockTarget().Add("a").Add("b"),
			target2:        NewStringLockTarget().Add("a").Add("c"),
			wantIsConflict: true,
		},
		{
			// Within one component, any differing value means no conflict.
			title:          "同段内,只要有一个数据不同就不冲突",
			target1:        NewStringLockTarget().Add("a", "b"),
			target2:        NewStringLockTarget().Add("b", "b"),
			wantIsConflict: false,
		},
		{
			// Within one component, all values equal means conflict.
			title:          "同段内,只要每个数据都相同才不冲突",
			target1:        NewStringLockTarget().Add("a", "b"),
			target2:        NewStringLockTarget().Add("a", "b"),
			wantIsConflict: true,
		},
	}

	for _, ca := range cases {
		Convey(ca.title, t, func() {
			ret := ca.target1.IsConflict(ca.target2)
			So(ret, ShouldEqual, ca.wantIsConflict)
		})
	}
}

+ 64
- 0
common/pkgs/distlock/reqbuilder/ipfs.go View File

@@ -0,0 +1,64 @@
package reqbuilder

import (
"strconv"

"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// IPFSLockReqBuilder builds lock requests against the IPFS lock namespace.
type IPFSLockReqBuilder struct {
	*LockRequestBuilder
}

// IPFS enters the IPFS lock namespace.
func (b *LockRequestBuilder) IPFS() *IPFSLockReqBuilder {
	return &IPFSLockReqBuilder{LockRequestBuilder: b}
}

// appendLock records one lock with the given name and target on one node.
func (b *IPFSLockReqBuilder) appendLock(nodeID int64, lockName string, target *lockprovider.StringLockTarget) *IPFSLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath(nodeID),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOneRep adds an element read lock for one file hash on the node.
func (b *IPFSLockReqBuilder) ReadOneRep(nodeID int64, fileHash string) *IPFSLockReqBuilder {
	return b.appendLock(nodeID, lockprovider.IPFS_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(fileHash))
}

// WriteOneRep adds an element write lock for one file hash on the node.
func (b *IPFSLockReqBuilder) WriteOneRep(nodeID int64, fileHash string) *IPFSLockReqBuilder {
	return b.appendLock(nodeID, lockprovider.IPFS_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(fileHash))
}

// ReadAnyRep adds a set-level read lock on the node.
func (b *IPFSLockReqBuilder) ReadAnyRep(nodeID int64) *IPFSLockReqBuilder {
	return b.appendLock(nodeID, lockprovider.IPFS_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAnyRep adds a set-level write lock on the node.
func (b *IPFSLockReqBuilder) WriteAnyRep(nodeID int64) *IPFSLockReqBuilder {
	return b.appendLock(nodeID, lockprovider.IPFS_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAnyRep adds a set-level create lock on the node.
func (b *IPFSLockReqBuilder) CreateAnyRep(nodeID int64) *IPFSLockReqBuilder {
	return b.appendLock(nodeID, lockprovider.IPFS_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

// makePath builds the lock path for one IPFS node.
func (b *IPFSLockReqBuilder) makePath(nodeID int64) []string {
	return []string{lockprovider.IPFSLockPathPrefix, strconv.FormatInt(nodeID, 10)}
}

+ 31
- 0
common/pkgs/distlock/reqbuilder/lock_request_builder.go View File

@@ -0,0 +1,31 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/common/pkgs/distlock/service"
mylo "gitlink.org.cn/cloudream/common/utils/lo"
)

// LockRequestBuilder accumulates locks and assembles them into a LockRequest.
type LockRequestBuilder struct {
	locks []distlock.Lock
}

// NewBuilder creates an empty lock request builder.
func NewBuilder() *LockRequestBuilder {
	return &LockRequestBuilder{}
}

// Build snapshots the accumulated locks into a LockRequest. The slice is
// cloned so later builder mutations do not affect the returned request.
func (b *LockRequestBuilder) Build() distlock.LockRequest {
	return distlock.LockRequest{Locks: mylo.ArrayClone(b.locks)}
}

// MutexLock builds the request, acquires it as a mutex on the given service,
// and returns the held mutex. On failure nothing is held.
func (b *LockRequestBuilder) MutexLock(svc *service.Service) (*service.Mutex, error) {
	mutex := service.NewMutex(svc, b.Build())
	if err := mutex.Lock(); err != nil {
		return nil, err
	}

	return mutex, nil
}

+ 17
- 0
common/pkgs/distlock/reqbuilder/metadata.go View File

@@ -0,0 +1,17 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// MetadataLockReqBuilder builds lock requests against the metadata lock
// namespace; per-table builders hang off of it.
type MetadataLockReqBuilder struct {
	*LockRequestBuilder
}

// Metadata enters the metadata lock namespace.
func (b *LockRequestBuilder) Metadata() *MetadataLockReqBuilder {
	return &MetadataLockReqBuilder{LockRequestBuilder: b}
}

// makePath builds the lock path for one metadata table.
func (b *MetadataLockReqBuilder) makePath(tableName string) []string {
	return []string{lockprovider.MetadataLockPathPrefix, tableName}
}

+ 63
- 0
common/pkgs/distlock/reqbuilder/metadata_bucket.go View File

@@ -0,0 +1,63 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// MetadataBucketLockReqBuilder builds locks on the Bucket metadata table.
type MetadataBucketLockReqBuilder struct {
	*MetadataLockReqBuilder
}

// Bucket enters the Bucket table lock namespace.
func (b *MetadataLockReqBuilder) Bucket() *MetadataBucketLockReqBuilder {
	return &MetadataBucketLockReqBuilder{MetadataLockReqBuilder: b}
}

// appendLock records one lock on the Bucket table with the given name and target.
func (b *MetadataBucketLockReqBuilder) appendLock(lockName string, target *lockprovider.StringLockTarget) *MetadataBucketLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath("Bucket"),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOne adds an element read lock targeting one bucket ID.
func (b *MetadataBucketLockReqBuilder) ReadOne(bucketID int64) *MetadataBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(bucketID))
}

// WriteOne adds an element write lock targeting one bucket ID.
func (b *MetadataBucketLockReqBuilder) WriteOne(bucketID int64) *MetadataBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(bucketID))
}

// CreateOne adds an element create lock targeting one (userID, bucketName) pair.
func (b *MetadataBucketLockReqBuilder) CreateOne(userID int64, bucketName string) *MetadataBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_CREATE_LOCK, lockprovider.NewStringLockTarget().Add(userID, bucketName))
}

// ReadAny adds a set-level read lock on the Bucket table.
func (b *MetadataBucketLockReqBuilder) ReadAny() *MetadataBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAny adds a set-level write lock on the Bucket table.
func (b *MetadataBucketLockReqBuilder) WriteAny() *MetadataBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAny adds a set-level create lock on the Bucket table.
func (b *MetadataBucketLockReqBuilder) CreateAny() *MetadataBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

+ 63
- 0
common/pkgs/distlock/reqbuilder/metadata_cache.go View File

@@ -0,0 +1,63 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// MetadataCacheLockReqBuilder builds locks on the Cache metadata table.
type MetadataCacheLockReqBuilder struct {
	*MetadataLockReqBuilder
}

// Cache enters the Cache table lock namespace.
func (b *MetadataLockReqBuilder) Cache() *MetadataCacheLockReqBuilder {
	return &MetadataCacheLockReqBuilder{MetadataLockReqBuilder: b}
}

// appendLock records one lock on the Cache table with the given name and target.
func (b *MetadataCacheLockReqBuilder) appendLock(lockName string, target *lockprovider.StringLockTarget) *MetadataCacheLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath("Cache"),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOne adds an element read lock targeting one (nodeID, fileHash) pair.
func (b *MetadataCacheLockReqBuilder) ReadOne(nodeID int64, fileHash string) *MetadataCacheLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(nodeID, fileHash))
}

// WriteOne adds an element write lock targeting one (nodeID, fileHash) pair.
func (b *MetadataCacheLockReqBuilder) WriteOne(nodeID int64, fileHash string) *MetadataCacheLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(nodeID, fileHash))
}

// CreateOne adds an element create lock targeting one (nodeID, fileHash) pair.
func (b *MetadataCacheLockReqBuilder) CreateOne(nodeID int64, fileHash string) *MetadataCacheLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_CREATE_LOCK, lockprovider.NewStringLockTarget().Add(nodeID, fileHash))
}

// ReadAny adds a set-level read lock on the Cache table.
func (b *MetadataCacheLockReqBuilder) ReadAny() *MetadataCacheLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAny adds a set-level write lock on the Cache table.
func (b *MetadataCacheLockReqBuilder) WriteAny() *MetadataCacheLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAny adds a set-level create lock on the Cache table.
func (b *MetadataCacheLockReqBuilder) CreateAny() *MetadataCacheLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

+ 63
- 0
common/pkgs/distlock/reqbuilder/metadata_node.go View File

@@ -0,0 +1,63 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// MetadataNodeLockReqBuilder builds locks on the Node metadata table.
type MetadataNodeLockReqBuilder struct {
	*MetadataLockReqBuilder
}

// Node enters the Node table lock namespace.
func (b *MetadataLockReqBuilder) Node() *MetadataNodeLockReqBuilder {
	return &MetadataNodeLockReqBuilder{MetadataLockReqBuilder: b}
}

// appendLock records one lock on the Node table with the given name and target.
func (b *MetadataNodeLockReqBuilder) appendLock(lockName string, target *lockprovider.StringLockTarget) *MetadataNodeLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath("Node"),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOne adds an element read lock targeting one node ID.
func (b *MetadataNodeLockReqBuilder) ReadOne(nodeID int64) *MetadataNodeLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(nodeID))
}

// WriteOne adds an element write lock targeting one node ID.
func (b *MetadataNodeLockReqBuilder) WriteOne(nodeID int64) *MetadataNodeLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(nodeID))
}

// CreateOne adds an element create lock with an empty target.
func (b *MetadataNodeLockReqBuilder) CreateOne() *MetadataNodeLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

// ReadAny adds a set-level read lock on the Node table.
func (b *MetadataNodeLockReqBuilder) ReadAny() *MetadataNodeLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAny adds a set-level write lock on the Node table.
func (b *MetadataNodeLockReqBuilder) WriteAny() *MetadataNodeLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAny adds a set-level create lock on the Node table.
func (b *MetadataNodeLockReqBuilder) CreateAny() *MetadataNodeLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

+ 65
- 0
common/pkgs/distlock/reqbuilder/metadata_object.go View File

@@ -0,0 +1,65 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// TODO 可以考虑增加基于PackageID的锁,让访问不同Package的Object的操作能并行

// MetadataObjectLockReqBuilder builds locks on the Object metadata table.
type MetadataObjectLockReqBuilder struct {
	*MetadataLockReqBuilder
}

// Object enters the Object table lock namespace.
func (b *MetadataLockReqBuilder) Object() *MetadataObjectLockReqBuilder {
	return &MetadataObjectLockReqBuilder{MetadataLockReqBuilder: b}
}

// appendLock records one lock on the Object table with the given name and target.
func (b *MetadataObjectLockReqBuilder) appendLock(lockName string, target *lockprovider.StringLockTarget) *MetadataObjectLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath("Object"),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOne adds an element read lock targeting one object ID.
func (b *MetadataObjectLockReqBuilder) ReadOne(objectID int64) *MetadataObjectLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(objectID))
}

// WriteOne adds an element write lock targeting one object ID.
func (b *MetadataObjectLockReqBuilder) WriteOne(objectID int64) *MetadataObjectLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(objectID))
}

// CreateOne adds an element create lock targeting one (bucketID, objectName) pair.
func (b *MetadataObjectLockReqBuilder) CreateOne(bucketID int64, objectName string) *MetadataObjectLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_CREATE_LOCK, lockprovider.NewStringLockTarget().Add(bucketID, objectName))
}

// ReadAny adds a set-level read lock on the Object table.
func (b *MetadataObjectLockReqBuilder) ReadAny() *MetadataObjectLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAny adds a set-level write lock on the Object table.
func (b *MetadataObjectLockReqBuilder) WriteAny() *MetadataObjectLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAny adds a set-level create lock on the Object table.
func (b *MetadataObjectLockReqBuilder) CreateAny() *MetadataObjectLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

+ 63
- 0
common/pkgs/distlock/reqbuilder/metadata_object_block.go View File

@@ -0,0 +1,63 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// MetadataObjectBlockLockReqBuilder builds locks on the ObjectBlock metadata table.
type MetadataObjectBlockLockReqBuilder struct {
	*MetadataLockReqBuilder
}

// ObjectBlock enters the ObjectBlock table lock namespace.
func (b *MetadataLockReqBuilder) ObjectBlock() *MetadataObjectBlockLockReqBuilder {
	return &MetadataObjectBlockLockReqBuilder{MetadataLockReqBuilder: b}
}

// appendLock records one lock on the ObjectBlock table with the given name and target.
func (b *MetadataObjectBlockLockReqBuilder) appendLock(lockName string, target *lockprovider.StringLockTarget) *MetadataObjectBlockLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath("ObjectBlock"),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOne adds an element read lock targeting one object ID.
// NOTE(review): objectID is typed int here while every sibling builder uses
// int64 — confirm and consider unifying (changing it now would break callers).
func (b *MetadataObjectBlockLockReqBuilder) ReadOne(objectID int) *MetadataObjectBlockLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(objectID))
}

// WriteOne adds an element write lock targeting one object ID.
// NOTE(review): see ReadOne about the int vs int64 inconsistency.
func (b *MetadataObjectBlockLockReqBuilder) WriteOne(objectID int) *MetadataObjectBlockLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(objectID))
}

// CreateOne adds an element create lock with an empty target.
func (b *MetadataObjectBlockLockReqBuilder) CreateOne() *MetadataObjectBlockLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

// ReadAny adds a set-level read lock on the ObjectBlock table.
func (b *MetadataObjectBlockLockReqBuilder) ReadAny() *MetadataObjectBlockLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAny adds a set-level write lock on the ObjectBlock table.
func (b *MetadataObjectBlockLockReqBuilder) WriteAny() *MetadataObjectBlockLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAny adds a set-level create lock on the ObjectBlock table.
func (b *MetadataObjectBlockLockReqBuilder) CreateAny() *MetadataObjectBlockLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

+ 63
- 0
common/pkgs/distlock/reqbuilder/metadata_object_rep.go View File

@@ -0,0 +1,63 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// MetadataObjectRepLockReqBuilder builds locks on the ObjectRep metadata table.
type MetadataObjectRepLockReqBuilder struct {
	*MetadataLockReqBuilder
}

// ObjectRep enters the ObjectRep table lock namespace.
func (b *MetadataLockReqBuilder) ObjectRep() *MetadataObjectRepLockReqBuilder {
	return &MetadataObjectRepLockReqBuilder{MetadataLockReqBuilder: b}
}

// appendLock records one lock on the ObjectRep table with the given name and target.
func (b *MetadataObjectRepLockReqBuilder) appendLock(lockName string, target *lockprovider.StringLockTarget) *MetadataObjectRepLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath("ObjectRep"),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOne adds an element read lock targeting one object ID.
func (b *MetadataObjectRepLockReqBuilder) ReadOne(objectID int64) *MetadataObjectRepLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(objectID))
}

// WriteOne adds an element write lock targeting one object ID.
func (b *MetadataObjectRepLockReqBuilder) WriteOne(objectID int64) *MetadataObjectRepLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(objectID))
}

// CreateOne adds an element create lock with an empty target.
func (b *MetadataObjectRepLockReqBuilder) CreateOne() *MetadataObjectRepLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

// ReadAny adds a set-level read lock on the ObjectRep table.
func (b *MetadataObjectRepLockReqBuilder) ReadAny() *MetadataObjectRepLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAny adds a set-level write lock on the ObjectRep table.
func (b *MetadataObjectRepLockReqBuilder) WriteAny() *MetadataObjectRepLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAny adds a set-level create lock on the ObjectRep table.
func (b *MetadataObjectRepLockReqBuilder) CreateAny() *MetadataObjectRepLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

+ 63
- 0
common/pkgs/distlock/reqbuilder/metadata_package.go View File

@@ -0,0 +1,63 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// MetadataPackageLockReqBuilder builds locks on the Package metadata table.
type MetadataPackageLockReqBuilder struct {
	*MetadataLockReqBuilder
}

// Package enters the Package table lock namespace.
func (b *MetadataLockReqBuilder) Package() *MetadataPackageLockReqBuilder {
	return &MetadataPackageLockReqBuilder{MetadataLockReqBuilder: b}
}

// appendLock records one lock on the Package table with the given name and target.
func (b *MetadataPackageLockReqBuilder) appendLock(lockName string, target *lockprovider.StringLockTarget) *MetadataPackageLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath("Package"),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOne adds an element read lock targeting one package ID.
func (b *MetadataPackageLockReqBuilder) ReadOne(packageID int64) *MetadataPackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(packageID))
}

// WriteOne adds an element write lock targeting one package ID.
func (b *MetadataPackageLockReqBuilder) WriteOne(packageID int64) *MetadataPackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(packageID))
}

// CreateOne adds an element create lock targeting one (bucketID, packageName) pair.
func (b *MetadataPackageLockReqBuilder) CreateOne(bucketID int64, packageName string) *MetadataPackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_CREATE_LOCK, lockprovider.NewStringLockTarget().Add(bucketID, packageName))
}

// ReadAny adds a set-level read lock on the Package table.
func (b *MetadataPackageLockReqBuilder) ReadAny() *MetadataPackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAny adds a set-level write lock on the Package table.
func (b *MetadataPackageLockReqBuilder) WriteAny() *MetadataPackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAny adds a set-level create lock on the Package table.
func (b *MetadataPackageLockReqBuilder) CreateAny() *MetadataPackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

+ 63
- 0
common/pkgs/distlock/reqbuilder/metadata_storage_package.go View File

@@ -0,0 +1,63 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// MetadataStoragePackageLockReqBuilder builds locks on the StoragePackage metadata table.
type MetadataStoragePackageLockReqBuilder struct {
	*MetadataLockReqBuilder
}

// StoragePackage enters the StoragePackage table lock namespace.
func (b *MetadataLockReqBuilder) StoragePackage() *MetadataStoragePackageLockReqBuilder {
	return &MetadataStoragePackageLockReqBuilder{MetadataLockReqBuilder: b}
}

// appendLock records one lock on the StoragePackage table with the given name and target.
func (b *MetadataStoragePackageLockReqBuilder) appendLock(lockName string, target *lockprovider.StringLockTarget) *MetadataStoragePackageLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath("StoragePackage"),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOne adds an element read lock targeting one (storageID, userID, packageID) triple.
func (b *MetadataStoragePackageLockReqBuilder) ReadOne(storageID int64, userID int64, packageID int64) *MetadataStoragePackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(storageID, userID, packageID))
}

// WriteOne adds an element write lock targeting one (storageID, userID, packageID) triple.
func (b *MetadataStoragePackageLockReqBuilder) WriteOne(storageID int64, userID int64, packageID int64) *MetadataStoragePackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(storageID, userID, packageID))
}

// CreateOne adds an element create lock targeting one (storageID, userID, packageID) triple.
func (b *MetadataStoragePackageLockReqBuilder) CreateOne(storageID int64, userID int64, packageID int64) *MetadataStoragePackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_CREATE_LOCK, lockprovider.NewStringLockTarget().Add(storageID, userID, packageID))
}

// ReadAny adds a set-level read lock on the StoragePackage table.
func (b *MetadataStoragePackageLockReqBuilder) ReadAny() *MetadataStoragePackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAny adds a set-level write lock on the StoragePackage table.
func (b *MetadataStoragePackageLockReqBuilder) WriteAny() *MetadataStoragePackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAny adds a set-level create lock on the StoragePackage table.
func (b *MetadataStoragePackageLockReqBuilder) CreateAny() *MetadataStoragePackageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

+ 63
- 0
common/pkgs/distlock/reqbuilder/metadata_user_bucket.go View File

@@ -0,0 +1,63 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// MetadataUserBucketLockReqBuilder builds locks on the UserBucket metadata table.
type MetadataUserBucketLockReqBuilder struct {
	*MetadataLockReqBuilder
}

// UserBucket enters the UserBucket table lock namespace.
func (b *MetadataLockReqBuilder) UserBucket() *MetadataUserBucketLockReqBuilder {
	return &MetadataUserBucketLockReqBuilder{MetadataLockReqBuilder: b}
}

// appendLock records one lock on the UserBucket table with the given name and target.
func (b *MetadataUserBucketLockReqBuilder) appendLock(lockName string, target *lockprovider.StringLockTarget) *MetadataUserBucketLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath("UserBucket"),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOne adds an element read lock targeting one (userID, bucketID) pair.
func (b *MetadataUserBucketLockReqBuilder) ReadOne(userID int64, bucketID int64) *MetadataUserBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(userID, bucketID))
}

// WriteOne adds an element write lock targeting one (userID, bucketID) pair.
func (b *MetadataUserBucketLockReqBuilder) WriteOne(userID int64, bucketID int64) *MetadataUserBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(userID, bucketID))
}

// CreateOne adds an element create lock targeting one (userID, bucketID) pair.
func (b *MetadataUserBucketLockReqBuilder) CreateOne(userID int64, bucketID int64) *MetadataUserBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_CREATE_LOCK, lockprovider.NewStringLockTarget().Add(userID, bucketID))
}

// ReadAny adds a set-level read lock on the UserBucket table.
func (b *MetadataUserBucketLockReqBuilder) ReadAny() *MetadataUserBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAny adds a set-level write lock on the UserBucket table.
func (b *MetadataUserBucketLockReqBuilder) WriteAny() *MetadataUserBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAny adds a set-level create lock on the UserBucket table.
func (b *MetadataUserBucketLockReqBuilder) CreateAny() *MetadataUserBucketLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

+ 63
- 0
common/pkgs/distlock/reqbuilder/metadata_user_storage.go View File

@@ -0,0 +1,63 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

// MetadataUserStorageLockReqBuilder builds locks on the UserStorage metadata table.
type MetadataUserStorageLockReqBuilder struct {
	*MetadataLockReqBuilder
}

// UserStorage enters the UserStorage table lock namespace.
func (b *MetadataLockReqBuilder) UserStorage() *MetadataUserStorageLockReqBuilder {
	return &MetadataUserStorageLockReqBuilder{MetadataLockReqBuilder: b}
}

// appendLock records one lock on the UserStorage table with the given name and target.
func (b *MetadataUserStorageLockReqBuilder) appendLock(lockName string, target *lockprovider.StringLockTarget) *MetadataUserStorageLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath("UserStorage"),
		Name:   lockName,
		Target: *target,
	})
	return b
}

// ReadOne adds an element read lock targeting one (userID, storageID) pair.
func (b *MetadataUserStorageLockReqBuilder) ReadOne(userID int64, storageID int64) *MetadataUserStorageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_READ_LOCK, lockprovider.NewStringLockTarget().Add(userID, storageID))
}

// WriteOne adds an element write lock targeting one (userID, storageID) pair.
func (b *MetadataUserStorageLockReqBuilder) WriteOne(userID int64, storageID int64) *MetadataUserStorageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_WRITE_LOCK, lockprovider.NewStringLockTarget().Add(userID, storageID))
}

// CreateOne adds an element create lock targeting one (userID, storageID) pair.
func (b *MetadataUserStorageLockReqBuilder) CreateOne(userID int64, storageID int64) *MetadataUserStorageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_ELEMENT_CREATE_LOCK, lockprovider.NewStringLockTarget().Add(userID, storageID))
}

// ReadAny adds a set-level read lock on the UserStorage table.
func (b *MetadataUserStorageLockReqBuilder) ReadAny() *MetadataUserStorageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_READ_LOCK, lockprovider.NewStringLockTarget())
}

// WriteAny adds a set-level write lock on the UserStorage table.
func (b *MetadataUserStorageLockReqBuilder) WriteAny() *MetadataUserStorageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_WRITE_LOCK, lockprovider.NewStringLockTarget())
}

// CreateAny adds a set-level create lock on the UserStorage table.
func (b *MetadataUserStorageLockReqBuilder) CreateAny() *MetadataUserStorageLockReqBuilder {
	return b.appendLock(lockprovider.METADATA_SET_CREATE_LOCK, lockprovider.NewStringLockTarget())
}

+ 74
- 0
common/pkgs/distlock/reqbuilder/storage.go View File

@@ -0,0 +1,74 @@
package reqbuilder

import (
"strconv"

"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

type StorageLockReqBuilder struct {
*LockRequestBuilder
}

func (b *LockRequestBuilder) Storage() *StorageLockReqBuilder {
return &StorageLockReqBuilder{LockRequestBuilder: b}
}

func (b *StorageLockReqBuilder) ReadOnePackage(storageID int64, userID int64, packageID int64) *StorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(storageID),
Name: lockprovider.STORAGE_ELEMENT_READ_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, packageID),
})
return b
}

func (b *StorageLockReqBuilder) WriteOnePackage(storageID int64, userID int64, packageID int64) *StorageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(storageID),
Name: lockprovider.STORAGE_ELEMENT_WRITE_LOCK,
Target: *lockprovider.NewStringLockTarget().Add(userID, packageID),
})
return b
}

// CreateOnePackage places an element-level create lock on one
// user/package pair under the given storage.
//
// NOTE(review): the original used STORAGE_ELEMENT_WRITE_LOCK here, making
// Create indistinguishable from Write. Every sibling builder (the metadata
// CreateOne/CreateAny, and CreateAnyPackage below) uses its CREATE constant,
// so the WRITE constant looks like a copy-paste slip — confirm the constant
// exists in lockprovider before merging.
func (b *StorageLockReqBuilder) CreateOnePackage(storageID int64, userID int64, packageID int64) *StorageLockReqBuilder {
	b.locks = append(b.locks, distlock.Lock{
		Path:   b.makePath(storageID),
		Name:   lockprovider.STORAGE_ELEMENT_CREATE_LOCK,
		Target: *lockprovider.NewStringLockTarget().Add(userID, packageID),
	})
	return b
}

// ReadAnyPackage places a set-level read lock covering all packages in
// the given storage.
func (b *StorageLockReqBuilder) ReadAnyPackage(storageID int64) *StorageLockReqBuilder {
	lock := distlock.Lock{
		Path:   b.makePath(storageID),
		Name:   lockprovider.STORAGE_SET_READ_LOCK,
		Target: *lockprovider.NewStringLockTarget(),
	}
	b.locks = append(b.locks, lock)
	return b
}

// WriteAnyPackage places a set-level write lock covering all packages in
// the given storage.
func (b *StorageLockReqBuilder) WriteAnyPackage(storageID int64) *StorageLockReqBuilder {
	lock := distlock.Lock{
		Path:   b.makePath(storageID),
		Name:   lockprovider.STORAGE_SET_WRITE_LOCK,
		Target: *lockprovider.NewStringLockTarget(),
	}
	b.locks = append(b.locks, lock)
	return b
}

// CreateAnyPackage places a set-level create lock covering all packages
// in the given storage.
func (b *StorageLockReqBuilder) CreateAnyPackage(storageID int64) *StorageLockReqBuilder {
	lock := distlock.Lock{
		Path:   b.makePath(storageID),
		Name:   lockprovider.STORAGE_SET_CREATE_LOCK,
		Target: *lockprovider.NewStringLockTarget(),
	}
	b.locks = append(b.locks, lock)
	return b
}

// makePath builds the lock-tree path for one storage: <prefix>/<storageID>.
func (b *StorageLockReqBuilder) makePath(storageID int64) []string {
	id := strconv.FormatInt(storageID, 10)
	return []string{lockprovider.StorageLockPathPrefix, id}
}

+ 62
- 0
common/pkgs/distlock/service.go View File

@@ -0,0 +1,62 @@
package distlock

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/common/pkgs/distlock/service"
"gitlink.org.cn/cloudream/common/pkgs/trie"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/lockprovider"
)

type Service = service.Service

// NewService constructs a distlock service wired with all of the storage
// system's lock providers (metadata, IPFS, storage).
func NewService(cfg *distlock.Config) (*service.Service, error) {
	return service.NewService(cfg, initProviders())
}

// initProviders collects the path providers for every lock domain.
func initProviders() []service.PathProvider {
	provs := initMetadataLockProviders()
	provs = append(provs, initIPFSLockProviders()...)
	provs = append(provs, initStorageLockProviders()...)
	return provs
}

// initMetadataLockProviders registers one metadata lock provider per
// metadata table name.
func initMetadataLockProviders() []service.PathProvider {
	tables := []string{
		"Node", "Storage", "User", "UserBucket", "UserNode", "UserStorage",
		"Bucket", "Object", "Package", "ObjectRep", "ObjectBlock", "Cache",
		"StoragePackage", "Location",
	}

	provs := make([]service.PathProvider, 0, len(tables))
	for _, tbl := range tables {
		provs = append(provs, service.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, tbl))
	}
	return provs
}

// initIPFSLockProviders registers a single wildcard provider for all
// IPFS lock paths.
func initIPFSLockProviders() []service.PathProvider {
	prov := service.NewPathProvider(lockprovider.NewIPFSLock(), lockprovider.IPFSLockPathPrefix, trie.WORD_ANY)
	return []service.PathProvider{prov}
}

// initStorageLockProviders registers a single wildcard provider for all
// storage lock paths.
func initStorageLockProviders() []service.PathProvider {
	prov := service.NewPathProvider(lockprovider.NewStorageLock(), lockprovider.StorageLockPathPrefix, trie.WORD_ANY)
	return []service.PathProvider{prov}
}

+ 38
- 0
common/pkgs/ec/rs.go View File

@@ -0,0 +1,38 @@
package ec

import (
"fmt"
"os"

"github.com/baohan10/reedsolomon"
)

// rs bundles a reedsolomon codec instance with its EC parameters.
type rs struct {
	r   *reedsolomon.ReedSolomon // underlying codec
	ecN int                      // total block count (data + parity)
	ecK int                      // data block count
	ecP int                      // parity block count (ecN - ecK)
}

// NewRsEnc returns an rs codec for ecK data blocks out of ecN total blocks.
func NewRsEnc(ecK int, ecN int) *rs {
	return &rs{
		r:   reedsolomon.GetReedSolomonIns(ecK, ecN),
		ecN: ecN,
		ecK: ecK,
		ecP: ecN - ecK,
	}
}
// Encode delegates to the underlying reedsolomon codec to compute the
// parity shards of all in place.
func (r *rs) Encode(all [][]byte) {
	r.r.Encode(all)
}

// Repair reconstructs missing shards of all in place via the codec's
// Reconstruct, returning any codec error.
func (r *rs) Repair(all [][]byte) error {
	return r.r.Reconstruct(all)
}

// checkErr prints err to stderr when non-nil; it deliberately does not
// terminate the program. NOTE(review): not referenced anywhere in this
// file — possibly leftover from earlier code.
func checkErr(err error) {
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
	}
}

+ 345
- 0
common/pkgs/grpc/agent/agent.pb.go View File

@@ -0,0 +1,345 @@
// 使用的语法版本

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.30.0
// protoc v4.22.3
// source: pkgs/grpc/agent/agent.proto

package agent

import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)

const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type FileDataPacketType int32

const (
FileDataPacketType_Data FileDataPacketType = 0
FileDataPacketType_EOF FileDataPacketType = 1
)

// Enum value maps for FileDataPacketType.
var (
FileDataPacketType_name = map[int32]string{
0: "Data",
1: "EOF",
}
FileDataPacketType_value = map[string]int32{
"Data": 0,
"EOF": 1,
}
)

func (x FileDataPacketType) Enum() *FileDataPacketType {
p := new(FileDataPacketType)
*p = x
return p
}

func (x FileDataPacketType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (FileDataPacketType) Descriptor() protoreflect.EnumDescriptor {
return file_pkgs_grpc_agent_agent_proto_enumTypes[0].Descriptor()
}

func (FileDataPacketType) Type() protoreflect.EnumType {
return &file_pkgs_grpc_agent_agent_proto_enumTypes[0]
}

func (x FileDataPacketType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}

// Deprecated: Use FileDataPacketType.Descriptor instead.
func (FileDataPacketType) EnumDescriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{0}
}

// 文件数据。注意:只在Type为Data的时候,Data字段才能有数据
type FileDataPacket struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Type FileDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=FileDataPacketType" json:"Type,omitempty"`
Data []byte `protobuf:"bytes,2,opt,name=Data,proto3" json:"Data,omitempty"`
}

func (x *FileDataPacket) Reset() {
*x = FileDataPacket{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *FileDataPacket) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*FileDataPacket) ProtoMessage() {}

func (x *FileDataPacket) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use FileDataPacket.ProtoReflect.Descriptor instead.
func (*FileDataPacket) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{0}
}

func (x *FileDataPacket) GetType() FileDataPacketType {
if x != nil {
return x.Type
}
return FileDataPacketType_Data
}

func (x *FileDataPacket) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}

type SendIPFSFileResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

FileHash string `protobuf:"bytes,1,opt,name=FileHash,proto3" json:"FileHash,omitempty"`
}

func (x *SendIPFSFileResp) Reset() {
*x = SendIPFSFileResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *SendIPFSFileResp) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*SendIPFSFileResp) ProtoMessage() {}

func (x *SendIPFSFileResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use SendIPFSFileResp.ProtoReflect.Descriptor instead.
func (*SendIPFSFileResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{1}
}

func (x *SendIPFSFileResp) GetFileHash() string {
if x != nil {
return x.FileHash
}
return ""
}

type GetIPFSFileReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

FileHash string `protobuf:"bytes,1,opt,name=FileHash,proto3" json:"FileHash,omitempty"`
}

func (x *GetIPFSFileReq) Reset() {
*x = GetIPFSFileReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *GetIPFSFileReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*GetIPFSFileReq) ProtoMessage() {}

func (x *GetIPFSFileReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use GetIPFSFileReq.ProtoReflect.Descriptor instead.
func (*GetIPFSFileReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{2}
}

func (x *GetIPFSFileReq) GetFileHash() string {
if x != nil {
return x.FileHash
}
return ""
}

var File_pkgs_grpc_agent_agent_proto protoreflect.FileDescriptor

var file_pkgs_grpc_agent_agent_proto_rawDesc = []byte{
0x0a, 0x1b, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x61, 0x67, 0x65, 0x6e,
0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4d, 0x0a,
0x0e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12,
0x27, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e,
0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79,
0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x2e, 0x0a, 0x10,
0x53, 0x65, 0x6e, 0x64, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70,
0x12, 0x1a, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x22, 0x2c, 0x0a, 0x0e,
0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x12, 0x1a,
0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x2a, 0x27, 0x0a, 0x12, 0x46, 0x69,
0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65,
0x12, 0x08, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x4f,
0x46, 0x10, 0x01, 0x32, 0x74, 0x0a, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0c,
0x53, 0x65, 0x6e, 0x64, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e, 0x46,
0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x11, 0x2e,
0x53, 0x65, 0x6e, 0x64, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70,
0x22, 0x00, 0x28, 0x01, 0x12, 0x33, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46,
0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c,
0x65, 0x52, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50,
0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x00, 0x30, 0x01, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x3b, 0x61,
0x67, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
file_pkgs_grpc_agent_agent_proto_rawDescOnce sync.Once
file_pkgs_grpc_agent_agent_proto_rawDescData = file_pkgs_grpc_agent_agent_proto_rawDesc
)

func file_pkgs_grpc_agent_agent_proto_rawDescGZIP() []byte {
file_pkgs_grpc_agent_agent_proto_rawDescOnce.Do(func() {
file_pkgs_grpc_agent_agent_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkgs_grpc_agent_agent_proto_rawDescData)
})
return file_pkgs_grpc_agent_agent_proto_rawDescData
}

var file_pkgs_grpc_agent_agent_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_pkgs_grpc_agent_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_pkgs_grpc_agent_agent_proto_goTypes = []interface{}{
(FileDataPacketType)(0), // 0: FileDataPacketType
(*FileDataPacket)(nil), // 1: FileDataPacket
(*SendIPFSFileResp)(nil), // 2: SendIPFSFileResp
(*GetIPFSFileReq)(nil), // 3: GetIPFSFileReq
}
var file_pkgs_grpc_agent_agent_proto_depIdxs = []int32{
0, // 0: FileDataPacket.Type:type_name -> FileDataPacketType
1, // 1: Agent.SendIPFSFile:input_type -> FileDataPacket
3, // 2: Agent.GetIPFSFile:input_type -> GetIPFSFileReq
2, // 3: Agent.SendIPFSFile:output_type -> SendIPFSFileResp
1, // 4: Agent.GetIPFSFile:output_type -> FileDataPacket
3, // [3:5] is the sub-list for method output_type
1, // [1:3] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}

func init() { file_pkgs_grpc_agent_agent_proto_init() }
func file_pkgs_grpc_agent_agent_proto_init() {
if File_pkgs_grpc_agent_agent_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_pkgs_grpc_agent_agent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FileDataPacket); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SendIPFSFileResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetIPFSFileReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pkgs_grpc_agent_agent_proto_rawDesc,
NumEnums: 1,
NumMessages: 3,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_pkgs_grpc_agent_agent_proto_goTypes,
DependencyIndexes: file_pkgs_grpc_agent_agent_proto_depIdxs,
EnumInfos: file_pkgs_grpc_agent_agent_proto_enumTypes,
MessageInfos: file_pkgs_grpc_agent_agent_proto_msgTypes,
}.Build()
File_pkgs_grpc_agent_agent_proto = out.File
file_pkgs_grpc_agent_agent_proto_rawDesc = nil
file_pkgs_grpc_agent_agent_proto_goTypes = nil
file_pkgs_grpc_agent_agent_proto_depIdxs = nil
}

+ 30
- 0
common/pkgs/grpc/agent/agent.proto View File

@@ -0,0 +1,30 @@
// Protobuf syntax version in use.
syntax = "proto3";

// Go package for the generated code.
option go_package = ".;agent";// takes effect for grpc


// Marks whether a packet carries payload bytes or signals end-of-stream.
enum FileDataPacketType {
    Data = 0;
    EOF = 1;
}
// File data. Note: the Data field may only carry data when Type is Data.
message FileDataPacket {
    FileDataPacketType Type = 1;
    bytes Data = 2;
}

// Response to a SendIPFSFile upload: the hash of the stored file.
message SendIPFSFileResp {
    string FileHash = 1;
}

// Request to download the file with the given hash.
message GetIPFSFileReq {
    string FileHash = 1;
}

// Agent streams file contents to and from the agent process.
service Agent {
    rpc SendIPFSFile(stream FileDataPacket)returns(SendIPFSFileResp){}
    rpc GetIPFSFile(GetIPFSFileReq)returns(stream FileDataPacket){}
}


+ 209
- 0
common/pkgs/grpc/agent/agent_grpc.pb.go View File

@@ -0,0 +1,209 @@
// 使用的语法版本

// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.22.3
// source: pkgs/grpc/agent/agent.proto

package agent

import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

const (
Agent_SendIPFSFile_FullMethodName = "/Agent/SendIPFSFile"
Agent_GetIPFSFile_FullMethodName = "/Agent/GetIPFSFile"
)

// AgentClient is the client API for Agent service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type AgentClient interface {
SendIPFSFile(ctx context.Context, opts ...grpc.CallOption) (Agent_SendIPFSFileClient, error)
GetIPFSFile(ctx context.Context, in *GetIPFSFileReq, opts ...grpc.CallOption) (Agent_GetIPFSFileClient, error)
}

type agentClient struct {
cc grpc.ClientConnInterface
}

func NewAgentClient(cc grpc.ClientConnInterface) AgentClient {
return &agentClient{cc}
}

func (c *agentClient) SendIPFSFile(ctx context.Context, opts ...grpc.CallOption) (Agent_SendIPFSFileClient, error) {
stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[0], Agent_SendIPFSFile_FullMethodName, opts...)
if err != nil {
return nil, err
}
x := &agentSendIPFSFileClient{stream}
return x, nil
}

type Agent_SendIPFSFileClient interface {
Send(*FileDataPacket) error
CloseAndRecv() (*SendIPFSFileResp, error)
grpc.ClientStream
}

type agentSendIPFSFileClient struct {
grpc.ClientStream
}

func (x *agentSendIPFSFileClient) Send(m *FileDataPacket) error {
return x.ClientStream.SendMsg(m)
}

func (x *agentSendIPFSFileClient) CloseAndRecv() (*SendIPFSFileResp, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(SendIPFSFileResp)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

func (c *agentClient) GetIPFSFile(ctx context.Context, in *GetIPFSFileReq, opts ...grpc.CallOption) (Agent_GetIPFSFileClient, error) {
stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[1], Agent_GetIPFSFile_FullMethodName, opts...)
if err != nil {
return nil, err
}
x := &agentGetIPFSFileClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}

type Agent_GetIPFSFileClient interface {
Recv() (*FileDataPacket, error)
grpc.ClientStream
}

type agentGetIPFSFileClient struct {
grpc.ClientStream
}

func (x *agentGetIPFSFileClient) Recv() (*FileDataPacket, error) {
m := new(FileDataPacket)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

// AgentServer is the server API for Agent service.
// All implementations must embed UnimplementedAgentServer
// for forward compatibility
type AgentServer interface {
SendIPFSFile(Agent_SendIPFSFileServer) error
GetIPFSFile(*GetIPFSFileReq, Agent_GetIPFSFileServer) error
mustEmbedUnimplementedAgentServer()
}

// UnimplementedAgentServer must be embedded to have forward compatible implementations.
type UnimplementedAgentServer struct {
}

func (UnimplementedAgentServer) SendIPFSFile(Agent_SendIPFSFileServer) error {
return status.Errorf(codes.Unimplemented, "method SendIPFSFile not implemented")
}
func (UnimplementedAgentServer) GetIPFSFile(*GetIPFSFileReq, Agent_GetIPFSFileServer) error {
return status.Errorf(codes.Unimplemented, "method GetIPFSFile not implemented")
}
func (UnimplementedAgentServer) mustEmbedUnimplementedAgentServer() {}

// UnsafeAgentServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to AgentServer will
// result in compilation errors.
type UnsafeAgentServer interface {
mustEmbedUnimplementedAgentServer()
}

func RegisterAgentServer(s grpc.ServiceRegistrar, srv AgentServer) {
s.RegisterService(&Agent_ServiceDesc, srv)
}

func _Agent_SendIPFSFile_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(AgentServer).SendIPFSFile(&agentSendIPFSFileServer{stream})
}

type Agent_SendIPFSFileServer interface {
SendAndClose(*SendIPFSFileResp) error
Recv() (*FileDataPacket, error)
grpc.ServerStream
}

type agentSendIPFSFileServer struct {
grpc.ServerStream
}

func (x *agentSendIPFSFileServer) SendAndClose(m *SendIPFSFileResp) error {
return x.ServerStream.SendMsg(m)
}

func (x *agentSendIPFSFileServer) Recv() (*FileDataPacket, error) {
m := new(FileDataPacket)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

func _Agent_GetIPFSFile_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(GetIPFSFileReq)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(AgentServer).GetIPFSFile(m, &agentGetIPFSFileServer{stream})
}

type Agent_GetIPFSFileServer interface {
Send(*FileDataPacket) error
grpc.ServerStream
}

type agentGetIPFSFileServer struct {
grpc.ServerStream
}

func (x *agentGetIPFSFileServer) Send(m *FileDataPacket) error {
return x.ServerStream.SendMsg(m)
}

// Agent_ServiceDesc is the grpc.ServiceDesc for Agent service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Agent_ServiceDesc = grpc.ServiceDesc{
ServiceName: "Agent",
HandlerType: (*AgentServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "SendIPFSFile",
Handler: _Agent_SendIPFSFile_Handler,
ClientStreams: true,
},
{
StreamName: "GetIPFSFile",
Handler: _Agent_GetIPFSFile_Handler,
ServerStreams: true,
},
},
Metadata: "pkgs/grpc/agent/agent.proto",
}

+ 131
- 0
common/pkgs/grpc/agent/client.go View File

@@ -0,0 +1,131 @@
package agent

import (
"context"
"fmt"
"io"

"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)

// Client is a gRPC client for the Agent service; it owns its connection,
// which is released by Close.
type Client struct {
	con *grpc.ClientConn // underlying connection
	cli AgentClient      // generated stub bound to con
}

// NewClient dials addr without transport security and wraps the
// connection in a Client. The caller must call Close when done.
func NewClient(addr string) (*Client, error) {
	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, err
	}

	cli := Client{
		con: conn,
		cli: NewAgentClient(conn),
	}
	return &cli, nil
}

// SendIPFSFile streams the contents of file to the agent in 4 KiB
// packets and returns the file hash the agent reports back.
func (c *Client) SendIPFSFile(file io.Reader) (string, error) {
	sendCli, err := c.cli.SendIPFSFile(context.Background())
	if err != nil {
		return "", err
	}

	buf := make([]byte, 4096)
	for {
		rd, err := file.Read(buf)
		if err == io.EOF {
			// Read may return data together with io.EOF, so flush the
			// final bytes inside the EOF packet.
			err := sendCli.Send(&FileDataPacket{
				Type: FileDataPacketType_EOF,
				Data: buf[:rd],
			})
			if err != nil {
				return "", fmt.Errorf("sending EOF packet: %w", err)
			}

			resp, err := sendCli.CloseAndRecv()
			if err != nil {
				return "", fmt.Errorf("receiving response: %w", err)
			}

			return resp.FileHash, nil
		}

		if err != nil {
			return "", fmt.Errorf("reading file data: %w", err)
		}

		err = sendCli.Send(&FileDataPacket{
			Type: FileDataPacketType_Data,
			Data: buf[:rd],
		})
		if err != nil {
			return "", fmt.Errorf("sending data packet: %w", err)
		}
	}
}

type fileReadCloser struct {
io.ReadCloser
stream Agent_GetIPFSFileClient
cancelFn context.CancelFunc
readingData []byte
recvEOF bool
}

// Read implements io.Reader over the packet stream. It refills its
// internal buffer from the next packet when empty and reports io.EOF
// once the EOF packet's payload has been fully consumed.
// (Also fixes the "packt" typo in the unsupported-type error message.)
func (s *fileReadCloser) Read(p []byte) (int, error) {
	if len(s.readingData) == 0 && !s.recvEOF {
		resp, err := s.stream.Recv()
		if err != nil {
			return 0, err
		}

		if resp.Type == FileDataPacketType_Data {
			s.readingData = resp.Data

		} else if resp.Type == FileDataPacketType_EOF {
			// The EOF packet may still carry trailing data.
			s.readingData = resp.Data
			s.recvEOF = true

		} else {
			return 0, fmt.Errorf("unsupported packet type: %v", resp.Type)
		}
	}

	cnt := copy(p, s.readingData)
	s.readingData = s.readingData[cnt:]

	if len(s.readingData) == 0 && s.recvEOF {
		return cnt, io.EOF
	}

	return cnt, nil
}

// Close cancels the underlying RPC context; it never returns an error.
func (s *fileReadCloser) Close() error {
	s.cancelFn()

	return nil
}

// GetIPFSFile starts a server-streaming download of the file with the
// given hash and returns a reader over its bytes. Closing the reader
// cancels the RPC.
func (c *Client) GetIPFSFile(fileHash string) (io.ReadCloser, error) {
	ctx, cancel := context.WithCancel(context.Background())

	req := GetIPFSFileReq{FileHash: fileHash}
	stream, err := c.cli.GetIPFSFile(ctx, &req)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("request grpc failed, err: %w", err)
	}

	rc := fileReadCloser{
		stream:   stream,
		cancelFn: cancel,
	}
	return &rc, nil
}

// Close tears down the underlying gRPC connection.
func (c *Client) Close() {
	c.con.Close()
}

+ 43
- 0
common/pkgs/grpc/agent/pool.go View File

@@ -0,0 +1,43 @@
package agent

import (
"fmt"
)

// PoolConfig configures how the pool dials agents.
type PoolConfig struct {
	Port int `json:"port"` // gRPC port each agent listens on
}

// PoolClient is an Agent client handed out by a Pool; its Close returns
// it to the owning pool instead of closing the connection directly.
type PoolClient struct {
	*Client
	owner *Pool // pool that produced this client
}

// Close releases this client back to its owning pool.
func (c *PoolClient) Close() {
	c.owner.Release(c)
}

// Pool hands out Agent clients. It currently dials a fresh connection on
// every Acquire and closes it on Release; no connections are cached.
type Pool struct {
	grpcCfg *PoolConfig
}

// NewPool creates a pool that dials agents on the configured port.
func NewPool(grpcCfg *PoolConfig) *Pool {
	p := Pool{grpcCfg: grpcCfg}
	return &p
}
// Acquire dials the agent at ip and wraps it in a PoolClient whose Close
// hands the connection back to this pool.
func (p *Pool) Acquire(ip string) (*PoolClient, error) {
	addr := fmt.Sprintf("%s:%d", ip, p.grpcCfg.Port)
	cli, err := NewClient(addr)
	if err != nil {
		return nil, err
	}

	pc := PoolClient{
		Client: cli,
		owner:  p,
	}
	return &pc, nil
}

// Release disposes of a client previously returned by Acquire. The
// current implementation simply closes the underlying connection.
func (p *Pool) Release(cli *PoolClient) {
	cli.Client.Close()
}

+ 12
- 0
common/pkgs/grpc/config.go View File

@@ -0,0 +1,12 @@
package grpc

import (
	"fmt"
	"net"
)

// Config describes a gRPC listen endpoint.
type Config struct {
	IP   string `json:"ip"`
	Port int    `json:"port"`
}

// MakeListenAddress renders the endpoint as "host:port". It uses
// net.JoinHostPort so IPv6 literals are bracketed correctly
// (e.g. "[::1]:5000"); plain Sprintf("%s:%d") produced an address the
// net package cannot listen on for IPv6. IPv4/hostname output is
// unchanged.
func (c *Config) MakeListenAddress() string {
	return net.JoinHostPort(c.IP, fmt.Sprint(c.Port))
}

+ 227
- 0
common/pkgs/iterator/ec_object_iterator.go View File

@@ -0,0 +1,227 @@
package iterator

import (
"fmt"
"io"
"math/rand"
"os"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/models"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-common/globals"
stgmodels "gitlink.org.cn/cloudream/storage-common/models"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/ec"
coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator"
)

// ECObjectIterator yields streaming readers for a batch of erasure-coded
// objects, decoding each from its ecK data blocks.
type ECObjectIterator struct {
	OnClosing func() // optional hook invoked by Close

	objects      []model.Object
	objectECData []stgmodels.ObjectECData // EC block layout, parallel to objects
	currentIndex int
	inited       bool // client location has been resolved

	ecInfo      models.ECRedundancyInfo
	ec          model.Ec
	downloadCtx *DownloadContext
	cliLocation model.Location // used to prefer same-location download nodes
}

// NewECObjectIterator builds an iterator over objects and their EC block
// metadata; objects and objectECData must be parallel slices.
func NewECObjectIterator(objects []model.Object, objectECData []stgmodels.ObjectECData, ecInfo models.ECRedundancyInfo, ec model.Ec, downloadCtx *DownloadContext) *ECObjectIterator {
	iter := ECObjectIterator{
		objects:      objects,
		objectECData: objectECData,
		ecInfo:       ecInfo,
		ec:           ec,
		downloadCtx:  downloadCtx,
	}
	return &iter
}

// MoveNext advances to the next object and returns a streaming reader
// for it, or ErrNoMoreItem when the batch is exhausted. On the first
// call it also resolves the client's location (used later to prefer
// same-location download nodes).
func (i *ECObjectIterator) MoveNext() (*IterDownloadingObject, error) {
	// TODO locking
	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer coorCli.Close()

	if !i.inited {
		i.inited = true

		// Resolve where this client is, keyed by its external IP.
		findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(globals.Local.ExternalIP))
		if err != nil {
			return nil, fmt.Errorf("finding client location: %w", err)
		}
		i.cliLocation = findCliLocResp.Location
	}

	if i.currentIndex >= len(i.objects) {
		return nil, ErrNoMoreItem
	}

	item, err := i.doMove(coorCli)
	i.currentIndex++
	return item, err
}

// doMove builds a decoded reader for the object at currentIndex: it
// picks one download node for each of the first ecK (data) blocks,
// preferring nodes in the client's location, then wires up the EC
// download pipeline.
func (iter *ECObjectIterator) doMove(coorCli *coormq.PoolClient) (*IterDownloadingObject, error) {
	obj := iter.objects[iter.currentIndex]
	ecData := iter.objectECData[iter.currentIndex]

	blocks := ecData.Blocks
	ec := iter.ec
	ecK := ec.EcK
	ecN := ec.EcN
	// Read blocks directly, preferring nodes reachable on the internal network.
	hashs := make([]string, ecK)
	nds := make([]DownloadNodeInfo, ecK)
	for i := 0; i < ecK; i++ {
		hashs[i] = blocks[i].FileHash

		getNodesResp, err := coorCli.GetNodes(coormq.NewGetNodes(blocks[i].NodeIDs))
		if err != nil {
			return nil, fmt.Errorf("getting nodes: %w", err)
		}

		downloadNodes := lo.Map(getNodesResp.Nodes, func(node model.Node, index int) DownloadNodeInfo {
			return DownloadNodeInfo{
				Node:           node,
				IsSameLocation: node.LocationID == iter.cliLocation.LocationID,
			}
		})

		nds[i] = iter.chooseDownloadNode(downloadNodes)
	}

	// nodeIDs/nodeIPs are laid out in the order of the first ecK blocks.
	nodeIDs := make([]int64, ecK)
	nodeIPs := make([]string, ecK)
	for i := 0; i < ecK; i++ {
		nodeIDs[i] = nds[i].Node.NodeID
		nodeIPs[i] = nds[i].Node.ExternalIP
		if nds[i].IsSameLocation {
			// Same location as the client: use the cheaper local address.
			nodeIPs[i] = nds[i].Node.LocalIP
			logger.Infof("client and node %d are at the same location, use local ip\n", nds[i].Node.NodeID)
		}
	}

	fileSize := obj.Size
	blockIDs := make([]int, ecK)
	for i := 0; i < ecK; i++ {
		blockIDs[i] = i // download the first ecK (data) blocks
	}
	reader, err := iter.downloadEcObject(fileSize, ecK, ecN, blockIDs, nodeIDs, nodeIPs, hashs)
	if err != nil {
		return nil, fmt.Errorf("ec read failed, err: %w", err)
	}

	return &IterDownloadingObject{
		File: reader,
	}, nil
}

// Close runs the OnClosing hook, if one was set.
func (i *ECObjectIterator) Close() {
	if i.OnClosing == nil {
		return
	}
	i.OnClosing()
}

// chooseDownloadNode picks one node to download from:
//  1. a random node in the same location as the client, if any exist;
//  2. otherwise a random node from the whole list.
//
// The caller must pass at least one entry.
func (i *ECObjectIterator) chooseDownloadNode(entries []DownloadNodeInfo) DownloadNodeInfo {
	var local []DownloadNodeInfo
	for _, e := range entries {
		if e.IsSameLocation {
			local = append(local, e)
		}
	}
	if len(local) > 0 {
		return local[rand.Intn(len(local))]
	}

	return entries[rand.Intn(len(entries))]
}

// downloadEcObject streams an erasure-coded object back as one file. It
// fetches the blocks listed in blockIDs from the given nodes, feeds them
// through the EC decoder packet by packet, and pipes the decoded stream
// to the returned reader. fileSize determines the packet count.
//
// Changes from the original: the leftover debug print(numPacket) is
// removed, and a pipe-write failure is now propagated to the reader via
// CloseWithError instead of building an error with fmt.Errorf and
// silently discarding it.
//
// TODO(zkx): currently synchronous; error handling in the download
// goroutines is still incomplete (download/read errors are ignored, and
// feeder goroutines may leak if the reader aborts early).
func (iter *ECObjectIterator) downloadEcObject(fileSize int64, ecK int, ecN int, blockIDs []int, nodeIDs []int64, nodeIPs []string, hashs []string) (io.ReadCloser, error) {
	numPacket := (fileSize + int64(ecK)*iter.ecInfo.PacketSize - 1) / (int64(ecK) * iter.ecInfo.PacketSize)

	getBufs := make([]chan []byte, ecN)
	decodeBufs := make([]chan []byte, ecK)
	for i := 0; i < ecN; i++ {
		getBufs[i] = make(chan []byte)
	}
	for i := 0; i < ecK; i++ {
		decodeBufs[i] = make(chan []byte)
	}

	for idx := 0; idx < len(blockIDs); idx++ {
		i := idx // per-goroutine copy (pre-Go1.22 loop-variable semantics)
		go func() {
			// TODO handle download errors
			file, _ := downloadFile(iter.downloadCtx, nodeIDs[i], nodeIPs[i], hashs[i])

			for p := int64(0); p < numPacket; p++ {
				buf := make([]byte, iter.ecInfo.PacketSize)
				// TODO handle short reads / errors
				io.ReadFull(file, buf)
				getBufs[blockIDs[i]] <- buf
			}
		}()
	}

	go decode(getBufs[:], decodeBufs[:], blockIDs, ecK, numPacket)

	r, w := io.Pipe()
	// Persist goroutine: write the decoded packets into the pipe in order.
	go func() {
		for i := 0; int64(i) < numPacket; i++ {
			for j := 0; j < len(decodeBufs); j++ {
				tmp := <-decodeBufs[j]
				if _, err := w.Write(tmp); err != nil {
					// Surface the failure on the reader side.
					w.CloseWithError(fmt.Errorf("persist file failed: %w", err))
					return
				}
			}
		}
		w.Close()
	}()
	return r, nil
}

// decode pulls one packet per input channel per round, repairs missing
// data blocks when necessary, and pushes the ecK decoded data packets of
// each round into outBufs. blockSeq lists which block index each input
// channel actually carries. All outBufs are closed when done.
//
// Changes from the original: the leftover debug output (fmt.Println
// at entry and a per-packet print("!!!!!")) has been removed.
func decode(inBufs []chan []byte, outBufs []chan []byte, blockSeq []int, ecK int, numPacket int64) {
	tmpIn := make([][]byte, len(inBufs))
	var zeroPkt []byte // placeholder for blocks that were not downloaded

	hasBlock := map[int]bool{}
	for _, seq := range blockSeq {
		hasBlock[seq] = true
	}

	// Repair is only needed when the first ecK inputs are not exactly the
	// data blocks in order.
	needRepair := false
	for j := 0; j < len(outBufs); j++ {
		if blockSeq[j] != j {
			needRepair = true
		}
	}

	enc := ec.NewRsEnc(ecK, len(inBufs))
	for i := 0; int64(i) < numPacket; i++ {
		for j := 0; j < len(inBufs); j++ {
			if hasBlock[j] {
				tmpIn[j] = <-inBufs[j]
			} else {
				tmpIn[j] = zeroPkt
			}
		}
		if needRepair {
			if err := enc.Repair(tmpIn); err != nil {
				fmt.Fprintf(os.Stderr, "Decode Repair Error: %s", err.Error())
			}
		}
		for j := 0; j < len(outBufs); j++ {
			outBufs[j] <- tmpIn[j]
		}
	}
	for i := 0; i < len(outBufs); i++ {
		close(outBufs[i])
	}
}

+ 45
- 0
common/pkgs/iterator/http_uploading_iterator.go View File

@@ -0,0 +1,45 @@
package iterator

import (
"mime/multipart"
)

// HTTPUploadingIterator yields the files of a multipart HTTP upload as
// uploading objects.
type HTTPUploadingIterator struct {
	files        []*multipart.FileHeader
	currentIndex int
}

// NewHTTPObjectIterator wraps the multipart file headers of an HTTP
// upload in an uploading-object iterator.
func NewHTTPObjectIterator(files []*multipart.FileHeader) *HTTPUploadingIterator {
	iter := HTTPUploadingIterator{files: files}
	return &iter
}

// MoveNext returns the next file to upload, or ErrNoMoreItem once all
// files have been consumed.
func (i *HTTPUploadingIterator) MoveNext() (*IterUploadingObject, error) {
	if i.currentIndex >= len(i.files) {
		return nil, ErrNoMoreItem
	}

	obj, err := i.doMove()
	i.currentIndex++
	return obj, err
}

// doMove opens the file at currentIndex and packages it for uploading.
func (i *HTTPUploadingIterator) doMove() (*IterUploadingObject, error) {
	hdr := i.files[i.currentIndex]

	file, err := hdr.Open()
	if err != nil {
		return nil, err
	}

	obj := IterUploadingObject{
		Path: hdr.Filename,
		Size: hdr.Size,
		File: file,
	}
	return &obj, nil
}

// Close is a no-op: the iterator holds no resources of its own (opened
// files are owned by the yielded objects).
func (i *HTTPUploadingIterator) Close() {

}

+ 12
- 0
common/pkgs/iterator/iterator.go View File

@@ -0,0 +1,12 @@
package iterator

import (
"errors"
)

// ErrNoMoreItem is returned by MoveNext when the iterator is exhausted.
var ErrNoMoreItem = errors.New("no more item")

// Iterator is a generic forward-only iterator. MoveNext returns the next
// item, or an error (ErrNoMoreItem once exhausted); Close releases any
// resources held by the iterator.
type Iterator[T any] interface {
	MoveNext() (T, error)
	Close()
}

+ 63
- 0
common/pkgs/iterator/local_uploading_iterator.go View File

@@ -0,0 +1,63 @@
package iterator

import (
"io"
"os"
"path/filepath"
"strings"
)

// UploadingObjectIterator iterates over objects to be uploaded.
type UploadingObjectIterator = Iterator[*IterUploadingObject]

// LocalUploadingIterator yields one uploading object per entry of
// filePathes, read from the local filesystem.
type LocalUploadingIterator struct {
	pathRoot     string   // slash-separated root, stripped from each path to form the object path
	filePathes   []string // absolute or root-relative file paths to upload
	currentIndex int      // index of the next file to yield
}

// IterUploadingObject is a single object to upload: its logical path, size
// in bytes, and an open reader for its content (consumers close File).
type IterUploadingObject struct {
	Path string
	Size int64
	File io.ReadCloser
}

// NewUploadingObjectIterator creates an iterator over the given local file
// paths. pathRoot is normalized to slash separators and stripped from each
// file path to produce the object's relative Path.
func NewUploadingObjectIterator(pathRoot string, filePathes []string) *LocalUploadingIterator {
	// Trim a trailing "/" so that doMove's TrimPrefix(pathRoot+"/") still
	// matches when the caller passes a root that already ends with a
	// separator (the original would then look for "//" and strip nothing).
	return &LocalUploadingIterator{
		pathRoot:   strings.TrimSuffix(filepath.ToSlash(pathRoot), "/"),
		filePathes: filePathes,
	}
}

// MoveNext returns the next uploading object, or ErrNoMoreItem once all
// paths are consumed. The cursor advances even when opening a file fails.
func (i *LocalUploadingIterator) MoveNext() (*IterUploadingObject, error) {
	if i.currentIndex >= len(i.filePathes) {
		return nil, ErrNoMoreItem
	}

	item, err := i.doMove()
	i.currentIndex++
	return item, err
}

// doMove stats and opens the file at the current cursor, and derives the
// object path by stripping pathRoot from the slash-normalized file path.
func (i *LocalUploadingIterator) doMove() (*IterUploadingObject, error) {
	path := i.filePathes[i.currentIndex]

	info, err := os.Stat(path)
	if err != nil {
		return nil, err
	}

	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	return &IterUploadingObject{
		Path: strings.TrimPrefix(filepath.ToSlash(path), i.pathRoot+"/"),
		Size: info.Size(),
		File: file,
	}, nil
}

// Close releases iterator resources; nothing is held here.
func (i *LocalUploadingIterator) Close() {

}

+ 211
- 0
common/pkgs/iterator/rep_object_iterator.go View File

@@ -0,0 +1,211 @@
package iterator

import (
"fmt"
"io"
"math/rand"

"github.com/samber/lo"
distsvc "gitlink.org.cn/cloudream/common/pkgs/distlock/service"
"gitlink.org.cn/cloudream/common/pkgs/logger"
myio "gitlink.org.cn/cloudream/common/utils/io"
"gitlink.org.cn/cloudream/storage-common/globals"
"gitlink.org.cn/cloudream/storage-common/models"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"
coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator"
)

// DownloadingObjectIterator iterates over objects being downloaded.
type DownloadingObjectIterator = Iterator[*IterDownloadingObject]

// RepObjectIterator downloads rep-redundancy objects one by one, choosing
// a source node for each object's replica.
type RepObjectIterator struct {
	OnClosing func() // optional hook invoked by Close

	objects       []model.Object
	objectRepData []models.ObjectRepData // parallel to objects — assumed same order; TODO confirm with callers
	currentIndex  int
	inited        bool // whether the client's location has been resolved

	downloadCtx *DownloadContext
	cliLocation model.Location
}

// IterDownloadingObject pairs an object's metadata with an open reader for
// its content.
type IterDownloadingObject struct {
	Object model.Object
	File   io.ReadCloser
}

// DownloadNodeInfo is a candidate source node, flagged with whether it sits
// in the same location as the client.
type DownloadNodeInfo struct {
	Node           model.Node
	IsSameLocation bool
}

// DownloadContext carries the services a download needs (distributed lock).
type DownloadContext struct {
	Distlock *distsvc.Service
}

// NewRepObjectIterator builds an iterator over objects and their rep data;
// objectRepData must parallel objects.
func NewRepObjectIterator(objects []model.Object, objectRepData []models.ObjectRepData, downloadCtx *DownloadContext) *RepObjectIterator {
	iter := RepObjectIterator{
		objects:       objects,
		objectRepData: objectRepData,
		downloadCtx:   downloadCtx,
	}
	return &iter
}

// MoveNext returns the next downloaded object, or ErrNoMoreItem once the
// iterator is exhausted. On first use it resolves the client's location so
// same-location nodes can be preferred as download sources.
func (i *RepObjectIterator) MoveNext() (*IterDownloadingObject, error) {
	// Check exhaustion before touching the MQ pool: the original acquired a
	// coordinator client on every call, even after the iterator was drained.
	if i.currentIndex >= len(i.objects) {
		return nil, ErrNoMoreItem
	}

	// TODO locking
	coorCli, err := globals.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer coorCli.Close()

	if !i.inited {
		i.inited = true

		// Resolve where this client is, for same-location node preference.
		findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(globals.Local.ExternalIP))
		if err != nil {
			return nil, fmt.Errorf("finding client location: %w", err)
		}
		i.cliLocation = findCliLocResp.Location
	}

	item, err := i.doMove(coorCli)
	i.currentIndex++
	return item, err
}

// doMove downloads the object at the current cursor: it looks up the nodes
// holding the replica, picks one (preferring the client's location), and
// opens a reader for the file.
func (i *RepObjectIterator) doMove(coorCli *coormq.PoolClient) (*IterDownloadingObject, error) {
	repData := i.objectRepData[i.currentIndex]
	if len(repData.NodeIDs) == 0 {
		return nil, fmt.Errorf("no node has this file %s", repData.FileHash)
	}

	getNodesResp, err := coorCli.GetNodes(coormq.NewGetNodes(repData.NodeIDs))
	if err != nil {
		return nil, fmt.Errorf("getting nodes: %w", err)
	}

	downloadNodes := lo.Map(getNodesResp.Nodes, func(node model.Node, index int) DownloadNodeInfo {
		return DownloadNodeInfo{
			Node:           node,
			IsSameLocation: node.LocationID == i.cliLocation.LocationID,
		}
	})

	// Choose a node to download from.
	downloadNode := i.chooseDownloadNode(downloadNodes)

	// If the client and the node are in the same location, connect to the
	// node via its internal (local) address.
	nodeIP := downloadNode.Node.ExternalIP
	if downloadNode.IsSameLocation {
		nodeIP = downloadNode.Node.LocalIP

		logger.Infof("client and node %d are at the same location, use local ip\n", downloadNode.Node.NodeID)
	}

	reader, err := downloadFile(i.downloadCtx, downloadNode.Node.NodeID, nodeIP, repData.FileHash)
	if err != nil {
		return nil, fmt.Errorf("rep read failed, err: %w", err)
	}
	return &IterDownloadingObject{
		Object: i.objects[i.currentIndex],
		File:   reader,
	}, nil
}

// Close runs the OnClosing hook, if one was set.
func (i *RepObjectIterator) Close() {
	if i.OnClosing == nil {
		return
	}
	i.OnClosing()
}

// chooseDownloadNode picks one node to download from:
//  1. a random node among those in the same location as the client;
//  2. otherwise, a random node among all candidates.
func (i *RepObjectIterator) chooseDownloadNode(entries []DownloadNodeInfo) DownloadNodeInfo {
	var sameLoc []DownloadNodeInfo
	for _, entry := range entries {
		if entry.IsSameLocation {
			sameLoc = append(sameLoc, entry)
		}
	}
	if len(sameLoc) > 0 {
		return sameLoc[rand.Intn(len(sameLoc))]
	}

	return entries[rand.Intn(len(entries))]
}

// downloadFile opens a reader for fileHash, preferring the local IPFS
// daemon when one is configured and falling back to the remote node.
func downloadFile(ctx *DownloadContext, nodeID int64, nodeIP string, fileHash string) (io.ReadCloser, error) {
	if globals.IPFSPool != nil {
		logger.Infof("try to use local IPFS to download file")

		reader, err := downloadFromLocalIPFS(ctx, fileHash)
		if err == nil {
			return reader, nil
		}

		logger.Warnf("download from local IPFS failed, so try to download from node %s, err: %s", nodeIP, err.Error())
	}

	return downloadFromNode(ctx, nodeID, nodeIP, fileHash)
}

// downloadFromNode fetches fileHash from the IPFS daemon of the given node
// through the agent's GRPC service. A distributed read lock is held until
// the returned reader is closed.
func downloadFromNode(ctx *DownloadContext, nodeID int64, nodeIP string, fileHash string) (io.ReadCloser, error) {
	// Second-stage lock acquisition: for downloading the file from IPFS.
	mutex, err := reqbuilder.NewBuilder().
		IPFS().ReadOneRep(nodeID, fileHash).
		MutexLock(ctx.Distlock)
	if err != nil {
		return nil, fmt.Errorf("acquire locks failed, err: %w", err)
	}

	// Connect to the agent via GRPC.
	agtCli, err := globals.AgentRPCPool.Acquire(nodeIP)
	if err != nil {
		// Release the distributed lock on every error path — the original
		// returned here without unlocking, leaking the lock.
		mutex.Unlock()
		return nil, fmt.Errorf("new agent grpc client: %w", err)
	}

	reader, err := agtCli.GetIPFSFile(fileHash)
	if err != nil {
		mutex.Unlock()
		return nil, fmt.Errorf("getting ipfs file: %w", err)
	}

	// NOTE(review): agtCli is never released back to the pool — confirm
	// whether the returned reader takes ownership of the connection.
	reader = myio.AfterReadClosed(reader, func(io.ReadCloser) {
		mutex.Unlock()
	})
	return reader, nil
}

// downloadFromLocalIPFS opens fileHash from the local IPFS daemon. When
// this process is itself a node, a distributed read lock is taken and held
// until the returned reader is closed.
func downloadFromLocalIPFS(ctx *DownloadContext, fileHash string) (io.ReadCloser, error) {
	onClosed := func() {}
	if globals.Local.NodeID != nil {
		// Second-stage lock acquisition: for downloading the file from IPFS.
		mutex, err := reqbuilder.NewBuilder().
			IPFS().ReadOneRep(*globals.Local.NodeID, fileHash).
			MutexLock(ctx.Distlock)
		if err != nil {
			return nil, fmt.Errorf("acquire locks failed, err: %w", err)
		}
		onClosed = func() {
			mutex.Unlock()
		}
	}

	ipfsCli, err := globals.IPFSPool.Acquire()
	if err != nil {
		// Release the lock on every error path — the original returned here
		// without calling onClosed, leaking the distributed lock.
		onClosed()
		return nil, fmt.Errorf("new ipfs client: %w", err)
	}

	// NOTE(review): ipfsCli is never released back to the pool — confirm
	// whether the returned reader takes ownership of the client.
	reader, err := ipfsCli.OpenRead(fileHash)
	if err != nil {
		onClosed()
		return nil, fmt.Errorf("read ipfs file failed, err: %w", err)
	}

	reader = myio.AfterReadClosed(reader, func(io.ReadCloser) {
		onClosed()
	})
	return reader, nil
}

+ 30
- 0
common/pkgs/mq/agent/agent.go View File

@@ -0,0 +1,30 @@
package agent

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
)

// AgentService is the agent-status RPC surface an agent serves over MQ.
type AgentService interface {
	GetState(msg *GetState) (*GetStateResp, *mq.CodeMessage)
}

// Get the agent's state.
var _ = Register(AgentService.GetState)

// GetState asks an agent for its current state.
type GetState struct {
}

// GetStateResp carries the agent's state; IPFSState describes the state of
// the agent's IPFS service.
type GetStateResp struct {
	IPFSState string `json:"ipfsState"`
}

func NewGetState() GetState {
	return GetState{}
}
func NewGetStateResp(ipfsState string) GetStateResp {
	return GetStateResp{
		IPFSState: ipfsState,
	}
}

// GetState sends the request to the agent and waits for its response.
func (client *Client) GetState(msg GetState, opts ...mq.RequestOption) (*GetStateResp, error) {
	return mq.Request[GetStateResp](client.rabbitCli, msg, opts...)
}

+ 108
- 0
common/pkgs/mq/agent/cache.go View File

@@ -0,0 +1,108 @@
package agent

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// CacheService is the cache-management RPC surface an agent serves over MQ.
type CacheService interface {
	CheckCache(msg *CheckCache) (*CheckCacheResp, *mq.CodeMessage)

	StartCacheMovePackage(msg *StartCacheMovePackage) (*StartCacheMovePackageResp, *mq.CodeMessage)
	WaitCacheMovePackage(msg *WaitCacheMovePackage) (*WaitCacheMovePackageResp, *mq.CodeMessage)
}

// Check the IPFS caches on this node.
var _ = Register(CacheService.CheckCache)

// Operations the agent may be asked to perform on a cache entry.
const (
	CHECK_IPFS_RESP_OP_DELETE_TEMP = "DeleteTemp"
	CHECK_IPFS_RESP_OP_CREATE_TEMP = "CreateTemp"
)

type CheckCache struct {
	IsComplete bool          `json:"isComplete"`
	Caches     []model.Cache `json:"caches"`
}
type CheckCacheResp struct {
	Entries []CheckIPFSRespEntry `json:"entries"`
}
type CheckIPFSRespEntry struct {
	FileHash  string `json:"fileHash"`
	Operation string `json:"operation"`
}

func NewCheckCache(isComplete bool, caches []model.Cache) CheckCache {
	return CheckCache{
		IsComplete: isComplete,
		Caches:     caches,
	}
}
func NewCheckCacheResp(entries []CheckIPFSRespEntry) CheckCacheResp {
	return CheckCacheResp{
		Entries: entries,
	}
}
func NewCheckCacheRespEntry(fileHash string, op string) CheckIPFSRespEntry {
	return CheckIPFSRespEntry{
		FileHash:  fileHash,
		Operation: op,
	}
}
func (client *Client) CheckCache(msg CheckCache, opts ...mq.RequestOption) (*CheckCacheResp, error) {
	return mq.Request[CheckCacheResp](client.rabbitCli, msg, opts...)
}

// Start moving a Package's cache to this node.
var _ = Register(CacheService.StartCacheMovePackage)

type StartCacheMovePackage struct {
	UserID    int64 `json:"userID"`
	PackageID int64 `json:"packageID"`
}
type StartCacheMovePackageResp struct {
	TaskID string `json:"taskID"`
}

func NewStartCacheMovePackage(userID int64, packageID int64) StartCacheMovePackage {
	return StartCacheMovePackage{
		UserID:    userID,
		PackageID: packageID,
	}
}
func NewStartCacheMovePackageResp(taskID string) StartCacheMovePackageResp {
	return StartCacheMovePackageResp{
		TaskID: taskID,
	}
}
func (client *Client) StartCacheMovePackage(msg StartCacheMovePackage, opts ...mq.RequestOption) (*StartCacheMovePackageResp, error) {
	return mq.Request[StartCacheMovePackageResp](client.rabbitCli, msg, opts...)
}

// Wait for a cache-move task started by StartCacheMovePackage.
var _ = Register(CacheService.WaitCacheMovePackage)

type WaitCacheMovePackage struct {
	TaskID        string `json:"taskID"`
	WaitTimeoutMs int64  `json:"waitTimeout"`
}
type WaitCacheMovePackageResp struct {
	IsComplete bool   `json:"isComplete"`
	Error      string `json:"error"`
}

func NewWaitCacheMovePackage(taskID string, waitTimeoutMs int64) WaitCacheMovePackage {
	return WaitCacheMovePackage{
		TaskID:        taskID,
		WaitTimeoutMs: waitTimeoutMs,
	}
}
func NewWaitCacheMovePackageResp(isComplete bool, err string) WaitCacheMovePackageResp {
	return WaitCacheMovePackageResp{
		IsComplete: isComplete,
		Error:      err,
	}
}
func (client *Client) WaitCacheMovePackage(msg WaitCacheMovePackage, opts ...mq.RequestOption) (*WaitCacheMovePackageResp, error) {
	return mq.Request[WaitCacheMovePackageResp](client.rabbitCli, msg, opts...)
}

+ 61
- 0
common/pkgs/mq/agent/client.go View File

@@ -0,0 +1,61 @@
package agent

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
stgmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq"
)

// Client is an MQ client bound to one agent's queue.
type Client struct {
	rabbitCli *mq.RabbitMQClient
	id        int64 // the target agent's node ID
}

// NewClient connects a RabbitMQ client to the queue of agent id.
func NewClient(id int64, cfg *stgmq.Config) (*Client, error) {
	rabbitCli, err := mq.NewRabbitMQClient(cfg.MakeConnectingURL(), stgmq.MakeAgentQueueName(id), "")
	if err != nil {
		return nil, err
	}

	return &Client{
		rabbitCli: rabbitCli,
		id:        id,
	}, nil
}

func (c *Client) Close() {
	c.rabbitCli.Close()
}

// PoolClient is a Client whose Close returns it to its owning Pool.
type PoolClient struct {
	*Client
	owner *Pool
}

func (c *PoolClient) Close() {
	c.owner.Release(c)
}

// Pool hands out agent clients.
// NOTE(review): Acquire dials a brand-new connection on every call and
// Release simply closes it — no connections are actually reused; confirm
// whether real pooling is intended here.
type Pool struct {
	mqcfg *stgmq.Config
}

func NewPool(mqcfg *stgmq.Config) *Pool {
	return &Pool{
		mqcfg: mqcfg,
	}
}
func (p *Pool) Acquire(id int64) (*PoolClient, error) {
	cli, err := NewClient(id, p.mqcfg)
	if err != nil {
		return nil, err
	}

	return &PoolClient{
		Client: cli,
		owner:  p,
	}, nil
}

func (p *Pool) Release(cli *PoolClient) {
	cli.Client.Close()
}

+ 60
- 0
common/pkgs/mq/agent/object.go View File

@@ -0,0 +1,60 @@
package agent

import "gitlink.org.cn/cloudream/common/pkgs/mq"

// ObjectService is the object-pinning RPC surface an agent serves over MQ.
type ObjectService interface {
	StartPinningObject(msg *StartPinningObject) (*StartPinningObjectResp, *mq.CodeMessage)
	WaitPinningObject(msg *WaitPinningObject) (*WaitPinningObjectResp, *mq.CodeMessage)
}

// Start a task that pins an object.
var _ = Register(ObjectService.StartPinningObject)

type StartPinningObject struct {
	FileHash string `json:"fileHash"`
}
type StartPinningObjectResp struct {
	TaskID string `json:"taskID"`
}

func NewStartPinningObject(fileHash string) StartPinningObject {
	return StartPinningObject{
		FileHash: fileHash,
	}
}
func NewStartPinningObjectResp(taskID string) StartPinningObjectResp {
	return StartPinningObjectResp{
		TaskID: taskID,
	}
}
func (client *Client) StartPinningObject(msg StartPinningObject, opts ...mq.RequestOption) (*StartPinningObjectResp, error) {
	return mq.Request[StartPinningObjectResp](client.rabbitCli, msg, opts...)
}

// Wait for an object-pinning task started by StartPinningObject.
var _ = Register(ObjectService.WaitPinningObject)

type WaitPinningObject struct {
	TaskID        string `json:"taskID"`
	WaitTimeoutMs int64  `json:"waitTimeout"`
}
type WaitPinningObjectResp struct {
	IsComplete bool   `json:"isComplete"`
	Error      string `json:"error"`
}

func NewWaitPinningObject(taskID string, waitTimeoutMs int64) WaitPinningObject {
	return WaitPinningObject{
		TaskID:        taskID,
		WaitTimeoutMs: waitTimeoutMs,
	}
}
func NewWaitPinningObjectResp(isComplete bool, err string) WaitPinningObjectResp {
	return WaitPinningObjectResp{
		IsComplete: isComplete,
		Error:      err,
	}
}
func (client *Client) WaitPinningObject(msg WaitPinningObject, opts ...mq.RequestOption) (*WaitPinningObjectResp, error) {
	return mq.Request[WaitPinningObjectResp](client.rabbitCli, msg, opts...)
}

+ 73
- 0
common/pkgs/mq/agent/server.go View File

@@ -0,0 +1,73 @@
package agent

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq"
)

// Service aggregates every RPC surface an agent serves over MQ.
type Service interface {
	ObjectService

	StorageService

	CacheService

	AgentService
}

// Server consumes the agent's MQ queue and dispatches messages to service.
type Server struct {
	service   Service
	rabbitSvr mq.RabbitMQServer

	OnError func(err error) // NOTE(review): not invoked anywhere in this file — confirm intended usage
}

// NewServer builds an MQ server on the queue of agent id, dispatching each
// message through the package-level message dispatcher.
func NewServer(svc Service, id int64, cfg *mymq.Config) (*Server, error) {
	srv := &Server{
		service: svc,
	}

	rabbitSvr, err := mq.NewRabbitMQServer(
		cfg.MakeConnectingURL(),
		mymq.MakeAgentQueueName(id),
		func(msg *mq.Message) (*mq.Message, error) {
			return msgDispatcher.Handle(srv.service, msg)
		},
	)
	if err != nil {
		return nil, err
	}

	srv.rabbitSvr = *rabbitSvr

	return srv, nil
}

func (s *Server) Stop() {
	s.rabbitSvr.Close()
}

func (s *Server) Serve() error {
	return s.rabbitSvr.Serve()
}

// msgDispatcher routes incoming messages to the handlers installed by
// Register / RegisterNoReply below.
var msgDispatcher mq.MessageDispatcher = mq.NewMessageDispatcher()

// Register installs one of Service's interface methods as the handler for
// its message type, and registers both the request and response message
// types.
// TODO constraint needed: Service must implement TSvc.
func Register[TSvc any, TReq any, TResp any](svcFn func(svc TSvc, msg *TReq) (*TResp, *mq.CodeMessage)) any {
	mq.AddServiceFn(&msgDispatcher, svcFn)
	mq.RegisterMessage[TReq]()
	mq.RegisterMessage[TResp]()

	return nil
}

// RegisterNoReply installs one of Service's *no-result* interface methods
// as the handler for its message type, and registers the request type.
// TODO constraint needed: Service must implement TSvc.
func RegisterNoReply[TSvc any, TReq any](svcFn func(svc TSvc, msg *TReq)) any {
	mq.AddNoRespServiceFn(&msgDispatcher, svcFn)
	mq.RegisterMessage[TReq]()

	return nil
}

+ 188
- 0
common/pkgs/mq/agent/storage.go View File

@@ -0,0 +1,188 @@
package agent

import (
"gitlink.org.cn/cloudream/common/models"
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// StorageService is the storage-management RPC surface an agent serves
// over MQ.
type StorageService interface {
	StartStorageLoadPackage(msg *StartStorageLoadPackage) (*StartStorageLoadPackageResp, *mq.CodeMessage)

	WaitStorageLoadPackage(msg *WaitStorageLoadPackage) (*WaitStorageLoadPackageResp, *mq.CodeMessage)

	StorageCheck(msg *StorageCheck) (*StorageCheckResp, *mq.CodeMessage)

	StartStorageCreatePackage(msg *StartStorageCreatePackage) (*StartStorageCreatePackageResp, *mq.CodeMessage)

	WaitStorageCreatePackage(msg *WaitStorageCreatePackage) (*WaitStorageCreatePackageResp, *mq.CodeMessage)
}

// Start a task that loads a Package onto a storage.
var _ = Register(StorageService.StartStorageLoadPackage)

type StartStorageLoadPackage struct {
	UserID    int64 `json:"userID"`
	PackageID int64 `json:"packageID"`
	StorageID int64 `json:"storageID"`
}
type StartStorageLoadPackageResp struct {
	TaskID string `json:"taskID"`
}

func NewStartStorageLoadPackage(userID int64, packageID int64, storageID int64) StartStorageLoadPackage {
	return StartStorageLoadPackage{
		UserID:    userID,
		PackageID: packageID,
		StorageID: storageID,
	}
}
func NewStartStorageLoadPackageResp(taskID string) StartStorageLoadPackageResp {
	return StartStorageLoadPackageResp{
		TaskID: taskID,
	}
}
func (client *Client) StartStorageLoadPackage(msg StartStorageLoadPackage, opts ...mq.RequestOption) (*StartStorageLoadPackageResp, error) {
	return mq.Request[StartStorageLoadPackageResp](client.rabbitCli, msg, opts...)
}

// Wait for a package-load task started by StartStorageLoadPackage.
var _ = Register(StorageService.WaitStorageLoadPackage)

type WaitStorageLoadPackage struct {
	TaskID        string `json:"taskID"`
	WaitTimeoutMs int64  `json:"waitTimeout"`
}
type WaitStorageLoadPackageResp struct {
	IsComplete bool   `json:"isComplete"`
	Error      string `json:"error"`
}

func NewWaitStorageLoadPackage(taskID string, waitTimeoutMs int64) WaitStorageLoadPackage {
	return WaitStorageLoadPackage{
		TaskID:        taskID,
		WaitTimeoutMs: waitTimeoutMs,
	}
}
func NewWaitStorageLoadPackageResp(isComplete bool, err string) WaitStorageLoadPackageResp {
	return WaitStorageLoadPackageResp{
		IsComplete: isComplete,
		Error:      err,
	}
}
func (client *Client) WaitStorageLoadPackage(msg WaitStorageLoadPackage, opts ...mq.RequestOption) (*WaitStorageLoadPackageResp, error) {
	return mq.Request[WaitStorageLoadPackageResp](client.rabbitCli, msg, opts...)
}

// Check a Storage's directory against the expected package list.
var _ = Register(StorageService.StorageCheck)

// Operations the coordinator may request per storage package entry.
const (
	CHECK_STORAGE_RESP_OP_DELETE     = "Delete"
	CHECK_STORAGE_RESP_OP_SET_NORMAL = "SetNormal"
)

type StorageCheck struct {
	StorageID  int64                  `json:"storageID"`
	Directory  string                 `json:"directory"`
	IsComplete bool                   `json:"isComplete"`
	Packages   []model.StoragePackage `json:"packages"`
}
type StorageCheckResp struct {
	DirectoryState string                  `json:"directoryState"`
	Entries        []StorageCheckRespEntry `json:"entries"`
}
type StorageCheckRespEntry struct {
	PackageID int64  `json:"packageID"`
	UserID    int64  `json:"userID"`
	Operation string `json:"operation"`
}

func NewStorageCheck(storageID int64, directory string, isComplete bool, packages []model.StoragePackage) StorageCheck {
	return StorageCheck{
		StorageID:  storageID,
		Directory:  directory,
		IsComplete: isComplete,
		Packages:   packages,
	}
}
func NewStorageCheckResp(dirState string, entries []StorageCheckRespEntry) StorageCheckResp {
	return StorageCheckResp{
		DirectoryState: dirState,
		Entries:        entries,
	}
}
func NewStorageCheckRespEntry(packageID int64, userID int64, op string) StorageCheckRespEntry {
	return StorageCheckRespEntry{
		PackageID: packageID,
		UserID:    userID,
		Operation: op,
	}
}
func (client *Client) StorageCheck(msg StorageCheck, opts ...mq.RequestOption) (*StorageCheckResp, error) {
	return mq.Request[StorageCheckResp](client.rabbitCli, msg, opts...)
}

// Start a task that creates a Package from files on a Storage.
var _ = Register(StorageService.StartStorageCreatePackage)

type StartStorageCreatePackage struct {
	UserID    int64                      `json:"userID"`
	BucketID  int64                      `json:"bucketID"`
	Name      string                     `json:"name"`
	StorageID int64                      `json:"storageID"`
	Path      string                     `json:"path"`
	Redundancy models.TypedRedundancyInfo `json:"redundancy"`
}
type StartStorageCreatePackageResp struct {
	TaskID string `json:"taskID"`
}

func NewStartStorageCreatePackage(userID int64, bucketID int64, name string, storageID int64, path string, redundancy models.TypedRedundancyInfo) StartStorageCreatePackage {
	return StartStorageCreatePackage{
		UserID:     userID,
		BucketID:   bucketID,
		Name:       name,
		StorageID:  storageID,
		Path:       path,
		Redundancy: redundancy,
	}
}
func NewStartStorageCreatePackageResp(taskID string) StartStorageCreatePackageResp {
	return StartStorageCreatePackageResp{
		TaskID: taskID,
	}
}
func (client *Client) StartStorageCreatePackage(msg StartStorageCreatePackage, opts ...mq.RequestOption) (*StartStorageCreatePackageResp, error) {
	return mq.Request[StartStorageCreatePackageResp](client.rabbitCli, msg, opts...)
}

// Wait for a create-package task started by StartStorageCreatePackage.
var _ = Register(StorageService.WaitStorageCreatePackage)

type WaitStorageCreatePackage struct {
	TaskID        string `json:"taskID"`
	WaitTimeoutMs int64  `json:"waitTimeout"`
}
type WaitStorageCreatePackageResp struct {
	IsComplete bool   `json:"isComplete"`
	Error      string `json:"error"`
	PackageID  int64  `json:"packageID"`
}

func NewWaitStorageCreatePackage(taskID string, waitTimeoutMs int64) WaitStorageCreatePackage {
	return WaitStorageCreatePackage{
		TaskID:        taskID,
		WaitTimeoutMs: waitTimeoutMs,
	}
}
func NewWaitStorageCreatePackageResp(isComplete bool, err string, packageID int64) WaitStorageCreatePackageResp {
	return WaitStorageCreatePackageResp{
		IsComplete: isComplete,
		Error:      err,
		PackageID:  packageID,
	}
}
func (client *Client) WaitStorageCreatePackage(msg WaitStorageCreatePackage, opts ...mq.RequestOption) (*WaitStorageCreatePackageResp, error) {
	return mq.Request[WaitStorageCreatePackageResp](client.rabbitCli, msg, opts...)
}

+ 14
- 0
common/pkgs/mq/config.go View File

@@ -0,0 +1,14 @@
package mq

import "fmt"

// Config holds the RabbitMQ connection settings.
type Config struct {
	Address  string `json:"address"`
	Account  string `json:"account"`
	Password string `json:"password"`
	VHost    string `json:"vhost"`
}

// MakeConnectingURL assembles an AMQP URL of the form
// amqp://account:password@address<vhost>. VHost is appended verbatim, so it
// is expected to carry its leading "/" (e.g. "/" or "/myvhost").
func (cfg *Config) MakeConnectingURL() string {
	return "amqp://" + cfg.Account + ":" + cfg.Password + "@" + cfg.Address + cfg.VHost
}

+ 12
- 0
common/pkgs/mq/consts.go View File

@@ -0,0 +1,12 @@
package mq

import "fmt"

// Well-known queue names of the singleton services.
const (
	COORDINATOR_QUEUE_NAME = "Coordinator"
	SCANNER_QUEUE_NAME     = "Scanner"
)

// MakeAgentQueueName returns the MQ queue name of the agent with the given
// node ID.
func MakeAgentQueueName(id int64) string {
	return "Agent@" + fmt.Sprint(id)
}

+ 51
- 0
common/pkgs/mq/coordinator/agent.go View File

@@ -0,0 +1,51 @@
package coordinator

import "gitlink.org.cn/cloudream/common/pkgs/mq"

// AgentService is the agent-report RPC surface of the coordinator; both
// messages are fire-and-forget (no reply).
type AgentService interface {
	TempCacheReport(msg *TempCacheReport)

	AgentStatusReport(msg *AgentStatusReport)
}

// Sent by an agent to the coordinator to report temporarily cached data.
var _ = RegisterNoReply(AgentService.TempCacheReport)

type TempCacheReport struct {
	NodeID int64    `json:"nodeID"`
	Hashes []string `json:"hashes"`
}

func NewTempCacheReportBody(nodeID int64, hashes []string) TempCacheReport {
	return TempCacheReport{
		NodeID: nodeID,
		Hashes: hashes,
	}
}
func (client *Client) TempCacheReport(msg TempCacheReport) error {
	return mq.Send(client.rabbitCli, msg)
}

// Sent by an agent to the coordinator to report inter-node latencies and
// the reachability of IPFS and the local resource directory.
var _ = RegisterNoReply(AgentService.AgentStatusReport)

type AgentStatusReport struct {
	NodeID         int64   `json:"nodeID"`
	NodeDelayIDs   []int64 `json:"nodeDelayIDs"`
	NodeDelays     []int   `json:"nodeDelays"` // parallel to NodeDelayIDs
	IPFSStatus     string  `json:"ipfsStatus"`
	LocalDirStatus string  `json:"localDirStatus"`
}

func NewAgentStatusReportBody(nodeID int64, nodeDelayIDs []int64, nodeDelays []int, ipfsStatus string, localDirStatus string) AgentStatusReport {
	return AgentStatusReport{
		NodeID:         nodeID,
		NodeDelayIDs:   nodeDelayIDs,
		NodeDelays:     nodeDelays,
		IPFSStatus:     ipfsStatus,
		LocalDirStatus: localDirStatus,
	}
}
func (client *Client) AgentStatusReport(msg AgentStatusReport) error {
	return mq.Send(client.rabbitCli, msg)
}

+ 114
- 0
common/pkgs/mq/coordinator/bucket.go View File

@@ -0,0 +1,114 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// BucketService is the bucket-management RPC surface of the coordinator.
type BucketService interface {
	GetUserBuckets(msg *GetUserBuckets) (*GetUserBucketsResp, *mq.CodeMessage)

	GetBucketPackages(msg *GetBucketPackages) (*GetBucketPackagesResp, *mq.CodeMessage)

	CreateBucket(msg *CreateBucket) (*CreateBucketResp, *mq.CodeMessage)

	DeleteBucket(msg *DeleteBucket) (*DeleteBucketResp, *mq.CodeMessage)
}

// Get all buckets of a user.
var _ = Register(BucketService.GetUserBuckets)

type GetUserBuckets struct {
	UserID int64 `json:"userID"`
}
type GetUserBucketsResp struct {
	Buckets []model.Bucket `json:"buckets"`
}

func NewGetUserBuckets(userID int64) GetUserBuckets {
	return GetUserBuckets{
		UserID: userID,
	}
}
func NewGetUserBucketsResp(buckets []model.Bucket) GetUserBucketsResp {
	return GetUserBucketsResp{
		Buckets: buckets,
	}
}
func (client *Client) GetUserBuckets(msg GetUserBuckets) (*GetUserBucketsResp, error) {
	return mq.Request[GetUserBucketsResp](client.rabbitCli, msg)
}

// Get all Packages in a bucket.
var _ = Register(BucketService.GetBucketPackages)

type GetBucketPackages struct {
	UserID   int64 `json:"userID"`
	BucketID int64 `json:"bucketID"`
}
type GetBucketPackagesResp struct {
	Packages []model.Package `json:"packages"`
}

func NewGetBucketPackages(userID int64, bucketID int64) GetBucketPackages {
	return GetBucketPackages{
		UserID:   userID,
		BucketID: bucketID,
	}
}
func NewGetBucketPackagesResp(packages []model.Package) GetBucketPackagesResp {
	return GetBucketPackagesResp{
		Packages: packages,
	}
}
func (client *Client) GetBucketPackages(msg GetBucketPackages) (*GetBucketPackagesResp, error) {
	return mq.Request[GetBucketPackagesResp](client.rabbitCli, msg)
}

// Create a bucket.
var _ = Register(BucketService.CreateBucket)

type CreateBucket struct {
	UserID     int64  `json:"userID"`
	BucketName string `json:"bucketName"`
}
type CreateBucketResp struct {
	BucketID int64 `json:"bucketID"`
}

func NewCreateBucket(userID int64, bucketName string) CreateBucket {
	return CreateBucket{
		UserID:     userID,
		BucketName: bucketName,
	}
}
func NewCreateBucketResp(bucketID int64) CreateBucketResp {
	return CreateBucketResp{
		BucketID: bucketID,
	}
}
func (client *Client) CreateBucket(msg CreateBucket) (*CreateBucketResp, error) {
	return mq.Request[CreateBucketResp](client.rabbitCli, msg)
}

// Delete a bucket.
var _ = Register(BucketService.DeleteBucket)

type DeleteBucket struct {
	UserID   int64 `json:"userID"`
	BucketID int64 `json:"bucketID"`
}
type DeleteBucketResp struct{}

func NewDeleteBucket(userID int64, bucketID int64) DeleteBucket {
	return DeleteBucket{
		UserID:   userID,
		BucketID: bucketID,
	}
}
func NewDeleteBucketResp() DeleteBucketResp {
	return DeleteBucketResp{}
}
func (client *Client) DeleteBucket(msg DeleteBucket) (*DeleteBucketResp, error) {
	return mq.Request[DeleteBucketResp](client.rabbitCli, msg)
}

+ 33
- 0
common/pkgs/mq/coordinator/cache.go View File

@@ -0,0 +1,33 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
)

// CacheService is the cache-report RPC surface of the coordinator.
type CacheService interface {
	CachePackageMoved(msg *CachePackageMoved) (*CachePackageMovedResp, *mq.CodeMessage)
}

// Reports that a Package's objects were moved into a node's cache.
var _ = Register(CacheService.CachePackageMoved)

type CachePackageMoved struct {
	PackageID  int64    `json:"packageID"`
	NodeID     int64    `json:"nodeID"`
	FileHashes []string `json:"fileHashes"`
}
type CachePackageMovedResp struct{}

func NewCachePackageMoved(packageID int64, nodeID int64, fileHashes []string) CachePackageMoved {
	return CachePackageMoved{
		PackageID:  packageID,
		NodeID:     nodeID,
		FileHashes: fileHashes,
	}
}
func NewCachePackageMovedResp() CachePackageMovedResp {
	return CachePackageMovedResp{}
}
func (client *Client) CachePackageMoved(msg CachePackageMoved) (*CachePackageMovedResp, error) {
	return mq.Request[CachePackageMovedResp](client.rabbitCli, msg)
}

+ 59
- 0
common/pkgs/mq/coordinator/client.go View File

@@ -0,0 +1,59 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
stgmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq"
)

// Client is an MQ client bound to the coordinator's queue.
type Client struct {
	rabbitCli *mq.RabbitMQClient
}

// NewClient connects a RabbitMQ client to the coordinator queue.
func NewClient(cfg *stgmq.Config) (*Client, error) {
	rabbitCli, err := mq.NewRabbitMQClient(cfg.MakeConnectingURL(), stgmq.COORDINATOR_QUEUE_NAME, "")
	if err != nil {
		return nil, err
	}

	return &Client{
		rabbitCli: rabbitCli,
	}, nil
}

func (c *Client) Close() {
	c.rabbitCli.Close()
}

// PoolClient is a Client whose Close returns it to its owning Pool.
type PoolClient struct {
	*Client
	owner *Pool
}

func (c *PoolClient) Close() {
	c.owner.Release(c)
}

// Pool hands out coordinator clients.
// NOTE(review): Acquire dials a brand-new connection on every call and
// Release simply closes it — no connections are actually reused; confirm
// whether real pooling is intended here.
type Pool struct {
	mqcfg *stgmq.Config
}

func NewPool(mqcfg *stgmq.Config) *Pool {
	return &Pool{
		mqcfg: mqcfg,
	}
}
func (p *Pool) Acquire() (*PoolClient, error) {
	cli, err := NewClient(p.mqcfg)
	if err != nil {
		return nil, err
	}

	return &PoolClient{
		Client: cli,
		owner:  p,
	}, nil
}

func (p *Pool) Release(cli *PoolClient) {
	cli.Client.Close()
}

+ 60
- 0
common/pkgs/mq/coordinator/common.go View File

@@ -0,0 +1,60 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// CommonService is the misc lookup RPC surface of the coordinator.
type CommonService interface {
	FindClientLocation(msg *FindClientLocation) (*FindClientLocationResp, *mq.CodeMessage)

	GetECConfig(msg *GetECConfig) (*GetECConfigResp, *mq.CodeMessage)
}

// Find the location (region) that the given IP belongs to.
var _ = Register(CommonService.FindClientLocation)

type FindClientLocation struct {
	IP string `json:"ip"`
}
type FindClientLocationResp struct {
	Location model.Location `json:"location"`
}

func NewFindClientLocation(ip string) FindClientLocation {
	return FindClientLocation{
		IP: ip,
	}
}
func NewFindClientLocationResp(location model.Location) FindClientLocationResp {
	return FindClientLocationResp{
		Location: location,
	}
}
func (client *Client) FindClientLocation(msg FindClientLocation) (*FindClientLocationResp, error) {
	return mq.Request[FindClientLocationResp](client.rabbitCli, msg)
}

// Get the concrete EC configuration by name.
var _ = Register(CommonService.GetECConfig)

type GetECConfig struct {
	ECName string `json:"ecName"`
}
type GetECConfigResp struct {
	Config model.Ec `json:"config"`
}

func NewGetECConfig(ecName string) GetECConfig {
	return GetECConfig{
		ECName: ecName,
	}
}
func NewGetECConfigResp(config model.Ec) GetECConfigResp {
	return GetECConfigResp{
		Config: config,
	}
}
func (client *Client) GetECConfig(msg GetECConfig) (*GetECConfigResp, error) {
	return mq.Request[GetECConfigResp](client.rabbitCli, msg)
}

+ 15
- 0
common/pkgs/mq/coordinator/coordinator_test.go View File

@@ -0,0 +1,15 @@
package coordinator

import (
"testing"

. "github.com/smartystreets/goconvey/convey"
)

// TestSerder logs every message handler registered with the package-level
// dispatcher, making it easy to verify that all Register calls ran.
func TestSerder(t *testing.T) {
	Convey("输出注册的Handler", t, func() {
		// `for k := range` instead of `for k, _ := range` (go vet / gofmt -s).
		for k := range msgDispatcher.Handlers {
			t.Logf("(%s)", k)
		}
	})
}

+ 60
- 0
common/pkgs/mq/coordinator/node.go View File

@@ -0,0 +1,60 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// NodeService is the node lookup RPC surface of the coordinator.
type NodeService interface {
	GetUserNodes(msg *GetUserNodes) (*GetUserNodesResp, *mq.CodeMessage)

	GetNodes(msg *GetNodes) (*GetNodesResp, *mq.CodeMessage)
}

// Query the nodes available to a user.
var _ = Register(NodeService.GetUserNodes)

type GetUserNodes struct {
	UserID int64 `json:"userID"`
}
type GetUserNodesResp struct {
	Nodes []model.Node `json:"nodes"`
}

func NewGetUserNodes(userID int64) GetUserNodes {
	return GetUserNodes{
		UserID: userID,
	}
}
func NewGetUserNodesResp(nodes []model.Node) GetUserNodesResp {
	return GetUserNodesResp{
		Nodes: nodes,
	}
}
func (client *Client) GetUserNodes(msg GetUserNodes) (*GetUserNodesResp, error) {
	return mq.Request[GetUserNodesResp](client.rabbitCli, msg)
}

// Get info of the specified nodes.
var _ = Register(NodeService.GetNodes)

type GetNodes struct {
	NodeIDs []int64 `json:"nodeIDs"`
}
type GetNodesResp struct {
	Nodes []model.Node `json:"nodes"`
}

func NewGetNodes(nodeIDs []int64) GetNodes {
	return GetNodes{
		NodeIDs: nodeIDs,
	}
}
func NewGetNodesResp(nodes []model.Node) GetNodesResp {
	return GetNodesResp{
		Nodes: nodes,
	}
}
func (client *Client) GetNodes(msg GetNodes) (*GetNodesResp, error) {
	return mq.Request[GetNodesResp](client.rabbitCli, msg)
}

+ 60
- 0
common/pkgs/mq/coordinator/object.go View File

@@ -0,0 +1,60 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/models"
)

type ObjectService interface {
GetPackageObjectRepData(msg *GetPackageObjectRepData) (*GetPackageObjectRepDataResp, *mq.CodeMessage)

GetPackageObjectECData(msg *GetPackageObjectECData) (*GetPackageObjectECDataResp, *mq.CodeMessage)
}

// 获取指定Object的Rep数据,返回的Objects会按照ObjectID升序
var _ = Register(ObjectService.GetPackageObjectRepData)

type GetPackageObjectRepData struct {
PackageID int64 `json:"packageID"`
}
type GetPackageObjectRepDataResp struct {
Data []models.ObjectRepData `json:"data"`
}

func NewGetPackageObjectRepData(packageID int64) GetPackageObjectRepData {
return GetPackageObjectRepData{
PackageID: packageID,
}
}
func NewGetPackageObjectRepDataResp(data []models.ObjectRepData) GetPackageObjectRepDataResp {
return GetPackageObjectRepDataResp{
Data: data,
}
}
func (client *Client) GetPackageObjectRepData(msg GetPackageObjectRepData) (*GetPackageObjectRepDataResp, error) {
return mq.Request[GetPackageObjectRepDataResp](client.rabbitCli, msg)
}

// 获取指定Object的EC数据,返回的Objects会按照ObjectID升序
var _ = Register(ObjectService.GetPackageObjectECData)

type GetPackageObjectECData struct {
PackageID int64 `json:"packageID"`
}
type GetPackageObjectECDataResp struct {
Data []models.ObjectECData `json:"data"`
}

func NewGetPackageObjectECData(packageID int64) GetPackageObjectECData {
return GetPackageObjectECData{
PackageID: packageID,
}
}
func NewGetPackageObjectECDataResp(data []models.ObjectECData) GetPackageObjectECDataResp {
return GetPackageObjectECDataResp{
Data: data,
}
}
func (client *Client) GetPackageObjectECData(msg GetPackageObjectECData) (*GetPackageObjectECDataResp, error) {
return mq.Request[GetPackageObjectECDataResp](client.rabbitCli, msg)
}

+ 273
- 0
common/pkgs/mq/coordinator/package.go View File

@@ -0,0 +1,273 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/models"
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// PackageService is the coordinator-side contract for package management.
type PackageService interface {
	// GetPackage returns the basic information of a package.
	GetPackage(msg *GetPackage) (*GetPackageResp, *mq.CodeMessage)

	// GetPackageObjects lists every object in a package, ordered by
	// ascending ObjectID.
	GetPackageObjects(msg *GetPackageObjects) (*GetPackageObjectsResp, *mq.CodeMessage)

	// CreatePackage creates a package in a bucket.
	CreatePackage(msg *CreatePackage) (*CreatePackageResp, *mq.CodeMessage)

	// UpdateRepPackage applies adds/deletes to a replica-redundancy package.
	UpdateRepPackage(msg *UpdateRepPackage) (*UpdateRepPackageResp, *mq.CodeMessage)

	// UpdateECPackage applies adds/deletes to an EC-redundancy package.
	UpdateECPackage(msg *UpdateECPackage) (*UpdateECPackageResp, *mq.CodeMessage)

	// DeletePackage deletes a whole package.
	DeletePackage(msg *DeletePackage) (*DeletePackageResp, *mq.CodeMessage)

	// GetPackageCachedNodes reports per-node cache distribution of a package.
	GetPackageCachedNodes(msg *GetPackageCachedNodes) (*GetPackageCachedNodesResp, *mq.CodeMessage)

	// GetPackageLoadedNodes reports the nodes whose storage loaded a package.
	GetPackageLoadedNodes(msg *GetPackageLoadedNodes) (*GetPackageLoadedNodesResp, *mq.CodeMessage)
}

// 获取Package基本信息
var _ = Register(PackageService.GetPackage)

type GetPackage struct {
UserID int64 `json:"userID"`
PackageID int64 `json:"packageID"`
}
type GetPackageResp struct {
model.Package
}

func NewGetPackage(userID int64, packageID int64) GetPackage {
return GetPackage{
UserID: userID,
PackageID: packageID,
}
}
func NewGetPackageResp(pkg model.Package) GetPackageResp {
return GetPackageResp{
Package: pkg,
}
}
func (client *Client) GetPackage(msg GetPackage) (*GetPackageResp, error) {
return mq.Request[GetPackageResp](client.rabbitCli, msg)
}

// 查询Package中的所有Object,返回的Objects会按照ObjectID升序
var _ = Register(PackageService.GetPackageObjects)

type GetPackageObjects struct {
UserID int64 `json:"userID"`
PackageID int64 `json:"packageID"`
}
type GetPackageObjectsResp struct {
Objects []model.Object `json:"objects"`
}

func NewGetPackageObjects(userID int64, packageID int64) GetPackageObjects {
return GetPackageObjects{
UserID: userID,
PackageID: packageID,
}
}
func NewGetPackageObjectsResp(objects []model.Object) GetPackageObjectsResp {
return GetPackageObjectsResp{
Objects: objects,
}
}
func (client *Client) GetPackageObjects(msg GetPackageObjects) (*GetPackageObjectsResp, error) {
return mq.Request[GetPackageObjectsResp](client.rabbitCli, msg)
}

// 创建一个Package
var _ = Register(PackageService.CreatePackage)

type CreatePackage struct {
UserID int64 `json:"userID"`
BucketID int64 `json:"bucketID"`
Name string `json:"name"`
Redundancy models.TypedRedundancyInfo `json:"redundancy"`
}
type CreatePackageResp struct {
PackageID int64 `json:"packageID"`
}

func NewCreatePackage(userID int64, bucketID int64, name string, redundancy models.TypedRedundancyInfo) CreatePackage {
return CreatePackage{
UserID: userID,
BucketID: bucketID,
Name: name,
Redundancy: redundancy,
}
}
func NewCreatePackageResp(packageID int64) CreatePackageResp {
return CreatePackageResp{
PackageID: packageID,
}
}
func (client *Client) CreatePackage(msg CreatePackage) (*CreatePackageResp, error) {
return mq.Request[CreatePackageResp](client.rabbitCli, msg)
}

// 更新Rep备份模式的Package
var _ = Register(PackageService.UpdateRepPackage)

type UpdateRepPackage struct {
PackageID int64 `json:"packageID"`
Adds []AddRepObjectInfo `json:"objects"`
Deletes []int64 `json:"deletes"`
}
type UpdateRepPackageResp struct{}
type AddRepObjectInfo struct {
Path string `json:"path"`
Size int64 `json:"size,string"`
FileHash string `json:"fileHash"`
NodeIDs []int64 `json:"nodeIDs"`
}

func NewUpdateRepPackage(packageID int64, adds []AddRepObjectInfo, deletes []int64) UpdateRepPackage {
return UpdateRepPackage{
PackageID: packageID,
Adds: adds,
Deletes: deletes,
}
}
func NewUpdateRepPackageResp() UpdateRepPackageResp {
return UpdateRepPackageResp{}
}
func NewAddRepObjectInfo(path string, size int64, fileHash string, nodeIDs []int64) AddRepObjectInfo {
return AddRepObjectInfo{
Path: path,
Size: size,
FileHash: fileHash,
NodeIDs: nodeIDs,
}
}
func (client *Client) UpdateRepPackage(msg UpdateRepPackage) (*UpdateRepPackageResp, error) {
return mq.Request[UpdateRepPackageResp](client.rabbitCli, msg)
}

// 更新EC备份模式的Package
var _ = Register(PackageService.UpdateECPackage)

type UpdateECPackage struct {
PackageID int64 `json:"packageID"`
Adds []AddECObjectInfo `json:"objects"`
Deletes []int64 `json:"deletes"`
}
type UpdateECPackageResp struct{}
type AddECObjectInfo struct {
Path string `json:"path"`
Size int64 `json:"size,string"`
FileHashes []string `json:"fileHashes"`
NodeIDs []int64 `json:"nodeIDs"`
}

func NewUpdateECPackage(packageID int64, adds []AddECObjectInfo, deletes []int64) UpdateECPackage {
return UpdateECPackage{
PackageID: packageID,
Adds: adds,
Deletes: deletes,
}
}
func NewUpdateECPackageResp() UpdateECPackageResp {
return UpdateECPackageResp{}
}
func NewAddECObjectInfo(path string, size int64, fileHashes []string, nodeIDs []int64) AddECObjectInfo {
return AddECObjectInfo{
Path: path,
Size: size,
FileHashes: fileHashes,
NodeIDs: nodeIDs,
}
}
func (client *Client) UpdateECPackage(msg UpdateECPackage) (*UpdateECPackageResp, error) {
return mq.Request[UpdateECPackageResp](client.rabbitCli, msg)
}

// 删除对象
var _ = Register(PackageService.DeletePackage)

// DeletePackage identifies the package to delete on behalf of a user.
//
// Fix: the struct tags were `db:"…"`, unlike every other message in this
// package which uses `json:"…"`. Messages are serialized as JSON, so with
// db tags the wire keys would have been the Go field names
// ("UserID"/"PackageID") instead of "userID"/"packageID".
type DeletePackage struct {
	UserID    int64 `json:"userID"`
	PackageID int64 `json:"packageID"`
}
type DeletePackageResp struct{}

// NewDeletePackage builds the delete request.
func NewDeletePackage(userID int64, packageID int64) DeletePackage {
	return DeletePackage{
		UserID:    userID,
		PackageID: packageID,
	}
}

// NewDeletePackageResp builds the (empty) response.
func NewDeletePackageResp() DeletePackageResp {
	return DeletePackageResp{}
}
// DeletePackage performs the delete request/response round trip over RabbitMQ.
func (client *Client) DeletePackage(msg DeletePackage) (*DeletePackageResp, error) {
	return mq.Request[DeletePackageResp](client.rabbitCli, msg)
}

// 根据PackageID获取object分布情况
var _ = Register(PackageService.GetPackageCachedNodes)

type GetPackageCachedNodes struct {
UserID int64 `json:"userID"`
PackageID int64 `json:"packageID"`
}

type PackageCachedNodeInfo struct {
NodeID int64 `json:"nodeID"`
FileSize int64 `json:"fileSize"`
ObjectCount int64 `json:"objectCount"`
}

type GetPackageCachedNodesResp struct {
models.PackageCachingInfo
}

func NewGetPackageCachedNodes(userID int64, packageID int64) GetPackageCachedNodes {
return GetPackageCachedNodes{
UserID: userID,
PackageID: packageID,
}
}

func NewGetPackageCachedNodesResp(nodeInfos []models.NodePackageCachingInfo, packageSize int64, redunancyType string) GetPackageCachedNodesResp {
return GetPackageCachedNodesResp{
PackageCachingInfo: models.PackageCachingInfo{
NodeInfos: nodeInfos,
PackageSize: packageSize,
RedunancyType: redunancyType,
},
}
}

func (client *Client) GetPackageCachedNodes(msg GetPackageCachedNodes) (*GetPackageCachedNodesResp, error) {
return mq.Request[GetPackageCachedNodesResp](client.rabbitCli, msg)
}

// 根据PackageID获取storage分布情况
var _ = Register(PackageService.GetPackageLoadedNodes)

type GetPackageLoadedNodes struct {
UserID int64 `json:"userID"`
PackageID int64 `json:"packageID"`
}

type GetPackageLoadedNodesResp struct {
NodeIDs []int64 `json:"nodeIDs"`
}

func NewGetPackageLoadedNodes(userID int64, packageID int64) GetPackageLoadedNodes {
return GetPackageLoadedNodes{
UserID: userID,
PackageID: packageID,
}
}

func NewGetPackageLoadedNodesResp(nodeIDs []int64) GetPackageLoadedNodesResp {
return GetPackageLoadedNodesResp{
NodeIDs: nodeIDs,
}
}

func (client *Client) GetPackageLoadedNodes(msg GetPackageLoadedNodes) (*GetPackageLoadedNodesResp, error) {
return mq.Request[GetPackageLoadedNodesResp](client.rabbitCli, msg)
}

+ 81
- 0
common/pkgs/mq/coordinator/server.go View File

@@ -0,0 +1,81 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq"
)

// Service is the complete coordinator-side MQ interface: the union of every
// per-domain service the coordinator must implement.
type Service interface {
	AgentService

	BucketService

	CacheService

	CommonService

	NodeService

	ObjectService

	PackageService

	StorageService
}

// Server dispatches coordinator-queue messages to a Service implementation.
type Server struct {
	service   Service
	rabbitSvr mq.RabbitMQServer

	// OnError is an error-callback slot. NOTE(review): nothing in this file
	// invokes it — confirm it is wired up elsewhere.
	OnError func(err error)
}

// NewServer connects to RabbitMQ on the coordinator queue and routes every
// incoming message through the package-level dispatcher.
func NewServer(svc Service, cfg *mymq.Config) (*Server, error) {
	s := &Server{service: svc}

	handle := func(msg *mq.Message) (*mq.Message, error) {
		return msgDispatcher.Handle(s.service, msg)
	}

	rabbitSvr, err := mq.NewRabbitMQServer(cfg.MakeConnectingURL(), mymq.COORDINATOR_QUEUE_NAME, handle)
	if err != nil {
		return nil, err
	}
	s.rabbitSvr = *rabbitSvr

	return s, nil
}

// Stop closes the underlying RabbitMQ server.
func (s *Server) Stop() {
	s.rabbitSvr.Close()
}

// Serve blocks, processing queue messages until the server is stopped.
func (s *Server) Serve() error {
	return s.rabbitSvr.Serve()
}

// msgDispatcher routes incoming messages to the handler registered for
// their request type.
var msgDispatcher mq.MessageDispatcher = mq.NewMessageDispatcher()

// Register binds one Service interface method as the handler for its request
// message type, and registers both the request and the response message types.
// TODO needs a constraint: Service must implement the TSvc interface.
func Register[TSvc any, TReq any, TResp any](svcFn func(svc TSvc, msg *TReq) (*TResp, *mq.CodeMessage)) any {
	mq.AddServiceFn(&msgDispatcher, svcFn)
	mq.RegisterMessage[TReq]()
	mq.RegisterMessage[TResp]()

	return nil
}

// RegisterNoReply binds a Service method that produces *no response* as the
// handler for its request message type, registering only the request type.
// TODO needs a constraint: Service must implement the TSvc interface.
func RegisterNoReply[TSvc any, TReq any](svcFn func(svc TSvc, msg *TReq)) any {
	mq.AddNoRespServiceFn(&msgDispatcher, svcFn)
	mq.RegisterMessage[TReq]()

	return nil
}

+ 68
- 0
common/pkgs/mq/coordinator/storage.go View File

@@ -0,0 +1,68 @@
package coordinator

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

// StorageService is the coordinator-side contract for storage management.
type StorageService interface {
	// GetStorageInfo returns the record of one storage.
	GetStorageInfo(msg *GetStorageInfo) (*GetStorageInfoResp, *mq.CodeMessage)

	// StoragePackageLoaded records that a package was loaded into a storage.
	StoragePackageLoaded(msg *StoragePackageLoaded) (*StoragePackageLoadedResp, *mq.CodeMessage)
}

// 获取Storage信息
var _ = Register(StorageService.GetStorageInfo)

type GetStorageInfo struct {
UserID int64 `json:"userID"`
StorageID int64 `json:"storageID"`
}
type GetStorageInfoResp struct {
model.Storage
}

func NewGetStorageInfo(userID int64, storageID int64) GetStorageInfo {
return GetStorageInfo{
UserID: userID,
StorageID: storageID,
}
}
func NewGetStorageInfoResp(storageID int64, name string, nodeID int64, dir string, state string) GetStorageInfoResp {
return GetStorageInfoResp{
model.Storage{
StorageID: storageID,
Name: name,
NodeID: nodeID,
Directory: dir,
State: state,
},
}
}
func (client *Client) GetStorageInfo(msg GetStorageInfo) (*GetStorageInfoResp, error) {
return mq.Request[GetStorageInfoResp](client.rabbitCli, msg)
}

// 提交调度记录
var _ = Register(StorageService.StoragePackageLoaded)

type StoragePackageLoaded struct {
UserID int64 `json:"userID"`
PackageID int64 `json:"packageID"`
StorageID int64 `json:"storageID"`
}
type StoragePackageLoadedResp struct{}

func NewStoragePackageLoaded(userID int64, packageID int64, stgID int64) StoragePackageLoaded {
return StoragePackageLoaded{
UserID: userID,
PackageID: packageID,
StorageID: stgID,
}
}
func NewStoragePackageLoadedResp() StoragePackageLoadedResp {
return StoragePackageLoadedResp{}
}
func (client *Client) StoragePackageLoaded(msg StoragePackageLoaded) (*StoragePackageLoadedResp, error) {
return mq.Request[StoragePackageLoadedResp](client.rabbitCli, msg)
}

+ 59
- 0
common/pkgs/mq/scanner/client.go View File

@@ -0,0 +1,59 @@
package scanner

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
stgmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq"
)

// Client is a RabbitMQ client bound to the scanner queue.
type Client struct {
	rabbitCli *mq.RabbitMQClient
}

// NewClient dials RabbitMQ using cfg and targets the scanner queue.
func NewClient(cfg *stgmq.Config) (*Client, error) {
	rabbitCli, err := mq.NewRabbitMQClient(cfg.MakeConnectingURL(), stgmq.SCANNER_QUEUE_NAME, "")
	if err != nil {
		return nil, err
	}
	return &Client{rabbitCli: rabbitCli}, nil
}

// Close tears down the underlying RabbitMQ connection.
func (c *Client) Close() {
	c.rabbitCli.Close()
}

// PoolClient is a Client obtained from a Pool; its Close hands the client
// back to the owning pool instead of closing the connection directly.
type PoolClient struct {
	*Client
	owner *Pool
}

// Close returns the client to the pool it came from.
func (c *PoolClient) Close() {
	c.owner.Release(c)
}

// Pool creates scanner clients on demand.
//
// NOTE(review): despite the name, nothing is cached — Acquire always dials a
// fresh client and Release closes it. Confirm whether real pooling is
// intended here.
type Pool struct {
	mqcfg *stgmq.Config
}

// NewPool builds a Pool that dials with the given MQ configuration.
func NewPool(mqcfg *stgmq.Config) *Pool {
	return &Pool{mqcfg: mqcfg}
}

// Acquire dials a new client, wrapped so that Close releases it via the pool.
func (p *Pool) Acquire() (*PoolClient, error) {
	cli, err := NewClient(p.mqcfg)
	if err != nil {
		return nil, err
	}
	return &PoolClient{Client: cli, owner: p}, nil
}

// Release closes the wrapped client's connection.
func (p *Pool) Release(cli *PoolClient) {
	cli.Client.Close()
}

+ 50
- 0
common/pkgs/mq/scanner/event.go View File

@@ -0,0 +1,50 @@
package scanner

import (
"fmt"
"time"

"gitlink.org.cn/cloudream/common/pkgs/mq"
scevt "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner/event"
)

// EventService is the scanner-side contract for receiving posted events.
type EventService interface {
	// PostEvent handles one delivered event; it produces no reply.
	PostEvent(event *PostEvent)
}

// 投递Event
var _ = RegisterNoReply(EventService.PostEvent)

type PostEvent struct {
Event map[string]any `json:"event"`
IsEmergency bool `json:"isEmergency"` // 重要消息,优先处理
DontMerge bool `json:"dontMerge"` // 不可合并此消息
}

func NewPostEvent(event any, isEmergency bool, dontMerge bool) (PostEvent, error) {
mp, err := scevt.MessageToMap(event)
if err != nil {
return PostEvent{}, fmt.Errorf("message to map failed, err: %w", err)
}

return PostEvent{
Event: mp,
IsEmergency: isEmergency,
DontMerge: dontMerge,
}, nil
}
// PostEvent sends an event to the scanner queue (fire-and-forget).
// The first element of opts, if any, replaces the default 30-second
// send timeout.
func (cli *Client) PostEvent(event any, isEmergency bool, dontMerge bool, opts ...mq.SendOption) error {
	body, err := NewPostEvent(event, isEmergency, dontMerge)
	if err != nil {
		return fmt.Errorf("new post event body failed, err: %w", err)
	}

	opt := mq.SendOption{Timeout: time.Second * 30}
	if len(opts) > 0 {
		opt = opts[0]
	}

	return mq.Send(cli.rabbitCli, body, opt)
}

+ 17
- 0
common/pkgs/mq/scanner/event/agent_check_cache.go View File

@@ -0,0 +1,17 @@
package event

// AgentCheckCache asks an agent to verify entries of its cache.
type AgentCheckCache struct {
	NodeID int64 `json:"nodeID"`
	// FileHashes lists the hashes to check; nil (as opposed to empty)
	// requests a full check.
	FileHashes []string `json:"fileHashes"`
}

// NewAgentCheckCache builds the event for one node and an optional hash list.
func NewAgentCheckCache(nodeID int64, fileHashes []string) AgentCheckCache {
	evt := AgentCheckCache{NodeID: nodeID}
	evt.FileHashes = fileHashes
	return evt
}

func init() {
	Register[AgentCheckCache]()
}

+ 15
- 0
common/pkgs/mq/scanner/event/agent_check_state.go View File

@@ -0,0 +1,15 @@
package event

// AgentCheckState asks the scanner to check one agent's state.
type AgentCheckState struct {
	NodeID int64 `json:"nodeID"`
}

// NewAgentCheckState builds the event for the given node.
func NewAgentCheckState(nodeID int64) AgentCheckState {
	return AgentCheckState{NodeID: nodeID}
}

func init() {
	Register[AgentCheckState]()
}

+ 17
- 0
common/pkgs/mq/scanner/event/agent_check_storage.go View File

@@ -0,0 +1,17 @@
package event

// AgentCheckStorage asks an agent to verify packages held by a storage.
type AgentCheckStorage struct {
	StorageID int64 `json:"storageID"`
	// PackageIDs lists the packages to check; nil (as opposed to empty)
	// requests a full check.
	PackageIDs []int64 `json:"packageIDs"`
}

// NewAgentCheckStorage builds the event for one storage and an optional
// package list.
func NewAgentCheckStorage(storageID int64, packageIDs []int64) AgentCheckStorage {
	evt := AgentCheckStorage{StorageID: storageID}
	evt.PackageIDs = packageIDs
	return evt
}

func init() {
	Register[AgentCheckStorage]()
}

+ 15
- 0
common/pkgs/mq/scanner/event/check_cache.go View File

@@ -0,0 +1,15 @@
package event

// CheckCache requests a check of one node's cache records.
type CheckCache struct {
	NodeID int64 `json:"nodeID"`
}

// NewCheckCache builds the event for the given node.
func NewCheckCache(nodeID int64) CheckCache {
	return CheckCache{NodeID: nodeID}
}

func init() {
	Register[CheckCache]()
}

+ 15
- 0
common/pkgs/mq/scanner/event/check_package.go View File

@@ -0,0 +1,15 @@
package event

type CheckPackage struct {
PackageIDs []int64 `json:"packageIDs"`
}

func NewCheckPackage(packageIDs []int64) CheckPackage {
return CheckPackage{
PackageIDs: packageIDs,
}
}

func init() {
Register[CheckPackage]()
}

+ 15
- 0
common/pkgs/mq/scanner/event/check_rep_count.go View File

@@ -0,0 +1,15 @@
package event

type CheckRepCount struct {
FileHashes []string `json:"fileHashes"`
}

func NewCheckRepCount(fileHashes []string) CheckRepCount {
return CheckRepCount{
FileHashes: fileHashes,
}
}

func init() {
Register[CheckRepCount]()
}

+ 25
- 0
common/pkgs/mq/scanner/event/event.go View File

@@ -0,0 +1,25 @@
package event

import (
myreflect "gitlink.org.cn/cloudream/common/utils/reflect"
"gitlink.org.cn/cloudream/common/utils/serder"
)

// typeResolver maps event type names to their Go types for (de)serialization.
// NOTE(review): the meaning of the `true` argument is defined by the serder
// package — confirm against its documentation.
var typeResolver = serder.NewTypeNameResolver(true)

// serderOption carries the type information under the "@type" key of the
// serialized map.
var serderOption = serder.TypedSerderOption{
	TypeResolver:  &typeResolver,
	TypeFieldName: "@type",
}

// MapToMessage reconstructs a typed event value from its wire-map form.
func MapToMessage(m map[string]any) (any, error) {
	return serder.TypedMapToObject(m, serderOption)
}

// MessageToMap converts a typed event value into its wire-map form.
func MessageToMap(msg any) (map[string]any, error) {
	return serder.ObjectToTypedMap(msg, serderOption)
}

// Register makes event type T resolvable by name; every event type in this
// package calls it from init.
func Register[T any]() {
	typeResolver.Register(myreflect.TypeOf[T]())
}

+ 67
- 0
common/pkgs/mq/scanner/server.go View File

@@ -0,0 +1,67 @@
package scanner

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq"
)

// Service is the scanner-side MQ interface. (The original comment said
// "协调端接口" — "coordinator interface" — a copy/paste leftover from the
// coordinator package; this is the scanner service.)
type Service interface {
	EventService
}

// Server dispatches scanner-queue messages to a Service implementation.
type Server struct {
	service   Service
	rabbitSvr mq.RabbitMQServer

	// OnError is an error-callback slot. NOTE(review): nothing in this file
	// invokes it — confirm it is wired up elsewhere.
	OnError func(err error)
}

// NewServer connects to RabbitMQ on the scanner queue and routes every
// incoming message through the package-level dispatcher.
func NewServer(svc Service, cfg *mymq.Config) (*Server, error) {
	s := &Server{service: svc}

	handle := func(msg *mq.Message) (*mq.Message, error) {
		return msgDispatcher.Handle(s.service, msg)
	}

	rabbitSvr, err := mq.NewRabbitMQServer(cfg.MakeConnectingURL(), mymq.SCANNER_QUEUE_NAME, handle)
	if err != nil {
		return nil, err
	}
	s.rabbitSvr = *rabbitSvr

	return s, nil
}

// Stop closes the underlying RabbitMQ server.
func (s *Server) Stop() {
	s.rabbitSvr.Close()
}

// Serve blocks, processing queue messages until the server is stopped.
func (s *Server) Serve() error {
	return s.rabbitSvr.Serve()
}

// msgDispatcher routes incoming messages to the handler registered for
// their request type.
var msgDispatcher mq.MessageDispatcher = mq.NewMessageDispatcher()

// Register binds one Service interface method as the handler for its request
// message type, and registers both the request and the response message types.
// TODO needs a constraint: Service must implement the TSvc interface.
func Register[TSvc any, TReq any, TResp any](svcFn func(svc TSvc, msg *TReq) (*TResp, *mq.CodeMessage)) any {
	mq.AddServiceFn(&msgDispatcher, svcFn)
	mq.RegisterMessage[TReq]()
	mq.RegisterMessage[TResp]()

	return nil
}

// RegisterNoReply binds a Service method that produces *no response* as the
// handler for its request message type, registering only the request type.
// TODO needs a constraint: Service must implement the TSvc interface.
func RegisterNoReply[TSvc any, TReq any](svcFn func(svc TSvc, msg *TReq)) any {
	mq.AddNoRespServiceFn(&msgDispatcher, svcFn)
	mq.RegisterMessage[TReq]()

	return nil
}

+ 74
- 0
common/utils/config.go View File

@@ -0,0 +1,74 @@
package utils

import (
"fmt"
"regexp"
"strconv"

"github.com/beevik/etree"
)

// EcConfig describes one erasure-coding policy entry of sysSetting.xml.
//
// Fix: the previous `xml:"…"` struct tags were removed — encoding/xml
// ignores unexported fields, so the tags could never take effect; the
// fields are populated manually by GetEcPolicy via etree anyway.
type EcConfig struct {
	ecid  string // policy identifier (<ecid> element)
	class string // implementing class name (<class> element)
	n     int    // total block count; assumed data+parity — TODO confirm against sysSetting.xml
	k     int    // data block count — TODO confirm against sysSetting.xml
	w     int    // <w> parameter of the code; semantics defined by the EC implementation
	opt   int    // <opt> parameter; semantics defined by the EC implementation
}

// GetK returns the policy's k parameter.
func (r *EcConfig) GetK() int {
	return r.k
}

// GetN returns the policy's n parameter.
func (r *EcConfig) GetN() int {
	return r.n
}

// GetEcPolicy reads ../confs/sysSetting.xml (relative to the working
// directory) and returns all entries of the "ec.policy" attribute, keyed by
// ecid. It panics if the file cannot be read.
//
// NOTE(review): the fmt.Println below looks like leftover debug output —
// confirm and remove (also dropping the then-unused fmt import).
// NOTE(review): strconv.Atoi errors are silently ignored, leaving fields at
// zero on malformed values; and a missing <setting>/<ecid>/… element makes
// SelectElement return nil and panics on the subsequent call — confirm the
// config format is always well-formed.
// NOTE(review): returning *map is unconventional in Go (maps are reference
// types already), but the signature is kept for callers.
func GetEcPolicy() *map[string]EcConfig {
	doc := etree.NewDocument()
	if err := doc.ReadFromFile("../confs/sysSetting.xml"); err != nil {
		panic(err)
	}
	ecMap := make(map[string]EcConfig, 20)
	root := doc.SelectElement("setting")
	for _, attr := range root.SelectElements("attribute") {
		if name := attr.SelectElement("name"); name.Text() == "ec.policy" {
			for _, eci := range attr.SelectElements("value") {
				tt := EcConfig{}
				tt.ecid = eci.SelectElement("ecid").Text()
				tt.class = eci.SelectElement("class").Text()
				tt.n, _ = strconv.Atoi(eci.SelectElement("n").Text())
				tt.k, _ = strconv.Atoi(eci.SelectElement("k").Text())
				tt.w, _ = strconv.Atoi(eci.SelectElement("w").Text())
				tt.opt, _ = strconv.Atoi(eci.SelectElement("opt").Text())
				ecMap[tt.ecid] = tt
			}
		}
	}
	fmt.Println(ecMap)
	return &ecMap
}

// GetAgentIps reads ../confs/sysSetting.xml (relative to the working
// directory) and returns the IPv4 address extracted from every value of the
// "agents.addr" attribute. It panics if the file cannot be read.
//
// Fixes: the IPv4 regex is now compiled once instead of once per <value>
// element, and the stray debug `print(match)` was removed.
func GetAgentIps() []string {
	doc := etree.NewDocument()
	if err := doc.ReadFromFile("../confs/sysSetting.xml"); err != nil {
		panic(err)
	}
	root := doc.SelectElement("setting")

	// Compile the dotted-quad pattern once, outside the loops.
	ipRegex := regexp.MustCompile(`\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b`)

	var ips []string
	for _, attr := range root.SelectElements("attribute") {
		if name := attr.SelectElement("name"); name.Text() == "agents.addr" {
			for _, ip := range attr.SelectElements("value") {
				// FindString returns "" when no address matches; that empty
				// string is appended, matching the original behavior.
				ips = append(ips, ipRegex.FindString(ip.Text()))
			}
		}
	}

	return ips
}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save