
Merge remote-tracking branch 'origin/mv_sc'

Sydonian 2 years ago
parent
commit 6b12b4517a
23 changed files with 1790 additions and 0 deletions
  1. +2 -0 scanner/README.md
  2. +82 -0 scanner/go.mod
  3. +191 -0 scanner/go.sum
  4. +29 -0 scanner/internal/config/config.go
  5. +174 -0 scanner/internal/event/agent_check_cache.go
  6. +117 -0 scanner/internal/event/agent_check_state.go
  7. +199 -0 scanner/internal/event/agent_check_storage.go
  8. +84 -0 scanner/internal/event/check_cache.go
  9. +57 -0 scanner/internal/event/check_package.go
  10. +215 -0 scanner/internal/event/check_rep_count.go
  11. +155 -0 scanner/internal/event/check_rep_count_test.go
  12. +46 -0 scanner/internal/event/event.go
  13. +28 -0 scanner/internal/services/event.go
  14. +15 -0 scanner/internal/services/service.go
  15. +43 -0 scanner/internal/tickevent/batch_all_agent_check_cache.go
  16. +39 -0 scanner/internal/tickevent/batch_check_all_package.go
  17. +39 -0 scanner/internal/tickevent/batch_check_all_rep_count.go
  18. +42 -0 scanner/internal/tickevent/batch_check_all_storage.go
  19. +32 -0 scanner/internal/tickevent/check_agent_state.go
  20. +29 -0 scanner/internal/tickevent/check_cache.go
  21. +24 -0 scanner/internal/tickevent/tick_event.go
  22. +20 -0 scanner/magefiles/magefile.go
  23. +128 -0 scanner/main.go

+ 2 - 0 scanner/README.md

@@ -0,0 +1,2 @@
# storage-scanner


+ 82 - 0 scanner/go.mod

@@ -0,0 +1,82 @@
module gitlink.org.cn/cloudream/storage-scanner

go 1.20

require (
github.com/samber/lo v1.38.1
github.com/smartystreets/goconvey v1.8.0
gitlink.org.cn/cloudream/common v0.0.0
gitlink.org.cn/cloudream/storage-common v0.0.0
)

require (
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/jmoiron/sqlx v1.3.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/smartystreets/assertions v1.13.1 // indirect
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/text v0.8.0 // indirect
google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd // indirect
google.golang.org/grpc v1.54.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
)

require (
github.com/antonfisher/nested-logrus-formatter v1.3.1 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/imdario/mergo v0.3.15 // indirect
github.com/ipfs/boxo v0.8.0 // indirect
github.com/ipfs/go-cid v0.4.0 // indirect
github.com/ipfs/go-ipfs-api v0.6.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p v0.26.3 // indirect
github.com/magefile/mage v1.15.0 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.8.0 // indirect
github.com/multiformats/go-multibase v0.1.1 // indirect
github.com/multiformats/go-multicodec v0.8.1 // indirect
github.com/multiformats/go-multihash v0.2.1 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/otiai10/copy v1.12.0 // indirect
github.com/sirupsen/logrus v1.9.2 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/streadway/amqp v1.1.0 // indirect
github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c // indirect
github.com/zyedidia/generic v1.2.1 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 // indirect
golang.org/x/sys v0.7.0 // indirect
lukechampine.com/blake3 v1.1.7 // indirect
)

// Uncomment the following lines when running go mod tidy
replace gitlink.org.cn/cloudream/common => ../../common

replace gitlink.org.cn/cloudream/storage-common => ../storage-common

+ 191 - 0 scanner/go.sum

@@ -0,0 +1,191 @@
github.com/antonfisher/nested-logrus-formatter v1.3.1 h1:NFJIr+pzwv5QLHTPyKz9UMEoHck02Q9L0FP13b/xSbQ=
github.com/antonfisher/nested-logrus-formatter v1.3.1/go.mod h1:6WTfyWFkBc9+zyBaKIqRrg/KwMqBbodBjgbHjDz7zjA=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg=
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/ipfs/boxo v0.8.0 h1:UdjAJmHzQHo/j3g3b1bAcAXCj/GM6iTwvSlBDvPBNBs=
github.com/ipfs/boxo v0.8.0/go.mod h1:RIsi4CnTyQ7AUsNn5gXljJYZlQrHBMnJp94p73liFiA=
github.com/ipfs/go-cid v0.4.0 h1:a4pdZq0sx6ZSxbCizebnKiMCx/xI/aBBFlB73IgH4rA=
github.com/ipfs/go-cid v0.4.0/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/ipfs/go-ipfs-api v0.6.0 h1:JARgG0VTbjyVhO5ZfesnbXv9wTcMvoKRBLF1SzJqzmg=
github.com/ipfs/go-ipfs-api v0.6.0/go.mod h1:iDC2VMwN9LUpQV/GzEeZ2zNqd8NUdRmWcFM+K/6odf0=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
github.com/libp2p/go-libp2p v0.26.3 h1:6g/psubqwdaBqNNoidbRKSTBEYgaOuKBhHl8Q5tO+PM=
github.com/libp2p/go-libp2p v0.26.3/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8=
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8=
github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo=
github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/otiai10/copy v1.12.0 h1:cLMgSQnXBs1eehF0Wy/FAGsgDTDmAqFR7rQylBb1nDY=
github.com/otiai10/copy v1.12.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM=
github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y=
github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/streadway/amqp v1.1.0 h1:py12iX8XSyI7aN/3dUT8DFIDJazNJsVJdxNVEpnQTZM=
github.com/streadway/amqp v1.1.0/go.mod h1:WYSrTEYHOXHd0nwFeUXAe2G2hRnQT+deZJJf88uS9Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c h1:GGsyl0dZ2jJgVT+VvWBf/cNijrHRhkrTjkmp5wg7li0=
github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c/go.mod h1:xxcJeBb7SIUl/Wzkz1eVKJE/CB34YNrqX2TQI6jY9zs=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XDc=
github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 h1:3xJIFvzUFbu4ls0BTBYcgbCGhA63eAOEMxIHugyXJqA=
golang.org/x/exp v0.0.0-20230519143937-03e91628a987/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd h1:sLpv7bNL1AsX3fdnWh9WVh7ejIzXdOc1RRHGeAmeStU=
google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=

+ 29 - 0 scanner/internal/config/config.go

@@ -0,0 +1,29 @@
package config

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
log "gitlink.org.cn/cloudream/common/pkgs/logger"
c "gitlink.org.cn/cloudream/common/utils/config"
db "gitlink.org.cn/cloudream/storage-common/pkgs/db/config"
stgmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq"
)

type Config struct {
MinAvailableRepProportion float32 `json:"minAvailableRepProportion"` // the minimum proportion of all replicas that must be available; the required count is rounded up
NodeUnavailableSeconds int `json:"nodeUnavailableSeconds"` // a node is considered unavailable if its last report is older than this many seconds

Logger log.Config `json:"logger"`
DB db.Config `json:"db"`
RabbitMQ stgmq.Config `json:"rabbitMQ"`
DistLock distlock.Config `json:"distlock"`
}
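
// A hypothetical example of the corresponding config file, assuming c.DefaultLoad("scanner", &cfg)
// reads JSON keyed by the tags above (the logger/db/rabbitMQ/distlock sections follow the formats
// of their respective packages):
//
// {
//   "minAvailableRepProportion": 0.5,
//   "nodeUnavailableSeconds": 300,
//   "logger": { ... }, "db": { ... }, "rabbitMQ": { ... }, "distlock": { ... }
// }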

var cfg Config

func Init() error {
return c.DefaultLoad("scanner", &cfg)
}

func Cfg() *Config {
return &cfg
}

+ 174 - 0 scanner/internal/event/agent_check_cache.go

@@ -0,0 +1,174 @@
package event

import (
"database/sql"
"time"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/globals"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"

agtmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner/event"
)

type AgentCheckCache struct {
scevt.AgentCheckCache
}

func NewAgentCheckCache(nodeID int64, fileHashes []string) *AgentCheckCache {
return &AgentCheckCache{
AgentCheckCache: scevt.NewAgentCheckCache(nodeID, fileHashes),
}
}

func (t *AgentCheckCache) TryMerge(other Event) bool {
event, ok := other.(*AgentCheckCache)
if !ok {
return false
}

if event.NodeID != t.NodeID {
return false
}

// A nil FileHashes means a full check
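// For example (illustrative values): merging {NodeID: 1, FileHashes: [a, b]} with
// {NodeID: 1, FileHashes: [b, c]} yields FileHashes [a, b, c]; merging with
// {NodeID: 1, FileHashes: nil} yields nil, i.e. the merged event becomes a full check.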
if event.FileHashes == nil {
t.FileHashes = nil
} else if t.FileHashes != nil {
t.FileHashes = lo.Union(t.FileHashes, event.FileHashes)
}

return true
}

func (t *AgentCheckCache) Execute(execCtx ExecuteContext) {
log := logger.WithType[AgentCheckCache]("Event")
log.Debugf("begin with %v", logger.FormatStruct(t))
defer log.Debugf("end")

// TODO should check tasks also be sent to unavailable nodes?

if t.FileHashes == nil {
t.checkComplete(execCtx)
} else {
t.checkIncrement(execCtx)
}
}

func (t *AgentCheckCache) checkComplete(execCtx ExecuteContext) {
log := logger.WithType[AgentCheckCache]("Event")

mutex, err := reqbuilder.NewBuilder().
Metadata().
// In full-check mode, any Cache record of this node may be modified
Cache().WriteAny().
IPFS().
// In full-check mode, any replica data on this node may be modified
WriteAnyRep(t.NodeID).
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

caches, err := execCtx.Args.DB.Cache().GetNodeCaches(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("get node caches failed, err: %s", err.Error())
return
}

t.startCheck(execCtx, true, caches)
}

func (t *AgentCheckCache) checkIncrement(execCtx ExecuteContext) {
log := logger.WithType[AgentCheckCache]("Event")

builder := reqbuilder.NewBuilder()
for _, hash := range t.FileHashes {
builder.
// In incremental mode, no operation modifies Cache records
Metadata().Cache().ReadOne(t.NodeID, hash).
// Because of how the replica Write lock works, pinning (creating) a file does not require a Create lock
IPFS().WriteOneRep(t.NodeID, hash)
}
mutex, err := builder.MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

var caches []model.Cache
for _, hash := range t.FileHashes {
ch, err := execCtx.Args.DB.Cache().Get(execCtx.Args.DB.SQLCtx(), hash, t.NodeID)
// Skip records that do not exist
if err == sql.ErrNoRows {
continue
}

if err != nil {
log.WithField("FileHash", hash).WithField("NodeID", t.NodeID).Warnf("get cache failed, err: %s", err.Error())
return
}

caches = append(caches, ch)
}

t.startCheck(execCtx, false, caches)
}

func (t *AgentCheckCache) startCheck(execCtx ExecuteContext, isComplete bool, caches []model.Cache) {
log := logger.WithType[AgentCheckCache]("Event")

// Send the check request to the agent
agentClient, err := globals.AgentMQPool.Acquire(t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer agentClient.Close()

checkResp, err := agentClient.CheckCache(agtmq.NewCheckCache(isComplete, caches), mq.RequestOption{Timeout: time.Minute})
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("checking ipfs: %s", err.Error())
return
}

// Update the database according to the response
for _, entry := range checkResp.Entries {
switch entry.Operation {
case agtmq.CHECK_IPFS_RESP_OP_DELETE_TEMP:
err := execCtx.Args.DB.Cache().DeleteTemp(execCtx.Args.DB.SQLCtx(), entry.FileHash, t.NodeID)
if err != nil {
log.WithField("FileHash", entry.FileHash).
WithField("NodeID", t.NodeID).
Warnf("delete temp cache failed, err: %s", err.Error())
}

log.WithField("FileHash", entry.FileHash).
WithField("NodeID", t.NodeID).
Debugf("delete temp cache")

case agtmq.CHECK_IPFS_RESP_OP_CREATE_TEMP:
err := execCtx.Args.DB.Cache().CreateTemp(execCtx.Args.DB.SQLCtx(), entry.FileHash, t.NodeID)
if err != nil {
log.WithField("FileHash", entry.FileHash).
WithField("NodeID", t.NodeID).
Warnf("create temp cache failed, err: %s", err.Error())
}

log.WithField("FileHash", entry.FileHash).
WithField("NodeID", t.NodeID).
Debugf("create temp cache")
}
}
}

func init() {
RegisterMessageConvertor(func(msg scevt.AgentCheckCache) Event { return NewAgentCheckCache(msg.NodeID, msg.FileHashes) })
}

+ 117 - 0 scanner/internal/event/agent_check_state.go

@@ -0,0 +1,117 @@
package event

import (
"database/sql"
"time"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/consts"
"gitlink.org.cn/cloudream/storage-common/globals"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"
agtmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner/event"
"gitlink.org.cn/cloudream/storage-scanner/internal/config"
)

type AgentCheckState struct {
scevt.AgentCheckState
}

func NewAgentCheckState(nodeID int64) *AgentCheckState {
return &AgentCheckState{
AgentCheckState: scevt.NewAgentCheckState(nodeID),
}
}

func (t *AgentCheckState) TryMerge(other Event) bool {
event, ok := other.(*AgentCheckState)
if !ok {
return false
}

return t.NodeID == event.NodeID
}

func (t *AgentCheckState) Execute(execCtx ExecuteContext) {
log := logger.WithType[AgentCheckState]("Event")
log.Debugf("begin with %v", logger.FormatStruct(t))
defer log.Debugf("end")

mutex, err := reqbuilder.NewBuilder().
Metadata().
// Query and modify the node state
Node().WriteOne(t.NodeID).
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err == sql.ErrNoRows {
return
}

if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("get node by id failed, err: %s", err.Error())
return
}

agentClient, err := globals.AgentMQPool.Acquire(t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer agentClient.Close()

getResp, err := agentClient.GetState(agtmq.NewGetState(), mq.RequestOption{Timeout: time.Second * 30})
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("getting state: %s", err.Error())

// Check the last report time and mark the node unavailable if it has timed out
// TODO should nodes that have never reported be handled specially?
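// For example (hypothetical config): with NodeUnavailableSeconds = 300, a node whose last report
// is more than 5 minutes old is marked Unavailable here, and the file hashes it cached are posted
// below as a CheckRepCount event so that the lost replicas can be recreated elsewhere.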
if node.LastReportTime != nil && time.Since(*node.LastReportTime) > time.Duration(config.Cfg().NodeUnavailableSeconds)*time.Second {
err := execCtx.Args.DB.Node().UpdateState(execCtx.Args.DB.SQLCtx(), t.NodeID, consts.NodeStateUnavailable)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("set node state failed, err: %s", err.Error())
return
}

caches, err := execCtx.Args.DB.Cache().GetNodeCaches(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("get node caches failed, err: %s", err.Error())
return
}

// Replenish the replica count
execCtx.Executor.Post(NewCheckRepCount(lo.Map(caches, func(ch model.Cache, index int) string { return ch.FileHash })))
return
}
return
}

// Update the node state according to the response
if getResp.IPFSState != consts.IPFSStateOK {
log.WithField("NodeID", t.NodeID).Warnf("IPFS status is %s, set node state unavailable", getResp.IPFSState)

err := execCtx.Args.DB.Node().UpdateState(execCtx.Args.DB.SQLCtx(), t.NodeID, consts.NodeStateUnavailable)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("change node state failed, err: %s", err.Error())
}
return
}

// TODO if other states are added later, decide which of them allow setting the state back to Normal
err = execCtx.Args.DB.Node().UpdateState(execCtx.Args.DB.SQLCtx(), t.NodeID, consts.NodeStateNormal)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("change node state failed, err: %s", err.Error())
}
}

func init() {
RegisterMessageConvertor(func(msg scevt.AgentCheckState) Event { return NewAgentCheckState(msg.NodeID) })
}

+ 199 - 0 scanner/internal/event/agent_check_storage.go

@@ -0,0 +1,199 @@
package event

import (
"database/sql"
"time"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage-common/consts"
"gitlink.org.cn/cloudream/storage-common/globals"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"
agtmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner/event"
)

type AgentCheckStorage struct {
scevt.AgentCheckStorage
}

func NewAgentCheckStorage(storageID int64, packageIDs []int64) *AgentCheckStorage {
return &AgentCheckStorage{
AgentCheckStorage: scevt.NewAgentCheckStorage(storageID, packageIDs),
}
}

func (t *AgentCheckStorage) TryMerge(other Event) bool {
event, ok := other.(*AgentCheckStorage)
if !ok {
return false
}

if t.StorageID != event.StorageID {
return false
}

// A nil PackageIDs means a full check
if event.PackageIDs == nil {
t.PackageIDs = nil
} else if t.PackageIDs != nil {
t.PackageIDs = lo.Union(t.PackageIDs, event.PackageIDs)
}

return true
}

func (t *AgentCheckStorage) Execute(execCtx ExecuteContext) {
log := logger.WithType[AgentCheckStorage]("Event")
log.Debugf("begin with %v", logger.FormatStruct(t))
defer log.Debugf("end")

// No locks are taken for these reads: the check task runs repeatedly, so a single failure is not a big problem

stg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.SQLCtx(), t.StorageID)
if err != nil {
if err != sql.ErrNoRows {
log.WithField("StorageID", t.StorageID).Warnf("get storage failed, err: %s", err.Error())
}
return
}

node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.SQLCtx(), stg.NodeID)
if err != nil {
if err != sql.ErrNoRows {
log.WithField("StorageID", t.StorageID).Warnf("get storage node failed, err: %s", err.Error())
}
return
}

// TODO should check tasks also be sent to unavailable nodes?
if node.State != consts.NodeStateNormal {
return
}

if t.PackageIDs == nil {
t.checkComplete(execCtx, stg)
} else {
t.checkIncrement(execCtx, stg)
}
}

func (t *AgentCheckStorage) checkComplete(execCtx ExecuteContext, stg model.Storage) {
log := logger.WithType[AgentCheckStorage]("Event")

mutex, err := reqbuilder.NewBuilder().
Metadata().
// In full-check mode, Move records of this storage are queried and modified
StoragePackage().WriteAny().
Storage().
// In full-check mode, object files on this storage may be deleted
WriteAnyPackage(t.StorageID).
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

packages, err := execCtx.Args.DB.StoragePackage().GetAllByStorageID(execCtx.Args.DB.SQLCtx(), t.StorageID)
if err != nil {
log.WithField("StorageID", t.StorageID).Warnf("get storage packages failed, err: %s", err.Error())
return
}

t.startCheck(execCtx, stg, true, packages)
}

func (t *AgentCheckStorage) checkIncrement(execCtx ExecuteContext, stg model.Storage) {
log := logger.WithType[AgentCheckStorage]("Event")

mutex, err := reqbuilder.NewBuilder().
Metadata().
// Query and modify Move records (a collection-wide Write lock is required because multiple users may have Moved the same file)
StoragePackage().WriteAny().
Storage().
// Delete object files (a collection-wide Write lock is required because multiple users may have Moved the same file)
WriteAnyPackage(t.StorageID).
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

var packages []model.StoragePackage
for _, objID := range t.PackageIDs {
objs, err := execCtx.Args.DB.StoragePackage().GetAllByStorageAndPackageID(execCtx.Args.DB.SQLCtx(), t.StorageID, objID)
if err != nil {
log.WithField("StorageID", t.StorageID).
WithField("PackageID", objID).
Warnf("get storage package failed, err: %s", err.Error())
return
}

packages = append(packages, objs...)
}

t.startCheck(execCtx, stg, false, packages)
}

func (t *AgentCheckStorage) startCheck(execCtx ExecuteContext, stg model.Storage, isComplete bool, packages []model.StoragePackage) {
log := logger.WithType[AgentCheckStorage]("Event")

// Dispatch the task to the agent
agentClient, err := globals.AgentMQPool.Acquire(stg.NodeID)
if err != nil {
log.WithField("NodeID", stg.NodeID).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer agentClient.Close()

checkResp, err := agentClient.StorageCheck(agtmq.NewStorageCheck(stg.StorageID, stg.Directory, isComplete, packages), mq.RequestOption{Timeout: time.Minute})
if err != nil {
log.WithField("NodeID", stg.NodeID).Warnf("checking storage: %s", err.Error())
return
}

// Update the database according to the response
var chkObjIDs []int64
for _, entry := range checkResp.Entries {
switch entry.Operation {
case agtmq.CHECK_STORAGE_RESP_OP_DELETE:
err := execCtx.Args.DB.StoragePackage().Delete(execCtx.Args.DB.SQLCtx(), t.StorageID, entry.PackageID, entry.UserID)
if err != nil {
log.WithField("StorageID", t.StorageID).
WithField("PackageID", entry.PackageID).
Warnf("delete storage package failed, err: %s", err.Error())
}
chkObjIDs = append(chkObjIDs, entry.PackageID)

log.WithField("StorageID", t.StorageID).
WithField("PackageID", entry.PackageID).
WithField("UserID", entry.UserID).
Debugf("delete storage package")

case agtmq.CHECK_STORAGE_RESP_OP_SET_NORMAL:
err := execCtx.Args.DB.StoragePackage().SetStateNormal(execCtx.Args.DB.SQLCtx(), t.StorageID, entry.PackageID, entry.UserID)
if err != nil {
log.WithField("StorageID", t.StorageID).
WithField("PackageID", entry.PackageID).
Warnf("change storage package state failed, err: %s", err.Error())
}

log.WithField("StorageID", t.StorageID).
WithField("PackageID", entry.PackageID).
WithField("UserID", entry.UserID).
Debugf("set storage package normal")
}
}

if len(chkObjIDs) > 0 {
execCtx.Executor.Post(NewCheckPackage(chkObjIDs))
}
}

func init() {
RegisterMessageConvertor(func(msg scevt.AgentCheckStorage) Event { return NewAgentCheckStorage(msg.StorageID, msg.PackageIDs) })
}

+ 84 - 0 scanner/internal/event/check_cache.go

@@ -0,0 +1,84 @@
package event

import (
"database/sql"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-common/consts"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"
scevt "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner/event"
)

type CheckCache struct {
scevt.CheckCache
}

func NewCheckCache(nodeID int64) *CheckCache {
return &CheckCache{
CheckCache: scevt.NewCheckCache(nodeID),
}
}

func (t *CheckCache) TryMerge(other Event) bool {
event, ok := other.(*CheckCache)
if !ok {
return false
}
if event.NodeID != t.NodeID {
return false
}

return true
}

func (t *CheckCache) Execute(execCtx ExecuteContext) {
log := logger.WithType[CheckCache]("Event")
log.Debugf("begin with %v", logger.FormatStruct(t))
defer log.Debugf("end")

mutex, err := reqbuilder.NewBuilder().
Metadata().
// Query the node state
Node().ReadOne(t.NodeID).
// Delete all Cache records of this node
Cache().WriteAny().
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err == sql.ErrNoRows {
return
}
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("get node failed, err: %s", err.Error())
return
}

if node.State != consts.NodeStateUnavailable {
return
}

caches, err := execCtx.Args.DB.Cache().GetNodeCaches(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("get node caches failed, err: %s", err.Error())
return
}

err = execCtx.Args.DB.Cache().DeleteNodeAll(execCtx.Args.DB.SQLCtx(), t.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("delete node all caches failed, err: %s", err.Error())
return
}

execCtx.Executor.Post(NewCheckRepCount(lo.Map(caches, func(ch model.Cache, index int) string { return ch.FileHash })))
}

func init() {
RegisterMessageConvertor(func(msg scevt.CheckCache) Event { return NewCheckCache(msg.NodeID) })
}

+ 57 - 0 scanner/internal/event/check_package.go

@@ -0,0 +1,57 @@
package event

import (
"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"
scevt "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner/event"
)

type CheckPackage struct {
scevt.CheckPackage
}

func NewCheckPackage(objIDs []int64) *CheckPackage {
return &CheckPackage{
CheckPackage: scevt.NewCheckPackage(objIDs),
}
}

func (t *CheckPackage) TryMerge(other Event) bool {
event, ok := other.(*CheckPackage)
if !ok {
return false
}

t.PackageIDs = lo.Union(t.PackageIDs, event.PackageIDs)
return true
}

func (t *CheckPackage) Execute(execCtx ExecuteContext) {
log := logger.WithType[CheckPackage]("Event")
log.Debugf("begin with %v", logger.FormatStruct(t))
defer log.Debugf("end")

// Checking whether a package is no longer referenced requires reading the StoragePackage table
builder := reqbuilder.NewBuilder().Metadata().StoragePackage().ReadAny()
for _, objID := range t.PackageIDs {
builder.Metadata().Package().WriteOne(objID)
}
mutex, err := builder.MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

for _, objID := range t.PackageIDs {
err := execCtx.Args.DB.Package().DeleteUnused(execCtx.Args.DB.SQLCtx(), objID)
if err != nil {
log.WithField("PackageID", objID).Warnf("delete unused package failed, err: %s", err.Error())
}
}
}

func init() {
RegisterMessageConvertor(func(msg scevt.CheckPackage) Event { return NewCheckPackage(msg.PackageIDs) })
}

+ 215 - 0 scanner/internal/event/check_rep_count.go

@@ -0,0 +1,215 @@
package event

import (
"fmt"
"math"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
mymath "gitlink.org.cn/cloudream/common/utils/math"
mysort "gitlink.org.cn/cloudream/common/utils/sort"
"gitlink.org.cn/cloudream/storage-common/consts"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage-scanner/internal/config"

"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
scevt "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner/event"
)

type CheckRepCount struct {
scevt.CheckRepCount
}

func NewCheckRepCount(fileHashes []string) *CheckRepCount {
return &CheckRepCount{
CheckRepCount: scevt.NewCheckRepCount(fileHashes),
}
}

func (t *CheckRepCount) TryMerge(other Event) bool {
event, ok := other.(*CheckRepCount)
if !ok {
return false
}

t.FileHashes = lo.Union(t.FileHashes, event.FileHashes)
return true
}

func (t *CheckRepCount) Execute(execCtx ExecuteContext) {
log := logger.WithType[CheckRepCount]("Event")
log.Debugf("begin with %v", logger.FormatStruct(t))
defer log.Debugf("end")

mutex, err := reqbuilder.NewBuilder().
Metadata().
// Read the configured replica count of a FileHash
ObjectRep().ReadAny().
// Read whether a FileHash is referenced by any Block
ObjectBlock().ReadAny().
// Get all available nodes
Node().ReadAny().
// Add or modify Cache records associated with a FileHash
Cache().WriteAny().
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
return
}
defer mutex.Unlock()

updatedNodeAndHashes := make(map[int64][]string)

for _, fileHash := range t.FileHashes {
updatedNodeIDs, err := t.checkOneRepCount(fileHash, execCtx)
if err != nil {
log.WithField("FileHash", fileHash).Warnf("check file rep count failed, err: %s", err.Error())
continue
}

for _, id := range updatedNodeIDs {
hashes := updatedNodeAndHashes[id]
updatedNodeAndHashes[id] = append(hashes, fileHash)
}
}

for nodeID, hashes := range updatedNodeAndHashes {
// New tasks inherit this task's execution options (an emergency task stays an emergency task)
execCtx.Executor.Post(NewAgentCheckCache(nodeID, hashes), execCtx.Option)
}
}

func (t *CheckRepCount) checkOneRepCount(fileHash string, execCtx ExecuteContext) ([]int64, error) {
log := logger.WithType[CheckRepCount]("Event")
sqlCtx := execCtx.Args.DB.SQLCtx()

var updatedNodeIDs []int64
// Compute the minimum required replica count:
// 1. the maximum expected replica count recorded in ObjectRep
// 2. at least 1 if ObjectBlock references this file
repMaxCnt, err := execCtx.Args.DB.ObjectRep().GetFileMaxRepCount(sqlCtx, fileHash)
if err != nil {
return nil, fmt.Errorf("get file max rep count failed, err: %w", err)
}

blkCnt, err := execCtx.Args.DB.ObjectBlock().CountBlockWithHash(sqlCtx, fileHash)
if err != nil {
return nil, fmt.Errorf("count block with hash failed, err: %w", err)
}

needRepCount := mymath.Max(repMaxCnt, mymath.Min(1, blkCnt))
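// Worked example (illustrative numbers): repMaxCnt = 3, blkCnt = 5 gives max(3, min(1, 5)) = 3;
// repMaxCnt = 0, blkCnt = 2 gives max(0, 1) = 1; repMaxCnt = 0, blkCnt = 0 gives 0, i.e. no replica is required.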

repNodes, err := execCtx.Args.DB.Cache().GetCachingFileNodes(sqlCtx, fileHash)
if err != nil {
return nil, fmt.Errorf("get caching file nodes failed, err: %w", err)
}

allNodes, err := execCtx.Args.DB.Node().GetAllNodes(sqlCtx)
if err != nil {
return nil, fmt.Errorf("get all nodes failed, err: %w", err)
}

var normalNodes, unavaiNodes []model.Node
for _, node := range repNodes {
if node.State == consts.NodeStateNormal {
normalNodes = append(normalNodes, node)
} else if node.State == consts.NodeStateUnavailable {
unavaiNodes = append(unavaiNodes, node)
}
}

// If the number of available replicas exceeds the desired count, let some nodes drop out
if len(normalNodes) > needRepCount {
delNodes := chooseDeleteAvaiRepNodes(allNodes, normalNodes, len(normalNodes)-needRepCount)
for _, node := range delNodes {
err := execCtx.Args.DB.Cache().SetTemp(sqlCtx, fileHash, node.NodeID)
if err != nil {
return nil, fmt.Errorf("change cache state failed, err: %w", err)
}
updatedNodeIDs = append(updatedNodeIDs, node.NodeID)
}
return updatedNodeIDs, nil
}

// Replicas to add because the total replica count is insufficient
add1 := mymath.Max(0, needRepCount-len(repNodes))

// Replicas to add because the proportion of available replicas is too low
minAvaiNodeCnt := int(math.Ceil(float64(config.Cfg().MinAvailableRepProportion) * float64(needRepCount)))
add2 := mymath.Max(0, minAvaiNodeCnt-len(normalNodes))

// The final number of replicas to add is the maximum of the two cases above
finalAddCount := mymath.Max(add1, add2)
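// Worked example (illustrative numbers): with needRepCount = 3, len(repNodes) = 1, len(normalNodes) = 1
// and MinAvailableRepProportion = 0.5: add1 = max(0, 3-1) = 2, minAvaiNodeCnt = ceil(0.5*3) = 2,
// add2 = max(0, 2-1) = 1, so finalAddCount = max(2, 1) = 2.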

if finalAddCount > 0 {
newNodes := chooseNewRepNodes(allNodes, repNodes, finalAddCount)
if len(newNodes) < finalAddCount {
log.WithField("FileHash", fileHash).Warnf("need %d more rep nodes, but only got %d nodes", finalAddCount, len(newNodes))
// TODO not enough nodes; raise an alert
}

for _, node := range newNodes {
err := execCtx.Args.DB.Cache().CreatePinned(sqlCtx, fileHash, node.NodeID, 0)
if err != nil {
return nil, fmt.Errorf("create cache failed, err: %w", err)
}
updatedNodeIDs = append(updatedNodeIDs, node.NodeID)
}
}

return updatedNodeIDs, err
}

func chooseNewRepNodes(allNodes []model.Node, curRepNodes []model.Node, newCount int) []model.Node {
noRepNodes := lo.Reject(allNodes, func(node model.Node, index int) bool {
return lo.ContainsBy(curRepNodes, func(n model.Node) bool { return node.NodeID == n.NodeID }) ||
node.State != consts.NodeStateNormal
})

repNodeLocationIDs := make(map[int64]bool)
for _, node := range curRepNodes {
repNodeLocationIDs[node.LocationID] = true
}

mysort.Sort(noRepNodes, func(l, r model.Node) int {
// A LocationID not in the map yields false, and false - true < 0, so nodes whose location holds no replica yet sort first
return mysort.CmpBool(repNodeLocationIDs[l.LocationID], repNodeLocationIDs[r.LocationID])
})

return noRepNodes[:mymath.Min(newCount, len(noRepNodes))]
}

func chooseDeleteAvaiRepNodes(allNodes []model.Node, curAvaiRepNodes []model.Node, delCount int) []model.Node {
// Group by location ID
locationGroupedNodes := make(map[int64][]model.Node)
for _, node := range curAvaiRepNodes {
nodes := locationGroupedNodes[node.LocationID]
nodes = append(nodes, node)
locationGroupedNodes[node.LocationID] = nodes
}

// Each round takes one element from every group, puts it into the result array, and removes it from that group.
// The result therefore cycles through the locations, e.g. ABCABCBCC; elements in the later cycles come only from the larger groups.
// Reversing the result (implemented here by filling the array back to front) moves the larger groups to the front, so the nodes to delete can simply be taken from the head.
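// Illustrative run: with replica locations [1 1 2 2 3 3 3 4], the rounds (filled back to front)
// take one node from {1, 2, 3, 4}, then from {1, 2, 3}, then from {3}, so the head of the array
// holds nodes from the larger groups; deleting 4 nodes from the head removes one node each from
// locations 1 and 2 and two from location 3 (see Test_chooseDeleteAvaiRepNodes below).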
alternatedNodes := make([]model.Node, len(curAvaiRepNodes))
for i := len(curAvaiRepNodes) - 1; i >= 0; {
for id, nodes := range locationGroupedNodes {
alternatedNodes[i] = nodes[0]

if len(nodes) == 1 {
delete(locationGroupedNodes, id)
} else {
locationGroupedNodes[id] = nodes[1:]
}

// After placing an element, move the next insertion position
i--
}
}

return alternatedNodes[:mymath.Min(delCount, len(alternatedNodes))]
}

func init() {
RegisterMessageConvertor(func(msg scevt.CheckRepCount) Event { return NewCheckRepCount(msg.FileHashes) })
}

+ 155 - 0 scanner/internal/event/check_rep_count_test.go

@@ -0,0 +1,155 @@
package event

import (
"testing"

"github.com/samber/lo"
. "github.com/smartystreets/goconvey/convey"
"gitlink.org.cn/cloudream/common/utils/sort"
"gitlink.org.cn/cloudream/storage-common/consts"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
)

func Test_chooseNewRepNodes(t *testing.T) {
testcases := []struct {
title string
allNodes []model.Node
curRepNodes []model.Node
newCount int
wantNodeIDs []int64
}{
{
title: "prefer nodes in different locations",
allNodes: []model.Node{
{
NodeID: 1,
LocationID: 1,
State: consts.NodeStateNormal,
},
{
NodeID: 2,
LocationID: 1,
State: consts.NodeStateNormal,
},
{
NodeID: 3,
LocationID: 2,
State: consts.NodeStateNormal,
},
{
NodeID: 4,
LocationID: 3,
State: consts.NodeStateNormal,
},
},
curRepNodes: []model.Node{
{
NodeID: 1,
LocationID: 1,
},
},
newCount: 2,
wantNodeIDs: []int64{3, 4},
},
{
title: "even when there are not enough nodes, duplicate nodes must not be chosen",
allNodes: []model.Node{
{
NodeID: 1,
LocationID: 1,
State: consts.NodeStateNormal,
},
{
NodeID: 2,
LocationID: 1,
State: consts.NodeStateNormal,
},
},
curRepNodes: []model.Node{
{
NodeID: 1,
LocationID: 1,
},
},
newCount: 2,
wantNodeIDs: []int64{2},
},
{
title: "even when there are not enough nodes, unavailable nodes must not be chosen",
allNodes: []model.Node{
{
NodeID: 1,
LocationID: 1,
State: consts.NodeStateUnavailable,
},
{
NodeID: 2,
LocationID: 1,
State: consts.NodeStateNormal,
},
},
curRepNodes: []model.Node{
{
NodeID: 3,
LocationID: 1,
},
},
newCount: 2,
wantNodeIDs: []int64{2},
},
}

for _, test := range testcases {
Convey(test.title, t, func() {
chooseNodes := chooseNewRepNodes(test.allNodes, test.curRepNodes, test.newCount)
chooseNodeIDs := lo.Map(chooseNodes, func(node model.Node, index int) int64 { return node.NodeID })

sort.Sort(chooseNodeIDs, sort.Cmp[int64])

So(chooseNodeIDs, ShouldResemble, test.wantNodeIDs)
})
}
}

func Test_chooseDeleteAvaiRepNodes(t *testing.T) {
testcases := []struct {
title string
allNodes []model.Node
curRepNodes []model.Node
delCount int
wantNodeLocationIDs []int64
}{
{
title: "prefer nodes in locations that hold multiple replicas",
allNodes: []model.Node{},
curRepNodes: []model.Node{
{NodeID: 1, LocationID: 1}, {NodeID: 2, LocationID: 1},
{NodeID: 3, LocationID: 2}, {NodeID: 4, LocationID: 2},
{NodeID: 5, LocationID: 3}, {NodeID: 6, LocationID: 3}, {NodeID: 7, LocationID: 3},
{NodeID: 8, LocationID: 4},
},
delCount: 4,
wantNodeLocationIDs: []int64{1, 2, 3, 3},
},
{
title: "not enough nodes to delete",
allNodes: []model.Node{},
curRepNodes: []model.Node{
{NodeID: 1, LocationID: 1},
},
delCount: 2,
wantNodeLocationIDs: []int64{1},
},
}

for _, test := range testcases {
Convey(test.title, t, func() {
chooseNodes := chooseDeleteAvaiRepNodes(test.allNodes, test.curRepNodes, test.delCount)
chooseNodeLocationIDs := lo.Map(chooseNodes, func(node model.Node, index int) int64 { return node.LocationID })

sort.Sort(chooseNodeLocationIDs, sort.Cmp[int64])

So(chooseNodeLocationIDs, ShouldResemble, test.wantNodeLocationIDs)
})
}
}

+ 46 - 0 scanner/internal/event/event.go

@@ -0,0 +1,46 @@
package event

import (
"fmt"
"reflect"

distlocksvc "gitlink.org.cn/cloudream/common/pkgs/distlock/service"
event "gitlink.org.cn/cloudream/common/pkgs/event"
"gitlink.org.cn/cloudream/common/pkgs/typedispatcher"
mydb "gitlink.org.cn/cloudream/storage-common/pkgs/db"
)

type ExecuteArgs struct {
DB *mydb.DB
DistLock *distlocksvc.Service
}

type Executor = event.Executor[ExecuteArgs]

type ExecuteContext = event.ExecuteContext[ExecuteArgs]

type Event = event.Event[ExecuteArgs]

type ExecuteOption = event.ExecuteOption

func NewExecutor(db *mydb.DB, distLock *distlocksvc.Service) Executor {
return event.NewExecutor(ExecuteArgs{
DB: db,
DistLock: distLock,
})
}

var msgDispatcher = typedispatcher.NewTypeDispatcher[Event]()

func FromMessage(msg any) (Event, error) {
event, ok := msgDispatcher.Dispatch(msg)
if !ok {
return nil, fmt.Errorf("unknown event message type: %s", reflect.TypeOf(msg).Name())
}

return event, nil
}

func RegisterMessageConvertor[T any](converter func(msg T) Event) {
typedispatcher.Add(msgDispatcher, converter)
}

+ 28 - 0 scanner/internal/services/event.go

@@ -0,0 +1,28 @@
package services

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
scmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner"
scevt "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner/event"
"gitlink.org.cn/cloudream/storage-scanner/internal/event"
)

func (svc *Service) PostEvent(msg *scmq.PostEvent) {

evtMsg, err := scevt.MapToMessage(msg.Event)
if err != nil {
logger.Warnf("convert map to event message failed, err: %s", err.Error())
return
}

evt, err := event.FromMessage(evtMsg)
if err != nil {
logger.Warnf("create event from event message failed, err: %s", err.Error())
return
}

svc.eventExecutor.Post(evt, event.ExecuteOption{
IsEmergency: msg.IsEmergency,
DontMerge: msg.DontMerge,
})
}

+ 15 - 0 scanner/internal/services/service.go

@@ -0,0 +1,15 @@
package services

import (
"gitlink.org.cn/cloudream/storage-scanner/internal/event"
)

type Service struct {
eventExecutor *event.Executor
}

func NewService(eventExecutor *event.Executor) *Service {
return &Service{
eventExecutor: eventExecutor,
}
}

+ 43 - 0 scanner/internal/tickevent/batch_all_agent_check_cache.go

@@ -0,0 +1,43 @@
package tickevent

import (
"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage-scanner/internal/event"
)

const AGENT_CHECK_CACHE_BATCH_SIZE = 2

type BatchAllAgentCheckCache struct {
nodeIDs []int64
}

func NewBatchAllAgentCheckCache() *BatchAllAgentCheckCache {
return &BatchAllAgentCheckCache{}
}

func (e *BatchAllAgentCheckCache) Execute(ctx ExecuteContext) {
log := logger.WithType[BatchAllAgentCheckCache]("TickEvent")
log.Debugf("begin")
defer log.Debugf("end")

if len(e.nodeIDs) == 0 {
nodes, err := ctx.Args.DB.Node().GetAllNodes(ctx.Args.DB.SQLCtx())
if err != nil {
log.Warnf("get all nodes failed, err: %s", err.Error())
return
}

e.nodeIDs = lo.Map(nodes, func(node model.Node, index int) int64 { return node.NodeID })

log.Debugf("new check start, get all nodes")
}
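// At most AGENT_CHECK_CACHE_BATCH_SIZE node IDs are consumed per tick; e.g. with 5 nodes and a
// batch size of 2, a full sweep of all nodes takes three ticks (2 + 2 + 1).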

checkedCnt := 0
for ; checkedCnt < len(e.nodeIDs) && checkedCnt < AGENT_CHECK_CACHE_BATCH_SIZE; checkedCnt++ {
// nil means a full check
ctx.Args.EventExecutor.Post(event.NewAgentCheckCache(e.nodeIDs[checkedCnt], nil))
}
e.nodeIDs = e.nodeIDs[checkedCnt:]
}

+ 39 - 0 scanner/internal/tickevent/batch_check_all_package.go

@@ -0,0 +1,39 @@
package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-scanner/internal/event"
)

const CheckPackageBatchSize = 100

type BatchCheckAllPackage struct {
lastCheckStart int
}

func NewBatchCheckAllPackage() *BatchCheckAllPackage {
return &BatchCheckAllPackage{}
}

func (e *BatchCheckAllPackage) Execute(ctx ExecuteContext) {
log := logger.WithType[BatchCheckAllPackage]("TickEvent")
log.Debugf("begin")
defer log.Debugf("end")

packageIDs, err := ctx.Args.DB.Package().BatchGetAllPackageIDs(ctx.Args.DB.SQLCtx(), e.lastCheckStart, CheckPackageBatchSize)
if err != nil {
log.Warnf("batch get package ids failed, err: %s", err.Error())
return
}

ctx.Args.EventExecutor.Post(event.NewCheckPackage(packageIDs))

// If fewer results than the batch size were returned, assume everything has been checked and start from the beginning next time
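// For example (illustrative numbers, assuming BatchGetAllPackageIDs pages by offset): with 250
// packages and a batch size of 100, three ticks cover 100, 100 and 50 IDs, after which
// lastCheckStart resets to 0 and the sweep starts over.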
if len(packageIDs) < CheckPackageBatchSize {
e.lastCheckStart = 0
log.Debugf("all package checked, next time will start check at 0")

} else {
e.lastCheckStart += CheckPackageBatchSize
}
}

+ 39 - 0 scanner/internal/tickevent/batch_check_all_rep_count.go

@@ -0,0 +1,39 @@
package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-scanner/internal/event"
)

const CHECK_CACHE_BATCH_SIZE = 100

type BatchCheckAllRepCount struct {
lastCheckStart int
}

func NewBatchCheckAllRepCount() *BatchCheckAllRepCount {
return &BatchCheckAllRepCount{}
}

func (e *BatchCheckAllRepCount) Execute(ctx ExecuteContext) {
log := logger.WithType[BatchCheckAllRepCount]("TickEvent")
log.Debugf("begin")
defer log.Debugf("end")

fileHashes, err := ctx.Args.DB.Cache().BatchGetAllFileHashes(ctx.Args.DB.SQLCtx(), e.lastCheckStart, CHECK_CACHE_BATCH_SIZE)
if err != nil {
log.Warnf("batch get file hashes failed, err: %s", err.Error())
return
}

ctx.Args.EventExecutor.Post(event.NewCheckRepCount(fileHashes))

// If fewer results than the batch size were returned, assume everything has been checked and start from the beginning next time
if len(fileHashes) < CHECK_CACHE_BATCH_SIZE {
e.lastCheckStart = 0
log.Debugf("all rep count checked, next time will start check at 0")

} else {
e.lastCheckStart += CHECK_CACHE_BATCH_SIZE
}
}

+ 42 - 0 scanner/internal/tickevent/batch_check_all_storage.go

@@ -0,0 +1,42 @@
package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-scanner/internal/event"
)

const CHECK_STORAGE_BATCH_SIZE = 5

type BatchCheckAllStorage struct {
lastCheckStart int
}

func NewBatchCheckAllStorage() *BatchCheckAllStorage {
return &BatchCheckAllStorage{}
}

func (e *BatchCheckAllStorage) Execute(ctx ExecuteContext) {
log := logger.WithType[BatchCheckAllStorage]("TickEvent")
log.Debugf("begin")
defer log.Debugf("end")

storageIDs, err := ctx.Args.DB.Storage().BatchGetAllStorageIDs(ctx.Args.DB.SQLCtx(), e.lastCheckStart, CHECK_STORAGE_BATCH_SIZE)
if err != nil {
log.Warnf("batch get storage ids failed, err: %s", err.Error())
return
}

for _, stgID := range storageIDs {
// Passing nil means a full check
ctx.Args.EventExecutor.Post(event.NewAgentCheckStorage(stgID, nil))
}

// If fewer results than the batch size were returned, assume everything has been checked and start from the beginning next time
if len(storageIDs) < CHECK_STORAGE_BATCH_SIZE {
e.lastCheckStart = 0
log.Debugf("all storage checked, next time will start check at 0")

} else {
e.lastCheckStart += CHECK_STORAGE_BATCH_SIZE
}
}

+ 32 - 0 scanner/internal/tickevent/check_agent_state.go

@@ -0,0 +1,32 @@
package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-scanner/internal/event"
)

type CheckAgentState struct {
}

func NewCheckAgentState() *CheckAgentState {
return &CheckAgentState{}
}

func (e *CheckAgentState) Execute(ctx ExecuteContext) {
log := logger.WithType[CheckAgentState]("TickEvent")
log.Debugf("begin")
defer log.Debugf("end")

nodes, err := ctx.Args.DB.Node().GetAllNodes(ctx.Args.DB.SQLCtx())
if err != nil {
log.Warnf("get all nodes failed, err: %s", err.Error())
return
}

for _, node := range nodes {
ctx.Args.EventExecutor.Post(event.NewAgentCheckState(node.NodeID), event.ExecuteOption{
IsEmergency: true,
DontMerge: true,
})
}
}

+ 29 - 0 scanner/internal/tickevent/check_cache.go

@@ -0,0 +1,29 @@
package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-scanner/internal/event"
)

type CheckCache struct {
}

func NewCheckCache() *CheckCache {
return &CheckCache{}
}

func (e *CheckCache) Execute(ctx ExecuteContext) {
log := logger.WithType[CheckCache]("TickEvent")
log.Debugf("begin")
defer log.Debugf("end")

nodes, err := ctx.Args.DB.Node().GetAllNodes(ctx.Args.DB.SQLCtx())
if err != nil {
log.Warnf("get all nodes failed, err: %s", err.Error())
return
}

for _, node := range nodes {
ctx.Args.EventExecutor.Post(event.NewCheckCache(node.NodeID))
}
}

+ 24 - 0 scanner/internal/tickevent/tick_event.go

@@ -0,0 +1,24 @@
package tickevent

import (
tickevent "gitlink.org.cn/cloudream/common/pkgs/tickevent"
mydb "gitlink.org.cn/cloudream/storage-common/pkgs/db"
"gitlink.org.cn/cloudream/storage-scanner/internal/event"
)

type ExecuteArgs struct {
EventExecutor *event.Executor
DB *mydb.DB
}

type StartOption = tickevent.StartOption

type Executor = tickevent.Executor[ExecuteArgs]

type ExecuteContext = tickevent.ExecuteContext[ExecuteArgs]

type Event = tickevent.TickEvent[ExecuteArgs]

func NewExecutor(args ExecuteArgs) Executor {
return tickevent.NewExecutor(args)
}

+ 20 - 0 scanner/magefiles/magefile.go

@@ -0,0 +1,20 @@
//go:build mage

package main

import (
"gitlink.org.cn/cloudream/common/magefiles"

//mage:import
_ "gitlink.org.cn/cloudream/common/magefiles/targets"
)

var Default = Build

func Build() error {
return magefiles.Build(magefiles.BuildArgs{
OutputName: "scanner",
OutputDir: "scanner",
AssetsDir: "assets",
})
}

+ 128 - 0 scanner/main.go

@@ -0,0 +1,128 @@
package main

import (
"fmt"
"os"
"sync"

"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/storage-common/globals"
"gitlink.org.cn/cloudream/storage-common/pkgs/db"
"gitlink.org.cn/cloudream/storage-common/pkgs/distlock"
scmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner"
"gitlink.org.cn/cloudream/storage-scanner/internal/config"
"gitlink.org.cn/cloudream/storage-scanner/internal/event"
"gitlink.org.cn/cloudream/storage-scanner/internal/services"
"gitlink.org.cn/cloudream/storage-scanner/internal/tickevent"
)

func main() {
err := config.Init()
if err != nil {
fmt.Printf("init config failed, err: %s", err.Error())
os.Exit(1)
}

err = logger.Init(&config.Cfg().Logger)
if err != nil {
fmt.Printf("init logger failed, err: %s", err.Error())
os.Exit(1)
}

db, err := db.NewDB(&config.Cfg().DB)
if err != nil {
logger.Fatalf("new db failed, err: %s", err.Error())
}

globals.InitMQPool(&config.Cfg().RabbitMQ)

wg := sync.WaitGroup{}
wg.Add(3)

distlockSvc, err := distlock.NewService(&config.Cfg().DistLock)
if err != nil {
logger.Warnf("new distlock service failed, err: %s", err.Error())
os.Exit(1)
}
go serveDistLock(distlockSvc, &wg)

eventExecutor := event.NewExecutor(db, distlockSvc)
go serveEventExecutor(&eventExecutor, &wg)

agtSvr, err := scmq.NewServer(services.NewService(&eventExecutor), &config.Cfg().RabbitMQ)
if err != nil {
logger.Fatalf("new agent server failed, err: %s", err.Error())
}
agtSvr.OnError = func(err error) {
logger.Warnf("agent server err: %s", err.Error())
}
go serveScannerServer(agtSvr, &wg)

tickExecutor := tickevent.NewExecutor(tickevent.ExecuteArgs{
EventExecutor: &eventExecutor,
DB: db,
})
startTickEvent(&tickExecutor)

wg.Wait()
}

func serveEventExecutor(executor *event.Executor, wg *sync.WaitGroup) {
logger.Info("start serving event executor")

err := executor.Execute()

if err != nil {
logger.Errorf("event executor stopped with error: %s", err.Error())
}

logger.Info("event executor stopped")

wg.Done()
}

func serveScannerServer(server *scmq.Server, wg *sync.WaitGroup) {
logger.Info("start serving scanner server")

err := server.Serve()

if err != nil {
logger.Errorf("scanner server stopped with error: %s", err.Error())
}

logger.Info("scanner server stopped")

wg.Done()
}

func serveDistLock(svc *distlock.Service, wg *sync.WaitGroup) {
logger.Info("start serving distlock")

err := svc.Serve()

if err != nil {
logger.Errorf("distlock stopped with error: %s", err.Error())
}

logger.Info("distlock stopped")

wg.Done()
}

func startTickEvent(tickExecutor *tickevent.Executor) {
// TODO consider making these task intervals configurable via the config file
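// The shared interval below is 5 * 60 * 1000 ms = 5 minutes; RandomStartDelayMs (60 * 1000)
// presumably staggers each event's first run by a random delay of up to one minute.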

interval := 5 * 60 * 1000

tickExecutor.Start(tickevent.NewBatchAllAgentCheckCache(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewBatchCheckAllPackage(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewBatchCheckAllRepCount(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewBatchCheckAllStorage(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewCheckAgentState(), 5*60*1000, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewCheckCache(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})
}
