
shard_store.go 5.3 kB
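shard_store.go implements a content-addressed shard store backed by an S3-compatible bucket: each object is stored under a key derived from its file hash inside the user space's working directory, and the store supports storing, looking up, listing, and garbage-collecting shards.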

package s3

import (
	"context"
	"sync"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"

	"gitlink.org.cn/cloudream/common/pkgs/logger"
	"gitlink.org.cn/cloudream/common/utils/math2"
	stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type ShardStoreOption struct {
	// Whether the SHA256 checksum provided by AWS can be used directly.
	// If not, the hash is computed locally. Defaults to local computation.
	UseAWSSha256 bool
}

type ShardStore struct {
	Detail     *jcstypes.UserSpaceDetail
	Bucket     string
	workingDir jcstypes.JPath
	cli        *s3.Client
	opt        ShardStoreOption
	lock       sync.Mutex
}

func NewShardStore(detail *jcstypes.UserSpaceDetail, cli *s3.Client, bkt string, opt ShardStoreOption) (*ShardStore, error) {
	wd := detail.UserSpace.WorkingDir.Clone()
	wd.Push(stgtypes.ShardStoreWorkingDir)

	return &ShardStore{
		Detail:     detail,
		Bucket:     bkt,
		workingDir: wd,
		cli:        cli,
		opt:        opt,
	}, nil
}

func (s *ShardStore) Start(ch *stgtypes.StorageEventChan) {
	s.getLogger().Infof("start, root: %v", s.workingDir)
}

func (s *ShardStore) Stop() {
	s.getLogger().Infof("component stop")
}

// Store moves an uploaded temporary object into the shard store by copying it
// to the key derived from its hash.
func (s *ShardStore) Store(path jcstypes.JPath, hash jcstypes.FileHash, size int64) (stgtypes.FileInfo, error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	log := s.getLogger()
	log.Debugf("write file %v finished, size: %v, hash: %v", path, size, hash)

	newPath := s.GetFilePathFromHash(hash)
	_, err := s.cli.CopyObject(context.Background(), &s3.CopyObjectInput{
		Bucket:     aws.String(s.Bucket),
		CopySource: aws.String(JoinKey(s.Bucket, path.String())),
		Key:        aws.String(newPath.String()),
	})
	if err != nil {
		log.Warnf("copy file %v to %v: %v", path, newPath, err)
		return stgtypes.FileInfo{}, err
	}

	return stgtypes.FileInfo{
		Hash: hash,
		Size: size,
		Path: newPath,
	}, nil
}

func (s *ShardStore) Info(hash jcstypes.FileHash) (stgtypes.FileInfo, error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	filePath := s.GetFilePathFromHash(hash)
	info, err := s.cli.HeadObject(context.TODO(), &s3.HeadObjectInput{
		Bucket: aws.String(s.Bucket),
		Key:    aws.String(filePath.String()),
	})
	if err != nil {
		s.getLogger().Warnf("get file %v: %v", filePath, err)
		return stgtypes.FileInfo{}, err
	}

	return stgtypes.FileInfo{
		Hash: hash,
		Size: *info.ContentLength,
		Path: filePath,
	}, nil
}

// ListAll enumerates every object under the working directory whose base key
// parses as a file hash.
func (s *ShardStore) ListAll() ([]stgtypes.FileInfo, error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	var infos []stgtypes.FileInfo

	var marker *string
	for {
		resp, err := s.cli.ListObjects(context.Background(), &s3.ListObjectsInput{
			Bucket: aws.String(s.Bucket),
			Prefix: aws.String(s.workingDir.String()),
			Marker: marker,
		})
		if err != nil {
			s.getLogger().Warnf("list objects: %v", err)
			return nil, err
		}

		for _, obj := range resp.Contents {
			key := BaseKey(*obj.Key)
			fileHash, err := jcstypes.ParseHash(key)
			if err != nil {
				continue
			}

			infos = append(infos, stgtypes.FileInfo{
				Hash: fileHash,
				Size: *obj.Size,
				Path: jcstypes.PathFromJcsPathString(*obj.Key),
			})
		}

		if !*resp.IsTruncated {
			break
		}
		// ListObjects only returns NextMarker when a Delimiter is specified,
		// so fall back to the last key of this page to advance the pagination.
		if resp.NextMarker != nil {
			marker = resp.NextMarker
		} else if len(resp.Contents) > 0 {
			marker = resp.Contents[len(resp.Contents)-1].Key
		} else {
			break
		}
	}

	return infos, nil
}

// GC deletes every object under the working directory whose hash is not listed
// in avaiables.
func (s *ShardStore) GC(avaiables []jcstypes.FileHash) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	avais := make(map[jcstypes.FileHash]bool)
	for _, hash := range avaiables {
		avais[hash] = true
	}

	var deletes []s3types.ObjectIdentifier

	var marker *string
	for {
		resp, err := s.cli.ListObjects(context.Background(), &s3.ListObjectsInput{
			Bucket: aws.String(s.Bucket),
			Prefix: aws.String(s.workingDir.String()),
			Marker: marker,
		})
		if err != nil {
			s.getLogger().Warnf("list objects: %v", err)
			return err
		}

		for _, obj := range resp.Contents {
			key := BaseKey(*obj.Key)
			fileHash, err := jcstypes.ParseHash(key)
			if err != nil {
				continue
			}

			if !avais[fileHash] {
				deletes = append(deletes, s3types.ObjectIdentifier{
					Key: obj.Key,
				})
			}
		}

		if !*resp.IsTruncated {
			break
		}
		// ListObjects only returns NextMarker when a Delimiter is specified,
		// so fall back to the last key of this page to advance the pagination.
		if resp.NextMarker != nil {
			marker = resp.NextMarker
		} else if len(resp.Contents) > 0 {
			marker = resp.Contents[len(resp.Contents)-1].Key
		} else {
			break
		}
	}

	totalCnt := len(deletes)
	// DeleteObjects is limited to 1000 keys per request, so delete in batches.
	for len(deletes) > 0 {
		cnt := math2.Min(500, len(deletes))

		_, err := s.cli.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{
			Bucket: aws.String(s.Bucket),
			Delete: &s3types.Delete{
				Objects: deletes[:cnt],
			},
		})
		if err != nil {
			s.getLogger().Warnf("delete objects: %v", err)
			return err
		}

		deletes = deletes[cnt:]
	}

	s.getLogger().Infof("purge %d files", totalCnt)
	// TODO Atomicity cannot be guaranteed, so deletion failures should only be logged.
	return nil
}

func (s *ShardStore) Stats() stgtypes.Stats {
	// TODO Collect statistics about the underlying storage.
	return stgtypes.Stats{
		Status: stgtypes.StatusOK,
	}
}

func (s *ShardStore) getLogger() logger.Logger {
	return logger.WithField("ShardStore", "S3").WithField("UserSpace", s.Detail)
}

// GetFileDirFromHash returns the directory for a hash,
// i.e. <workingDir>/<first 2 characters of the hash>.
func (s *ShardStore) GetFileDirFromHash(hash jcstypes.FileHash) jcstypes.JPath {
	p := s.workingDir.Clone()
	p.Push(hash.GetHashPrefix(2))
	return p
}

// GetFilePathFromHash returns the full object key for a hash,
// i.e. <workingDir>/<first 2 characters of the hash>/<hash>.
func (s *ShardStore) GetFilePathFromHash(hash jcstypes.FileHash) jcstypes.JPath {
	p := s.workingDir.Clone()
	p.Push(hash.GetHashPrefix(2))
	p.Push(string(hash))
	return p
}

This project aims to turn inter-cloud storage into public infrastructure, so that individuals and enterprises can use efficient inter-cloud storage services with a low barrier to entry (simply install the out-of-the-box inter-cloud storage client, with no need to worry about deploying other components), while also letting users flexibly and conveniently customize the details of the inter-cloud storage functionality.