
shard_store.go 4.7 kB

package ops2

import (
	"fmt"
	"io"

	"gitlink.org.cn/cloudream/common/pkgs/future"
	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
	"gitlink.org.cn/cloudream/common/pkgs/logger"
	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
	"gitlink.org.cn/cloudream/common/utils/io2"
	"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/pool"
	"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
)

func init() {
	exec.UseOp[*ShardRead]()
	exec.UseOp[*ShardWrite]()
}
// ShardRead opens a file in a shard store and exposes it as an output stream.
type ShardRead struct {
	Output    *exec.StreamVar  `json:"output"`
	StorageID cdssdk.StorageID `json:"storageID"`
	Open      types.OpenOption `json:"option"`
}

func (o *ShardRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
	logger.
		WithField("Open", o.Open).
		Debugf("reading from shard store")
	defer logger.Debugf("reading from shard store finished")

	pool, err := exec.ValueByType[*pool.ShardStorePool](ctx)
	if err != nil {
		return fmt.Errorf("getting shard store pool: %w", err)
	}

	store, err := pool.Get(o.StorageID)
	if err != nil {
		return fmt.Errorf("getting shard store %v: %w", o.StorageID, err)
	}

	file, err := store.Open(o.Open)
	if err != nil {
		return fmt.Errorf("opening shard store file: %w", err)
	}

	// Publish the stream, then hold this op open until the downstream
	// consumer has read the stream to completion and closed it.
	fut := future.NewSetVoid()
	o.Output.Stream = io2.AfterReadClosedOnce(file, func(closer io.ReadCloser) {
		fut.SetVoid()
	})
	e.PutVars(o.Output)

	return fut.Wait(ctx.Context)
}

func (o *ShardRead) String() string {
	return fmt.Sprintf("ShardRead %v -> %v", o.Open, o.Output.ID)
}
// ShardWrite drains an input stream into a shard store and outputs the
// hash of the stored file.
type ShardWrite struct {
	Input     *exec.StreamVar  `json:"input"`
	FileHash  *exec.StringVar  `json:"fileHash"`
	StorageID cdssdk.StorageID `json:"storageID"`
}

func (o *ShardWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
	logger.
		WithField("Input", o.Input.ID).
		WithField("FileHashVar", o.FileHash.ID).
		Debugf("writing file to shard store")
	defer logger.Debugf("writing to shard store finished")

	pool, err := exec.ValueByType[*pool.ShardStorePool](ctx)
	if err != nil {
		return fmt.Errorf("getting shard store pool: %w", err)
	}

	store, err := pool.Get(o.StorageID)
	if err != nil {
		return fmt.Errorf("getting shard store %v: %w", o.StorageID, err)
	}

	err = e.BindVars(ctx.Context, o.Input)
	if err != nil {
		return err
	}
	defer o.Input.Stream.Close()

	// Discard the partially written file unless Finish completes below.
	writer := store.New()
	defer writer.Abort()

	_, err = io.Copy(writer, o.Input.Stream)
	if err != nil {
		return fmt.Errorf("writing file to shard store: %w", err)
	}

	fileInfo, err := writer.Finish()
	if err != nil {
		return fmt.Errorf("finishing writing file to shard store: %w", err)
	}

	o.FileHash.Value = string(fileInfo.Hash)
	e.PutVars(o.FileHash)
	return nil
}

func (o *ShardWrite) String() string {
	return fmt.Sprintf("ShardWrite %v -> %v", o.Input.ID, o.FileHash.ID)
}
// ShardReadNode is the plan-graph node that generates a ShardRead op.
type ShardReadNode struct {
	dag.NodeBase
	StorageID cdssdk.StorageID
	Open      types.OpenOption
}

func (b *GraphNodeBuilder) NewShardRead(stgID cdssdk.StorageID, open types.OpenOption) *ShardReadNode {
	node := &ShardReadNode{
		StorageID: stgID,
		Open:      open,
	}
	b.AddNode(node)
	node.OutputStreams().SetupNew(node, b.NewStreamVar())
	return node
}

func (t *ShardReadNode) Output() dag.StreamSlot {
	return dag.StreamSlot{
		Var:   t.OutputStreams().Get(0),
		Index: 0,
	}
}

func (t *ShardReadNode) GenerateOp() (exec.Op, error) {
	return &ShardRead{
		Output:    t.OutputStreams().Get(0).Var,
		StorageID: t.StorageID,
		Open:      t.Open,
	}, nil
}

// func (t *IPFSReadType) String() string {
// 	return fmt.Sprintf("IPFSRead[%s,%v+%v]%v%v", t.FileHash, t.Option.Offset, t.Option.Length, formatStreamIO(node), formatValueIO(node))
// }
// ShardWriteNode is the plan-graph node that generates a ShardWrite op.
type ShardWriteNode struct {
	dag.NodeBase
	StorageID        cdssdk.StorageID
	FileHashStoreKey string
}

func (b *GraphNodeBuilder) NewShardWrite(stgID cdssdk.StorageID, fileHashStoreKey string) *ShardWriteNode {
	node := &ShardWriteNode{
		StorageID:        stgID,
		FileHashStoreKey: fileHashStoreKey,
	}
	b.AddNode(node)
	return node
}

// SetInput wires the incoming stream and allocates the string value var
// that will carry the resulting file hash.
func (t *ShardWriteNode) SetInput(input *dag.StreamVar) {
	t.InputStreams().EnsureSize(1)
	input.Connect(t, 0)
	t.OutputValues().SetupNew(t, t.Graph().NewValueVar(dag.StringValueVar))
}

func (t *ShardWriteNode) Input() dag.StreamSlot {
	return dag.StreamSlot{
		Var:   t.InputStreams().Get(0),
		Index: 0,
	}
}

func (t *ShardWriteNode) FileHashVar() *dag.ValueVar {
	return t.OutputValues().Get(0)
}

func (t *ShardWriteNode) GenerateOp() (exec.Op, error) {
	return &ShardWrite{
		Input:     t.InputStreams().Get(0).Var,
		FileHash:  t.OutputValues().Get(0).Var.(*exec.StringVar),
		StorageID: t.StorageID,
	}, nil
}

// func (t *IPFSWriteType) String() string {
// 	return fmt.Sprintf("IPFSWrite[%s,%v+%v]%v%v", t.FileHashStoreKey, t.Range.Offset, t.Range.Length, formatStreamIO(node), formatValueIO(node))
// }
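Taken together, the two node types are enough to wire a store-to-store copy: a read node streams an object out of one shard store, and a write node drains that stream into another while reporting the new file hash. A minimal sketch of that wiring, using only the builder methods defined above; buildCopyPlan itself and the srcID/dstID/opt variables are illustrative assumptions, not part of this file:

// buildCopyPlan is a hypothetical helper (not part of shard_store.go): it
// connects a ShardReadNode to a ShardWriteNode so a single plan copies one
// object between two shard stores.
func buildCopyPlan(b *GraphNodeBuilder, srcID, dstID cdssdk.StorageID, opt types.OpenOption) *dag.ValueVar {
	read := b.NewShardRead(srcID, opt)          // streams the object out of the source store
	write := b.NewShardWrite(dstID, "fileHash") // drains the stream into the destination store
	write.SetInput(read.Output().Var)           // connect the read's output stream to the write's input
	return write.FileHashVar()                  // value var that will carry the stored file's hash
}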

This project aims to turn JointCloud storage into shared public infrastructure, so that individuals and enterprises can use efficient JointCloud storage services with a low barrier to entry (just install the out-of-the-box JointCloud storage client, with no need to worry about deploying any other components), while still letting users flexibly and conveniently customize the functional details of JointCloud storage.