You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

public_store.go 4.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205
  1. package ops2
  2. import (
  3. "fmt"
  4. "io"
  5. "gitlink.org.cn/cloudream/common/pkgs/future"
  6. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
  7. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
  8. "gitlink.org.cn/cloudream/common/pkgs/logger"
  9. "gitlink.org.cn/cloudream/common/utils/io2"
  10. clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
  11. "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
  12. "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
  13. )
// init registers this file's ops and var-value types with the exec framework
// so plan runners can (de)serialize and execute them.
func init() {
	exec.UseOp[*BaseWrite]()
	exec.UseOp[*BaseRead]()
	exec.UseVarValue[*FileInfoValue]()
}
  19. type FileInfoValue struct {
  20. Hash clitypes.FileHash `json:"hash"`
  21. Size int64 `json:"size"`
  22. }
  23. func (v *FileInfoValue) Clone() exec.VarValue {
  24. return &FileInfoValue{Hash: v.Hash, Size: v.Size}
  25. }
// BaseRead is an op that opens a file from a user space's base store and
// publishes its contents as a stream variable.
type BaseRead struct {
	Output    exec.VarID               // variable that receives the opened stream
	UserSpace clitypes.UserSpaceDetail // user space whose base store is read
	Path      string                   // path of the file inside the store
}
// Execute opens o.Path from the user space's base store and publishes the
// resulting stream under o.Output. It blocks until the consumer closes the
// stream (or the context is canceled), keeping the op alive while the data
// is being read.
func (o *BaseRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
	logger.
		WithField("Output", o.Output).
		WithField("UserSpace", o.UserSpace).
		WithField("Path", o.Path).
		Debug("base read")
	defer logger.Debug("base read end")

	stgPool, err := exec.GetValueByType[*pool.Pool](ctx)
	if err != nil {
		return fmt.Errorf("getting storage pool: %w", err)
	}
	store, err := stgPool.GetBaseStore(&o.UserSpace)
	if err != nil {
		return fmt.Errorf("getting base store of storage %v: %w", o.UserSpace, err)
	}
	stream, err := store.Read(o.Path)
	if err != nil {
		return fmt.Errorf("reading object %v: %w", o.Path, err)
	}

	// Completion is signaled via a future that fires when the consumer of
	// o.Output closes the stream; only then may this op return.
	fut := future.NewSetVoid()
	output := &exec.StreamValue{
		Stream: io2.AfterReadClosed(stream, func(closer io.ReadCloser) {
			fut.SetVoid()
		}),
	}
	e.PutVar(o.Output, output)
	// NOTE(review): if ctx is canceled before the consumer closes the stream,
	// Wait returns without closing `stream` here — confirm the executor closes
	// abandoned stream vars, otherwise this leaks the store handle.
	return fut.Wait(ctx.Context)
}
  59. func (o *BaseRead) String() string {
  60. return fmt.Sprintf("PublicRead %v:%v -> %v", o.UserSpace, o.Path, o.Output)
  61. }
// BaseWrite is an op that streams an input variable into a user space's base
// store and publishes the written file's hash and size.
type BaseWrite struct {
	Input     exec.VarID               // variable holding the stream to write
	UserSpace clitypes.UserSpaceDetail // user space whose base store is written
	Path      string                   // destination path inside the store
	FileInfo  exec.VarID               // variable that receives the resulting FileInfoValue
}
  68. func (o *BaseWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
  69. logger.
  70. WithField("Input", o.Input).
  71. Debugf("write file to base store")
  72. defer logger.Debugf("write file to base store finished")
  73. stgPool, err := exec.GetValueByType[*pool.Pool](ctx)
  74. if err != nil {
  75. return fmt.Errorf("getting storage pool: %w", err)
  76. }
  77. store, err := stgPool.GetBaseStore(&o.UserSpace)
  78. if err != nil {
  79. return fmt.Errorf("getting base store of storage %v: %w", o.UserSpace, err)
  80. }
  81. input, err := exec.BindVar[*exec.StreamValue](e, ctx.Context, o.Input)
  82. if err != nil {
  83. return err
  84. }
  85. defer input.Stream.Close()
  86. info, err := store.Write(o.Path, input.Stream)
  87. if err != nil {
  88. return err
  89. }
  90. e.PutVar(o.FileInfo, &FileInfoValue{
  91. Hash: info.Hash,
  92. Size: info.Size,
  93. })
  94. return nil
  95. }
  96. func (o *BaseWrite) String() string {
  97. return fmt.Sprintf("PublicWrite %v -> %v:%v", o.Input, o.UserSpace, o.Path)
  98. }
// BaseReadNode is the DAG node that generates a BaseRead op: it reads Path
// from UserSpace's base store and exposes the data on one output stream.
type BaseReadNode struct {
	dag.NodeBase
	From      ioswitch2.From           // logical source this node serves
	UserSpace clitypes.UserSpaceDetail // user space whose base store is read
	Path      string                   // path of the file inside the store
}
  105. func (b *GraphNodeBuilder) NewPublicRead(from ioswitch2.From, userSpace clitypes.UserSpaceDetail, path string) *BaseReadNode {
  106. node := &BaseReadNode{
  107. From: from,
  108. UserSpace: userSpace,
  109. Path: path,
  110. }
  111. b.AddNode(node)
  112. node.OutputStreams().Init(node, 1)
  113. return node
  114. }
// GetFrom returns the logical source this read node was created for.
func (t *BaseReadNode) GetFrom() ioswitch2.From {
	return t.From
}
  118. func (t *BaseReadNode) Output() dag.StreamOutputSlot {
  119. return dag.StreamOutputSlot{
  120. Node: t,
  121. Index: 0,
  122. }
  123. }
  124. func (t *BaseReadNode) GenerateOp() (exec.Op, error) {
  125. return &BaseRead{
  126. Output: t.Output().Var().VarID,
  127. UserSpace: t.UserSpace,
  128. Path: t.Path,
  129. }, nil
  130. }
// BaseWriteNode is the DAG node that generates a BaseWrite op: it consumes
// one input stream and writes it to Path in UserSpace's base store.
type BaseWriteNode struct {
	dag.NodeBase
	To        ioswitch2.To             // logical destination this node serves
	UserSpace clitypes.UserSpaceDetail // user space whose base store is written
	Path      string                   // destination path inside the store
	// FileInfoStoreKey is not referenced anywhere in this file — presumably
	// used by callers to key the resulting file info; verify before removing.
	FileInfoStoreKey string
}
  138. func (b *GraphNodeBuilder) NewPublicWrite(to ioswitch2.To, userSpace clitypes.UserSpaceDetail, path string) *BaseWriteNode {
  139. node := &BaseWriteNode{
  140. To: to,
  141. UserSpace: userSpace,
  142. Path: path,
  143. }
  144. b.AddNode(node)
  145. node.InputStreams().Init(1)
  146. return node
  147. }
// GetTo returns the logical destination this write node was created for.
func (t *BaseWriteNode) GetTo() ioswitch2.To {
	return t.To
}
// SetInput connects the given stream variable to this node's only input slot.
func (t *BaseWriteNode) SetInput(input *dag.StreamVar) {
	input.To(t, 0)
}
  154. func (t *BaseWriteNode) Input() dag.StreamInputSlot {
  155. return dag.StreamInputSlot{
  156. Node: t,
  157. Index: 0,
  158. }
  159. }
// FileInfoVar returns the value variable (output 0) that will carry the
// written file's FileInfoValue once the generated op runs.
func (t *BaseWriteNode) FileInfoVar() *dag.ValueVar {
	return t.OutputValues().Get(0)
}
  163. func (t *BaseWriteNode) GenerateOp() (exec.Op, error) {
  164. return &BaseWrite{
  165. Input: t.InputStreams().Get(0).VarID,
  166. UserSpace: t.UserSpace,
  167. Path: t.Path,
  168. FileInfo: t.FileInfoVar().VarID,
  169. }, nil
  170. }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。