
ipfs.go (3.5 kB)

package ops2

import (
	"context"
	"fmt"
	"io"

	"gitlink.org.cn/cloudream/common/pkgs/future"
	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
	"gitlink.org.cn/cloudream/common/pkgs/ipfs"
	"gitlink.org.cn/cloudream/common/pkgs/logger"
	"gitlink.org.cn/cloudream/common/utils/io2"
	stgglb "gitlink.org.cn/cloudream/storage/common/globals"
	"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc"
)

func init() {
	exec.UseOp[*IPFSRead]()
	exec.UseOp[*IPFSWrite]()
}

// IPFSRead reads a file from IPFS by its hash and publishes it as an output stream variable.
type IPFSRead struct {
	Output   *exec.StreamVar `json:"output"`
	FileHash string          `json:"fileHash"`
	Option   ipfs.ReadOption `json:"option"`
}

func (o *IPFSRead) Execute(ctx context.Context, e *exec.Executor) error {
	logger.
		WithField("FileHash", o.FileHash).
		Debugf("ipfs read op")
	defer logger.Debugf("ipfs read op finished")

	ipfsCli, err := stgglb.IPFSPool.Acquire()
	if err != nil {
		return fmt.Errorf("new ipfs client: %w", err)
	}
	defer stgglb.IPFSPool.Release(ipfsCli)

	file, err := ipfsCli.OpenRead(o.FileHash, o.Option)
	if err != nil {
		return fmt.Errorf("reading ipfs: %w", err)
	}
	defer file.Close()

	// Publish the stream, then block until the consumer closes it so that the
	// IPFS client is not released while the stream is still being read.
	fut := future.NewSetVoid()
	o.Output.Stream = io2.AfterReadClosedOnce(file, func(closer io.ReadCloser) {
		fut.SetVoid()
	})
	e.PutVars(o.Output)

	return fut.Wait(ctx)
}

func (o *IPFSRead) String() string {
	return fmt.Sprintf("IPFSRead %v -> %v", o.FileHash, o.Output.ID)
}

// IPFSWrite stores an input stream into IPFS and outputs the resulting file hash.
type IPFSWrite struct {
	Input    *exec.StreamVar `json:"input"`
	FileHash *exec.StringVar `json:"fileHash"`
}

func (o *IPFSWrite) Execute(ctx context.Context, e *exec.Executor) error {
	logger.
		WithField("Input", o.Input.ID).
		WithField("FileHashVar", o.FileHash.ID).
		Debugf("ipfs write op")

	ipfsCli, err := stgglb.IPFSPool.Acquire()
	if err != nil {
		return fmt.Errorf("new ipfs client: %w", err)
	}
	defer stgglb.IPFSPool.Release(ipfsCli)

	// Wait for the upstream op to provide the input stream.
	err = e.BindVars(ctx, o.Input)
	if err != nil {
		return err
	}
	defer o.Input.Stream.Close()

	o.FileHash.Value, err = ipfsCli.CreateFile(o.Input.Stream)
	if err != nil {
		return fmt.Errorf("creating ipfs file: %w", err)
	}
	e.PutVars(o.FileHash)

	return nil
}

func (o *IPFSWrite) String() string {
	return fmt.Sprintf("IPFSWrite %v -> %v", o.Input.ID, o.FileHash.ID)
}

// IPFSReadType is the DAG node type that generates an IPFSRead op.
type IPFSReadType struct {
	FileHash string
	Option   ipfs.ReadOption
}

func (t *IPFSReadType) InitNode(node *dag.Node) {
	dag.NodeNewOutputStream(node, &ioswitchlrc.VarProps{})
}

func (t *IPFSReadType) GenerateOp(n *dag.Node) (exec.Op, error) {
	return &IPFSRead{
		Output:   n.OutputStreams[0].Var,
		FileHash: t.FileHash,
		Option:   t.Option,
	}, nil
}

func (t *IPFSReadType) String(node *dag.Node) string {
	return fmt.Sprintf("IPFSRead[%s,%v+%v]%v%v", t.FileHash, t.Option.Offset, t.Option.Length, formatStreamIO(node), formatValueIO(node))
}

// IPFSWriteType is the DAG node type that generates an IPFSWrite op.
type IPFSWriteType struct {
	FileHashStoreKey string
	Range            exec.Range
}

func (t *IPFSWriteType) InitNode(node *dag.Node) {
	dag.NodeDeclareInputStream(node, 1)
	dag.NodeNewOutputValue(node, dag.StringValueVar, &ioswitchlrc.VarProps{})
}

func (t *IPFSWriteType) GenerateOp(op *dag.Node) (exec.Op, error) {
	return &IPFSWrite{
		Input:    op.InputStreams[0].Var,
		FileHash: op.OutputValues[0].Var.(*exec.StringVar),
	}, nil
}

func (t *IPFSWriteType) String(node *dag.Node) string {
	return fmt.Sprintf("IPFSWrite[%s,%v+%v]%v%v", t.FileHashStoreKey, t.Range.Offset, t.Range.Length, formatStreamIO(node), formatValueIO(node))
}
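The two ops above wrap the repository's internal IPFSPool client, whose full API is not shown in this file. For orientation only, below is a minimal standalone sketch of the same read-then-write flow against a local IPFS node, using the public github.com/ipfs/go-ipfs-api client instead of the internal pool; the node address and the file hash are placeholders, and this is not how the ops themselves obtain or manage their client.

package main

import (
	"fmt"

	shell "github.com/ipfs/go-ipfs-api"
)

func main() {
	// Assumes an IPFS daemon listening on the default local API port.
	sh := shell.NewShell("localhost:5001")

	// Read: open a stream for an existing file hash (placeholder CID),
	// analogous to what IPFSRead does via ipfsCli.OpenRead.
	rd, err := sh.Cat("QmExampleHash")
	if err != nil {
		panic(fmt.Errorf("reading ipfs: %w", err))
	}
	defer rd.Close()

	// Write: feed the stream back into IPFS and obtain the new file hash,
	// analogous to what IPFSWrite does via ipfsCli.CreateFile.
	hash, err := sh.Add(rd)
	if err != nil {
		panic(fmt.Errorf("creating ipfs file: %w", err))
	}
	fmt.Println("new file hash:", hash)
}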

This project aims to turn inter-cloud (JointCloud) storage into public infrastructure, so that individuals and enterprises can use efficient inter-cloud storage services with a low barrier to entry (simply install the out-of-the-box inter-cloud storage client, with no need to deploy any other components), while also allowing users to flexibly and conveniently customize the details of the inter-cloud storage functionality.