
ipfs.go

package ops2

import (
	"context"
	"fmt"
	"io"

	"gitlink.org.cn/cloudream/common/pkgs/future"
	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
	"gitlink.org.cn/cloudream/common/pkgs/ipfs"
	"gitlink.org.cn/cloudream/common/pkgs/logger"
	"gitlink.org.cn/cloudream/common/utils/io2"

	stgglb "gitlink.org.cn/cloudream/storage/common/globals"
	"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
)

func init() {
	// Register both ops so the executor can deserialize and run them.
	exec.UseOp[*IPFSRead]()
	exec.UseOp[*IPFSWrite]()
}

// IPFSRead reads a file from IPFS by its hash and publishes it as an output stream.
type IPFSRead struct {
	Output   *exec.StreamVar `json:"output"`
	FileHash string          `json:"fileHash"`
	Option   ipfs.ReadOption `json:"option"`
}

func (o *IPFSRead) Execute(ctx context.Context, e *exec.Executor) error {
	logger.
		WithField("FileHash", o.FileHash).
		Debugf("ipfs read op")
	defer logger.Debugf("ipfs read op finished")

	ipfsCli, err := stgglb.IPFSPool.Acquire()
	if err != nil {
		return fmt.Errorf("new ipfs client: %w", err)
	}
	defer stgglb.IPFSPool.Release(ipfsCli)

	file, err := ipfsCli.OpenRead(o.FileHash, o.Option)
	if err != nil {
		return fmt.Errorf("reading ipfs: %w", err)
	}
	defer file.Close()

	// Hand the stream to the downstream consumer, then block until it has
	// closed the stream, so the pooled IPFS client and file stay open for
	// the entire read.
	fut := future.NewSetVoid()
	o.Output.Stream = io2.AfterReadClosedOnce(file, func(closer io.ReadCloser) {
		fut.SetVoid()
	})
	e.PutVars(o.Output)

	return fut.Wait(ctx)
}

// IPFSWrite uploads an input stream to IPFS and publishes the resulting file hash.
type IPFSWrite struct {
	Input    *exec.StreamVar `json:"input"`
	FileHash *exec.StringVar `json:"fileHash"`
}

func (o *IPFSWrite) Execute(ctx context.Context, e *exec.Executor) error {
	logger.
		WithField("Input", o.Input.ID).
		WithField("FileHashVar", o.FileHash.ID).
		Debugf("ipfs write op")

	ipfsCli, err := stgglb.IPFSPool.Acquire()
	if err != nil {
		return fmt.Errorf("new ipfs client: %w", err)
	}
	defer stgglb.IPFSPool.Release(ipfsCli)

	// Wait until the input stream variable has been produced upstream.
	err = e.BindVars(ctx, o.Input)
	if err != nil {
		return err
	}
	defer o.Input.Stream.Close()

	o.FileHash.Value, err = ipfsCli.CreateFile(o.Input.Stream)
	if err != nil {
		return fmt.Errorf("creating ipfs file: %w", err)
	}
	e.PutVars(o.FileHash)

	return nil
}

// IPFSReadType is the DAG node type that generates an IPFSRead op.
type IPFSReadType struct {
	FileHash string
	Option   ipfs.ReadOption
}

func (t *IPFSReadType) InitNode(node *dag.Node) {
	dag.NodeNewOutputStream(node, &ioswitch2.VarProps{})
}

func (t *IPFSReadType) GenerateOp(n *dag.Node) (exec.Op, error) {
	return &IPFSRead{
		Output:   n.OutputStreams[0].Var,
		FileHash: t.FileHash,
		Option:   t.Option,
	}, nil
}

func (t *IPFSReadType) String(node *dag.Node) string {
	return fmt.Sprintf("IPFSRead[%s,%v+%v]%v%v", t.FileHash, t.Option.Offset, t.Option.Length, formatStreamIO(node), formatValueIO(node))
}

// IPFSWriteType is the DAG node type that generates an IPFSWrite op.
type IPFSWriteType struct {
	FileHashStoreKey string
	Range            exec.Range
}

func (t *IPFSWriteType) InitNode(node *dag.Node) {
	dag.NodeDeclareInputStream(node, 1)
	dag.NodeNewOutputValue(node, dag.StringValueVar, &ioswitch2.VarProps{})
}

func (t *IPFSWriteType) GenerateOp(op *dag.Node) (exec.Op, error) {
	return &IPFSWrite{
		Input:    op.InputStreams[0].Var,
		FileHash: op.OutputValues[0].Var.(*exec.StringVar),
	}, nil
}

func (t *IPFSWriteType) String(node *dag.Node) string {
	return fmt.Sprintf("IPFSWrite[%s,%v+%v]%v%v", t.FileHashStoreKey, t.Range.Offset, t.Range.Length, formatStreamIO(node), formatValueIO(node))
}
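The least obvious part of IPFSRead.Execute is that it does not return once the stream has been handed over: it blocks on fut.Wait(ctx) until the downstream consumer closes the stream, which keeps the pooled IPFS client and the open file alive for the whole read. The standalone sketch below reproduces that close-notification pattern using only the Go standard library; afterReadClosedOnce and the done channel are illustrative stand-ins for io2.AfterReadClosedOnce and the future package, not the real cloudream APIs.

// A minimal sketch of the close-notification pattern used in IPFSRead.Execute.
// The producer wraps a reader so that Close fires a callback exactly once,
// then blocks until the consumer has closed the stream.
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// notifyCloser invokes onClose the first time Close is called.
type notifyCloser struct {
	io.Reader
	closeOnce sync.Once
	onClose   func()
}

func (c *notifyCloser) Close() error {
	c.closeOnce.Do(c.onClose)
	return nil
}

// afterReadClosedOnce plays the role of io2.AfterReadClosedOnce in ipfs.go (illustrative only).
func afterReadClosedOnce(r io.Reader, onClose func()) io.ReadCloser {
	return &notifyCloser{Reader: r, onClose: onClose}
}

func main() {
	done := make(chan struct{}) // stands in for future.NewSetVoid / fut.Wait

	// Producer side: hand out the wrapped stream, then wait for the consumer to close it.
	stream := afterReadClosedOnce(strings.NewReader("file contents"), func() {
		close(done)
	})

	// Consumer side: read the stream and close it when finished.
	go func() {
		data, _ := io.ReadAll(stream)
		fmt.Printf("read %d bytes\n", len(data))
		stream.Close()
	}()

	<-done // the producer unblocks only after the consumer closes the stream
	fmt.Println("stream fully consumed; producer may now release its resources")
}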

This project aims to turn inter-cloud storage into public infrastructure, so that individuals and enterprises can use efficient inter-cloud storage services with a low barrier to entry (just install the out-of-the-box inter-cloud storage client, with no need to deploy any other components), while still letting users flexibly and conveniently customize the details of the inter-cloud storage functionality.