
chunked.go 4.4 kB

package ops2

import (
    "context"
    "fmt"
    "io"

    "github.com/samber/lo"
    "gitlink.org.cn/cloudream/common/pkgs/future"
    "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
    "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
    "gitlink.org.cn/cloudream/common/pkgs/ioswitch/utils"
    "gitlink.org.cn/cloudream/common/utils/io2"
    "golang.org/x/sync/semaphore"
)

func init() {
    exec.UseOp[*ChunkedSplit]()
    exec.UseOp[*ChunkedJoin]()
}

// ChunkedSplit splits one input stream into multiple output streams,
// dispatching fixed-size chunks to the outputs in turn.
type ChunkedSplit struct {
    Input        *exec.StreamVar   `json:"input"`
    Outputs      []*exec.StreamVar `json:"outputs"`
    ChunkSize    int               `json:"chunkSize"`
    PaddingZeros bool              `json:"paddingZeros"`
}

func (o *ChunkedSplit) Execute(ctx context.Context, e *exec.Executor) error {
    err := e.BindVars(ctx, o.Input)
    if err != nil {
        return err
    }
    defer o.Input.Stream.Close()

    outputs := io2.ChunkedSplit(o.Input.Stream, o.ChunkSize, len(o.Outputs), io2.ChunkedSplitOption{
        PaddingZeros: o.PaddingZeros,
    })

    // Hold one semaphore slot per output; each slot is released when the
    // corresponding output stream has been fully read and closed.
    sem := semaphore.NewWeighted(int64(len(outputs)))
    for i := range outputs {
        sem.Acquire(ctx, 1)

        o.Outputs[i].Stream = io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {
            sem.Release(1)
        })
    }
    exec.PutArrayVars(e, o.Outputs)

    // Block until every output stream has been consumed.
    return sem.Acquire(ctx, int64(len(outputs)))
}

func (o *ChunkedSplit) String() string {
    return fmt.Sprintf(
        "ChunkedSplit(chunkSize=%v, paddingZeros=%v), %v -> (%v)",
        o.ChunkSize,
        o.PaddingZeros,
        o.Input.ID,
        utils.FormatVarIDs(o.Outputs),
    )
}

// ChunkedJoin merges multiple input streams back into a single output stream,
// reading one chunk from each input in order.
type ChunkedJoin struct {
    Inputs    []*exec.StreamVar `json:"inputs"`
    Output    *exec.StreamVar   `json:"output"`
    ChunkSize int               `json:"chunkSize"`
}

func (o *ChunkedJoin) Execute(ctx context.Context, e *exec.Executor) error {
    err := exec.BindArrayVars(e, ctx, o.Inputs)
    if err != nil {
        return err
    }

    var strReaders []io.Reader
    for _, s := range o.Inputs {
        strReaders = append(strReaders, s.Stream)
    }
    defer func() {
        for _, str := range o.Inputs {
            str.Stream.Close()
        }
    }()

    fut := future.NewSetVoid()
    o.Output.Stream = io2.AfterReadClosedOnce(io2.BufferedChunkedJoin(strReaders, o.ChunkSize), func(closer io.ReadCloser) {
        fut.SetVoid()
    })
    e.PutVars(o.Output)

    // Block until the joined output stream has been fully read and closed.
    return fut.Wait(ctx)
}

func (o *ChunkedJoin) String() string {
    return fmt.Sprintf(
        "ChunkedJoin(chunkSize=%v), (%v) -> %v",
        o.ChunkSize,
        utils.FormatVarIDs(o.Inputs),
        o.Output.ID,
    )
}

// ChunkedSplitNode is the plan-graph node that generates a ChunkedSplit op.
type ChunkedSplitNode struct {
    dag.NodeBase
    ChunkSize int
}

func (b *GraphNodeBuilder) NewChunkedSplit(chunkSize int) *ChunkedSplitNode {
    node := &ChunkedSplitNode{
        ChunkSize: chunkSize,
    }
    b.AddNode(node)
    return node
}

// Split connects the input stream to this node and declares cnt output sub-streams.
func (t *ChunkedSplitNode) Split(input *dag.StreamVar, cnt int) {
    t.InputStreams().EnsureSize(1)
    input.Connect(t, 0)
    t.OutputStreams().Resize(cnt)
    for i := 0; i < cnt; i++ {
        t.OutputStreams().Setup(t, t.Graph().NewStreamVar(), i)
    }
}

func (t *ChunkedSplitNode) SubStream(idx int) *dag.StreamVar {
    return t.OutputStreams().Get(idx)
}

func (t *ChunkedSplitNode) SplitCount() int {
    return t.OutputStreams().Len()
}

func (t *ChunkedSplitNode) GenerateOp() (exec.Op, error) {
    return &ChunkedSplit{
        Input: t.InputStreams().Get(0).Var,
        Outputs: lo.Map(t.OutputStreams().RawArray(), func(v *dag.StreamVar, idx int) *exec.StreamVar {
            return v.Var
        }),
        ChunkSize:    t.ChunkSize,
        PaddingZeros: true,
    }, nil
}

// func (t *ChunkedSplitNode) String() string {
//     return fmt.Sprintf("ChunkedSplit[%v]%v%v", t.ChunkSize, formatStreamIO(node), formatValueIO(node))
// }

// ChunkedJoinNode is the plan-graph node that generates a ChunkedJoin op.
type ChunkedJoinNode struct {
    dag.NodeBase
    ChunkSize int
}

func (b *GraphNodeBuilder) NewChunkedJoin(chunkSize int) *ChunkedJoinNode {
    node := &ChunkedJoinNode{
        ChunkSize: chunkSize,
    }
    b.AddNode(node)
    node.OutputStreams().SetupNew(node, b.Graph.NewStreamVar())
    return node
}

// AddInput connects one more input stream to the join.
func (t *ChunkedJoinNode) AddInput(str *dag.StreamVar) {
    idx := t.InputStreams().EnlargeOne()
    str.Connect(t, idx)
}

// Joined returns the single re-assembled output stream variable.
func (t *ChunkedJoinNode) Joined() *dag.StreamVar {
    return t.OutputStreams().Get(0)
}

func (t *ChunkedJoinNode) GenerateOp() (exec.Op, error) {
    return &ChunkedJoin{
        Inputs: lo.Map(t.InputStreams().RawArray(), func(v *dag.StreamVar, idx int) *exec.StreamVar {
            return v.Var
        }),
        Output:    t.OutputStreams().Get(0).Var,
        ChunkSize: t.ChunkSize,
    }, nil
}

// func (t *ChunkedJoinType) String() string {
//     return fmt.Sprintf("ChunkedJoin[%v]%v%v", t.ChunkSize, formatStreamIO(node), formatValueIO(node))
// }

This project aims to turn JointCloud (inter-cloud) storage into public infrastructure, so that individuals and enterprises can use efficient JointCloud storage services with a low barrier to entry (simply install the out-of-the-box JointCloud storage client, with no need to worry about deploying any other components), while also letting users flexibly and conveniently customize the details of the storage features.