package ops2

import (
    "fmt"
    "io"

    "gitlink.org.cn/cloudream/common/pkgs/future"
    "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
    "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
    "gitlink.org.cn/cloudream/common/pkgs/logger"
    cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
    "gitlink.org.cn/cloudream/common/utils/io2"
    "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
    "gitlink.org.cn/cloudream/storage/common/pkgs/storage/mgr"
    "gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
)

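// init registers the shard ops and the FileHashValue variable type with the exec package.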
func init() {
    exec.UseOp[*ShardRead]()
    exec.UseOp[*ShardWrite]()
    exec.UseVarValue[*FileHashValue]()
}

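// FileHashValue carries the hash of a file stored in a shard store between ops.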
type FileHashValue struct {
    Hash cdssdk.FileHash `json:"hash"`
}

func (v *FileHashValue) Clone() exec.VarValue {
    return &FileHashValue{Hash: v.Hash}
}

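// ShardRead reads a file from the shard store of the given storage and exposes it as an output stream.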
type ShardRead struct {
    Output    exec.VarID       `json:"output"`
    StorageID cdssdk.StorageID `json:"storageID"`
    Open      types.OpenOption `json:"option"`
}

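// Execute opens the file from the shard store, publishes it as the Output stream variable,
// and blocks until the consumer has finished reading and closed the stream.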
func (o *ShardRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
    logger.
        WithField("Open", o.Open.String()).
        Debugf("reading from shard store")
    defer logger.Debugf("reading from shard store finished")

    stgMgr, err := exec.GetValueByType[*mgr.Manager](ctx)
    if err != nil {
        return fmt.Errorf("getting storage manager: %w", err)
    }

    store, err := stgMgr.GetShardStore(o.StorageID)
    if err != nil {
        return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err)
    }

    file, err := store.Open(o.Open)
    if err != nil {
        return fmt.Errorf("opening shard store file: %w", err)
    }

    fut := future.NewSetVoid()
    e.PutVar(o.Output, &exec.StreamValue{
        Stream: io2.AfterReadClosedOnce(file, func(closer io.ReadCloser) {
            fut.SetVoid()
        }),
    })

    return fut.Wait(ctx.Context)
}

func (o *ShardRead) String() string {
    return fmt.Sprintf("ShardRead %v -> %v", o.Open.String(), o.Output)
}

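// ShardWrite writes its input stream into the shard store of the given storage and outputs the resulting file hash.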
type ShardWrite struct {
    Input     exec.VarID       `json:"input"`
    FileHash  exec.VarID       `json:"fileHash"`
    StorageID cdssdk.StorageID `json:"storageID"`
}

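// Execute streams the Input variable into the shard store and publishes the resulting
// file hash as the FileHash variable.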
func (o *ShardWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
    logger.
        WithField("Input", o.Input).
        WithField("FileHash", o.FileHash).
        Debugf("writing file to shard store")
    defer logger.Debugf("writing to shard store finished")

    stgMgr, err := exec.GetValueByType[*mgr.Manager](ctx)
    if err != nil {
        return fmt.Errorf("getting storage manager: %w", err)
    }

    store, err := stgMgr.GetShardStore(o.StorageID)
    if err != nil {
        return fmt.Errorf("getting shard store of storage %v: %w", o.StorageID, err)
    }

    input, err := exec.BindVar[*exec.StreamValue](e, ctx.Context, o.Input)
    if err != nil {
        return err
    }
    defer input.Stream.Close()

    fileInfo, err := store.Create(input.Stream)
    if err != nil {
        return fmt.Errorf("writing file to shard store: %w", err)
    }

    e.PutVar(o.FileHash, &FileHashValue{
        Hash: fileInfo.Hash,
    })
    return nil
}

func (o *ShardWrite) String() string {
    return fmt.Sprintf("ShardWrite %v -> %v", o.Input, o.FileHash)
}

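// ShardReadNode is the DAG node that plans a read from a shard store; GenerateOp turns it into a ShardRead op.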
type ShardReadNode struct {
    dag.NodeBase
    From      ioswitch2.From
    StorageID cdssdk.StorageID
    Open      types.OpenOption
}

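// NewShardRead adds a ShardReadNode with a single output stream slot to the graph.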
func (b *GraphNodeBuilder) NewShardRead(fr ioswitch2.From, stgID cdssdk.StorageID, open types.OpenOption) *ShardReadNode {
    node := &ShardReadNode{
        From:      fr,
        StorageID: stgID,
        Open:      open,
    }
    b.AddNode(node)

    node.OutputStreams().Init(node, 1)
    return node
}

func (t *ShardReadNode) GetFrom() ioswitch2.From {
    return t.From
}

func (t *ShardReadNode) Output() dag.StreamSlot {
    return dag.StreamSlot{
        Var:   t.OutputStreams().Get(0),
        Index: 0,
    }
}

func (t *ShardReadNode) GenerateOp() (exec.Op, error) {
    return &ShardRead{
        Output:    t.OutputStreams().Get(0).VarID,
        StorageID: t.StorageID,
        Open:      t.Open,
    }, nil
}

// func (t *IPFSReadType) String() string {
// 	return fmt.Sprintf("IPFSRead[%s,%v+%v]%v%v", t.FileHash, t.Option.Offset, t.Option.Length, formatStreamIO(node), formatValueIO(node))
// }

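// ShardWriteNode is the DAG node that plans a write into a shard store; GenerateOp turns it into a ShardWrite op.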
type ShardWriteNode struct {
    dag.NodeBase
    To               ioswitch2.To
    StorageID        cdssdk.StorageID
    FileHashStoreKey string
}

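// NewShardWrite adds a ShardWriteNode with one input stream slot and one output value slot (the file hash) to the graph.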
func (b *GraphNodeBuilder) NewShardWrite(to ioswitch2.To, stgID cdssdk.StorageID, fileHashStoreKey string) *ShardWriteNode {
    node := &ShardWriteNode{
        To:               to,
        StorageID:        stgID,
        FileHashStoreKey: fileHashStoreKey,
    }
    b.AddNode(node)

    node.InputStreams().Init(1)
    node.OutputValues().Init(node, 1)
    return node
}

func (t *ShardWriteNode) GetTo() ioswitch2.To {
    return t.To
}

func (t *ShardWriteNode) SetInput(input *dag.StreamVar) {
    input.To(t, 0)
}

func (t *ShardWriteNode) Input() dag.StreamSlot {
    return dag.StreamSlot{
        Var:   t.InputStreams().Get(0),
        Index: 0,
    }
}

func (t *ShardWriteNode) FileHashVar() *dag.ValueVar {
    return t.OutputValues().Get(0)
}

func (t *ShardWriteNode) GenerateOp() (exec.Op, error) {
    return &ShardWrite{
        Input:     t.InputStreams().Get(0).VarID,
        FileHash:  t.OutputValues().Get(0).VarID,
        StorageID: t.StorageID,
    }, nil
}

// func (t *IPFSWriteType) String() string {
// 	return fmt.Sprintf("IPFSWrite[%s,%v+%v]%v%v", t.FileHashStoreKey, t.Range.Offset, t.Range.Length, formatStreamIO(node), formatValueIO(node))
// }