You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

parser.go 15 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586
  1. package parser
  2. import (
  3. "fmt"
  4. "math"
  5. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
  6. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
  7. "gitlink.org.cn/cloudream/common/pkgs/ioswitch/plan"
  8. cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
  9. "gitlink.org.cn/cloudream/common/utils/lo2"
  10. "gitlink.org.cn/cloudream/common/utils/math2"
  11. "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
  12. "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
  13. "gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
  14. )
// DefaultParser translates a FromTo description into an executable I/O
// plan, applying the configured erasure-coding parameters.
type DefaultParser struct {
	EC cdssdk.ECRedundancy // erasure-coding parameters (K, N, ChunkSize are read during parsing)
}
  18. func NewParser(ec cdssdk.ECRedundancy) *DefaultParser {
  19. return &DefaultParser{
  20. EC: ec,
  21. }
  22. }
// IndexedStream pairs a stream variable with the data index it carries.
// A DataIndex of -1 denotes the complete (un-split) file stream; indexes
// 0..K-1 are data blocks and K..N-1 are parity blocks.
type IndexedStream struct {
	Stream    *dag.StreamVar
	DataIndex int
}
// ParseContext carries all mutable state used during a single Parse call.
type ParseContext struct {
	Ft  ioswitch2.FromTo
	DAG *ops2.GraphNodeBuilder
	// ToNodes maps each To to the DAG node that was created for it.
	ToNodes map[ioswitch2.To]ops2.ToNode
	// IndexedStreams lists every generated stream together with its data index.
	IndexedStreams []IndexedStream
	// StreamRange is the range the Froms must open so that the data needed by
	// every To can be produced. It is relative to the whole file, and both
	// bounds are rounded to multiples of the stripe size, so the upper bound
	// may exceed the file size.
	StreamRange exec.Range
}
  36. func (p *DefaultParser) Parse(ft ioswitch2.FromTo, blder *exec.PlanBuilder) error {
  37. ctx := ParseContext{
  38. Ft: ft,
  39. DAG: ops2.NewGraphNodeBuilder(),
  40. ToNodes: make(map[ioswitch2.To]ops2.ToNode),
  41. }
  42. // 分成两个阶段:
  43. // 1. 基于From和To生成更多指令,初步匹配to的需求
  44. // 计算一下打开流的范围
  45. p.calcStreamRange(&ctx)
  46. err := p.extend(&ctx)
  47. if err != nil {
  48. return err
  49. }
  50. // 2. 优化上一步生成的指令
  51. // 对于删除指令的优化,需要反复进行,直到没有变化为止。
  52. // 从目前实现上来说不会死循环
  53. for {
  54. opted := false
  55. if p.removeUnusedJoin(&ctx) {
  56. opted = true
  57. }
  58. if p.removeUnusedMultiplyOutput(&ctx) {
  59. opted = true
  60. }
  61. if p.removeUnusedSplit(&ctx) {
  62. opted = true
  63. }
  64. if p.omitSplitJoin(&ctx) {
  65. opted = true
  66. }
  67. if !opted {
  68. break
  69. }
  70. }
  71. // 确定指令执行位置的过程,也需要反复进行,直到没有变化为止。
  72. for p.pin(&ctx) {
  73. }
  74. // 下面这些只需要执行一次,但需要按顺序
  75. p.dropUnused(&ctx)
  76. p.storeIPFSWriteResult(&ctx)
  77. p.generateClone(&ctx)
  78. p.generateRange(&ctx)
  79. return plan.Generate(ctx.DAG.Graph, blder)
  80. }
  81. func (p *DefaultParser) findOutputStream(ctx *ParseContext, streamIndex int) *dag.StreamVar {
  82. var ret *dag.StreamVar
  83. for _, s := range ctx.IndexedStreams {
  84. if s.DataIndex == streamIndex {
  85. ret = s.Stream
  86. break
  87. }
  88. }
  89. return ret
  90. }
  91. // 计算输入流的打开范围。会把流的范围按条带大小取整
  92. func (p *DefaultParser) calcStreamRange(ctx *ParseContext) {
  93. stripSize := int64(p.EC.ChunkSize * p.EC.K)
  94. rng := exec.Range{
  95. Offset: math.MaxInt64,
  96. }
  97. for _, to := range ctx.Ft.Toes {
  98. if to.GetDataIndex() == -1 {
  99. toRng := to.GetRange()
  100. rng.ExtendStart(math2.Floor(toRng.Offset, stripSize))
  101. if toRng.Length != nil {
  102. rng.ExtendEnd(math2.Ceil(toRng.Offset+*toRng.Length, stripSize))
  103. } else {
  104. rng.Length = nil
  105. }
  106. } else {
  107. toRng := to.GetRange()
  108. blkStartIndex := math2.FloorDiv(toRng.Offset, int64(p.EC.ChunkSize))
  109. rng.ExtendStart(blkStartIndex * stripSize)
  110. if toRng.Length != nil {
  111. blkEndIndex := math2.CeilDiv(toRng.Offset+*toRng.Length, int64(p.EC.ChunkSize))
  112. rng.ExtendEnd(blkEndIndex * stripSize)
  113. } else {
  114. rng.Length = nil
  115. }
  116. }
  117. }
  118. ctx.StreamRange = rng
  119. }
// extend populates the DAG with the nodes needed to satisfy every To:
// it builds a node for each From, splits complete-file streams into K block
// streams, runs EC multiply when K distinct block streams are available
// (also re-joining the first K blocks into a complete-file stream), and
// finally wires an input stream into every To node.
func (p *DefaultParser) extend(ctx *ParseContext) error {
	for _, fr := range ctx.Ft.Froms {
		frNode, err := p.buildFromNode(ctx, fr)
		if err != nil {
			return err
		}
		ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{
			Stream:    frNode.Output().Var,
			DataIndex: fr.GetDataIndex(),
		})
		// For a complete-file From, also generate a Split node so the
		// individual K data blocks become available.
		if fr.GetDataIndex() == -1 {
			splitNode := ctx.DAG.NewChunkedSplit(p.EC.ChunkSize)
			splitNode.Split(frNode.Output().Var, p.EC.K)
			for i := 0; i < p.EC.K; i++ {
				ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{
					Stream:    splitNode.SubStream(i),
					DataIndex: i,
				})
			}
		}
	}
	// If K distinct block streams exist, generate a Multiply node, and a
	// Join node over the resulting streams.
	ecInputStrs := make(map[int]*dag.StreamVar)
	for _, s := range ctx.IndexedStreams {
		if s.DataIndex >= 0 && ecInputStrs[s.DataIndex] == nil {
			ecInputStrs[s.DataIndex] = s.Stream
			if len(ecInputStrs) == p.EC.K {
				break
			}
		}
	}
	if len(ecInputStrs) == p.EC.K {
		mulNode := ctx.DAG.NewECMultiply(p.EC)
		// NOTE(review): map iteration order is random; this assumes AddInput
		// is order-insensitive because the block index is passed explicitly —
		// confirm against ECMultiplyNode's implementation.
		for i, s := range ecInputStrs {
			mulNode.AddInput(s, i)
		}
		for i := 0; i < p.EC.N; i++ {
			ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{
				Stream:    mulNode.NewOutput(i),
				DataIndex: i,
			})
		}
		joinNode := ctx.DAG.NewChunkedJoin(p.EC.ChunkSize)
		for i := 0; i < p.EC.K; i++ {
			// A stream for every index 0..K-1 is guaranteed to exist here,
			// because len(ecInputStrs) == K was just verified.
			joinNode.AddInput(p.findOutputStream(ctx, i))
		}
		ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{
			Stream:    joinNode.Joined(),
			DataIndex: -1,
		})
	}
	// Find an input stream for every To.
	for _, to := range ctx.Ft.Toes {
		toNode, err := p.buildToNode(ctx, to)
		if err != nil {
			return err
		}
		ctx.ToNodes[to] = toNode
		str := p.findOutputStream(ctx, to.GetDataIndex())
		if str == nil {
			return fmt.Errorf("no output stream found for data index %d", to.GetDataIndex())
		}
		toNode.SetInput(str)
	}
	return nil
}
// buildFromNode creates the DAG node that produces the source stream for the
// given From. Open ranges are derived from ctx.StreamRange: repRange addresses
// the complete file, blkRange addresses a single EC block (the whole-file
// offset scaled down by the ChunkSize*K stripe layout).
func (p *DefaultParser) buildFromNode(ctx *ParseContext, f ioswitch2.From) (ops2.FromNode, error) {
	var repRange exec.Range
	var blkRange exec.Range
	repRange.Offset = ctx.StreamRange.Offset
	// Each stripe of ChunkSize*K file bytes maps to ChunkSize bytes in one block.
	blkRange.Offset = ctx.StreamRange.Offset / int64(p.EC.ChunkSize*p.EC.K) * int64(p.EC.ChunkSize)
	if ctx.StreamRange.Length != nil {
		repRngLen := *ctx.StreamRange.Length
		repRange.Length = &repRngLen
		blkRngLen := *ctx.StreamRange.Length / int64(p.EC.ChunkSize*p.EC.K) * int64(p.EC.ChunkSize)
		blkRange.Length = &blkRngLen
	}
	switch f := f.(type) {
	case *ioswitch2.FromNode:
		t := ctx.DAG.NewShardRead(f.Storage.StorageID, types.NewOpen(f.FileHash))
		// DataIndex == -1 means the complete file; otherwise a single block.
		if f.DataIndex == -1 {
			t.Open.WithNullableLength(repRange.Offset, repRange.Length)
		} else {
			t.Open.WithNullableLength(blkRange.Offset, blkRange.Length)
		}
		// Pin the read to the node that owns the shard, choosing the worker
		// type by how the node is addressed.
		switch typeInfo := f.Node.Address.(type) {
		case *cdssdk.HttpAddressInfo:
			t.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Node: f.Node})
			t.Env().Pinned = true
		case *cdssdk.GRPCAddressInfo:
			t.Env().ToEnvWorker(&ioswitch2.AgentWorker{Node: f.Node})
			t.Env().Pinned = true
		default:
			return nil, fmt.Errorf("unsupported node address type %T", typeInfo)
		}
		return t, nil
	case *ioswitch2.FromDriver:
		n := ctx.DAG.NewFromDriver(f.Handle)
		n.Env().ToEnvDriver()
		n.Env().Pinned = true
		// Pass the computed range to the driver as a hint.
		// NOTE(review): assumes f.Handle.RangeHint is already non-nil — confirm.
		if f.DataIndex == -1 {
			f.Handle.RangeHint.Offset = repRange.Offset
			f.Handle.RangeHint.Length = repRange.Length
		} else {
			f.Handle.RangeHint.Offset = blkRange.Offset
			f.Handle.RangeHint.Length = blkRange.Length
		}
		return n, nil
	default:
		return nil, fmt.Errorf("unsupported from type %T", f)
	}
}
  234. func (p *DefaultParser) buildToNode(ctx *ParseContext, t ioswitch2.To) (ops2.ToNode, error) {
  235. switch t := t.(type) {
  236. case *ioswitch2.ToNode:
  237. n := ctx.DAG.NewShardWrite(t.FileHashStoreKey)
  238. n.Env().ToEnvWorker(&ioswitch2.AgentWorker{Node: t.Node})
  239. n.Env().Pinned = true
  240. return n, nil
  241. case *ioswitch2.ToDriver:
  242. n := ctx.DAG.NewToDriver(t.Handle)
  243. n.Env().ToEnvDriver()
  244. n.Env().Pinned = true
  245. return n, nil
  246. default:
  247. return nil, fmt.Errorf("unsupported to type %T", t)
  248. }
  249. }
  250. // 删除输出流未被使用的Join指令
  251. func (p *DefaultParser) removeUnusedJoin(ctx *ParseContext) bool {
  252. changed := false
  253. dag.WalkOnlyType[*ops2.ChunkedJoinNode](ctx.DAG.Graph, func(node *ops2.ChunkedJoinNode) bool {
  254. if node.InputStreams().Len() > 0 {
  255. return true
  256. }
  257. node.RemoveAllInputs()
  258. ctx.DAG.RemoveNode(node)
  259. return true
  260. })
  261. return changed
  262. }
// removeUnusedMultiplyOutput drops ECMultiply output streams that nobody
// consumes; if no output remains afterwards, the whole node is removed.
// It reports whether anything changed.
func (p *DefaultParser) removeUnusedMultiplyOutput(ctx *ParseContext) bool {
	changed := false
	dag.WalkOnlyType[*ops2.ECMultiplyNode](ctx.DAG.Graph, func(node *ops2.ECMultiplyNode) bool {
		outArr := node.OutputStreams().RawArray()
		for i2, out := range outArr {
			if out.To().Len() > 0 {
				continue
			}
			// Mark unused entries for removal: nil in the stream array and the
			// sentinel -2 in the parallel index array; both are compacted below.
			outArr[i2] = nil
			node.OutputIndexes[i2] = -2
			changed = true
		}
		node.OutputStreams().SetRawArray(lo2.RemoveAllDefault(outArr))
		node.OutputIndexes = lo2.RemoveAll(node.OutputIndexes, -2)
		// If every output stream was removed, delete the node itself.
		if node.OutputStreams().Len() == 0 {
			node.RemoveAllInputs()
			ctx.DAG.RemoveNode(node)
			changed = true
		}
		return true
	})
	return changed
}
  288. // 删除未使用的Split指令
  289. func (p *DefaultParser) removeUnusedSplit(ctx *ParseContext) bool {
  290. changed := false
  291. dag.WalkOnlyType[*ops2.ChunkedSplitNode](ctx.DAG.Graph, func(typ *ops2.ChunkedSplitNode) bool {
  292. // Split出来的每一个流都没有被使用,才能删除这个指令
  293. for _, out := range typ.OutputStreams().RawArray() {
  294. if out.To().Len() > 0 {
  295. return true
  296. }
  297. }
  298. typ.Clear()
  299. ctx.DAG.RemoveNode(typ)
  300. changed = true
  301. return true
  302. })
  303. return changed
  304. }
// omitSplitJoin elides a Split whose outputs are consumed entirely and
// exclusively by a single Join: F->Split->Join->T collapses to F->T.
// It reports whether any Split/Join pair was elided.
func (p *DefaultParser) omitSplitJoin(ctx *ParseContext) bool {
	changed := false
	dag.WalkOnlyType[*ops2.ChunkedSplitNode](ctx.DAG.Graph, func(splitNode *ops2.ChunkedSplitNode) bool {
		// Every output of the Split must have exactly one destination, and
		// all outputs must share that same destination node.
		var dstNode dag.Node
		for _, out := range splitNode.OutputStreams().RawArray() {
			if out.To().Len() != 1 {
				return true
			}
			if dstNode == nil {
				dstNode = out.To().Get(0).Node
			} else if dstNode != out.To().Get(0).Node {
				return true
			}
		}
		if dstNode == nil {
			return true
		}
		// The shared destination must be a Join node.
		joinNode, ok := dstNode.(*ops2.ChunkedJoinNode)
		if !ok {
			return true
		}
		// The Join's inputs must all come from this Split. Since all Split
		// outputs were shown above to target the same node, comparing the
		// input/output counts is sufficient.
		if joinNode.InputStreams().Len() != splitNode.OutputStreams().Len() {
			return true
		}
		// All conditions hold: rewire every consumer of the Join's output to
		// the Split's input stream, i.e. F->Split->Join->T becomes F->T.
		splitInput := splitNode.InputStreams().Get(0)
		for _, to := range joinNode.Joined().To().RawArray() {
			splitInput.Connect(to.Node, to.SlotIndex)
		}
		splitInput.Disconnect(splitNode, 0)
		// Then delete both nodes.
		ctx.DAG.RemoveNode(joinNode)
		ctx.DAG.RemoveNode(splitNode)
		changed = true
		return true
	})
	return changed
}
// pin fixes the execution environment of not-yet-pinned nodes from the
// locations of their stream endpoints. To-side nodes always have a fixed
// location, and those locations spread through the DAG as pin is iterated,
// so in theory no node's location stays undetermined forever. It reports
// whether any node's environment changed; callers iterate until false.
func (p *DefaultParser) pin(ctx *ParseContext) bool {
	changed := false
	ctx.DAG.Walk(func(node dag.Node) bool {
		if node.Env().Pinned {
			return true
		}
		// First try the destination side: if every known consumer runs in
		// the same environment, adopt that environment.
		var toEnv *dag.NodeEnv
		for _, out := range node.OutputStreams().RawArray() {
			for _, to := range out.To().RawArray() {
				if to.Node.Env().Type == dag.EnvUnknown {
					continue
				}
				if toEnv == nil {
					toEnv = to.Node.Env()
				} else if !toEnv.Equals(to.Node.Env()) {
					// NOTE(review): this break only exits the inner loop, so a
					// later output stream can re-set toEnv and mask the
					// conflict found here — confirm this is intended.
					toEnv = nil
					break
				}
			}
		}
		if toEnv != nil {
			if !node.Env().Equals(toEnv) {
				changed = true
			}
			*node.Env() = *toEnv
			return true
		}
		// Otherwise pin by where the input streams originate.
		var fromEnv *dag.NodeEnv
		for _, in := range node.InputStreams().RawArray() {
			if in.From().Node.Env().Type == dag.EnvUnknown {
				continue
			}
			if fromEnv == nil {
				fromEnv = in.From().Node.Env()
			} else if !fromEnv.Equals(in.From().Node.Env()) {
				// Producers disagree; leave this node unpinned for now.
				fromEnv = nil
				break
			}
		}
		if fromEnv != nil {
			if !node.Env().Equals(fromEnv) {
				changed = true
			}
			*node.Env() = *fromEnv
		}
		return true
	})
	return changed
}
  402. // 对于所有未使用的流,增加Drop指令
  403. func (p *DefaultParser) dropUnused(ctx *ParseContext) {
  404. ctx.DAG.Walk(func(node dag.Node) bool {
  405. for _, out := range node.OutputStreams().RawArray() {
  406. if out.To().Len() == 0 {
  407. n := ctx.DAG.NewDropStream()
  408. *n.Env() = *node.Env()
  409. n.SetInput(out)
  410. }
  411. }
  412. return true
  413. })
  414. }
  415. // 为IPFS写入指令存储结果
  416. func (p *DefaultParser) storeIPFSWriteResult(ctx *ParseContext) {
  417. dag.WalkOnlyType[*ops2.ShardWriteNode](ctx.DAG.Graph, func(n *ops2.ShardWriteNode) bool {
  418. if n.FileHashStoreKey == "" {
  419. return true
  420. }
  421. storeNode := ctx.DAG.NewStore()
  422. storeNode.Env().ToEnvDriver()
  423. storeNode.Store(n.FileHashStoreKey, n.FileHashVar())
  424. return true
  425. })
  426. }
  427. // 生成Range指令。StreamRange可能超过文件总大小,但Range指令会在数据量不够时不报错而是正常返回
  428. func (p *DefaultParser) generateRange(ctx *ParseContext) {
  429. for i := 0; i < len(ctx.Ft.Toes); i++ {
  430. to := ctx.Ft.Toes[i]
  431. toNode := ctx.ToNodes[to]
  432. toDataIdx := to.GetDataIndex()
  433. toRng := to.GetRange()
  434. if toDataIdx == -1 {
  435. n := ctx.DAG.NewRange()
  436. toInput := toNode.Input()
  437. *n.Env() = *toInput.Var.From().Node.Env()
  438. rnged := n.RangeStream(toInput.Var, exec.Range{
  439. Offset: toRng.Offset - ctx.StreamRange.Offset,
  440. Length: toRng.Length,
  441. })
  442. toInput.Var.Disconnect(toNode, toInput.Index)
  443. toNode.SetInput(rnged)
  444. } else {
  445. stripSize := int64(p.EC.ChunkSize * p.EC.K)
  446. blkStartIdx := ctx.StreamRange.Offset / stripSize
  447. blkStart := blkStartIdx * int64(p.EC.ChunkSize)
  448. n := ctx.DAG.NewRange()
  449. toInput := toNode.Input()
  450. *n.Env() = *toInput.Var.From().Node.Env()
  451. rnged := n.RangeStream(toInput.Var, exec.Range{
  452. Offset: toRng.Offset - blkStart,
  453. Length: toRng.Length,
  454. })
  455. toInput.Var.Disconnect(toNode, toInput.Index)
  456. toNode.SetInput(rnged)
  457. }
  458. }
  459. }
  460. // 生成Clone指令
  461. func (p *DefaultParser) generateClone(ctx *ParseContext) {
  462. ctx.DAG.Walk(func(node dag.Node) bool {
  463. for _, out := range node.OutputStreams().RawArray() {
  464. if out.To().Len() <= 1 {
  465. continue
  466. }
  467. c := ctx.DAG.NewCloneStream()
  468. *c.Env() = *node.Env()
  469. for _, to := range out.To().RawArray() {
  470. c.NewOutput().Connect(to.Node, to.SlotIndex)
  471. }
  472. out.To().Resize(0)
  473. c.SetInput(out)
  474. }
  475. for _, out := range node.OutputValues().RawArray() {
  476. if out.To().Len() <= 1 {
  477. continue
  478. }
  479. t := ctx.DAG.NewCloneValue()
  480. *t.Env() = *node.Env()
  481. for _, to := range out.To().RawArray() {
  482. t.NewOutput().Connect(to.Node, to.SlotIndex)
  483. }
  484. out.To().Resize(0)
  485. t.SetInput(out)
  486. }
  487. return true
  488. })
  489. }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。