
client_command_ec.go 14 kB

package services

// TODO: split this logic out into the services package.

import (
    "bytes"
    "fmt"
    "io"
    "math/rand"
    "os"
    "path/filepath"
    "sync"
    "time"

    "gitlink.org.cn/cloudream/storage-client/internal/config"
    "gitlink.org.cn/cloudream/storage-common/pkgs/ec"
    "gitlink.org.cn/cloudream/storage-common/utils"

    //"gitlink.org.cn/cloudream/common/pkg/distlock/reqbuilder"
    log "gitlink.org.cn/cloudream/common/pkg/logger"
    agtcli "gitlink.org.cn/cloudream/storage-common/pkgs/mq/client/agent"
    ramsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message"
    agtmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/agent"
    coormsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/coordinator"
    agentcaller "gitlink.org.cn/cloudream/storage-common/pkgs/proto"
    mygrpc "gitlink.org.cn/cloudream/storage-common/utils/grpc"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)
func (svc *ObjectService) UploadEcObject(userID int64, bucketID int64, objectName string, file io.ReadCloser, fileSize int64, ecName string) error {
    // TODO: locking is still required here.
    /*reqBlder := reqbuilder.NewBuilder()
    for _, uploadObject := range t.Objects {
        reqBlder.Metadata().
            // Prevents creating multiple objects with the same name
            Object().CreateOne(t.bucketID, uploadObject.ObjectName)
    }*/
    /*
        mutex, err := reqBlder.
            Metadata().
            // Checks whether the user has permission on the bucket
            UserBucket().ReadOne(userID, bucketID).
            // Queries the nodes available for uploading
            Node().ReadAny().
            // Writes the Rep configuration
            ObjectRep().CreateAny().
            // Creates the Cache record
            Cache().CreateAny().
            MutexLock(ctx.DistLock)
        if err != nil {
            return fmt.Errorf("acquire locks failed, err: %w", err)
        }
        defer mutex.Unlock()
    */

    // First round: send the write request and let the coordinator allocate the nodes to write to.
    ecWriteResp, err := svc.coordinator.PreUploadEcObject(coormsg.NewPreUploadEcObject(bucketID, objectName, fileSize, ecName, userID, config.Cfg().ExternalIP))
    if err != nil {
        return fmt.Errorf("request to coordinator failed, err: %w", err)
    }
    if len(ecWriteResp.Nodes) == 0 {
        return fmt.Errorf("no node to upload file")
    }

    // Build the write-node sequence for the erasure-coded blocks (see the sketch after this function).
    nodes := make([]ramsg.RespNode, ecWriteResp.Ec.EcN)
    numNodes := len(ecWriteResp.Nodes)
    startWriteNodeID := rand.Intn(numNodes)
    for i := 0; i < ecWriteResp.Ec.EcN; i++ {
        nodes[i] = ecWriteResp.Nodes[(startWriteNodeID+i)%numNodes]
    }

    hashs, err := svc.ecWrite(file, fileSize, ecWriteResp.Ec.EcK, ecWriteResp.Ec.EcN, nodes)
    if err != nil {
        return fmt.Errorf("EcWrite failed, err: %w", err)
    }

    nodeIDs := make([]int64, len(nodes))
    for i := 0; i < len(nodes); i++ {
        nodeIDs[i] = nodes[i].ID
    }

    // Second round: record the block hashes in the metadata.
    dirName := utils.GetDirectoryName(objectName)
    _, err = svc.coordinator.CreateEcObject(coormsg.NewCreateEcObject(bucketID, objectName, fileSize, userID, nodeIDs, hashs, ecName, dirName))
    if err != nil {
        return fmt.Errorf("request to coordinator failed, err: %w", err)
    }

    return nil
}
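
// Illustrative sketch (not part of the original logic above): the write-node
// sequence built in UploadEcObject is a ring walk over the candidate nodes,
// starting at a random offset, so that the ecN block writes wrap around when
// the coordinator returns fewer than ecN distinct nodes. pickWriteNodes is a
// hypothetical helper showing the same selection in isolation.
func pickWriteNodes(candidates []ramsg.RespNode, ecN int) []ramsg.RespNode {
    picked := make([]ramsg.RespNode, ecN)
    start := rand.Intn(len(candidates))
    for i := 0; i < ecN; i++ {
        picked[i] = candidates[(start+i)%len(candidates)]
    }
    return picked
}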
func (svc *ObjectService) ecWrite(file io.ReadCloser, fileSize int64, ecK int, ecN int, nodes []ramsg.RespNode) ([]string, error) {
    // TODO: follow the error-handling logic of the RepWrite function here.

    // TODO: the coefficient matrix is hard-coded for ecK=2, ecN=3; the 2 and 3
    // should be derived from the actual ecK and ecN values.
    var coefs = [][]int64{{1, 1, 1}, {1, 2, 3}}

    // Number of packets per block.
    numPacket := (fileSize + int64(ecK)*config.Cfg().ECPacketSize - 1) / (int64(ecK) * config.Cfg().ECPacketSize)

    // Channels connecting the load -> encode -> send pipeline.
    loadBufs := make([]chan []byte, ecN)
    encodeBufs := make([]chan []byte, ecN)
    for i := 0; i < ecN; i++ {
        loadBufs[i] = make(chan []byte)
    }
    for i := 0; i < ecN; i++ {
        encodeBufs[i] = make(chan []byte)
    }
    hashs := make([]string, ecN)

    // Start the write pipeline.
    go load(file, loadBufs[:ecN], ecK, numPacket*int64(ecK)) // read the data from the local file system
    go encode(loadBufs[:ecN], encodeBufs[:ecN], ecK, coefs, numPacket)

    var wg sync.WaitGroup
    wg.Add(ecN)
    /*mutex, err := reqbuilder.NewBuilder().
        // Prevents the uploaded replica from being cleaned up
        IPFS().CreateAnyRep(node.ID).
        MutexLock(svc.distlock)
    if err != nil {
        return fmt.Errorf("acquire locks failed, err: %w", err)
    }
    defer mutex.Unlock()
    */
    for i := 0; i < ecN; i++ {
        go svc.send(nodes[i], encodeBufs[i], numPacket, &wg, hashs, i)
    }
    wg.Wait()

    return hashs, nil
}
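
// Illustrative restatement (not part of the original file) of the ceiling
// division inlined in ecWrite and downloadEcObject: each of the ecK data
// blocks carries numPacket packets of ECPacketSize bytes, with the last packet
// zero-padded. For example, with fileSize = 10 MiB, ecK = 4 and a 1 MiB packet
// size, numPacket = ceil(10 / (4*1)) = 3.
func numPacketsPerBlock(fileSize int64, ecK int, packetSize int64) int64 {
    return (fileSize + int64(ecK)*packetSize - 1) / (int64(ecK) * packetSize)
}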
func (svc *ObjectService) downloadEcObject(fileSize int64, ecK int, ecN int, blockIDs []int, nodeIDs []int64, nodeIPs []string, hashs []string) (io.ReadCloser, error) {
    // TODO zkx: implemented synchronously for now; error handling still needs work.
    // This also makes it easier to later reuse uploadToNode and uploadToLocalIPFS to improve the code structure.
    numPacket := (fileSize + int64(ecK)*config.Cfg().ECPacketSize - 1) / (int64(ecK) * config.Cfg().ECPacketSize)
    getBufs := make([]chan []byte, ecN)
    decodeBufs := make([]chan []byte, ecK)
    for i := 0; i < ecN; i++ {
        getBufs[i] = make(chan []byte)
    }
    for i := 0; i < ecK; i++ {
        decodeBufs[i] = make(chan []byte)
    }
    for i := 0; i < len(blockIDs); i++ {
        go svc.get(hashs[i], nodeIPs[i], getBufs[blockIDs[i]], numPacket)
    }
    go decode(getBufs[:], decodeBufs[:], blockIDs, ecK, numPacket)

    r, w := io.Pipe()
    // Persist step: write the decoded data into the pipe.
    go func() {
        for i := 0; int64(i) < numPacket; i++ {
            for j := 0; j < len(decodeBufs); j++ {
                tmp := <-decodeBufs[j]
                _, err := w.Write(tmp)
                if err != nil {
                    w.CloseWithError(fmt.Errorf("persist file failed, err: %w", err))
                    return
                }
            }
        }
        w.Close()
    }()
    return r, nil
}
func (svc *ObjectService) get(blockHash string, nodeIP string, getBuf chan []byte, numPacket int64) error {
    downloadFromAgent := false
    // Try the local IPFS daemon first.
    if svc.ipfs != nil {
        log.Infof("try to use local IPFS to download file")
        // Get a reader from local IPFS.
        reader, err := svc.downloadFromLocalIPFS(blockHash)
        if err != nil {
            downloadFromAgent = true
            log.Warnf("read ipfs block failed, so fall back to the agent, err: %s", err.Error())
        } else {
            defer reader.Close()
            for i := 0; int64(i) < numPacket; i++ {
                buf := make([]byte, config.Cfg().ECPacketSize)
                if _, err := io.ReadFull(reader, buf); err != nil {
                    // Packets have already been handed to the consumer, so falling
                    // back to the agent is no longer possible at this point.
                    return fmt.Errorf("read ipfs block failed, err: %w", err)
                }
                getBuf <- buf
            }
            close(getBuf)
            return nil
        }
    } else {
        downloadFromAgent = true
    }

    // Fall back to downloading the block through the node's agent.
    if downloadFromAgent {
        /*// Acquire the lock a second time
        mutex, err := reqbuilder.NewBuilder().
            // Used for downloading the file from IPFS
            IPFS().ReadOneRep(nodeID, fileHash).
            MutexLock(svc.distlock)
        if err != nil {
            return fmt.Errorf("acquire locks failed, err: %w", err)
        }
        defer mutex.Unlock()
        */
        // Connect over gRPC.
        grpcAddr := fmt.Sprintf("%s:%d", nodeIP, config.Cfg().GRPCPort)
        conn, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            return fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err)
        }
        defer conn.Close()

        // Download the block as a stream.
        client := agentcaller.NewFileTransportClient(conn)
        reader, err := mygrpc.GetFileAsStream(client, blockHash)
        if err != nil {
            return fmt.Errorf("request to get file failed, err: %w", err)
        }
        for i := 0; int64(i) < numPacket; i++ {
            buf := make([]byte, config.Cfg().ECPacketSize)
            if _, err := io.ReadFull(reader, buf); err != nil {
                reader.Close()
                return fmt.Errorf("read block data from stream failed, err: %w", err)
            }
            getBuf <- buf
        }
        close(getBuf)
        reader.Close()
        return nil
    }
    return nil
}
// load reads the source file packet by packet and feeds it into the ecK data
// channels; after every full stripe it pushes zero-filled packets into the
// remaining parity channels, which encode later fills in.
func load(file io.ReadCloser, loadBufs []chan []byte, ecK int, totalNumPacket int64) error {
    defer file.Close()
    for i := 0; int64(i) < totalNumPacket; i++ {
        buf := make([]byte, config.Cfg().ECPacketSize)
        idx := i % ecK
        // The final packets may be shorter than ECPacketSize; the remainder of
        // buf stays zero-filled.
        _, err := io.ReadFull(file, buf)
        if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
            return fmt.Errorf("read file failed, err: %w", err)
        }
        loadBufs[idx] <- buf
        if idx == ecK-1 {
            for j := ecK; j < len(loadBufs); j++ {
                zeroPkt := make([]byte, config.Cfg().ECPacketSize)
                loadBufs[j] <- zeroPkt
            }
        }
    }
    for i := 0; i < len(loadBufs); i++ {
        close(loadBufs[i])
    }
    return nil
}
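
// Data layout produced by load (descriptive note, using illustrative numbers):
// with ecK = 2 and ecN = 3, source packets P0, P1, P2, P3 are distributed
// round-robin as
//
//    loadBufs[0]: P0, P2   (data block 0)
//    loadBufs[1]: P1, P3   (data block 1)
//    loadBufs[2]: Z,  Z    (parity slot, zero packets; filled in by encode)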
// encode consumes one packet from every input channel (a full stripe), runs the
// Reed-Solomon encoder over it in place, and forwards the resulting packets to
// the output channels. coefs is accepted but not used by the current implementation.
func encode(inBufs []chan []byte, outBufs []chan []byte, ecK int, coefs [][]int64, numPacket int64) {
    tmpIn := make([][]byte, len(outBufs))
    enc := ec.NewRsEnc(ecK, len(outBufs))
    for i := 0; int64(i) < numPacket; i++ {
        for j := 0; j < len(outBufs); j++ {
            tmpIn[j] = <-inBufs[j]
        }
        enc.Encode(tmpIn)
        for j := 0; j < len(outBufs); j++ {
            outBufs[j] <- tmpIn[j]
        }
    }
    for i := 0; i < len(outBufs); i++ {
        close(outBufs[i])
    }
}
// decode mirrors encode on the read path: for each stripe it gathers one packet
// per downloaded block, repairs missing data blocks when necessary, and forwards
// the ecK data packets to the output channels.
func decode(inBufs []chan []byte, outBufs []chan []byte, blockSeq []int, ecK int, numPacket int64) {
    tmpIn := make([][]byte, len(inBufs))
    hasBlock := map[int]bool{}
    for j := 0; j < len(blockSeq); j++ {
        hasBlock[blockSeq[j]] = true
    }

    // Check whether all data blocks were provided; repair is only needed when
    // the first ecK positions are not exactly the data blocks.
    needRepair := false
    for j := 0; j < len(outBufs); j++ {
        if blockSeq[j] != j {
            needRepair = true
        }
    }

    enc := ec.NewRsEnc(ecK, len(inBufs))
    for i := 0; int64(i) < numPacket; i++ {
        for j := 0; j < len(inBufs); j++ {
            if hasBlock[j] {
                tmpIn[j] = <-inBufs[j]
            } else {
                // Blocks that were not downloaded stay nil; Repair reconstructs them.
                tmpIn[j] = nil
            }
        }
        if needRepair {
            err := enc.Repair(tmpIn)
            if err != nil {
                fmt.Fprintf(os.Stderr, "Decode Repair Error: %s", err.Error())
            }
        }
        for j := 0; j < len(outBufs); j++ {
            outBufs[j] <- tmpIn[j]
        }
    }
    for i := 0; i < len(outBufs); i++ {
        close(outBufs[i])
    }
}
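
// Example of the repair decision above (illustrative numbers): with ecK = 2 and
// ecN = 3, blockSeq = [0, 1] means both data blocks were fetched and the stripes
// can be streamed through unchanged, while blockSeq = [0, 2] means data block 1
// is missing and must be reconstructed from data block 0 and the parity block.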
func (svc *ObjectService) send(node ramsg.RespNode, inBuf chan []byte, numPacket int64, wg *sync.WaitGroup, hashs []string, idx int) error {
    // TODO zkx: reuse uploadToNode and uploadToLocalIPFS from
    // client/internal/task/upload_rep_objects.go here to make later
    // asynchronous processing easier.
    defer wg.Done()

    uploadToAgent := true
    if svc.ipfs != nil { // upload via the local IPFS daemon first
        log.Infof("try to use local IPFS to upload block")
        // Create the IPFS file.
        writer, err := svc.ipfs.CreateFile()
        if err != nil {
            // No packet has been consumed yet, so falling back to the agent is still possible.
            log.Warnf("create IPFS file failed, so fall back to the agent, err: %s", err.Error())
        } else {
            // Write the block into IPFS packet by packet.
            for i := 0; int64(i) < numPacket; i++ {
                buf := <-inBuf
                reader := bytes.NewReader(buf)
                if _, err := io.Copy(writer, reader); err != nil {
                    // Packets have already been consumed, so the agent fallback is no longer possible.
                    return fmt.Errorf("copying block data to IPFS file failed, err: %w", err)
                }
            }
            // Finish the write and obtain the block hash.
            fileHash, err := writer.Finish()
            if err != nil {
                return fmt.Errorf("finish writing block to IPFS failed, err: %w", err)
            }
            hashs[idx] = fileHash

            // Then ask the nearest node to pin the locally uploaded block.
            nodeID := node.ID
            agentClient, err := agtcli.NewClient(nodeID, &config.Cfg().RabbitMQ)
            if err != nil {
                return fmt.Errorf("create agent client to %d failed, err: %w", nodeID, err)
            }
            defer agentClient.Close()

            pinObjResp, err := agentClient.StartPinningObject(agtmsg.NewStartPinningObject(fileHash))
            if err != nil {
                return fmt.Errorf("start pinning object: %w", err)
            }
            for {
                waitResp, err := agentClient.WaitPinningObject(agtmsg.NewWaitPinningObject(pinObjResp.TaskID, int64(time.Second)*5))
                if err != nil {
                    return fmt.Errorf("waiting pinning object: %w", err)
                }
                if waitResp.IsComplete {
                    if waitResp.Error != "" {
                        return fmt.Errorf("agent pinning object: %s", waitResp.Error)
                    }
                    break
                }
            }
            // The block is stored and pinned; there is no need to also upload it through the agent.
            uploadToAgent = false
        }
    }

    ////////////////////////////// Upload through the node's agent
    if uploadToAgent {
        // If the client and the node are in the same location, connect via the internal address.
        nodeIP := node.ExternalIP
        if node.IsSameLocation {
            nodeIP = node.LocalIP
            log.Infof("client and node %d are at the same location, use local ip", node.ID)
        }

        grpcAddr := fmt.Sprintf("%s:%d", nodeIP, config.Cfg().GRPCPort)
        grpcCon, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            return fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err)
        }
        defer grpcCon.Close()

        client := agentcaller.NewFileTransportClient(grpcCon)
        upload, err := mygrpc.SendFileAsStream(client)
        if err != nil {
            return fmt.Errorf("request to send file failed, err: %w", err)
        }

        // Send the block data packet by packet.
        for i := 0; int64(i) < numPacket; i++ {
            buf := <-inBuf
            reader := bytes.NewReader(buf)
            if _, err := io.Copy(upload, reader); err != nil {
                // Abort the stream on error.
                upload.Abort(io.ErrClosedPipe)
                return fmt.Errorf("copy block data to upload stream failed, err: %w", err)
            }
        }

        // Send the EOF message and obtain the FileHash.
        fileHash, err := upload.Finish()
        if err != nil {
            upload.Abort(io.ErrClosedPipe)
            return fmt.Errorf("send EOF failed, err: %w", err)
        }
        hashs[idx] = fileHash
    }
    return nil
}
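
// Pipeline wiring used by ecWrite and downloadEcObject (descriptive summary):
//
//    upload:   file --load--> loadBufs[0..ecN-1] --encode--> encodeBufs[i] --send--> node i
//    download: node i --get--> getBufs[blockID] --decode--> decodeBufs[0..ecK-1] --> io.Pipe
//
// load feeds ecK data channels plus zero-filled parity channels, encode fills
// in the parity packets, and one send goroutine per block streams its packets
// either into local IPFS (then pinned via the node's agent) or directly to the
// node's agent over gRPC.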
// persist drains the decoded packet channels and writes them into a file under
// the "assets" directory next to the executable.
func persist(inBuf []chan []byte, numPacket int64, localFilePath string, wg *sync.WaitGroup) {
    defer wg.Done()

    fDir, err := os.Executable()
    if err != nil {
        panic(err)
    }
    fURL := filepath.Join(filepath.Dir(fDir), "assets")
    _, err = os.Stat(fURL)
    if os.IsNotExist(err) {
        if err := os.MkdirAll(fURL, os.ModePerm); err != nil {
            log.Warnf("create assets directory failed, err: %s", err.Error())
            return
        }
    }

    file, err := os.Create(filepath.Join(fURL, localFilePath))
    if err != nil {
        log.Warnf("create local file failed, err: %s", err.Error())
        return
    }
    defer file.Close()

    for i := 0; int64(i) < numPacket; i++ {
        for j := 0; j < len(inBuf); j++ {
            tmp := <-inBuf[j]
            if _, err := file.Write(tmp); err != nil {
                log.Warnf("write local file failed, err: %s", err.Error())
                return
            }
        }
    }
}
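
// Hypothetical call site (illustration only; the real invocation lives in the
// client's command layer, which is not part of this file, and the EC policy
// name "rs_2_3" is an assumed example value known to the coordinator):
//
//    err := svc.UploadEcObject(userID, bucketID, "dir/object.bin", file, fileSize, "rs_2_3")
//    if err != nil {
//        log.Warnf("upload ec object failed, err: %s", err.Error())
//    }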

This project aims to turn JointCloud (inter-cloud) storage into a public infrastructure, so that individuals and enterprises can use efficient JointCloud storage services with a low barrier to entry: installing the out-of-the-box JointCloud storage client is enough, with no need to care about deploying any other components. At the same time, it lets users flexibly and conveniently customize the functional details of the JointCloud storage service.