cache.go

package cache

import (
	"errors"
	"io"
	"os"
	"path/filepath"
	"sync"
	"syscall"
	"time"

	"github.com/inhies/go-bytesize"
	"github.com/samber/lo"
	"gitlink.org.cn/cloudream/common/pkgs/logger"
	"gitlink.org.cn/cloudream/common/pkgs/trie"
	"gitlink.org.cn/cloudream/common/utils/io2"
	"gitlink.org.cn/cloudream/common/utils/lo2"
	"gitlink.org.cn/cloudream/storage2/client/internal/db"
	"gitlink.org.cn/cloudream/storage2/client/internal/downloader"
	"gitlink.org.cn/cloudream/storage2/client/internal/mount/config"
	"gitlink.org.cn/cloudream/storage2/client/internal/mount/fuse"
	"gitlink.org.cn/cloudream/storage2/client/internal/uploader"
	clitypes "gitlink.org.cn/cloudream/storage2/client/types"
)

type CacheEntry interface {
	fuse.FsEntry
	// PathComps returns the path within the virtual filesystem, i.e. the path without the cache directory prefix.
	PathComps() []string
}

type CacheEntryInfo struct {
	PathComps []string
	Size      int64
	Mode      os.FileMode
	ModTime   time.Time
	IsDir     bool
}

type Cache struct {
	cfg         *config.Config
	db          *db.DB
	uploader    *uploader.Uploader
	downloader  *downloader.Downloader
	lock        *sync.RWMutex
	cacheDone   chan any
	activeCache *trie.Trie[*CacheFile]
}

func NewCache(cfg *config.Config, db *db.DB, uploader *uploader.Uploader, downloader *downloader.Downloader) *Cache {
	return &Cache{
		cfg:         cfg,
		db:          db,
		uploader:    uploader,
		downloader:  downloader,
		lock:        &sync.RWMutex{},
		cacheDone:   make(chan any),
		activeCache: trie.NewTrie[*CacheFile](),
	}
}

func (c *Cache) Start() {
	go c.scanningCache()
	go c.scanningData()
}

func (c *Cache) Stop() {
	close(c.cacheDone)
}

func (c *Cache) GetCacheDataPath(comps ...string) string {
	comps2 := make([]string, len(comps)+1)
	comps2[0] = c.cfg.DataDir
	copy(comps2[1:], comps)
	return filepath.Join(comps2...)
}

func (c *Cache) GetCacheMetaPath(comps ...string) string {
	comps2 := make([]string, len(comps)+1)
	comps2[0] = c.cfg.MetaDir
	copy(comps2[1:], comps)
	return filepath.Join(comps2...)
}

// Stat returns the cache entry info at the given path. It returns nil if the path does not exist.
func (c *Cache) Stat(pathComps []string) *CacheEntryInfo {
	c.lock.RLock()
	defer c.lock.RUnlock()

	node, ok := c.activeCache.WalkEnd(pathComps)
	if ok && node.Value != nil {
		info := node.Value.Info()
		return &info
	}

	dataPath := c.GetCacheDataPath(pathComps...)
	stat, err := os.Stat(dataPath)
	if err != nil {
		// TODO: log this
		return nil
	}

	if stat.IsDir() {
		info, err := loadCacheDirInfo(c, pathComps, stat)
		if err != nil {
			return nil
		}
		return info
	}

	info, err := loadCacheFileInfo(c, pathComps, stat)
	if err != nil {
		return nil
	}
	return info
}

// CreateFile creates a cache file. If the file already exists, it is overwritten. If an error occurs
// during loading, or the target location is a directory, nil is returned.
//
// Remember to call Release to decrement the reference count.
func (c *Cache) CreateFile(pathComps []string) *CacheFile {
	c.lock.Lock()
	defer c.lock.Unlock()

	node, ok := c.activeCache.WalkEnd(pathComps)
	if ok && node.Value != nil {
		node.Value.Delete()
		if node.Value.state.uploading != nil {
			node.Value.state.uploading.isDeleted = true
		}
	}

	ch, err := createNewCacheFile(c, pathComps)
	if err != nil {
		logger.Warnf("create new cache file %v: %v", pathComps, err)
		return nil
	}

	ch.state.refCount++
	c.activeCache.CreateWords(pathComps).Value = ch
	logger.Debugf("create new cache file %v", pathComps)
	return ch
}

// LoadFile tries to load a cache file. If the file does not exist, a new cache file is created from the
// information in obj; if obj is nil, nil is returned.
//
// Remember to call Release to decrement the reference count.
func (c *Cache) LoadFile(pathComps []string, obj *clitypes.Object) *CacheFile {
	c.lock.Lock()
	defer c.lock.Unlock()

	node, ok := c.activeCache.WalkEnd(pathComps)
	if ok && node.Value != nil {
		if !node.Value.state.isLoaded {
			err := node.Value.Load()
			if err != nil {
				logger.Warnf("load cache %v: %v", pathComps, err)
				return nil
			}
		}
		return node.Value
	}

	ch, err := loadCacheFile(c, pathComps)
	if err == nil {
		ch.remoteObj = obj
		ch.state.refCount++
		c.activeCache.CreateWords(pathComps).Value = ch
		logger.Debugf("load cache %v", pathComps)
		return ch
	}
	if !os.IsNotExist(err) {
		logger.Warnf("load cache %v: %v", pathComps, err)
		return nil
	}

	if obj == nil {
		return nil
	}

	ch, err = newCacheFileFromObject(c, pathComps, obj)
	if err != nil {
		logger.Warnf("create cache %v from object: %v", pathComps, err)
		return nil
	}

	ch.state.refCount++
	c.activeCache.CreateWords(pathComps).Value = ch
	logger.Debugf("create cache %v from object %v", pathComps, obj.ObjectID)
	return ch
}

// CreateDir creates a cache directory. If the directory already exists, its attributes are reset.
// If an error occurs during loading, or the target location is a file, nil is returned.
func (c *Cache) CreateDir(pathComps []string) *CacheDir {
	c.lock.Lock()
	defer c.lock.Unlock()

	ch, err := createNewCacheDir(c, pathComps)
	if err != nil {
		logger.Warnf("create cache dir: %v", err)
		return nil
	}
	return ch
}

type CreateDirOption struct {
	ModTime time.Time
}

// LoadDir loads the given cache directory. If the directory does not exist, it is created using the
// createOpt options; if createOpt is nil, nil is returned.
func (c *Cache) LoadDir(pathComps []string, createOpt *CreateDirOption) *CacheDir {
	c.lock.Lock()
	defer c.lock.Unlock()

	ch, err := loadCacheDir(c, pathComps)
	if err == nil {
		return ch
	}
	if !os.IsNotExist(err) {
		// TODO: log this
		return nil
	}

	if createOpt == nil {
		return nil
	}

	// Create the directory
	ch, err = makeCacheDirFromOption(c, pathComps, *createOpt)
	if err != nil {
		// TODO: log this
		return nil
	}
	return ch
}

// StatMany loads the info of all cache entries under the given path.
func (c *Cache) StatMany(pathComps []string) []CacheEntryInfo {
	c.lock.RLock()
	defer c.lock.RUnlock()

	var infos []CacheEntryInfo
	exists := make(map[string]bool)

	node, ok := c.activeCache.WalkEnd(pathComps)
	if ok {
		for name, child := range node.WordNexts {
			if child.Value != nil {
				infos = append(infos, child.Value.Info())
				exists[name] = true
			}
		}
	}

	osEns, err := os.ReadDir(c.GetCacheDataPath(pathComps...))
	if err != nil {
		return nil
	}

	for _, e := range osEns {
		if exists[e.Name()] {
			continue
		}

		info, err := e.Info()
		if err != nil {
			continue
		}

		if e.IsDir() {
			info, err := loadCacheDirInfo(c, append(lo2.ArrayClone(pathComps), e.Name()), info)
			if err != nil {
				continue
			}
			infos = append(infos, *info)
		} else {
			info, err := loadCacheFileInfo(c, append(lo2.ArrayClone(pathComps), e.Name()), info)
			if err != nil {
				continue
			}
			infos = append(infos, *info)
		}
	}
	return infos
}

// Remove deletes the cache file or directory at the given path. Removing a non-empty directory returns an error.
func (c *Cache) Remove(pathComps []string) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	node, ok := c.activeCache.WalkEnd(pathComps)
	if ok {
		if len(node.WordNexts) > 0 {
			return fuse.ErrNotEmpty
		}

		if node.Value != nil {
			node.Value.Delete()
			if node.Value.state.uploading != nil {
				node.Value.state.uploading.isDeleted = true
			}
		}
		node.RemoveSelf(true)
		logger.Debugf("active cache %v removed", pathComps)
		return nil
	}

	metaPath := c.GetCacheMetaPath(pathComps...)
	dataPath := c.GetCacheDataPath(pathComps...)

	os.Remove(metaPath)
	err := os.Remove(dataPath)
	if err == nil || os.IsNotExist(err) {
		logger.Debugf("local cache %v removed", pathComps)
		return nil
	}
	if errors.Is(err, syscall.ENOTEMPTY) {
		return fuse.ErrNotEmpty
	}
	return err
}

// Move moves the cache file or directory at pathComps to newPathComps. If the target path already
// exists, fuse.ErrExists is returned; if the source does not exist, fuse.ErrNotExists is returned.
func (c *Cache) Move(pathComps []string, newPathComps []string) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	_, ok := c.activeCache.WalkEnd(newPathComps)
	if ok {
		return fuse.ErrExists
	}

	newMetaPath := c.GetCacheMetaPath(newPathComps...)
	newDataPath := c.GetCacheDataPath(newPathComps...)

	_, err := os.Stat(newDataPath)
	if err == nil {
		return fuse.ErrExists
	}
	if !os.IsNotExist(err) {
		return err
	}

	oldMetaPath := c.GetCacheMetaPath(pathComps...)
	oldDataPath := c.GetCacheDataPath(pathComps...)

	// Make sure the source exists before doing anything else.
	_, err = os.Stat(oldDataPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fuse.ErrNotExists
		}
		return err
	}

	// Creating the parent directories handles the case where the file being moved is not present locally.
	// The downside is that if the parent directory of the target path really does not exist, it gets
	// created here unexpectedly.
	newMetaDir := filepath.Dir(newMetaPath)
	err = os.MkdirAll(newMetaDir, 0755)
	if err != nil {
		return err
	}

	newDataDir := filepath.Dir(newDataPath)
	err = os.MkdirAll(newDataDir, 0755)
	if err != nil {
		return err
	}

	// Each cache file holds open handles to its meta and data files, so renaming the files here does not
	// affect those handles. Errors here can only be ignored.
	os.Rename(oldMetaPath, newMetaPath)
	os.Rename(oldDataPath, newDataPath)

	// Update the in-memory cache.
	oldNode, ok := c.activeCache.WalkEnd(pathComps)
	if ok {
		newNode := c.activeCache.CreateWords(newPathComps)
		newNode.Value = oldNode.Value
		newNode.WordNexts = oldNode.WordNexts
		oldNode.RemoveSelf(false)

		if newNode.Value != nil {
			newNode.Value.Move(newPathComps)
		}
		newNode.Iterate(func(path []string, node *trie.Node[*CacheFile], isWordNode bool) trie.VisitCtrl {
			if node.Value != nil {
				node.Value.Move(lo2.AppendNew(newPathComps, path...))
			}
			return trie.VisitContinue
		})
	}

	logger.Debugf("cache moved: %v -> %v", pathComps, newPathComps)
	return nil
}

type uploadingPackage struct {
	bktName string
	pkgName string
	pkg     clitypes.Package
	upObjs  []*uploadingObject
}

type uploadingObject struct {
	pathComps []string
	cache     *CacheFile
	reader    *CacheFileHandle
	isDeleted bool
	isSuccess bool
}

func (c *Cache) scanningCache() {
	ticker := time.NewTicker(time.Second * 5)
	defer ticker.Stop()

	lastScanPath := []string{}
	for {
		select {
		case _, ok := <-c.cacheDone:
			if !ok {
				return
			}
		case <-ticker.C:
		}

		c.lock.Lock()

		type packageFullName struct {
			bktName string
			pkgName string
		}
		uploadingPkgs := make(map[packageFullName]*uploadingPackage)

		visitCnt := 0
		visitBreak := false
		node, _ := c.activeCache.WalkEnd(lastScanPath)
		node.Iterate(func(path []string, node *trie.Node[*CacheFile], isWordNode bool) trie.VisitCtrl {
			ch := node.Value
			if ch == nil {
				return trie.VisitContinue
			}

			if ch.state.refCount > 0 {
				logger.Debugf("skip cache %v, refCount: %v", path, ch.state.refCount)
				return trie.VisitContinue
			}

			visitCnt++

			shouldUpload := true
			// Files not stored inside a Package do not need to be uploaded.
			if len(ch.pathComps) <= 2 {
				shouldUpload = false
			}

			if ch.Revision() > 0 && shouldUpload {
				// 1. The local cache has been modified; if it has been idle for a while, upload it.
				if time.Since(ch.state.freeTime) > c.cfg.UploadPendingTime && ch.state.uploading == nil {
					fullName := packageFullName{ch.pathComps[0], ch.pathComps[1]}
					pkg, ok := uploadingPkgs[fullName]
					if !ok {
						pkg = &uploadingPackage{
							bktName: ch.pathComps[0],
							pkgName: ch.pathComps[1],
						}
						uploadingPkgs[fullName] = pkg
					}

					obj := &uploadingObject{
						pathComps: lo2.ArrayClone(ch.pathComps),
						cache:     ch,
						reader:    ch.OpenReadWhenScanning(),
					}
					pkg.upObjs = append(pkg.upObjs, obj)
					ch.state.uploading = obj
				}
			} else if ch.state.isLoaded {
				// 2. The local cache has not been modified; if it has been idle for a while, unload it.
				if time.Since(ch.state.freeTime) > c.cfg.CacheActiveTime {
					ch.Unload()
					ch.state.isLoaded = false
					ch.state.unloadTime = time.Now()
				}
			} else {
				// 3. An unloaded cache that has not been used for a while is deleted.
				if time.Since(ch.state.unloadTime) > c.cfg.CacheExpireTime {
					// If the file has already been synced to the remote, the local cache can be deleted directly.
					if ch.Revision() == 0 {
						ch.Delete()
					}
					node.RemoveSelf(true)
				}
			}

			// Visit at most 500 nodes per pass to avoid holding the lock for too long.
			if visitCnt > 500 {
				lastScanPath = lo2.ArrayClone(path)
				visitBreak = true
				return trie.VisitBreak
			}
			return trie.VisitContinue
		})
		if !visitBreak {
			lastScanPath = []string{}
		}
		c.lock.Unlock()

		if len(uploadingPkgs) > 0 {
			go c.doUploading(lo.Values(uploadingPkgs))
		}
	}
}

func (c *Cache) scanningData() {
	ticker := time.NewTicker(c.cfg.ScanDataDirInterval)
	defer ticker.Stop()

	var walkTrace []*os.File
	var walkTraceComps []string
	for {
		select {
		case <-ticker.C:
		case <-c.cacheDone:
			return
		}

		logger.Infof("begin scanning data dir")

		if len(walkTrace) == 0 {
			dir, err := os.Open(c.cfg.DataDir)
			if err != nil {
				logger.Warnf("open data dir: %v", err)
				continue
			}
			walkTrace = []*os.File{dir}
			walkTraceComps = []string{c.cfg.MetaDir}
		}

		const maxVisitCnt = 5000
		const maxUntrackedFiles = 500
		var untrackedFiles [][]string
		visitCnt := 0

		// Visit at most 5000 files per pass (directories on the path included), and record at most
		// 500 untracked files per pass.
		for len(walkTrace) > 0 && visitCnt < maxVisitCnt && len(untrackedFiles) < maxUntrackedFiles {
			lastNode := walkTrace[len(walkTrace)-1]
			visitCnt++

			e, err := lastNode.ReadDir(1)
			if err == io.EOF {
				lastNode.Close()
				walkTrace = walkTrace[:len(walkTrace)-1]
				walkTraceComps = walkTraceComps[:len(walkTraceComps)-1]
				continue
			}
			if err != nil {
				logger.Warnf("read dir %v: %v", lastNode.Name(), err)
				lastNode.Close()
				walkTrace = walkTrace[:len(walkTrace)-1]
				walkTraceComps = walkTraceComps[:len(walkTraceComps)-1]
				continue
			}

			if e[0].IsDir() {
				child, err := os.Open(filepath.Join(lastNode.Name(), e[0].Name()))
				if err != nil {
					logger.Warnf("open dir %v: %v", e[0].Name(), err)
					continue
				}
				walkTrace = append(walkTrace, child)
				walkTraceComps = append(walkTraceComps, e[0].Name())
				continue
			}

			// Files not at the Package level are not tracked.
			if len(walkTrace) <= 2 {
				continue
			}

			walkTraceComps = append(walkTraceComps, e[0].Name())
			fileMetaPath := filepath.Join(walkTraceComps...)
			_, err = os.Stat(fileMetaPath)
			if err == nil || !os.IsNotExist(err) {
				walkTraceComps = walkTraceComps[:len(walkTraceComps)-1]
				continue
			}

			untrackedFiles = append(untrackedFiles, lo2.ArrayClone(walkTraceComps[1:]))
			walkTraceComps = walkTraceComps[:len(walkTraceComps)-1]
		}

		if len(untrackedFiles) > 0 {
			for _, comps := range untrackedFiles {
				ch := c.LoadFile(comps, nil)
				if ch != nil {
					ch.Release()
				}
			}
		}

		logger.Infof("%v files visited, %v untracked files found", visitCnt, len(untrackedFiles))
	}
}

func (c *Cache) doUploading(pkgs []*uploadingPackage) {
	/// 1. First try to create each Package.
	var sucPkgs []*uploadingPackage
	var failedPkgs []*uploadingPackage
	for _, pkg := range pkgs {
		p, err := db.DoTx21(c.db, c.db.Package().TryCreateAll, pkg.bktName, pkg.pkgName)
		if err != nil {
			logger.Warnf("try create package %v/%v: %v", pkg.bktName, pkg.pkgName, err)
			failedPkgs = append(failedPkgs, pkg)
			continue
		}
		pkg.pkg = p
		sucPkgs = append(sucPkgs, pkg)
	}

	/// 2. For Packages that failed to be created, just close their files and skip the upload.
	// Clear the uploading state while holding the lock.
	c.lock.Lock()
	for _, pkg := range failedPkgs {
		for _, obj := range pkg.upObjs {
			obj.cache.state.uploading = nil
		}
	}
	c.lock.Unlock()

	// Files must be closed outside the lock.
	for _, pkg := range failedPkgs {
		for _, obj := range pkg.upObjs {
			obj.reader.Close()
		}
	}

	/// 3. Upload each Package.
	for _, p := range sucPkgs {
		uploader, err := c.uploader.BeginUpdate(p.pkg.PackageID, 0, nil, nil)
		if err != nil {
			logger.Warnf("begin update package %v/%v: %v", p.bktName, p.pkgName, err)

			// Clear the uploading state.
			c.lock.Lock()
			for _, obj := range p.upObjs {
				obj.cache.state.uploading = nil
			}
			c.lock.Unlock()

			for _, obj := range p.upObjs {
				obj.reader.Close()
			}
			continue
		}

		upSuc := 0
		upSucAmt := int64(0)
		upFailed := 0
		upStartTime := time.Now()

		logger.Infof("begin uploading %v objects to package %v/%v", len(p.upObjs), p.bktName, p.pkgName)

		for _, o := range p.upObjs {
			rd := cacheFileReader{
				rw: o.reader,
			}
			counter := io2.Counter(&rd)
			err = uploader.Upload(clitypes.JoinObjectPath(o.pathComps[2:]...), counter)
			if err != nil {
				logger.Warnf("upload object %v: %v", o.pathComps, err)
				upFailed++
				continue
			}
			o.isSuccess = true
			upSuc++
			upSucAmt += counter.Count()
		}

		// Record the upload results while holding the lock.
		c.lock.Lock()

		upCancel := 0
		upRename := 0

		// Check whether any files changed while they were being uploaded.
		var sucObjs []*uploadingObject
		for _, o := range p.upObjs {
			o.cache.state.uploading = nil

			if !o.isSuccess {
				continue
			}

			oldPath := clitypes.JoinObjectPath(o.pathComps[2:]...)
			newPath := clitypes.JoinObjectPath(o.cache.pathComps[2:]...)

			if o.isDeleted {
				uploader.CancelObject(oldPath)
				upCancel++
				continue
			}

			// If the object was moved to another Package, the upload must also be canceled.
			if !lo2.ArrayEquals(o.pathComps[:2], o.cache.pathComps[:2]) {
				uploader.CancelObject(oldPath)
				upCancel++
				continue
			}

			// Only objects that moved within the same Package can simply be renamed.
			if newPath != oldPath {
				uploader.RenameObject(oldPath, newPath)
				upRename++
			}

			sucObjs = append(sucObjs, o)
		}

		_, err = uploader.Commit()
		if err != nil {
			logger.Warnf("commit update package %v/%v: %v", p.bktName, p.pkgName, err)
		} else {
			for _, obj := range sucObjs {
				obj.cache.RevisionUploaded(obj.reader.revision)
			}

			upTime := time.Since(upStartTime)
			logger.Infof("upload package %v/%v in %v, upload: %v, size: %v, speed: %v/s, cancel: %v, rename: %v",
				p.bktName, p.pkgName, upTime, upSuc, upSucAmt, bytesize.New(float64(upSucAmt)/upTime.Seconds()), upCancel, upRename)
		}
		c.lock.Unlock()

		// Close files outside the Cache lock.
		// Closing a file affects refCount, so whether the upload failed or succeeded, the next stage
		// of processing only happens after waiting for a while.
		for _, obj := range p.upObjs {
			obj.reader.Close()
		}
	}
}

type cacheFileReader struct {
	rw  *CacheFileHandle
	pos int64
}

func (r *cacheFileReader) Read(p []byte) (int, error) {
	n, err := r.rw.ReadAt(p, r.pos)
	r.pos += int64(n)
	if err != nil {
		return n, err
	}
	if n != len(p) {
		return n, io.EOF
	}
	return n, nil
}
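For reference, a minimal usage sketch of the Cache API defined above. The exampleUsage function, its parameter names, and the example path components are hypothetical; the sketch assumes the config, DB, uploader, and downloader values are constructed elsewhere (their constructors are not shown in cache.go). The calls themselves — NewCache, Start, Stop, LoadFile, Release, and Stat — are the ones defined in this file, and the path components follow the {bucket, package, object path...} layout that scanningCache and doUploading rely on.

package cache

import (
	"fmt"

	"gitlink.org.cn/cloudream/storage2/client/internal/db"
	"gitlink.org.cn/cloudream/storage2/client/internal/downloader"
	"gitlink.org.cn/cloudream/storage2/client/internal/mount/config"
	"gitlink.org.cn/cloudream/storage2/client/internal/uploader"
)

// exampleUsage is a hypothetical caller; it assumes cfg, database, up, and down were built elsewhere.
func exampleUsage(cfg *config.Config, database *db.DB, up *uploader.Uploader, down *downloader.Downloader) {
	c := NewCache(cfg, database, up, down)
	c.Start()      // starts the scanningCache and scanningData background loops
	defer c.Stop() // closes cacheDone so both loops exit

	// Path components are relative to the mount root: {bucket, package, object path...}.
	comps := []string{"bucket1", "package1", "dir", "file.txt"}

	// With a nil object, LoadFile only succeeds if a local cache entry already exists.
	f := c.LoadFile(comps, nil)
	if f == nil {
		return
	}
	defer f.Release() // always release the reference count taken by LoadFile

	if info := c.Stat(comps); info != nil {
		fmt.Printf("cached %v: size=%d, modTime=%v\n", comps, info.Size, info.ModTime)
	}
}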

This project aims to turn inter-cloud storage into public infrastructure, so that individuals and enterprises can use efficient inter-cloud storage services with a low barrier to entry (simply install the out-of-the-box inter-cloud storage client, with no need to worry about deploying any other components), while also letting users flexibly and conveniently customize the details of the inter-cloud storage features.