You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

cache.go 26 kB

8 months ago
8 months ago
8 months ago
8 months ago
8 months ago
8 months ago
8 months ago
8 months ago
7 months ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099
  1. package cache
  2. import (
  3. "errors"
  4. "io"
  5. "os"
  6. "path/filepath"
  7. "sync"
  8. "syscall"
  9. "time"
  10. "github.com/inhies/go-bytesize"
  11. "github.com/samber/lo"
  12. "gitlink.org.cn/cloudream/common/pkgs/logger"
  13. "gitlink.org.cn/cloudream/common/pkgs/trie"
  14. "gitlink.org.cn/cloudream/common/utils/io2"
  15. "gitlink.org.cn/cloudream/common/utils/lo2"
  16. "gitlink.org.cn/cloudream/common/utils/sort2"
  17. "gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
  18. "gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader"
  19. "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/config"
  20. "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse"
  21. "gitlink.org.cn/cloudream/jcs-pub/client/internal/uploader"
  22. clitypes "gitlink.org.cn/cloudream/jcs-pub/client/types"
  23. )
// CacheEntry is a cache entry (file or directory) exposed through the
// FUSE file system.
type CacheEntry interface {
	fuse.FsEntry
	// PathComps returns the entry's path components within the virtual
	// file system, i.e. the path without the cache directory prefix.
	PathComps() []string
}
// CacheEntryInfo is a snapshot of a cache entry's metadata.
type CacheEntryInfo struct {
	PathComps []string
	Size      int64
	Perm      os.FileMode
	ModTime   time.Time
	IsDir     bool
	// Metadata revision number.
	MetaRevision int
	// File data revision number.
	DataRevision int
	// Reference count.
	RefCount int
	// Time the reference count last dropped to zero, i.e. the last-used time.
	FreeTime time.Time
	// Cache level.
	Level CacheLevel
	// Time the cache level last changed.
	ChangeLevelTime time.Time
}
// Cache manages locally cached files and directories. Actively used files
// are tracked in a trie; background scanners upload modified files and
// reclaim space used by idle ones.
type Cache struct {
	cfg        *config.Config
	db         *db.DB
	uploader   *uploader.Uploader
	downloader *downloader.Downloader
	lock       *sync.RWMutex
	cacheDone  chan any // closed by Stop to terminate the background scanners
	doFullScan chan any // buffered signal channel requesting an immediate full scan
	activeCache *trie.Trie[*CacheFile]
}
  58. func NewCache(cfg *config.Config, db *db.DB, uploader *uploader.Uploader, downloader *downloader.Downloader) *Cache {
  59. return &Cache{
  60. cfg: cfg,
  61. db: db,
  62. uploader: uploader,
  63. downloader: downloader,
  64. lock: &sync.RWMutex{},
  65. cacheDone: make(chan any, 1),
  66. doFullScan: make(chan any, 1),
  67. activeCache: trie.NewTrie[*CacheFile](),
  68. }
  69. }
// Start launches the two background goroutines: one scanning the active
// cache trie, one scanning the on-disk data directory.
func (c *Cache) Start() {
	go c.scanningCache()
	go c.scanningData()
}
// Stop terminates the background scanners by closing cacheDone.
// NOTE(review): calling Stop twice would panic on the double close — TODO
// confirm callers invoke it at most once.
func (c *Cache) Stop() {
	close(c.cacheDone)
}
  77. func (c *Cache) GetCacheDataPath(comps ...string) string {
  78. comps2 := make([]string, len(comps)+1)
  79. comps2[0] = c.cfg.DataDir
  80. copy(comps2[1:], comps)
  81. return filepath.Join(comps2...)
  82. }
  83. func (c *Cache) GetCacheMetaPath(comps ...string) string {
  84. comps2 := make([]string, len(comps)+1)
  85. comps2[0] = c.cfg.MetaDir
  86. copy(comps2[1:], comps)
  87. return filepath.Join(comps2...)
  88. }
  89. func (c *Cache) Dump() CacheStatus {
  90. c.lock.RLock()
  91. defer c.lock.RUnlock()
  92. var activeFiles []CacheFileStatus
  93. c.activeCache.Iterate(func(path []string, node *trie.Node[*CacheFile], isWordNode bool) trie.VisitCtrl {
  94. if node.Value == nil {
  95. return trie.VisitContinue
  96. }
  97. info := node.Value.Info()
  98. activeFiles = append(activeFiles, CacheFileStatus{
  99. Path: filepath.Join(path...),
  100. RefCount: info.RefCount,
  101. Level: info.Level.String(),
  102. IsUploading: node.Value.state.uploading != nil,
  103. })
  104. return trie.VisitContinue
  105. })
  106. return CacheStatus{
  107. ActiveFiles: activeFiles,
  108. }
  109. }
// ReclaimSpace requests an immediate full scan, which enforces the cache
// size limit. Non-blocking: if a request is already pending this is a no-op.
func (c *Cache) ReclaimSpace() {
	select {
	case c.doFullScan <- nil:
	default:
	}
}
// Stat returns info for the cache entry at the given path, or nil if the
// path does not exist. Active (in-memory) entries take precedence over
// entries that only exist on disk.
func (c *Cache) Stat(pathComps []string) *CacheEntryInfo {
	c.lock.RLock()
	defer c.lock.RUnlock()
	node, ok := c.activeCache.WalkEnd(pathComps)
	if ok && node.Value != nil {
		info := node.Value.Info()
		return &info
	}
	dataPath := c.GetCacheDataPath(pathComps...)
	stat, err := os.Stat(dataPath)
	if err != nil {
		// TODO: log the error
		return nil
	}
	if stat.IsDir() {
		info, err := loadCacheDirInfo(c, pathComps, stat)
		if err != nil {
			return nil
		}
		return info
	}
	info, err := loadCacheFileInfo(c, pathComps, stat)
	if err != nil {
		return nil
	}
	return info
}
// CreateFile creates a cache file, overwriting any existing file at the
// path. Returns nil if creation fails or the target is a directory.
//
// Callers must call Release to decrement the reference count.
func (c *Cache) CreateFile(pathComps []string) *CacheFile {
	c.lock.Lock()
	defer c.lock.Unlock()
	node, ok := c.activeCache.WalkEnd(pathComps)
	if ok && node.Value != nil {
		// An active file already exists here: delete it, and if it is being
		// uploaded, mark that upload as operating on a deleted file.
		node.Value.Delete()
		if node.Value.state.uploading != nil {
			node.Value.state.uploading.isDeleted = true
		}
	}
	ch, err := createNewCacheFile(c, pathComps)
	if err != nil {
		logger.Warnf("create new cache file %v: %v", pathComps, err)
		return nil
	}
	ch.IncRef()
	c.activeCache.CreateWords(pathComps).Value = ch
	logger.Debugf("create new cache file %v", pathComps)
	return ch
}
// LoadFile loads the cache file at the given path. If the file does not
// exist, a new cache file is created from obj's information; if obj is
// also nil, nil is returned.
//
// Callers must call Release to decrement the reference count.
func (c *Cache) LoadFile(pathComps []string, obj *clitypes.Object) *CacheFile {
	c.lock.Lock()
	defer c.lock.Unlock()
	node, ok := c.activeCache.WalkEnd(pathComps)
	if ok && node.Value != nil {
		node.Value.IncRef()
		return node.Value
	}
	ch, err := loadCacheFile(c, pathComps)
	if err == nil {
		ch.remoteObj = obj
		ch.IncRef()
		c.activeCache.CreateWords(pathComps).Value = ch
		logger.Debugf("load cache %v", pathComps)
		return ch
	}
	if !os.IsNotExist(err) {
		// TODO: log the error
		logger.Warnf("load cache %v: %v", pathComps, err)
		return nil
	}
	if obj == nil {
		return nil
	}
	ch, err = newCacheFileFromObject(c, pathComps, obj)
	if err != nil {
		logger.Warnf("create cache %v from object: %v", pathComps, err)
		return nil
	}
	ch.IncRef()
	c.activeCache.CreateWords(pathComps).Value = ch
	logger.Debugf("create cache %v from object %v", pathComps, obj.ObjectID)
	return ch
}
  204. // 仅加载文件的元数据,如果文件不存在,则返回nil
  205. //
  206. // 记得使用Release减少引用计数
  207. func (c *Cache) LoadReadOnlyFile(pathComps []string) *CacheFile {
  208. c.lock.Lock()
  209. defer c.lock.Unlock()
  210. node, ok := c.activeCache.WalkEnd(pathComps)
  211. if ok && node.Value != nil {
  212. node.Value.IncRef()
  213. return node.Value
  214. }
  215. ch, err := loadReadOnlyCacheFile(c, pathComps)
  216. if err == nil {
  217. ch.IncRef()
  218. c.activeCache.CreateWords(pathComps).Value = ch
  219. logger.Debugf("load cache %v", pathComps)
  220. return ch
  221. }
  222. if !os.IsNotExist(err) {
  223. // TODO 日志记录
  224. logger.Warnf("load cache %v: %v", pathComps, err)
  225. return nil
  226. }
  227. return nil
  228. }
  229. // 创建一个缓存目录。如果目录已经存在,则会重置目录属性。如果加载过程中发生了错误,或者目标位置是一个文件,则会返回nil
  230. func (c *Cache) CreateDir(pathComps []string) *CacheDir {
  231. c.lock.Lock()
  232. defer c.lock.Unlock()
  233. ch, err := createNewCacheDir(c, pathComps)
  234. if err != nil {
  235. logger.Warnf("create cache dir: %v", err)
  236. return nil
  237. }
  238. return ch
  239. }
// CreateDirOption carries the attributes used when LoadDir has to create
// a missing directory.
type CreateDirOption struct {
	ModTime time.Time
}
// LoadDir loads the cache directory at the given path. If it does not
// exist and createOpt is non-nil, the directory is created using those
// options; if createOpt is nil, nil is returned.
func (c *Cache) LoadDir(pathComps []string, createOpt *CreateDirOption) *CacheDir {
	c.lock.Lock()
	defer c.lock.Unlock()
	ch, err := loadCacheDir(c, pathComps)
	if err == nil {
		return ch
	}
	if !os.IsNotExist(err) {
		// TODO: log the error
		return nil
	}
	if createOpt == nil {
		return nil
	}
	// Create the directory from the provided options.
	ch, err = makeCacheDirFromOption(c, pathComps, *createOpt)
	if err != nil {
		// TODO: log the error
		return nil
	}
	return ch
}
// StatMany returns info for every cache entry directly under the given
// path, merging active (in-memory) entries with entries found on disk.
func (c *Cache) StatMany(pathComps []string) []CacheEntryInfo {
	c.lock.RLock()
	defer c.lock.RUnlock()
	var infos []CacheEntryInfo
	exists := make(map[string]bool)
	node, ok := c.activeCache.WalkEnd(pathComps)
	if ok {
		// Collect active entries first, remembering their names so the
		// on-disk pass below does not report them twice.
		for name, child := range node.WordNexts {
			if child.Value != nil {
				infos = append(infos, child.Value.Info())
				exists[name] = true
			}
		}
	}
	// NOTE(review): if ReadDir fails, any active entries gathered above are
	// discarded and nil is returned — confirm this is intended.
	osEns, err := os.ReadDir(c.GetCacheDataPath(pathComps...))
	if err != nil {
		return nil
	}
	for _, e := range osEns {
		if exists[e.Name()] {
			continue
		}
		info, err := e.Info()
		if err != nil {
			continue
		}
		if e.IsDir() {
			info, err := loadCacheDirInfo(c, append(lo2.ArrayClone(pathComps), e.Name()), info)
			if err != nil {
				continue
			}
			infos = append(infos, *info)
		} else {
			info, err := loadCacheFileInfo(c, append(lo2.ArrayClone(pathComps), e.Name()), info)
			if err != nil {
				continue
			}
			infos = append(infos, *info)
		}
	}
	return infos
}
// Remove deletes the cache file or directory at the given path. Removing
// a non-empty directory returns fuse.ErrNotEmpty.
func (c *Cache) Remove(pathComps []string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	node, ok := c.activeCache.WalkEnd(pathComps)
	if ok {
		if len(node.WordNexts) > 0 {
			return fuse.ErrNotEmpty
		}
		if node.Value != nil {
			node.Value.Delete()
			// Tell any in-flight upload that the file has been deleted.
			if node.Value.state.uploading != nil {
				node.Value.state.uploading.isDeleted = true
			}
		}
		node.RemoveSelf(true)
		logger.Debugf("active cache %v removed", pathComps)
		return nil
	}
	metaPath := c.GetCacheMetaPath(pathComps...)
	dataPath := c.GetCacheDataPath(pathComps...)
	// Best-effort removal of the meta file; the data file's error decides
	// the overall result.
	os.Remove(metaPath)
	err := os.Remove(dataPath)
	if err == nil || os.IsNotExist(err) {
		logger.Debugf("local cache %v removed", pathComps)
		return nil
	}
	if errors.Is(err, syscall.ENOTEMPTY) {
		return fuse.ErrNotEmpty
	}
	return err
}
// Move moves a cache file or directory to a new path. It returns
// fuse.ErrExists if the destination already exists and fuse.ErrNotExists
// if the source does not exist.
func (c *Cache) Move(pathComps []string, newPathComps []string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, ok := c.activeCache.WalkEnd(newPathComps)
	if ok {
		return fuse.ErrExists
	}
	newMetaPath := c.GetCacheMetaPath(newPathComps...)
	newDataPath := c.GetCacheDataPath(newPathComps...)
	_, err := os.Stat(newDataPath)
	if err == nil {
		return fuse.ErrExists
	}
	if !os.IsNotExist(err) {
		return err
	}
	oldMetaPath := c.GetCacheMetaPath(pathComps...)
	oldDataPath := c.GetCacheDataPath(pathComps...)
	// Make sure the source exists before changing anything.
	_, err = os.Stat(oldDataPath)
	if err != nil {
		if os.IsNotExist(err) {
			return fuse.ErrNotExists
		}
		return err
	}
	// Creating the parent directories handles the case where the moved file
	// is not present locally. Side effect: if the destination's parent truly
	// does not exist, it gets created here unintentionally.
	newMetaDir := filepath.Dir(newMetaPath)
	err = os.MkdirAll(newMetaDir, 0755)
	if err != nil {
		return err
	}
	newDataDir := filepath.Dir(newDataPath)
	err = os.MkdirAll(newDataDir, 0755)
	if err != nil {
		return err
	}
	// Each cache file holds open handles to its meta and data files, so
	// renaming does not invalidate those handles. Errors here can only be
	// ignored.
	os.Rename(oldMetaPath, newMetaPath)
	os.Rename(oldDataPath, newDataPath)
	// Update the in-memory cache trie: splice the old subtree under the new
	// path and notify every contained cache file of its new location.
	oldNode, ok := c.activeCache.WalkEnd(pathComps)
	if ok {
		newNode := c.activeCache.CreateWords(newPathComps)
		newNode.Value = oldNode.Value
		newNode.WordNexts = oldNode.WordNexts
		oldNode.RemoveSelf(false)
		if newNode.Value != nil {
			newNode.Value.Move(newPathComps)
		}
		newNode.Iterate(func(path []string, node *trie.Node[*CacheFile], isWordNode bool) trie.VisitCtrl {
			if node.Value != nil {
				node.Value.Move(lo2.AppendNew(newPathComps, path...))
			}
			return trie.VisitContinue
		})
	}
	logger.Debugf("cache moved: %v -> %v", pathComps, newPathComps)
	return nil
}
// syncPackage groups cache files belonging to the same remote package
// (bucket + package name) for one synchronization batch.
type syncPackage struct {
	bktName string
	pkgName string
	pkg     clitypes.Package
	upObjs  []*uploadingObject
}

// uploadingObject tracks the state of one cache file while its data or
// metadata is being synchronized to the remote.
type uploadingObject struct {
	pathComps    []string
	cache        *CacheFile
	reader       *CacheFileHandle // non-nil when file data must be uploaded
	modTime      time.Time
	metaRevision int
	isDeleted    bool // set when the file is deleted while the upload is in flight
	isSuccess    bool // set when the object body uploaded successfully
}

// packageFullName uniquely identifies a package by bucket and package name.
type packageFullName struct {
	bktName string
	pkgName string
}
// scanningCache is the background maintenance loop: it runs an incremental
// fast scan on each tick, and a full scan whenever an incremental pass
// completes or one is explicitly requested via doFullScan.
func (c *Cache) scanningCache() {
	ticker := time.NewTicker(time.Second * 5)
	defer ticker.Stop()
	lastScanPath := []string{}
	nextFullScan := false
	for {
		select {
		case _, ok := <-c.cacheDone:
			if !ok {
				return
			}
		case <-ticker.C:
		case <-c.doFullScan:
			nextFullScan = true
		}
		// After each completed round of quick incremental full-coverage
		// scanning, run one immediate full scan.
		if nextFullScan {
			c.fullScan()
			nextFullScan = false
			continue
		}
		lastScanPath = c.fastScan(lastScanPath)
		if len(lastScanPath) == 0 {
			nextFullScan = true
		}
	}
}
// fullScan checks whether the total size of unmodified read-only caches
// exceeds the configured limit, and if so evicts entries starting from the
// least recently used until the total is back under the limit.
func (c *Cache) fullScan() {
	log := logger.WithField("Mod", "Mount")
	startTime := time.Now()
	log.Debug("begin full scan")
	defer func() {
		log.Debugf("full scan done, time: %v", time.Since(startTime))
	}()
	c.lock.Lock()
	defer c.lock.Unlock()
	totalCacheSize := int64(0)
	type readOnlyCache struct {
		Info CacheEntryInfo
		Node *trie.Node[*CacheFile]
	}
	// Collect eviction candidates: entries at or below the read-only level
	// with no unsynced data or metadata changes.
	var readOnlyCaches []readOnlyCache
	c.activeCache.Iterate(func(path []string, node *trie.Node[*CacheFile], isWordNode bool) trie.VisitCtrl {
		ch := node.Value
		if ch == nil {
			return trie.VisitContinue
		}
		info := ch.Info()
		if info.Level > LevelReadOnly {
			return trie.VisitContinue
		}
		if info.DataRevision > 0 || info.MetaRevision > 0 {
			return trie.VisitContinue
		}
		readOnlyCaches = append(readOnlyCaches, readOnlyCache{
			Info: info,
			Node: node,
		})
		totalCacheSize += info.Size
		return trie.VisitContinue
	})
	// If the total cached size exceeds the limit, delete starting from the
	// least recently used entries.
	if c.cfg.MaxCacheSize > 0 {
		needReclaim := totalCacheSize - c.cfg.MaxCacheSize
		if needReclaim > 0 {
			readOnlyCaches = sort2.Sort(readOnlyCaches, func(left, right readOnlyCache) int {
				return left.Info.FreeTime.Compare(right.Info.FreeTime)
			})
			reclaimed := int64(0)
			rmCnt := 0
			for _, rc := range readOnlyCaches {
				rc.Node.Value.Delete()
				rc.Node.RemoveSelf(true)
				needReclaim -= rc.Info.Size
				reclaimed += rc.Info.Size
				rmCnt += 1
				if needReclaim <= 0 {
					break
				}
			}
			log.Infof("%v cache file removed, reclaimed %v bytes, total cache size: %v", rmCnt, reclaimed, totalCacheSize-reclaimed)
		}
	}
	// TODO: other checks could be added here, e.g. the number of open file handles.
}
// fastScan scans a bounded portion of the active cache starting from
// lastScanPath, applying the per-node lifecycle policy (visitNode), and
// returns the path to resume from on the next round — empty when a full
// pass completed. Files selected for upload are synced asynchronously.
func (c *Cache) fastScan(lastScanPath []string) []string {
	c.lock.Lock()
	uploadingPkgs := make(map[packageFullName]*syncPackage)
	visitCnt := 0
	visitBreak := false
	node, _ := c.activeCache.WalkEnd(lastScanPath)
	node.Iterate(func(path []string, node *trie.Node[*CacheFile], isWordNode bool) trie.VisitCtrl {
		ch := node.Value
		if ch == nil {
			return trie.VisitContinue
		}
		info := ch.Info()
		// Skip files that are still referenced.
		if info.RefCount > 0 {
			logger.Debugf("skip cache %v, refCount: %v", path, info.RefCount)
			return trie.VisitContinue
		}
		visitCnt++
		c.visitNode(path, node, ch, info, uploadingPkgs)
		// Visit at most 500 nodes per round to avoid holding the lock too long.
		if visitCnt > 500 {
			lastScanPath = lo2.ArrayClone(path)
			visitBreak = true
			return trie.VisitBreak
		}
		return trie.VisitContinue
	})
	if !visitBreak {
		lastScanPath = []string{}
	}
	c.lock.Unlock()
	if len(uploadingPkgs) > 0 {
		go c.doSync(lo.Values(uploadingPkgs))
	}
	return lastScanPath
}
// visitNode applies the cache lifecycle policy to one idle cache file
// (refCount == 0): modified files are queued for upload, unmodified loaded
// files are unloaded after a period of disuse, and long-expired read-only
// entries are dropped. Files queued for upload are grouped into
// uploadingPkgs by package full name.
func (c *Cache) visitNode(path []string, node *trie.Node[*CacheFile], ch *CacheFile, info CacheEntryInfo, uploadingPkgs map[packageFullName]*syncPackage) {
	shouldUpload := true
	// Files not stored inside a Package (fewer than bucket/package levels)
	// never need uploading.
	if len(ch.pathComps) <= 2 {
		shouldUpload = false
	}
	// 1. The local cache was modified: upload it once it has gone unused
	// for a while.
	if shouldUpload && (info.DataRevision > 0 || info.MetaRevision > 0) {
		if time.Since(info.FreeTime) < c.cfg.UploadPendingTime {
			return
		}
		if ch.state.uploading != nil {
			return
		}
		// Uploading requires the fully-loaded cache level.
		if !ch.LevelUp(LevelComplete) {
			return
		}
		fullName := packageFullName{ch.pathComps[0], ch.pathComps[1]}
		pkg, ok := uploadingPkgs[fullName]
		if !ok {
			pkg = &syncPackage{
				bktName: ch.pathComps[0],
				pkgName: ch.pathComps[1],
			}
			uploadingPkgs[fullName] = pkg
		}
		up := &uploadingObject{
			pathComps: lo2.ArrayClone(ch.pathComps),
			cache:     ch,
		}
		pkg.upObjs = append(pkg.upObjs, up)
		ch.state.uploading = up
		if info.DataRevision > 0 {
			up.reader = ch.OpenReadWhenScanning()
		}
		if info.MetaRevision > 0 {
			up.modTime = info.ModTime
			up.metaRevision = info.MetaRevision
		}
		return
	}
	// 2. The local cache is unmodified: unload it after a period of disuse.
	if info.Level > LevelReadOnly {
		if time.Since(info.FreeTime) > c.cfg.CacheActiveTime {
			ch.LevelDown(LevelReadOnly)
		}
		return
	}
	// 3. An unloaded cache is deleted after a further period of disuse.
	if info.Level <= LevelReadOnly {
		// Deletion requires that both the time since last use and the time
		// since the last level change exceed the configured expiry.
		if time.Since(info.FreeTime) > c.cfg.CacheExpireTime && time.Since(info.ChangeLevelTime) > c.cfg.CacheExpireTime {
			// Already synced to the remote: the local cache can be deleted
			// outright.
			if info.MetaRevision == 0 && info.DataRevision == 0 {
				ch.Delete()
			}
			// Data or metadata was modified but the level is at or below
			// ReadOnly, meaning an earlier check decided it need not be
			// uploaded. Just drop the cache record (it will be re-loaded by
			// the data directory scan).
			node.RemoveSelf(true)
		}
		return
	}
}
// scanningData periodically walks the on-disk data directory to find files
// not yet tracked in the active cache and loads them as read-only entries.
// The walk is incremental: open directory handles are carried across ticks
// so each round visits only a bounded number of entries.
func (c *Cache) scanningData() {
	ticker := time.NewTicker(c.cfg.ScanDataDirInterval)
	defer ticker.Stop()
	// walkTrace is the stack of open directory handles along the current
	// walk path; walkTraceComps holds the corresponding path components.
	var walkTrace []*os.File
	var walkTraceComps []string
	for {
		select {
		case <-ticker.C:
		case <-c.cacheDone:
			return
		}
		startTime := time.Now()
		logger.Infof("begin scanning data dir")
		if len(walkTrace) == 0 {
			dir, err := os.Open(c.cfg.DataDir)
			if err != nil {
				logger.Warnf("open data dir: %v", err)
				continue
			}
			walkTrace = []*os.File{dir}
			// NOTE(review): the root component is MetaDir although the walk
			// opens DataDir. The first element is always stripped before use
			// (walkTraceComps[1:]), so the value is never read — but DataDir
			// would be less confusing. TODO confirm.
			walkTraceComps = []string{c.cfg.MetaDir}
		}
		const maxVisitCnt = 5000
		const maxUntrackedFiles = 500
		var untrackedFiles [][]string
		visitCnt := 0
		// Visit at most 5000 entries (including directories on the path) and
		// collect at most 500 untracked files per round.
		for len(walkTrace) > 0 && visitCnt < maxVisitCnt && len(untrackedFiles) < maxUntrackedFiles {
			lastNode := walkTrace[len(walkTrace)-1]
			visitCnt++
			e, err := lastNode.Readdir(1)
			if err == io.EOF {
				// Directory exhausted: pop it from the walk stack.
				lastNode.Close()
				walkTrace = walkTrace[:len(walkTrace)-1]
				walkTraceComps = walkTraceComps[:len(walkTraceComps)-1]
				continue
			}
			if err != nil {
				logger.Warnf("read dir %v: %v", lastNode.Name(), err)
				lastNode.Close()
				walkTrace = walkTrace[:len(walkTrace)-1]
				walkTraceComps = walkTraceComps[:len(walkTraceComps)-1]
				continue
			}
			if e[0].IsDir() {
				// Descend into the subdirectory.
				child, err := os.Open(filepath.Join(lastNode.Name(), e[0].Name()))
				if err != nil {
					logger.Warnf("open dir %v: %v", e[0].Name(), err)
					continue
				}
				walkTrace = append(walkTrace, child)
				walkTraceComps = append(walkTraceComps, e[0].Name())
				continue
			}
			// Files not at the Package level or deeper are not tracked.
			if len(walkTrace) <= 2 {
				continue
			}
			// Loading unconditionally may re-track files whose cache level
			// dropped to the minimum and were untracked; acceptable because
			// the scan frequency is low.
			walkTraceComps = append(walkTraceComps, e[0].Name())
			untrackedFiles = append(untrackedFiles, lo2.ArrayClone(walkTraceComps[1:]))
			walkTraceComps = walkTraceComps[:len(walkTraceComps)-1]
		}
		if len(untrackedFiles) > 0 {
			for _, comps := range untrackedFiles {
				ch := c.LoadReadOnlyFile(comps)
				if ch != nil {
					ch.Release()
				}
			}
		}
		logger.Infof("%v file visited, %v untracked files found, time: %v", visitCnt, len(untrackedFiles), time.Since(startTime))
	}
}
  687. func (c *Cache) doSync(pkgs []*syncPackage) {
  688. var uploadPkgs []*syncPackage
  689. var updateOnlyPkgs []*syncPackage
  690. for _, p := range pkgs {
  691. var updateOnly *syncPackage
  692. var upload *syncPackage
  693. for _, o := range p.upObjs {
  694. if o.reader != nil {
  695. if upload == nil {
  696. upload = &syncPackage{
  697. bktName: p.bktName,
  698. pkgName: p.pkgName,
  699. }
  700. }
  701. upload.upObjs = append(upload.upObjs, o)
  702. } else {
  703. if updateOnly == nil {
  704. updateOnly = &syncPackage{
  705. bktName: p.bktName,
  706. pkgName: p.pkgName,
  707. }
  708. }
  709. updateOnly.upObjs = append(updateOnly.upObjs, o)
  710. }
  711. }
  712. if upload != nil {
  713. uploadPkgs = append(uploadPkgs, upload)
  714. }
  715. if updateOnly != nil {
  716. updateOnlyPkgs = append(updateOnlyPkgs, updateOnly)
  717. }
  718. }
  719. // 先上传文件,再更新文件元数据。上传文件时会创建Package,这样后续更新元数据时就能查到Package。
  720. if len(uploadPkgs) > 0 {
  721. c.doUploading(uploadPkgs)
  722. }
  723. if len(updateOnlyPkgs) > 0 {
  724. c.doUpdatingOnly(updateOnlyPkgs)
  725. }
  726. }
// doUpdatingOnly syncs metadata-only changes (modification times) for the
// given packages by looking each package up and batch-updating object
// update times in the database.
func (c *Cache) doUpdatingOnly(pkgs []*syncPackage) {
	/// 1. Metadata-only update: just try to look the Package up.
	var sucPkgs []*syncPackage
	var failedPkgs []*syncPackage
	for _, pkg := range pkgs {
		p, err := c.db.Package().GetByFullName(c.db.DefCtx(), pkg.bktName, pkg.pkgName)
		if err != nil {
			logger.Warnf("get package %v/%v: %v", pkg.bktName, pkg.pkgName, err)
			failedPkgs = append(failedPkgs, pkg)
			continue
		}
		pkg.pkg = p
		sucPkgs = append(sucPkgs, pkg)
	}
	/// 2. For Packages that could not be found, clear the uploading state
	/// under the lock.
	c.lock.Lock()
	for _, pkg := range failedPkgs {
		for _, obj := range pkg.upObjs {
			obj.cache.state.uploading = nil
		}
	}
	c.lock.Unlock()
	/// 3. Update each Package.
	for _, p := range sucPkgs {
		pathes := make([]string, 0, len(p.upObjs))
		modTimes := make([]time.Time, 0, len(p.upObjs))
		for _, obj := range p.upObjs {
			pathes = append(pathes, clitypes.JoinObjectPath(obj.pathComps[2:]...))
			modTimes = append(modTimes, obj.modTime)
		}
		err := c.db.Object().BatchUpdateUpdateTimeByPath(c.db.DefCtx(), p.pkg.PackageID, pathes, modTimes)
		if err != nil {
			logger.Warnf("batch update package %v/%v: %v", p.bktName, p.pkgName, err)
			c.lock.Lock()
			for _, obj := range p.upObjs {
				obj.cache.state.uploading = nil
			}
			c.lock.Unlock()
			continue
		}
		logger.Infof("update %v object in package %v/%v", len(p.upObjs), p.bktName, p.pkgName)
		// Record the results under the lock.
		c.lock.Lock()
		for _, obj := range p.upObjs {
			obj.cache.state.uploading = nil
			obj.cache.RevisionUploaded(0, obj.metaRevision)
		}
		c.lock.Unlock()
	}
}
// doUploading uploads the file data of the given packages to remote
// storage, creating each Package if needed, then records the results back
// into each cache file. Objects deleted or moved across packages during
// the upload are canceled; objects moved within the same package are
// renamed on commit.
func (c *Cache) doUploading(pkgs []*syncPackage) {
	/// 1. Try to create each Package first.
	var sucPkgs []*syncPackage
	var failedPkgs []*syncPackage
	for _, pkg := range pkgs {
		p, err := db.DoTx21(c.db, c.db.Package().TryCreateAll, pkg.bktName, pkg.pkgName)
		if err != nil {
			logger.Warnf("try create package %v/%v: %v", pkg.bktName, pkg.pkgName, err)
			failedPkgs = append(failedPkgs, pkg)
			continue
		}
		pkg.pkg = p
		sucPkgs = append(sucPkgs, pkg)
	}
	/// 2. For Packages that failed to create, close the files and skip the
	// upload. Clearing the uploading state must happen under the lock.
	c.lock.Lock()
	for _, pkg := range failedPkgs {
		for _, obj := range pkg.upObjs {
			obj.cache.state.uploading = nil
		}
	}
	c.lock.Unlock()
	for _, pkg := range failedPkgs {
		for _, obj := range pkg.upObjs {
			obj.reader.Close()
		}
	}
	/// 3. Upload each successfully created Package.
	for _, p := range sucPkgs {
		upder, err := c.uploader.BeginUpdate(p.pkg.PackageID, 0, nil, nil)
		if err != nil {
			logger.Warnf("begin upload package %v/%v: %v", p.bktName, p.pkgName, err)
			// Cancel the uploading state.
			c.lock.Lock()
			for _, obj := range p.upObjs {
				obj.cache.state.uploading = nil
			}
			c.lock.Unlock()
			for _, obj := range p.upObjs {
				obj.reader.Close()
			}
			continue
		}
		upSuc := 0
		upSucAmt := int64(0)
		upFailed := 0
		upStartTime := time.Now()
		logger.Infof("begin uploading %v objects to package %v/%v", len(p.upObjs), p.bktName, p.pkgName)
		for _, o := range p.upObjs {
			rd := cacheFileReader{
				rw: o.reader,
			}
			counter := io2.Counter(&rd)
			err = upder.Upload(clitypes.PathFromComps(o.pathComps[2:]...), counter, uploader.UploadOption{
				CreateTime: o.modTime,
			})
			if err != nil {
				logger.Warnf("upload object %v: %v", o.pathComps, err)
				upFailed++
				continue
			}
			o.isSuccess = true
			upSuc++
			upSucAmt += counter.Count()
		}
		// Record the upload results under the lock.
		c.lock.Lock()
		upCancel := 0
		upRename := 0
		// Check whether any file changed while it was being uploaded.
		var sucObjs []*uploadingObject
		for _, o := range p.upObjs {
			o.cache.state.uploading = nil
			if !o.isSuccess {
				continue
			}
			oldPath := clitypes.JoinObjectPath(o.pathComps[2:]...)
			newPath := clitypes.JoinObjectPath(o.cache.pathComps[2:]...)
			if o.isDeleted {
				upder.CancelObject(oldPath)
				upCancel++
				continue
			}
			// An object that moved to a different Package must also be canceled.
			if !lo2.ArrayEquals(o.pathComps[:2], o.cache.pathComps[:2]) {
				upder.CancelObject(oldPath)
				upCancel++
				continue
			}
			// Only objects that moved within the same Package can simply be
			// renamed.
			if newPath != oldPath {
				upder.RenameObject(oldPath, newPath)
				upRename++
			}
			sucObjs = append(sucObjs, o)
		}
		_, err = upder.Commit()
		if err != nil {
			logger.Warnf("commit update package %v/%v: %v", p.bktName, p.pkgName, err)
		} else {
			for _, obj := range sucObjs {
				obj.cache.RevisionUploaded(obj.reader.revision, obj.metaRevision)
			}
			upTime := time.Since(upStartTime)
			logger.Infof("upload package %v/%v in %v, upload: %v, size: %v, speed: %v/s, cancel: %v, rename: %v",
				p.bktName, p.pkgName, upTime, upSuc, upSucAmt, bytesize.New(float64(upSucAmt)/upTime.Seconds()), upCancel, upRename)
		}
		c.lock.Unlock()
		// Closing the files affects refCount, so whether the upload succeeded
		// or failed, the next lifecycle stage only happens after a further
		// waiting period.
		for _, obj := range p.upObjs {
			obj.reader.Close()
		}
	}
}
// cacheFileReader adapts a CacheFileHandle's ReadAt-style API to the
// io.Reader interface by tracking the current read offset.
type cacheFileReader struct {
	rw  *CacheFileHandle
	pos int64
}
  896. func (r *cacheFileReader) Read(p []byte) (int, error) {
  897. n, err := r.rw.ReadAt(p, r.pos)
  898. r.pos += int64(n)
  899. if err != nil {
  900. return n, err
  901. }
  902. if n != len(p) {
  903. return n, io.EOF
  904. }
  905. return n, nil
  906. }

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。