@@ -20,7 +20,7 @@ func (s *Server) Mount() *MountService {
 }
 
 func (m *MountService) DumpStatus(ctx *gin.Context) {
-    log := logger.WithField("HTTP", "Object.ListByPath")
+    log := logger.WithField("HTTP", "Mount.DumpStatus")
 
     var req cliapi.MountDumpStatus
     if err := ctx.ShouldBindQuery(&req); err != nil {
@@ -30,7 +30,20 @@ func (m *MountService) DumpStatus(ctx *gin.Context) {
 	}
 
 	dumpStatus := m.svc.Mount.Dump()
-	ctx.JSON(http.StatusOK, cliapi.MountDumpStatusPathResp{
+	ctx.JSON(http.StatusOK, OK(cliapi.MountDumpStatusResp{
 		MountStatus: dumpStatus,
-	})
+	}))
+}
+
+func (m *MountService) StartReclaimSpace(ctx *gin.Context) {
+	// log := logger.WithField("HTTP", "Mount.ReclaimSpace")
+	// var req cliapi.MountReclaimSpace
+	// if err := ctx.ShouldBindJSON(&req); err != nil {
+	// 	log.Warnf("binding body: %s", err.Error())
+	// 	ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
+	// 	return
+	// }
+
+	m.svc.Mount.StartReclaimSpace()
+	ctx.JSON(http.StatusOK, OK(cliapi.StartMountReclaimSpaceResp{}))
 }
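Note: StartReclaimSpace deliberately leaves the request-body binding commented out, as the endpoint takes no arguments yet. The handler only signals the mount layer and returns 200 immediately; the reclamation itself runs asynchronously in the cache scan loop later in this patch.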
@@ -71,4 +71,5 @@ func (s *Server) InitRouters(rt gin.IRoutes) {
 	rt.POST(cliapi.PresignedObjectCompleteMultipartUploadPath, awsAuth.PresignedAuth, s.Presigned().ObjectCompleteMultipartUpload)
 
 	rt.GET(cliapi.MountDumpStatusPath, awsAuth.Auth, s.Mount().DumpStatus)
+	rt.POST(cliapi.MountStartReclaimSpacePath, awsAuth.Auth, s.Mount().StartReclaimSpace)
 }
@@ -1,6 +1,8 @@
 package config
 
-import "time"
+import (
+	"time"
+)
 
 type Config struct {
 	Enabled bool `json:"enabled"`
@@ -10,8 +12,10 @@ type Config struct {
 	// Directory for cached data. It may be a directory that already has content; in that case the existing files are visible when browsing the folder through the mount point.
 	DataDir string `json:"dataDir"`
 	// Directory for cached metadata: file name, size, modification time, permissions, etc. Its directory structure mirrors DataDir.
-	MetaDir     string        `json:"metaDir"`
-	AttrTimeout time.Duration `json:"attrTimeout"`
+	MetaDir string `json:"metaDir"`
+	// Maximum total size of cache files (excluding files that are in the cache directory but not yet synced to the cloud)
+	MaxCacheSize int64         `json:"maxCacheSize"`
+	AttrTimeout  time.Duration `json:"attrTimeout"`
 	// How long a modified file waits before being uploaded to the cloud; any read or write during the wait resets the timer
 	UploadPendingTime time.Duration `json:"uploadPendingTime"`
 	// Expiration time for cache file info loaded into memory; a file not accessed within this period is dropped from the cache
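Note: for illustration, a config literal using the new field might look like the fragment below (paths and values are hypothetical; only fields shown in this patch are used). Per the `MaxCacheSize > 0` check in fullScan further down, a value of 0 leaves the total cache size unlimited.

    cfg := config.Config{
        Enabled:      true,
        DataDir:      "/var/cache/jcs/data", // hypothetical path
        MetaDir:      "/var/cache/jcs/meta", // hypothetical path
        MaxCacheSize: 10 << 30,              // 10 GiB; 0 disables the size limit
        AttrTimeout:  10 * time.Second,
    }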
@@ -99,6 +99,14 @@ func (m *Mount) Dump() MountStatus {
 	}
 }
 
+func (m *Mount) StartReclaimSpace() {
+	if m.vfs == nil {
+		return
+	}
+
+	m.vfs.ReclaimSpace()
+}
+
 func (m *Mount) NotifyObjectInvalid(obj clitypes.Object) {
 
 }
@@ -39,6 +39,10 @@ func (m *Mount) Dump() MountStatus {
 	return MountStatus{}
 }
 
+func (m *Mount) StartReclaimSpace() {
+
+}
+
 func (m *Mount) NotifyObjectInvalid(obj clitypes.Object) {
 
 }
@@ -15,6 +15,7 @@ import (
 	"gitlink.org.cn/cloudream/common/pkgs/trie"
 	"gitlink.org.cn/cloudream/common/utils/io2"
 	"gitlink.org.cn/cloudream/common/utils/lo2"
+	"gitlink.org.cn/cloudream/common/utils/sort2"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader"
 	"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/config"
@@ -41,7 +42,7 @@ type CacheEntryInfo struct {
 	DataRevision int
 	// Reference count
 	RefCount int
-	// Time when the reference count last dropped to zero
+	// Time when the reference count last dropped to zero, i.e. when the entry was last used
 	FreeTime time.Time
 	// Cache level
 	Level CacheLevel
@@ -56,6 +57,7 @@ type Cache struct {
 	downloader  *downloader.Downloader
 	lock        *sync.RWMutex
 	cacheDone   chan any
+	doFullScan  chan any
 	activeCache *trie.Trie[*CacheFile]
 }
@@ -66,7 +68,8 @@ func NewCache(cfg *config.Config, db *db.DB, uploader *uploader.Uploader, downlo
 		uploader:    uploader,
 		downloader:  downloader,
 		lock:        &sync.RWMutex{},
-		cacheDone:   make(chan any),
+		cacheDone:   make(chan any, 1),
+		doFullScan:  make(chan any, 1),
 		activeCache: trie.NewTrie[*CacheFile](),
 	}
 }
@@ -120,6 +123,13 @@ func (c *Cache) Dump() CacheStatus {
 	}
 }
 
+func (c *Cache) ReclaimSpace() {
+	select {
+	case c.doFullScan <- nil:
+	default:
+	}
+}
+
 // Get the cache entry info at the given path. Returns nil if the path does not exist.
 func (c *Cache) Stat(pathComps []string) *CacheEntryInfo {
 	c.lock.RLock()
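Note: ReclaimSpace performs a non-blocking send on the one-slot doFullScan channel, so any number of concurrent trigger calls collapse into at most one pending full scan. A minimal standalone sketch of the same pattern (names here are illustrative, not from this repo):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        trigger := make(chan struct{}, 1) // at most one pending request

        // Requester side: never blocks; duplicate requests are coalesced.
        requestScan := func() {
            select {
            case trigger <- struct{}{}:
            default: // a scan is already pending
            }
        }

        // Worker side: runs one scan per drained signal.
        go func() {
            for range trigger {
                fmt.Println("full scan triggered")
            }
        }()

        requestScan()
        requestScan() // coalesced with the first
        time.Sleep(50 * time.Millisecond)
    }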
@@ -229,6 +239,35 @@ func (c *Cache) LoadFile(pathComps []string, obj *clitypes.Object) *CacheFile {
 	return ch
 }
 
+// Load only the file's metadata. Returns nil if the file does not exist.
+//
+// Remember to call Release to decrease the reference count.
+func (c *Cache) LoadReadOnlyFile(pathComps []string) *CacheFile {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	node, ok := c.activeCache.WalkEnd(pathComps)
+	if ok && node.Value != nil {
+		node.Value.IncRef()
+		return node.Value
+	}
+
+	ch, err := loadReadOnlyCacheFile(c, pathComps)
+	if err == nil {
+		ch.IncRef()
+		c.activeCache.CreateWords(pathComps).Value = ch
+		logger.Debugf("load cache %v", pathComps)
+		return ch
+	}
+
+	if !os.IsNotExist(err) {
+		logger.Warnf("load cache %v: %v", pathComps, err)
+		return nil
+	}
+
+	return nil
+}
+
 // Create a cache directory. If the directory already exists, its attributes are reset. Returns nil if an error occurs during loading or if the target is a file.
 func (c *Cache) CreateDir(pathComps []string) *CacheDir {
 	c.lock.Lock()
@@ -475,6 +516,7 @@ func (c *Cache) scanningCache() {
 	defer ticker.Stop()
 
 	lastScanPath := []string{}
+	nextFullScan := false
 
 	for {
 		select {
@@ -484,51 +526,147 @@ func (c *Cache) scanningCache() {
 			}
 		case <-ticker.C:
+		case <-c.doFullScan:
+			nextFullScan = true
 		}
 
-		c.lock.Lock()
-
-		uploadingPkgs := make(map[packageFullName]*syncPackage)
-
-		visitCnt := 0
-		visitBreak := false
-		node, _ := c.activeCache.WalkEnd(lastScanPath)
-		node.Iterate(func(path []string, node *trie.Node[*CacheFile], isWordNode bool) trie.VisitCtrl {
-			ch := node.Value
-			if ch == nil {
-				return trie.VisitContinue
-			}
-
-			info := ch.Info()
-			if info.RefCount > 0 {
-				logger.Debugf("skip cache %v, refCount: %v", path, info.RefCount)
-				return trie.VisitContinue
-			}
-
-			visitCnt++
-			c.visitNode(path, node, ch, info, uploadingPkgs)
-
-			// Visit at most 500 nodes per pass, so the lock is not held for too long
-			if visitCnt > 500 {
-				lastScanPath = lo2.ArrayClone(path)
-				visitBreak = true
-				return trie.VisitBreak
-			}
-			return trie.VisitContinue
-		})
-		if !visitBreak {
-			lastScanPath = []string{}
-		}
-		c.lock.Unlock()
-
-		if len(uploadingPkgs) > 0 {
-			go c.doSync(lo.Values(uploadingPkgs))
-		}
-	}
-}
+		// After each complete round of the incremental fast scan, run one immediate full scan
+		if nextFullScan {
+			c.fullScan()
+			nextFullScan = false
+			continue
+		}
+
+		lastScanPath = c.fastScan(lastScanPath)
+		if len(lastScanPath) == 0 {
+			nextFullScan = true
+		}
+	}
+}
+
+// Full scan: mainly checks whether the total cache size exceeds the limit
+func (c *Cache) fullScan() {
+	log := logger.WithField("Mod", "Mount")
+
+	startTime := time.Now()
+	log.Debug("begin full scan")
+	defer func() {
+		log.Debugf("full scan done, time: %v", time.Since(startTime))
+	}()
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	totalCacheSize := int64(0)
+
+	type readOnlyCache struct {
+		Info CacheEntryInfo
+		Node *trie.Node[*CacheFile]
+	}
+	var readOnlyCaches []readOnlyCache
+
+	c.activeCache.Iterate(func(path []string, node *trie.Node[*CacheFile], isWordNode bool) trie.VisitCtrl {
+		ch := node.Value
+		if ch == nil {
+			return trie.VisitContinue
+		}
+
+		info := ch.Info()
+		if info.Level > LevelReadOnly {
+			return trie.VisitContinue
+		}
+		if info.DataRevision > 0 || info.MetaRevision > 0 {
+			return trie.VisitContinue
+		}
+
+		readOnlyCaches = append(readOnlyCaches, readOnlyCache{
+			Info: info,
+			Node: node,
+		})
+		totalCacheSize += info.Size
+		return trie.VisitContinue
+	})
+
+	// If the total size of cache files exceeds the limit, delete entries starting from the least recently used
+	if c.cfg.MaxCacheSize > 0 {
+		needReclaim := totalCacheSize - c.cfg.MaxCacheSize
+		if needReclaim > 0 {
+			readOnlyCaches = sort2.Sort(readOnlyCaches, func(left, right readOnlyCache) int {
+				return left.Info.FreeTime.Compare(right.Info.FreeTime)
+			})
+
+			reclaimed := int64(0)
+			rmCnt := 0
+			for _, rc := range readOnlyCaches {
+				rc.Node.Value.Delete()
+				rc.Node.RemoveSelf(true)
+				needReclaim -= rc.Info.Size
+				reclaimed += rc.Info.Size
+				rmCnt += 1
+				if needReclaim <= 0 {
+					break
+				}
+			}
+			log.Infof("%v cache files removed, reclaimed %v bytes, total cache size: %v", rmCnt, reclaimed, totalCacheSize-reclaimed)
+		}
+	}
+
+	// TODO more checks could be done here, e.g. the number of open file handles
+}
+
+// Fast scan: scans only a portion of the nodes each pass, but does heavier work per node
+func (c *Cache) fastScan(lastScanPath []string) []string {
+	c.lock.Lock()
+
+	uploadingPkgs := make(map[packageFullName]*syncPackage)
+
+	visitCnt := 0
+	visitBreak := false
+	node, _ := c.activeCache.WalkEnd(lastScanPath)
+	node.Iterate(func(path []string, node *trie.Node[*CacheFile], isWordNode bool) trie.VisitCtrl {
+		ch := node.Value
+		if ch == nil {
+			return trie.VisitContinue
+		}
+
+		info := ch.Info()
+		if info.RefCount > 0 {
+			logger.Debugf("skip cache %v, refCount: %v", path, info.RefCount)
+			return trie.VisitContinue
+		}
+
+		visitCnt++
+		c.visitNode(path, node, ch, info, uploadingPkgs)
+
+		// Visit at most 500 nodes per pass, so the lock is not held for too long
+		if visitCnt > 500 {
+			lastScanPath = lo2.ArrayClone(path)
+			visitBreak = true
+			return trie.VisitBreak
+		}
+		return trie.VisitContinue
+	})
+	if !visitBreak {
+		lastScanPath = []string{}
+	}
+	c.lock.Unlock()
+
+	if len(uploadingPkgs) > 0 {
+		go c.doSync(lo.Values(uploadingPkgs))
+	}
+	return lastScanPath
+}
 
 func (c *Cache) visitNode(path []string, node *trie.Node[*CacheFile], ch *CacheFile, info CacheEntryInfo, uploadingPkgs map[packageFullName]*syncPackage) {
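Note: fullScan only considers entries at LevelReadOnly or below with no unsynced modifications, and evicts them in FreeTime order (longest idle first) until the total drops back under MaxCacheSize. The selection logic in isolation, using the standard library's sort instead of the repo's sort2 helper (the entry type is hypothetical):

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    type entry struct {
        size     int64
        freeTime time.Time // last time the refcount dropped to zero
    }

    // pickVictims returns the least recently freed entries whose removal
    // brings the total size under maxSize.
    func pickVictims(entries []entry, total, maxSize int64) []entry {
        need := total - maxSize
        if need <= 0 {
            return nil
        }
        sort.Slice(entries, func(i, j int) bool {
            return entries[i].freeTime.Before(entries[j].freeTime)
        })
        var victims []entry
        for _, e := range entries {
            victims = append(victims, e)
            need -= e.size
            if need <= 0 {
                break
            }
        }
        return victims
    }

    func main() {
        now := time.Now()
        es := []entry{
            {size: 100, freeTime: now.Add(-3 * time.Hour)},
            {size: 200, freeTime: now.Add(-1 * time.Hour)},
            {size: 300, freeTime: now.Add(-2 * time.Hour)},
        }
        fmt.Println(len(pickVictims(es, 600, 450))) // 2: the two longest-idle entries
    }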
@@ -549,6 +687,9 @@ func (c *Cache) visitNode(path []string, node *trie.Node[*CacheFile], ch *CacheF
 		return
 	}
 
+	// Uploading a file requires the fully loaded cache level
+	ch.LevelUp(LevelComplete)
+
 	fullName := packageFullName{ch.pathComps[0], ch.pathComps[1]}
 	pkg, ok := uploadingPkgs[fullName]
 	if !ok {
@@ -594,12 +735,15 @@ func (c *Cache) visitNode(path []string, node *trie.Node[*CacheFile], ch *CacheF
 			ch.Delete()
 		}
 
+		// The file's data or metadata has been modified, but the cache level is below ReadOnly, meaning an earlier check decided it does not need uploading.
+		// Such files can simply have their cache record removed (they will be loaded again when the data directory is scanned).
 		node.RemoveSelf(true)
 	}
 	return
 }
 
+// Scan the file data directory
 func (c *Cache) scanningData() {
 	ticker := time.NewTicker(c.cfg.ScanDataDirInterval)
 	defer ticker.Stop()
@@ -613,6 +757,7 @@ func (c *Cache) scanningData() {
 			return
 		}
 
+		startTime := time.Now()
 		logger.Infof("begin scanning data dir")
 
 		if len(walkTrace) == 0 {
@@ -668,28 +813,23 @@ func (c *Cache) scanningData() {
 				continue
 			}
 
+			// Load the cache unconditionally. This may cause files that do not need syncing to the cloud to be reloaded after their cache level dropped to the minimum and tracking stopped.
+			// Since the scan frequency is low, this is not a big problem.
 			walkTraceComps = append(walkTraceComps, e[0].Name())
-			fileMetaPath := filepath.Join(walkTraceComps...)
-			_, err = os.Stat(fileMetaPath)
-			if err == nil || !os.IsNotExist(err) {
-				walkTraceComps = walkTraceComps[:len(walkTraceComps)-1]
-				continue
-			}
-
 			untrackedFiles = append(untrackedFiles, lo2.ArrayClone(walkTraceComps[1:]))
 			walkTraceComps = walkTraceComps[:len(walkTraceComps)-1]
 		}
 
 		if len(untrackedFiles) > 0 {
 			for _, comps := range untrackedFiles {
-				ch := c.LoadFile(comps, nil)
+				ch := c.LoadReadOnlyFile(comps)
 				if ch != nil {
 					ch.Release()
 				}
 			}
 		}
 
-		logger.Infof("%v files visited, %v untracked files found", visitCnt, len(untrackedFiles))
+		logger.Infof("%v files visited, %v untracked files found, time: %v", visitCnt, len(untrackedFiles), time.Since(startTime))
 	}
 }
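Note: scanningData now treats every data file without an in-memory record as untracked and loads it read-only via LoadReadOnlyFile, instead of first probing the meta path; the trade-off is spelled out in the new comment above. A standalone sketch of collecting files as path components the way the cache addresses them (the real scanner walks incrementally and resumes from walkTrace):

    package main

    import (
        "fmt"
        "io/fs"
        "path/filepath"
        "strings"
    )

    // collectFiles returns every regular file under root as path components
    // relative to root — the same shape the cache uses for pathComps.
    func collectFiles(root string) ([][]string, error) {
        var files [][]string
        err := filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error {
            if err != nil || d.IsDir() {
                return err
            }
            rel, err := filepath.Rel(root, p)
            if err != nil {
                return err
            }
            files = append(files, strings.Split(rel, string(filepath.Separator)))
            return nil
        })
        return files, err
    }

    func main() {
        files, err := collectFiles(".")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Printf("%v files found\n", len(files))
    }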
@@ -166,18 +166,20 @@ func createNewCacheFile(cache *Cache, pathComps []string) (*CacheFile, error) {
 	}
 
 	ch := &CacheFile{
-		cache:        cache,
-		pathComps:    pathComps,
-		info:         info,
-		rwLock:       &sync.RWMutex{},
-		saveMetaChan: make(chan any, 1),
-		saveMetaLock: &sync.Mutex{},
-		stopSaveMeta: new(bool),
-		level:        LevelComplete,
-		metaFile:     metaFile,
-		dataFile:     dataFile,
-		writeLock:    &sync.RWMutex{},
-		state:        cacheState{},
+		cache:           cache,
+		pathComps:       pathComps,
+		info:            info,
+		rwLock:          &sync.RWMutex{},
+		saveMetaChan:    make(chan any, 1),
+		saveMetaLock:    &sync.Mutex{},
+		stopSaveMeta:    new(bool),
+		level:           LevelComplete,
+		freeTime:        time.Now(),
+		changeLevelTime: time.Now(),
+		metaFile:        metaFile,
+		dataFile:        dataFile,
+		writeLock:       &sync.RWMutex{},
+		state:           cacheState{},
 	}
 
 	go ch.serving(ch.saveMetaChan, ch.stopSaveMeta)
@@ -239,18 +241,20 @@ func loadCacheFile(cache *Cache, pathComps []string) (*CacheFile, error) {
 	}
 
 	ch := &CacheFile{
-		cache:        cache,
-		pathComps:    pathComps,
-		info:         *info,
-		rwLock:       &sync.RWMutex{},
-		saveMetaChan: make(chan any, 1),
-		saveMetaLock: &sync.Mutex{},
-		stopSaveMeta: new(bool),
-		level:        LevelComplete,
-		metaFile:     metaFile,
-		dataFile:     dataFile,
-		writeLock:    &sync.RWMutex{},
-		state:        cacheState{},
+		cache:           cache,
+		pathComps:       pathComps,
+		info:            *info,
+		rwLock:          &sync.RWMutex{},
+		saveMetaChan:    make(chan any, 1),
+		saveMetaLock:    &sync.Mutex{},
+		stopSaveMeta:    new(bool),
+		level:           LevelComplete,
+		freeTime:        time.Now(),
+		changeLevelTime: time.Now(),
+		metaFile:        metaFile,
+		dataFile:        dataFile,
+		writeLock:       &sync.RWMutex{},
+		state:           cacheState{},
 	}
 
 	go ch.serving(ch.saveMetaChan, ch.stopSaveMeta)
@@ -297,19 +301,21 @@ func newCacheFileFromObject(cache *Cache, pathComps []string, obj *clitypes.Obje
 	}
 
 	ch := &CacheFile{
-		cache:        cache,
-		pathComps:    pathComps,
-		info:         info,
-		remoteObj:    obj,
-		rwLock:       &sync.RWMutex{},
-		saveMetaChan: make(chan any, 1),
-		saveMetaLock: &sync.Mutex{},
-		stopSaveMeta: new(bool),
-		level:        LevelComplete,
-		metaFile:     metaFile,
-		dataFile:     dataFile,
-		writeLock:    &sync.RWMutex{},
-		state:        cacheState{},
+		cache:           cache,
+		pathComps:       pathComps,
+		info:            info,
+		remoteObj:       obj,
+		rwLock:          &sync.RWMutex{},
+		saveMetaChan:    make(chan any, 1),
+		saveMetaLock:    &sync.Mutex{},
+		stopSaveMeta:    new(bool),
+		level:           LevelComplete,
+		freeTime:        time.Now(),
+		changeLevelTime: time.Now(),
+		metaFile:        metaFile,
+		dataFile:        dataFile,
+		writeLock:       &sync.RWMutex{},
+		state:           cacheState{},
 	}
 
 	go ch.serving(ch.saveMetaChan, ch.stopSaveMeta)
@@ -317,6 +323,67 @@ func newCacheFileFromObject(cache *Cache, pathComps []string, obj *clitypes.Obje
 
 	return ch, nil
 }
 
+func loadReadOnlyCacheFile(cache *Cache, pathComps []string) (*CacheFile, error) {
+	metaPath := cache.GetCacheMetaPath(pathComps...)
+	dataPath := cache.GetCacheDataPath(pathComps...)
+
+	dataStat, err := os.Stat(dataPath)
+	if err != nil {
+		// Do not wrap this err (callers check it with os.IsNotExist)
+		return nil, err
+	}
+
+	if dataStat.IsDir() {
+		return nil, fmt.Errorf("target is a directory")
+	}
+
+	info := &FileInfo{}
+	metaData, err := os.ReadFile(metaPath)
+	if err != nil {
+		// If the data file exists but the metadata file does not, build metadata from the data file
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+
+		err = os.MkdirAll(filepath.Dir(metaPath), 0755)
+		if err != nil {
+			return nil, err
+		}
+
+		info.Size = dataStat.Size()
+		info.ModTime = dataStat.ModTime()
+		info.Perm = dataStat.Mode().Perm()
+		info.Segments = []*Range{{Position: 0, Length: info.Size}}
+		info.MetaRevision = 1 // a file not yet synced counts as modified
+		info.DataRevision = 1
+	} else {
+		err = serder.JSONToObject(metaData, info)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	ch := &CacheFile{
+		cache:           cache,
+		pathComps:       pathComps,
+		info:            *info,
+		rwLock:          &sync.RWMutex{},
+		saveMetaChan:    nil,
+		saveMetaLock:    &sync.Mutex{},
+		stopSaveMeta:    nil,
+		level:           LevelReadOnly,
+		freeTime:        time.Now(),
+		changeLevelTime: time.Now(),
+		metaFile:        nil,
+		dataFile:        nil,
+		writeLock:       &sync.RWMutex{},
+		state:           cacheState{},
+	}
+	return ch, nil
+}
+
 func loadCacheFileInfo(cache *Cache, pathComps []string, dataFileInfo os.FileInfo) (*CacheEntryInfo, error) {
 	metaPath := cache.GetCacheMetaPath(pathComps...)
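Note: when the meta side-car file is missing, loadReadOnlyCacheFile synthesizes FileInfo from os.Stat of the data file and marks both revisions as 1, so the file counts as modified (and, per the fullScan filters above, is not evicted until synced). The fallback in isolation — the field set and JSON tags below are assumptions mirroring what this patch reads, not the repo's actual FileInfo:

    package main

    import (
        "encoding/json"
        "fmt"
        "io/fs"
        "os"
        "time"
    )

    type fileMeta struct {
        Size         int64       `json:"size"`
        ModTime      time.Time   `json:"modTime"`
        Perm         fs.FileMode `json:"perm"`
        MetaRevision int         `json:"metaRevision"`
        DataRevision int         `json:"dataRevision"`
    }

    // statToMeta builds metadata from the data file alone, the same
    // fallback used when the meta file does not exist.
    func statToMeta(dataPath string) (*fileMeta, error) {
        st, err := os.Stat(dataPath)
        if err != nil {
            return nil, err
        }
        return &fileMeta{
            Size:         st.Size(),
            ModTime:      st.ModTime(),
            Perm:         st.Mode().Perm(),
            MetaRevision: 1, // not yet synced: treat as modified
            DataRevision: 1,
        }, nil
    }

    func main() {
        m, err := statToMeta("go.mod") // any existing file
        if err != nil {
            fmt.Println(err)
            return
        }
        b, _ := json.Marshal(m)
        fmt.Println(string(b))
    }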
@@ -42,3 +42,7 @@ func (v *Vfs) Stats() fuse.FsStats {
 func (v *Vfs) Dump() cache.CacheStatus {
 	return v.cache.Dump()
 }
+
+func (v *Vfs) ReclaimSpace() {
+	v.cache.ReclaimSpace()
+}
@@ -25,14 +25,32 @@ func (r *MountDumpStatus) MakeParam() *sdks.RequestParam {
 	return sdks.MakeQueryParam(http.MethodGet, MountDumpStatusPath, r)
 }
 
-type MountDumpStatusPathResp struct {
+type MountDumpStatusResp struct {
 	mount.MountStatus
 }
 
-func (r *MountDumpStatusPathResp) ParseResponse(resp *http.Response) error {
+func (r *MountDumpStatusResp) ParseResponse(resp *http.Response) error {
 	return sdks.ParseCodeDataJSONResponse(resp, r)
 }
 
-func (c *MountService) DumpStatus(req MountDumpStatus) (*MountDumpStatusPathResp, error) {
-	return JSONAPI(c.cfg, http.DefaultClient, &req, &MountDumpStatusPathResp{})
+func (c *MountService) DumpStatus(req MountDumpStatus) (*MountDumpStatusResp, error) {
+	return JSONAPI(c.cfg, http.DefaultClient, &req, &MountDumpStatusResp{})
+}
+
+const MountStartReclaimSpacePath = "/mount/startReclaimSpace"
+
+type StartMountReclaimSpace struct{}
+
+func (r *StartMountReclaimSpace) MakeParam() *sdks.RequestParam {
+	return sdks.MakeJSONParam(http.MethodPost, MountStartReclaimSpacePath, r)
+}
+
+type StartMountReclaimSpaceResp struct{}
+
+func (r *StartMountReclaimSpaceResp) ParseResponse(resp *http.Response) error {
+	return sdks.ParseCodeDataJSONResponse(resp, r)
+}
+
+func (c *MountService) StartReclaimSpace(req StartMountReclaimSpace) (*StartMountReclaimSpaceResp, error) {
+	return JSONAPI(c.cfg, http.DefaultClient, &req, &StartMountReclaimSpaceResp{})
 }
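Note: with the route registered under awsAuth.Auth, a raw call against the new path looks like the sketch below. The base URL is hypothetical and the required AWS-style auth headers are omitted, so a real server would reject this request without them:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    func main() {
        // hypothetical address + MountStartReclaimSpacePath
        url := "http://127.0.0.1:7890" + "/mount/startReclaimSpace"
        resp, err := http.Post(url, "application/json", strings.NewReader("{}"))
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }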
@@ -1,8 +1,5 @@
 {
 	"local": {
-		"userID": 1,
-		"localIP": "127.0.0.1",
-		"externalIP": "127.0.0.1",
 		"locationID": 1
 	},
 	"hubRPC": {
@@ -64,6 +61,7 @@
 		"uid": 0,
 		"dataDir": "",
 		"metaDir": "",
+		"maxCacheSize": 0,
 		"attrTimeout": "10s",
 		"uploadPendingTime": "30s",
 		"cacheActiveTime": "1m",