package torrent

import (
	"context"
	"errors"
	"io/fs"
	"log/slog"
	"os"
	"path"
	"path/filepath"
	"slices"

	"git.kmsign.ru/royalcat/tstor/pkg/rlog"
	"github.com/anacrolix/torrent"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/storage"
)

// NewFileStorage creates a new ClientImplCloser that stores files using the OS native filesystem.
func NewFileStorage(baseDir string, pc storage.PieceCompletion) *fileStorage {
	return &fileStorage{
		client: storage.NewFileOpts(storage.NewFileClientOpts{
			ClientBaseDir:   baseDir,
			PieceCompletion: pc,
			TorrentDirMaker: func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
				return torrentDir(baseDir, infoHash)
			},
			FilePathMaker: func(opts storage.FilePathMakerOpts) string {
				return filePath(*opts.File)
			},
		}),
		baseDir:         baseDir,
		pieceCompletion: pc,
		dupIndex:        newDupIndex(),
		log:             rlog.Component("daemon", "torrent"),
	}
}
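
// Usage sketch (assumption, not part of the original file): fileStorage can be
// wired into an anacrolix/torrent client as its default storage. The path and
// variable names below are placeholders; NewMapPieceCompletion keeps completion
// state in memory, so a persistent PieceCompletion would normally be preferred.
//
//	pc := storage.NewMapPieceCompletion()
//	st := NewFileStorage("/var/lib/tstor/torrents", pc)
//	defer st.Close()
//
//	cfg := torrent.NewDefaultClientConfig()
//	cfg.DefaultStorage = st
//	client, err := torrent.NewClient(cfg)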

// fileStorage is file-based storage for torrents that isn't yet bound to a particular torrent.
type fileStorage struct {
	baseDir         string
	client          storage.ClientImplCloser
	pieceCompletion storage.PieceCompletion
	dupIndex        *dupIndex
	log             *rlog.Logger
}

var _ storage.ClientImplCloser = (*fileStorage)(nil)

func (me *fileStorage) Close() error {
	return errors.Join(
		me.client.Close(),
		me.pieceCompletion.Close(),
	)
}

// fullFilePath returns the on-disk path of a torrent file under the storage base directory.
func (fs *fileStorage) fullFilePath(infoHash metainfo.Hash, fileInfo metainfo.FileInfo) string {
	return filepath.Join(
		torrentDir(fs.baseDir, infoHash),
		filePath(fileInfo),
	)
}

// DeleteFile removes a single file from disk and marks all pieces overlapping
// it as incomplete in the piece-completion store.
func (fs *fileStorage) DeleteFile(file *torrent.File) error {
	infoHash := file.Torrent().InfoHash()
	torrentDir := torrentDir(fs.baseDir, infoHash)
	fileInfo := file.FileInfo()
	relFilePath := filePath(fileInfo)
	filePath := path.Join(torrentDir, relFilePath)
	for i := file.BeginPieceIndex(); i < file.EndPieceIndex(); i++ {
		pk := metainfo.PieceKey{InfoHash: infoHash, Index: i}
		err := fs.pieceCompletion.Set(pk, false)
		if err != nil {
			return err
		}
	}
	return os.Remove(filePath)
}
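
// Usage sketch (assumption): DeleteFile can drop a single file of a multi-file
// torrent while the rest keeps seeding; t, st and unwantedPath are placeholders.
// Boundary pieces shared with adjacent files are marked incomplete too, so the
// client may re-download them later.
//
//	for _, f := range t.Files() {
//		if f.DisplayPath() == unwantedPath {
//			err = st.DeleteFile(f)
//		}
//	}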

// CleanupDirs removes directories in baseDir that do not belong to any of the
// expected torrents. It returns the names of the directories that were (or,
// with dryRun, would have been) deleted.
func (fs *fileStorage) CleanupDirs(ctx context.Context, expected []*Controller, dryRun bool) ([]string, error) {
	log := fs.log.With(slog.Int("expectedTorrents", len(expected)), slog.Bool("dryRun", dryRun))

	expectedEntries := []string{}
	for _, e := range expected {
		expectedEntries = append(expectedEntries, e.Torrent().InfoHash().HexString())
	}

	entries, err := os.ReadDir(fs.baseDir)
	if err != nil {
		return nil, err
	}

	toDelete := []string{}
	for _, v := range entries {
		if !slices.Contains(expectedEntries, v.Name()) {
			toDelete = append(toDelete, v.Name())
		}
	}

	if ctx.Err() != nil {
		return nil, ctx.Err()
	}

	log.Info(ctx, "deleting trash data", slog.Int("dirsCount", len(toDelete)))
	if !dryRun {
		for i, name := range toDelete {
			p := path.Join(fs.baseDir, name)
			log.Warn(ctx, "deleting trash data", slog.String("path", p))
			err := os.RemoveAll(p)
			if err != nil {
				// Return the directories removed so far alongside the error.
				return toDelete[:i], err
			}
		}
	}

	return toDelete, nil
}
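
// Usage sketch (assumption): a maintenance job might call CleanupDirs with
// dryRun=true first to log what would be removed, and delete only once the
// reported list looks sane; st and controllers are placeholders for the
// storage and the currently registered []*Controller.
//
//	orphans, err := st.CleanupDirs(ctx, controllers, true)
//	if err == nil && len(orphans) > 0 {
//		_, err = st.CleanupDirs(ctx, controllers, false)
//	}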

// CleanupFiles removes files under baseDir that are not part of any expected
// torrent. It returns the paths that were (or, with dryRun, would have been)
// deleted.
func (s *fileStorage) CleanupFiles(ctx context.Context, expected []*Controller, dryRun bool) ([]string, error) {
	log := s.log.With(slog.Int("expectedTorrents", len(expected)), slog.Bool("dryRun", dryRun))

	expectedEntries := []string{}
	{
		for _, e := range expected {
			files, err := e.Files(ctx)
			if err != nil {
				return nil, err
			}

			for _, f := range files {
				expectedEntries = append(expectedEntries, s.fullFilePath(e.Torrent().InfoHash(), f.FileInfo()))
			}
		}
	}

	entries := []string{}
	err := filepath.WalkDir(s.baseDir,
		func(path string, info fs.DirEntry, err error) error {
			if err != nil {
				return err
			}
			if ctx.Err() != nil {
				return ctx.Err()
			}

			if info.IsDir() {
				return nil
			}
			entries = append(entries, path)
			return nil
		})
	if err != nil {
		return nil, err
	}

	toDelete := []string{}
	for _, v := range entries {
		if !slices.Contains(expectedEntries, v) {
			toDelete = append(toDelete, v)
		}
	}

	if ctx.Err() != nil {
		return toDelete, ctx.Err()
	}

	log.Info(ctx, "deleting trash data", slog.Int("filesCount", len(toDelete)))
	if !dryRun {
		for i, p := range toDelete {
			log.Warn(ctx, "deleting trash data", slog.String("path", p))
			err := os.Remove(p)
			if err != nil {
				// Return the files removed so far alongside the error, matching CleanupDirs.
				return toDelete[:i], err
			}
		}
	}
	return toDelete, nil
}
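
// Usage sketch (assumption): CleanupFiles and CleanupDirs complement each
// other, so a full sweep could remove unreferenced files first and then any
// directories that no longer correspond to a known info hash:
//
//	if _, err := st.CleanupFiles(ctx, controllers, false); err != nil {
//		return err
//	}
//	if _, err := st.CleanupDirs(ctx, controllers, false); err != nil {
//		return err
//	}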

// iterFiles walks baseDir and calls iter for every non-directory entry,
// stopping early if the context is cancelled or iter returns an error.
func (s *fileStorage) iterFiles(ctx context.Context, iter func(ctx context.Context, path string, entry fs.FileInfo) error) error {
	return filepath.Walk(s.baseDir,
		func(path string, info fs.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if ctx.Err() != nil {
				return ctx.Err()
			}

			if info.IsDir() {
				return nil
			}

			return iter(ctx, path, info)
		})
}
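
// Usage sketch (assumption): iterFiles can back simple maintenance tasks, such
// as measuring how much disk space the storage directory currently occupies:
//
//	var total int64
//	err := st.iterFiles(ctx, func(ctx context.Context, path string, entry fs.FileInfo) error {
//		total += entry.Size()
//		return nil
//	})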