royalcat 2024-09-24 16:26:15 +03:00
parent 0bc6227427
commit 80884aca6a
14 changed files with 299 additions and 155 deletions

View file

@@ -1,27 +0,0 @@
package delivery
import (
"net/http"
"github.com/gin-gonic/gin"
)
var indexHandler = func(c *gin.Context) {
c.HTML(http.StatusOK, "index.html", nil)
}
// var routesHandler = func(ss *torrent.Stats) gin.HandlerFunc {
// return func(c *gin.Context) {
// c.HTML(http.StatusOK, "routes.html", ss.RoutesStats())
// }
// }
var logsHandler = func(c *gin.Context) {
c.HTML(http.StatusOK, "logs.html", nil)
}
var serversFoldersHandler = func() gin.HandlerFunc {
return func(c *gin.Context) {
c.HTML(http.StatusOK, "servers.html", nil)
}
}
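These handlers only do anything once registered on a gin router with the matching HTML templates loaded. A minimal, self-contained sketch of that wiring (routes, template glob and listen address are assumptions, not taken from this repo):

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()
	r.LoadHTMLGlob("templates/*.html") // assumed template location

	r.GET("/", func(c *gin.Context) {
		c.HTML(http.StatusOK, "index.html", nil)
	})
	r.GET("/logs", func(c *gin.Context) {
		c.HTML(http.StatusOK, "logs.html", nil)
	})

	_ = r.Run(":8080") // assumed listen address
}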

View file

@@ -86,6 +86,10 @@ func (bfs *fsWrapper) ReadDir(ctx context.Context, path string) ([]fs.FileInfo,
out := make([]fs.FileInfo, 0, len(ffs))
for _, v := range ffs {
if v == nil {
continue
}
if info, ok := v.(fs.FileInfo); ok {
out = append(out, info)
} else {

View file

@@ -7,24 +7,75 @@ import (
"time"
"git.kmsign.ru/royalcat/tstor/pkg/qbittorrent"
"github.com/hashicorp/golang-lru/v2/expirable"
"github.com/royalcat/btrgo/btrsync"
)
type cacheClient struct {
qb qbittorrent.Client
propertiesCache *expirable.LRU[string, qbittorrent.TorrentProperties]
torrentsCache *expirable.LRU[string, qbittorrent.TorrentInfo]
pieceCache btrsync.MapOf[pieceKey, int]
}
type pieceKey struct {
hash string
index int
}
func wrapClient(qb qbittorrent.Client) *cacheClient {
return &cacheClient{qb: qb}
const (
cacheSize = 5000
cacheTTL = time.Minute
)
return &cacheClient{
qb: qb,
propertiesCache: expirable.NewLRU[string, qbittorrent.TorrentProperties](cacheSize, nil, cacheTTL),
torrentsCache: expirable.NewLRU[string, qbittorrent.TorrentInfo](cacheSize, nil, cacheTTL),
pieceCache: btrsync.MapOf[pieceKey, int]{},
}
}
var errNotFound = fmt.Errorf("not found")
func (f *cacheClient) getInfo(ctx context.Context, hash string) (*qbittorrent.TorrentInfo, error) {
if v, ok := f.torrentsCache.Get(hash); ok {
return &v, nil
}
infos, err := f.qb.Torrent().GetTorrents(ctx, &qbittorrent.TorrentOption{
Hashes: []string{hash},
})
if err != nil {
return nil, fmt.Errorf("error to check torrent existence: %w", err)
}
if len(infos) == 0 {
return nil, nil
}
if len(infos) > 1 {
return nil, fmt.Errorf("multiple torrents with the same hash")
}
f.torrentsCache.Add(hash, *infos[0])
return infos[0], nil
}
func (f *cacheClient) getProperties(ctx context.Context, hash string) (*qbittorrent.TorrentProperties, error) {
if v, ok := f.propertiesCache.Get(hash); ok {
return &v, nil
}
info, err := f.qb.Torrent().GetProperties(ctx, hash)
if err != nil {
return nil, err
}
f.propertiesCache.Add(hash, *info)
return info, nil
}
@@ -53,11 +104,20 @@ func (f *cacheClient) getContent(ctx context.Context, hash string, contentIndex
}
func (f *cacheClient) isPieceComplete(ctx context.Context, hash string, pieceIndex int) (bool, error) {
cachedPieceState, ok := f.pieceCache.Load(pieceKey{hash: hash, index: pieceIndex})
if ok && cachedPieceState == 2 { // 2 = piece already downloaded (qBittorrent piece state)
return true, nil
}
completion, err := f.qb.Torrent().GetPiecesStates(ctx, hash)
if err != nil {
return false, err
}
for i, v := range completion {
f.pieceCache.Store(pieceKey{hash: hash, index: i}, v)
}
if completion[pieceIndex] == 2 {
return true, nil
}
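The wrapper above caches torrent info and properties in hashicorp's expirable LRU so repeated lookups do not hit the qBittorrent API within the TTL. A minimal, self-contained sketch of that caching pattern (sizes, keys and values here are illustrative only):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

func main() {
	// NewLRU takes a size cap, an optional eviction callback (nil here) and a TTL;
	// entries expire after the TTL even if the cache never fills up.
	cache := expirable.NewLRU[string, int](5000, nil, time.Minute)

	cache.Add("infohash-aaaa", 42)
	if v, ok := cache.Get("infohash-aaaa"); ok {
		fmt.Println("cache hit:", v)
	}
	if _, ok := cache.Get("infohash-bbbb"); !ok {
		fmt.Println("cache miss: fall back to the API and Add() the result")
	}
}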

View file

@@ -20,8 +20,11 @@ import (
"github.com/anacrolix/torrent/types/infohash"
infohash_v2 "github.com/anacrolix/torrent/types/infohash-v2"
"github.com/royalcat/ctxio"
"go.opentelemetry.io/otel"
)
var trace = otel.Tracer("git.kmsign.ru/royalcat/tstor/src/sources/qbittorrent")
type Daemon struct {
proc *os.Process
qb qbittorrent.Client
@@ -130,11 +133,14 @@ func (d *Daemon) Close(ctx context.Context) error {
return nil
}
func (d *Daemon) torrentPath(ih infohash.T) (string, error) {
return filepath.Abs(path.Join(d.dataDir, ih.HexString()))
func torrentDataPath(dataDir string, ih string) (string, error) {
return filepath.Abs(path.Join(dataDir, ih))
}
func (fs *Daemon) TorrentFS(ctx context.Context, file vfs.File) (vfs.Filesystem, error) {
func (fs *Daemon) GetTorrentFS(ctx context.Context, file vfs.File) (vfs.Filesystem, error) {
ctx, span := trace.Start(ctx, "GetTorrentFS")
defer span.End()
log := fs.log.With(slog.String("file", file.Name()))
ih, err := readInfoHash(ctx, file)
@@ -143,7 +149,7 @@ func (fs *Daemon) TorrentFS(ctx context.Context, file vfs.File) (vfs.Filesystem,
}
log = log.With(slog.String("infohash", ih.HexString()))
torrentPath, err := fs.torrentPath(ih)
torrentPath, err := torrentDataPath(fs.dataDir, ih.HexString())
if err != nil {
return nil, fmt.Errorf("error getting torrent path: %w", err)
}
@@ -160,18 +166,18 @@ func (fs *Daemon) TorrentFS(ctx context.Context, file vfs.File) (vfs.Filesystem,
}
func (d *Daemon) syncTorrentState(ctx context.Context, file vfs.File, ih metainfo.Hash, torrentPath string) error {
ctx, span := trace.Start(ctx, "syncTorrentState")
defer span.End()
log := d.log.With(slog.String("file", file.Name()), slog.String("infohash", ih.HexString()))
existing, err := d.qb.Torrent().GetTorrents(ctx, &qbittorrent.TorrentOption{
Hashes: []string{ih.HexString()},
})
info, err := d.client.getInfo(ctx, ih.HexString())
if err != nil {
return fmt.Errorf("error to check torrent existence: %w", err)
return err
}
log = log.With(slog.String("torrentPath", torrentPath))
if len(existing) == 0 {
if info == nil {
_, err := file.Seek(0, io.SeekStart)
if err != nil {
return err
@@ -195,7 +201,7 @@ func (d *Daemon) syncTorrentState(ctx context.Context, file vfs.File, ih metainf
return err
}
for {
_, err := d.qb.Torrent().GetProperties(ctx, ih.HexString())
_, err := d.client.getProperties(ctx, ih.HexString())
if err == nil {
break
}
@@ -211,9 +217,9 @@ func (d *Daemon) syncTorrentState(ctx context.Context, file vfs.File, ih metainf
}
return nil
} else if len(existing) == 1 {
} else {
// info := existing[0]
props, err := d.qb.Torrent().GetProperties(ctx, ih.HexString())
props, err := d.client.getProperties(ctx, ih.HexString())
if err != nil {
return err
}
@@ -228,9 +234,6 @@ func (d *Daemon) syncTorrentState(ctx context.Context, file vfs.File, ih metainf
return nil
}
return fmt.Errorf("multiple torrents with the same infohash")
}
// TODO caching
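The daemon now wraps its operations in OpenTelemetry spans via the package-level trace = otel.Tracer(...) shown above. A minimal sketch of that Start/End pattern outside this repo (tracer and span names are illustrative; without a configured TracerProvider the global tracer is a no-op):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

var tracer = otel.Tracer("example/component")

func doWork(ctx context.Context) error {
	// Start returns a derived context; pass it to downstream calls so they join the same trace.
	ctx, span := tracer.Start(ctx, "doWork")
	defer span.End()

	_ = ctx // downstream calls would receive this ctx
	return nil
}

func main() {
	_ = doWork(context.Background())
}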

View file

@@ -2,44 +2,62 @@ package qbittorrent
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"log/slog"
"os"
"path"
"strings"
"sync"
"time"
"git.kmsign.ru/royalcat/tstor/pkg/qbittorrent"
"git.kmsign.ru/royalcat/tstor/pkg/rlog"
"git.kmsign.ru/royalcat/tstor/src/vfs"
)
type FS struct {
mu sync.Mutex
client *cacheClient
name string
hash string
dataDir string
dataDir string // directory where torrent files are stored
content map[string]*qbittorrent.TorrentContent
files map[string]fs.FileInfo
entries map[string]fileEntry
log *rlog.Logger
vfs.FilesystemPrototype
}
type fileEntry struct {
fs.FileInfo
Content *qbittorrent.TorrentContent
}
var _ vfs.Filesystem = (*FS)(nil)
func newTorrentFS(ctx context.Context, client *cacheClient, name string, hash string, dataDir string) (*FS, error) {
ctx, span := trace.Start(ctx, "newTorrentFS")
defer span.End()
cnts, err := client.listContent(ctx, hash)
if err != nil {
return nil, fmt.Errorf("failed to list content for hash %s: %w", hash, err)
}
content := make(map[string]*qbittorrent.TorrentContent, len(cnts))
files := make(map[string]fs.FileInfo, len(cnts))
entries := make(map[string]fileEntry, len(cnts))
for _, cnt := range cnts {
path := vfs.AbsPath(cnt.Name)
files[path] = vfs.NewFileInfo(cnt.Name, cnt.Size)
content[path] = cnt
if cnt.Priority == qbittorrent.PriorityDoNotDownload {
continue
}
entries[vfs.AbsPath(cnt.Name)] = fileEntry{
Content: cnt,
FileInfo: vfs.NewFileInfo(cnt.Name, cnt.Size),
}
}
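// Skipping PriorityDoNotDownload entries here is what makes Unlink (below) stick: a file
// removed via Unlink gets that priority, so it no longer appears in entries even after the
// torrent is re-added by the next source scan.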
return &FS{
@@ -49,8 +67,9 @@ func newTorrentFS(ctx context.Context, client *cacheClient, name string, hash st
dataDir: dataDir,
content: content,
files: files,
entries: entries,
log: rlog.Component("qbittorrent", "fs"),
FilesystemPrototype: vfs.FilesystemPrototype(name),
}, nil
@@ -62,12 +81,11 @@ func (f *FS) Open(ctx context.Context, name string) (vfs.File, error) {
return vfs.NewDirFile(name), nil
}
cnt, ok := f.content[name]
if ok {
return openFile(ctx, f.client, f.dataDir, f.hash, cnt)
if entry, ok := f.entries[name]; ok {
return openFile(ctx, f.client, f.dataDir, f.hash, entry.Content)
}
for p := range f.content {
for p := range f.entries {
if strings.HasPrefix(p, name) {
return vfs.NewDirFile(name), nil
}
@@ -77,22 +95,79 @@ func (f *FS) Open(ctx context.Context, name string) (vfs.File, error) {
}
// ReadDir implements vfs.Filesystem.
func (fs *FS) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) {
return vfs.ListDirFromInfo(fs.files, name)
func (f *FS) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) {
infos := make(map[string]fs.FileInfo, len(f.entries))
for k, v := range f.entries {
infos[k] = v.FileInfo
}
return vfs.ListDirFromInfo(infos, name)
}
// Stat implements vfs.Filesystem.
func (f *FS) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
info, ok := f.files[name]
if !ok {
return nil, vfs.ErrNotExist
name = vfs.AbsPath(path.Clean(name))
if vfs.IsRoot(name) {
return vfs.NewDirInfo(f.name), nil
}
return info, nil
if entry, ok := f.entries[name]; ok {
return entry.FileInfo, nil
}
for p := range f.entries {
if strings.HasPrefix(p, name) {
return vfs.NewDirInfo(name), nil
}
}
return nil, vfs.ErrNotExist
}
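// hasChild sketches a stricter check than the bare strings.HasPrefix used in Open and Stat
// above and Unlink below: it avoids reporting "/movie" as a directory just because a sibling
// entry such as "/movie 2.mkv" shares the prefix. (Assumes Separator is "/"; not wired in here.)
func hasChild(entryPath, dir string) bool {
	return strings.HasPrefix(entryPath, dir+"/")
}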
// Unlink implements vfs.Filesystem.
func (f *FS) Unlink(ctx context.Context, filename string) error {
return vfs.ErrNotImplemented
filename = vfs.AbsPath(path.Clean(filename))
// we cannot delete the torrent itself, because it would be re-added on the next source scan and all deleted files would be restored
if entry, ok := f.entries[filename]; ok {
return f.removeFile(ctx, f.hash, entry.Content)
}
for p, entry := range f.entries {
if strings.HasPrefix(p, filename) {
return f.removeFile(ctx, f.hash, entry.Content)
}
}
return vfs.ErrNotExist
}
func (f *FS) removeFile(ctx context.Context, hash string, content *qbittorrent.TorrentContent) error {
log := f.log.With(slog.String("hash", hash), slog.String("file", content.Name))
f.mu.Lock()
defer f.mu.Unlock()
fpath := vfs.AbsPath(content.Name)
if _, ok := f.entries[fpath]; !ok {
return fmt.Errorf("file %s is does not found", fpath)
}
delete(f.entries, fpath)
err := f.client.qb.Torrent().SetFilePriority(ctx, f.hash, content.Index, qbittorrent.PriorityDoNotDownload)
if err != nil {
return fmt.Errorf("failed to set priority for torrent %s for file %s: %w", hash, content.Name, err)
}
err = os.Remove(path.Join(f.dataDir, vfs.RelPath(content.Name)))
if err != nil && !errors.Is(err, fs.ErrNotExist) {
log.Warn(ctx, "failed to remove file", rlog.Error(err))
return fmt.Errorf("failed to remove file %s: %w", content.Name, err)
}
return nil
}
func openFile(ctx context.Context, client *cacheClient, torrentDir string, hash string, content *qbittorrent.TorrentContent) (*File, error) {
@@ -158,11 +233,73 @@ func (f *File) Name() string {
return path.Base(f.filePath)
}
func (f *File) canExpectSoon(ctx context.Context) (bool, error) {
info, err := f.client.getInfo(ctx, f.hash)
if err != nil {
return false, err
}
return info.Completed == info.Size || info.State == qbittorrent.TorrentStateCheckingUP || info.State == qbittorrent.TorrentStateDownloading || info.State == qbittorrent.TorrentStateForcedDL, nil
}
func (f *File) isRangeComplete(ctx context.Context, offset int64, size int) (bool, error) {
startPieceIndex := int(offset / int64(f.pieceSize))
endPieceIndex := int((offset + int64(size) - 1) / int64(f.pieceSize))
pieceCount := endPieceIndex - startPieceIndex + 1 // rounding up, covering reads that straddle a piece boundary
for i := range pieceCount {
ok, err := f.client.isPieceComplete(ctx, f.hash, startPieceIndex+i)
if err != nil {
return false, err
}
if !ok {
return false, nil
}
}
return true, nil
}
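// Worked example for the piece arithmetic in isRangeComplete (hypothetical numbers): with
// pieceSize = 4, a read of size = 4 at offset = 2 touches bytes 2..5, i.e. pieces 0 and 1,
// so startPieceIndex = 2/4 = 0, endPieceIndex = (2+4-1)/4 = 1 and pieceCount = 2.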
func (f *File) waitPieceAvailable(ctx context.Context, offset int64, size int) error {
complete, err := f.isRangeComplete(ctx, offset, size)
if err != nil {
return err
}
if complete {
return nil
}
canExpectSoon, err := f.canExpectSoon(ctx)
if err != nil {
return err
}
if !canExpectSoon {
return fmt.Errorf("torrent is not downloading")
}
const checkingInterval = 1 * time.Second
ticker := time.NewTicker(checkingInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
complete, err := f.isRangeComplete(ctx, offset, size)
if err != nil {
return err
}
if complete {
return nil
}
}
}
}
// Read implements vfs.File.
func (f *File) Read(ctx context.Context, p []byte) (n int, err error) {
pieceIndex := int(f.offset / int64(f.pieceSize))
err = f.client.waitPieceToComplete(ctx, f.hash, pieceIndex)
if err != nil {
if err := f.waitPieceAvailable(ctx, f.offset, len(p)); err != nil {
return 0, err
}
@@ -178,9 +315,7 @@ func (f *File) Read(ctx context.Context, p []byte) (n int, err error) {
// ReadAt implements vfs.File.
func (f *File) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
pieceIndex := int(off / int64(f.pieceSize))
err = f.client.waitPieceToComplete(ctx, f.hash, pieceIndex)
if err != nil {
if err := f.waitPieceAvailable(ctx, off, len(p)); err != nil {
return 0, err
}

View file

@@ -1,6 +1,8 @@
package sources
import (
"context"
"git.kmsign.ru/royalcat/tstor/src/sources/qbittorrent"
"git.kmsign.ru/royalcat/tstor/src/sources/ytdlp"
"git.kmsign.ru/royalcat/tstor/src/vfs"
@@ -8,7 +10,13 @@ import (
func NewHostedFS(sourceFS vfs.Filesystem, tsrv *qbittorrent.Daemon, ytdlpsrv *ytdlp.Daemon) vfs.Filesystem {
factories := map[string]vfs.FsFactory{
".torrent": tsrv.TorrentFS,
".torrent": func(ctx context.Context, f vfs.File) (vfs.Filesystem, error) {
tfs, err := tsrv.GetTorrentFS(ctx, f)
if err != nil {
return nil, err
}
return vfs.NewResolveFS(tfs, vfs.ArchiveFactories), nil
},
".ts-ytdlp": ytdlpsrv.BuildFS,
}
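// From the closure above, vfs.FsFactory is presumably a function of the form
//   func(ctx context.Context, f vfs.File) (vfs.Filesystem, error)
// keyed by file extension. Wrapping the torrent filesystem in vfs.NewResolveFS with
// vfs.ArchiveFactories means archives stored inside a torrent are exposed as nested
// filesystems as well.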

View file

@@ -282,16 +282,17 @@ func (d *archiveFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, e
}
n, err = d.buffer.ReadAt(p, off)
if err != nil && err != io.EOF {
return n, fmt.Errorf("failed to read from buffer: %w", err)
return n, fmt.Errorf("failed to readAt from buffer: %w", err)
}
return n, err
}
func (d *archiveFile) Close(ctx context.Context) error {
d.m.Lock()
defer d.m.Unlock()
return nil
// d.m.Lock()
// defer d.m.Unlock()
return d.buffer.Close()
// return d.buffer.Close()
}
type archiveFileReaderFactory func(ctx context.Context) (ctxio.ReadCloser, error)

View file

@@ -15,6 +15,9 @@ var ErrOsHashLen = errors.New("oshash: buffer length must be a multiple of 8")
type Hash string
func FileHash(ctx context.Context, f File) (Hash, error) {
ctx, span := tracer.Start(ctx, "FileHash")
defer span.End()
_, err := f.Seek(0, io.SeekStart)
if err != nil {
return "", fmt.Errorf("error seeking file: %w", err)

View file

@@ -2,7 +2,9 @@ package vfs
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"log/slog"
"reflect"
@@ -34,7 +36,7 @@ type LogFS struct {
}
func isLoggableError(err error) bool {
return err != nil // && !errors.Is(err, fs.ErrNotExist) && !errors.Is(err, io.EOF)
return err != nil && !errors.Is(err, io.EOF) // && !errors.Is(err, fs.ErrNotExist)
}
var _ Filesystem = (*LogFS)(nil)
@@ -169,10 +171,11 @@ func (lfs *LogFS) Stat(ctx context.Context, filename string) (info fs.FileInfo,
}
span.End()
}()
log := lfs.log.With(slog.String("filename", filename))
info, err = lfs.fs.Stat(ctx, filename)
if isLoggableError(err) {
lfs.log.Error(ctx, "Failed to stat", rlog.Error(err))
log.Error(ctx, "Failed to stat", rlog.Error(err))
}
return info, err
}
@@ -190,10 +193,11 @@ func (fs *LogFS) Unlink(ctx context.Context, filename string) (err error) {
}
span.End()
}()
log := fs.log.With(slog.String("filename", filename))
err = fs.fs.Unlink(ctx, filename)
if isLoggableError(err) {
fs.log.Error(ctx, "Failed to stat", rlog.Error(err))
log.Error(ctx, "Failed to unlink", rlog.Error(err))
}
return err
}

View file

@@ -120,7 +120,7 @@ func (r *ResolverFS) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, e
return nil, err
}
nestedfs, err := r.resolver.nestedFs(ctx, filepath, file)
if errors.Is(err, context.DeadlineExceeded) {
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
return nil, err
}
if err != nil {

View file

@@ -24,6 +24,10 @@ func trimRelPath(p, t string) string {
// return path.Clean(Separator + strings.ReplaceAll(p, "\\", "/"))
// }
func RelPath(p string) string {
return strings.TrimLeft(p, Separator)
}
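// Quick illustration of RelPath above and AbsPath below (assuming Separator == "/"):
//
//	RelPath("/movies/film.mkv") == "movies/film.mkv"
//	AbsPath("movies/film.mkv")  == "/movies/film.mkv"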
func AbsPath(p string) string {
if p == "" || p[0] != '/' {
return Separator + p