small refactor*

royalcat 2025-03-22 08:49:14 +04:00
parent b6b541e050
commit 24a4d30275
232 changed files with 2164 additions and 1906 deletions

plugins/archive/archive.go
@@ -0,0 +1,101 @@
package archive
import (
"context"
"io/fs"
"strings"
"time"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
)
type archiveLoader func(ctx context.Context, archivePath string, r vfs.File, size int64) (map[string]fileEntry, error)
var _ vfs.Filesystem = &ArchiveFS{}
type fileEntry struct {
fs.FileInfo
open func(ctx context.Context) (vfs.File, error)
}
type ArchiveFS struct {
name string
size int64
files map[string]fileEntry
modTime time.Time
}
// Rename implements Filesystem.
func (a *ArchiveFS) Rename(ctx context.Context, oldpath string, newpath string) error {
return vfs.ErrNotImplemented
}
// ModTime implements Filesystem.
func (a *ArchiveFS) ModTime() time.Time {
return a.modTime
}
// Mode implements Filesystem.
func (a *ArchiveFS) Mode() fs.FileMode {
return fs.ModeDir
}
// Size implements Filesystem.
func (a *ArchiveFS) Size() int64 {
return int64(a.size)
}
// Sys implements Filesystem.
func (a *ArchiveFS) Sys() any {
return nil
}
// FsName implements Filesystem.
func (a *ArchiveFS) FsName() string {
return "archivefs"
}
func (d *Daemon) NewArchiveFS(ctx context.Context, archivePath, name string, f vfs.File, size int64, loader archiveLoader) (*ArchiveFS, error) {
archiveFiles, err := loader(ctx, archivePath, f, size)
if err != nil {
return nil, err
}
// TODO make optional
singleDir := true
for k := range archiveFiles {
if !strings.HasPrefix(k, "/"+name+"/") {
singleDir = false
break
}
}
files := make(map[string]fileEntry, len(archiveFiles))
for k, v := range archiveFiles {
// TODO make optional
if strings.Contains(k, "/__MACOSX/") {
continue
}
if singleDir {
k, _ = strings.CutPrefix(k, "/"+name)
}
files[k] = v
}
// FIXME configurable
files["/.forcegallery"] = fileEntry{
FileInfo: vfs.NewFileInfo("/.forcegallery", 0, time.Time{}),
open: func(ctx context.Context) (vfs.File, error) {
return vfs.NewMemoryFile(".forcegallery", []byte{}), nil
},
}
return &ArchiveFS{
name: name,
size: size,
files: files,
}, nil
}
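
A quick illustration of the single-directory collapse above (a standalone sketch, not part of this commit): when every entry already sits under "/<name>/", that prefix is stripped so the archive root is not nested one level deeper than needed.

package main

import (
	"fmt"
	"strings"
)

func collapseSingleDir(entries []string, name string) []string {
	prefix := "/" + name + "/"
	single := true
	for _, e := range entries {
		if !strings.HasPrefix(e, prefix) {
			single = false
			break
		}
	}
	out := make([]string, 0, len(entries))
	for _, e := range entries {
		if single {
			e, _ = strings.CutPrefix(e, "/"+name)
		}
		out = append(out, e)
	}
	return out
}

func main() {
	// every path is under "/photos/", so the extra level is removed
	fmt.Println(collapseSingleDir([]string{"/photos/a.jpg", "/photos/b.jpg"}, "photos"))
	// [/a.jpg /b.jpg]
}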

@@ -0,0 +1,142 @@
package archive_test
// TODO
// func TestArchiveFactories(t *testing.T) {
// t.Parallel()
// ctx := context.Background()
// require := require.New(t)
// require.Contains(vfs.ArchiveFactories, ".zip")
// require.Contains(vfs.ArchiveFactories, ".rar")
// require.Contains(vfs.ArchiveFactories, ".7z")
// fs, err := vfs.ArchiveFactories[".zip"](ctx, &vfs.DummyFile{})
// require.NoError(err)
// require.NotNil(fs)
// fs, err = vfs.ArchiveFactories[".rar"](ctx, &vfs.DummyFile{})
// require.NoError(err)
// require.NotNil(fs)
// fs, err = vfs.ArchiveFactories[".7z"](ctx, &vfs.DummyFile{})
// require.NoError(err)
// require.NotNil(fs)
// }
// var fileContent []byte = []byte("Hello World")
// func TestZipFilesystem(t *testing.T) {
// t.Parallel()
// require := require.New(t)
// zReader, size := createTestZip(require)
// ctx := context.Background()
// // TODO add single dir collapse test
// zfs, err := archive.NewArchive(ctx, "test", "test", zReader, size, archive.ZipLoader)
// require.NoError(err)
// files, err := zfs.ReadDir(ctx, "/path/to/test/file")
// require.NoError(err)
// require.Len(files, 1)
// e := files[0]
// require.Equal("1.txt", e.Name())
// require.NotNil(e)
// out := make([]byte, 5)
// f, err := zfs.Open(ctx, "/path/to/test/file/1.txt")
// require.NoError(err)
// n, err := f.Read(ctx, out)
// require.ErrorIs(err, io.EOF)
// require.Equal(5, n)
// require.Equal([]byte("Hello"), out)
// outSpace := make([]byte, 1)
// n, err = f.Read(ctx, outSpace)
// require.ErrorIs(err, io.EOF)
// require.Equal(1, n)
// require.Equal([]byte(" "), outSpace)
// n, err = f.Read(ctx, out)
// require.ErrorIs(err, io.EOF)
// require.Equal(5, n)
// require.Equal([]byte("World"), out)
// }
// func createTestZip(require *require.Assertions) (vfs.File, int64) {
// buf := bytes.NewBuffer([]byte{})
// zWriter := zip.NewWriter(buf)
// f1, err := zWriter.Create("path/to/test/file/1.txt")
// require.NoError(err)
// _, err = f1.Write(fileContent)
// require.NoError(err)
// err = zWriter.Close()
// require.NoError(err)
// return newCBR(buf.Bytes()), int64(buf.Len())
// }
// func newCBR(b []byte) *closeableByteReader {
// return &closeableByteReader{
// data: bytes.NewReader(b),
// }
// }
// var _ vfs.File = &closeableByteReader{}
// type closeableByteReader struct {
// data *bytes.Reader
// }
// // ReadAt implements ctxio.ReaderAt.
// func (c *closeableByteReader) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
// return c.data.ReadAt(p, off)
// }
// // Close implements vfs.File.
// func (c *closeableByteReader) Close(ctx context.Context) error {
// panic("unimplemented")
// }
// // Info implements vfs.File.
// func (c *closeableByteReader) Info() (fs.FileInfo, error) {
// panic("unimplemented")
// }
// // IsDir implements vfs.File.
// func (c *closeableByteReader) IsDir() bool {
// panic("unimplemented")
// }
// // Name implements vfs.File.
// func (c *closeableByteReader) Name() string {
// panic("unimplemented")
// }
// // Read implements vfs.File.
// func (c *closeableByteReader) Read(ctx context.Context, p []byte) (n int, err error) {
// return c.data.Read(p)
// }
// // Seek implements vfs.File.
// func (c *closeableByteReader) Seek(offset int64, whence int) (int64, error) {
// return c.data.Seek(offset, whence)
// }
// // Size implements vfs.File.
// func (c *closeableByteReader) Size() int64 {
// return c.data.Size()
// }
// // Type implements vfs.File.
// func (c *closeableByteReader) Type() fs.FileMode {
// panic("unimplemented")
// }

plugins/archive/cache.go
@@ -0,0 +1,176 @@
package archive
import (
"context"
"errors"
"fmt"
"io"
"sync"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
"github.com/hashicorp/golang-lru/arc/v2"
"github.com/royalcat/ctxio"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// TODO move this cache into the daemon
const blockSize = 1024 * 16 // 16KB
const cacheSize = 1024 * 1024 * 1024 * 4 // 4GB of total usage
const defaultBlockCount = cacheSize / blockSize
type archiveFileReaderFactory func(ctx context.Context) (ctxio.ReadCloser, error)
type archiveFileIndex struct {
archiveHash vfs.Hash
filename string
}
type blockIndex struct {
fileIndex archiveFileIndex
off int64
}
type block struct {
data [blockSize]byte
len int
}
func newRandomReaderFromLinear(blockCache *arc.ARCCache[blockIndex, block], index archiveFileIndex, size int64, readerFactory archiveFileReaderFactory) *randomReaderFromLinear {
return &randomReaderFromLinear{
blockCache: blockCache,
index: index,
size: size,
readerFactory: readerFactory,
}
}
type randomReaderFromLinear struct {
blockCache *arc.ARCCache[blockIndex, block]
index archiveFileIndex
readerFactory archiveFileReaderFactory
reader ctxio.ReadCloser
readen int64
readerMutex sync.Mutex
size int64
closed bool
}
var _ ctxio.ReaderAt = (*randomReaderFromLinear)(nil)
var _ ctxio.Closer = (*randomReaderFromLinear)(nil)
// ReadAt implements ctxio.ReaderAt.
func (a *randomReaderFromLinear) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
ctx, span := tracer.Start(ctx, "archive.RandomReader.ReadAt")
defer span.End()
if a.closed {
return 0, errors.New("reader is closed")
}
if off >= a.size {
return 0, ctxio.EOF
}
alignedOff := (off / blockSize) * blockSize
bI := blockIndex{fileIndex: a.index, off: alignedOff}
block, err := a.readBlock(ctx, bI)
if err != nil && err != ctxio.EOF {
return 0, err
}
if off-alignedOff >= int64(block.len) {
return 0, ctxio.EOF
}
return copy(p, block.data[off-alignedOff:block.len]), err
}
func (a *randomReaderFromLinear) readBlock(ctx context.Context, bI blockIndex) (block, error) {
ctx, span := tracer.Start(ctx, "archive.RandomReader.readBlock")
defer span.End()
// check block in cache before locking
if b, ok := a.blockCache.Get(bI); ok && b.len != 0 {
return b, nil
}
a.readerMutex.Lock()
defer a.readerMutex.Unlock()
if b, ok := a.blockCache.Get(bI); ok && b.len != 0 { // check again, maybe another goroutine already read this block
return b, nil
}
if a.reader == nil || a.readen > bI.off {
span.AddEvent("reader not valid, creating new reader", trace.WithAttributes(
attribute.Bool("reader_initialized", a.reader != nil),
attribute.Int64("readen", a.readen),
attribute.Int64("target_offset", bI.off),
))
if a.reader != nil {
if err := a.reader.Close(ctx); err != nil {
return block{}, fmt.Errorf("failed to close previous reader: %w", err)
}
}
var err error
a.reader, err = a.readerFactory(context.TODO())
if err != nil {
return block{}, err
}
a.readen = 0
}
for off := a.readen; off <= bI.off; off += blockSize {
// TODO sync.Pool ?
buf := [blockSize]byte{}
n, err := a.reader.Read(ctx, buf[:])
if err != nil && err != ctxio.EOF {
return block{}, err
}
a.readen += int64(n)
if n == 0 {
return block{}, io.EOF
}
a.blockCache.Add(blockIndex{bI.fileIndex, off}, block{len: n, data: buf})
if off == bI.off {
return block{len: n, data: buf}, err
}
if n < int(blockSize) && errors.Is(err, ctxio.EOF) {
return block{}, err
}
}
return block{}, io.EOF
}
// Close implements ctxio.Closer.
func (a *randomReaderFromLinear) Close(ctx context.Context) error {
if a.closed {
return nil
}
a.closed = true
var errs []error
if a.reader != nil {
errs = append(errs, a.reader.Close(ctx))
}
for _, block := range a.blockCache.Keys() {
if block.fileIndex == a.index {
a.blockCache.Remove(block)
}
}
return errors.Join(errs...)
}
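
The reader above serves random ReadAt calls out of fixed 16 KiB blocks keyed by archive hash, file name, and block-aligned offset. A standalone sketch of the alignment arithmetic it depends on (the offset value is made up):

package main

import "fmt"

const blockSize = 1024 * 16 // 16KB, same constant as above

func main() {
	off := int64(40_000) // requested read offset inside the decompressed file
	alignedOff := (off / blockSize) * blockSize
	fmt.Println(alignedOff)       // 32768: start of the cached block containing off
	fmt.Println(off - alignedOff) // 7232: position of the request inside that block
}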

plugins/archive/daemon.go
@@ -0,0 +1,65 @@
package archive
import (
"context"
"fmt"
"path"
"git.kmsign.ru/royalcat/tstor/server/src/daemon"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
"github.com/hashicorp/golang-lru/arc/v2"
"github.com/knadh/koanf/v2"
"go.opentelemetry.io/otel"
)
const DaemonName string = "archive"
var _ daemon.DaemonConstructor = NewDaemon
func NewDaemon(ctx context.Context, koanf *koanf.Koanf) (daemon.Daemon, error) {
return &Daemon{}, nil
}
var tracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/plugins/archive")
var _ daemon.Daemon = (*Daemon)(nil)
type Daemon struct {
blockcache *arc.ARCCache[blockIndex, block]
}
// Name implements daemon.Daemon.
func (d *Daemon) Name() string {
return DaemonName
}
// Extensions implements daemon.Daemon.
func (d *Daemon) Extensions() []string {
return []string{".zip", ".rar", ".7z"}
}
// GetFS implements daemon.Daemon.
func (d *Daemon) GetFS(ctx context.Context, sourcePath string, file vfs.File) (vfs.Filesystem, error) {
ext := path.Ext(sourcePath)
stat, err := file.Info()
if err != nil {
return nil, err
}
switch ext {
case ".zip":
return d.NewArchiveFS(ctx, sourcePath, stat.Name(), file, stat.Size(), d.ZipLoader)
case ".rar":
return d.NewArchiveFS(ctx, sourcePath, stat.Name(), file, stat.Size(), d.RarLoader)
case ".7z":
return d.NewArchiveFS(ctx, sourcePath, stat.Name(), file, stat.Size(), d.SevenZipLoader)
}
return nil, fmt.Errorf("unknown archive type")
}
// Close implements daemon.Daemon.
func (d *Daemon) Close(ctx context.Context) error {
panic("unimplemented")
}

plugins/archive/fs.go
@@ -0,0 +1,158 @@
package archive
import (
"context"
"io"
"io/fs"
"path"
"strings"
"sync"
"time"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
)
// Unlink implements Filesystem.
func (a *ArchiveFS) Unlink(ctx context.Context, filename string) error {
return vfs.ErrNotImplemented
}
func (a *ArchiveFS) Open(ctx context.Context, filename string) (vfs.File, error) {
if filename == vfs.Separator {
return vfs.NewDirFile(filename), nil
}
f, ok := a.files[filename]
if ok {
return f.open(ctx)
}
for p := range a.files {
if strings.HasPrefix(p, filename) {
return vfs.NewDirFile(filename), nil
}
}
return nil, vfs.ErrNotExist
}
func (a *ArchiveFS) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
infos := make(map[string]fs.FileInfo, len(a.files))
for k, v := range a.files {
infos[k] = v
}
return vfs.ListDirFromInfo(infos, path)
}
// Stat implements Filesystem.
func (afs *ArchiveFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
if entry, ok := afs.files[filename]; ok {
return entry, nil
}
for p := range afs.files {
if strings.HasPrefix(p, filename) {
return vfs.NewDirInfo(path.Base(filename), time.Time{}), nil
}
}
return nil, vfs.ErrNotExist
}
// Info implements Filesystem.
func (a *ArchiveFS) Info() (fs.FileInfo, error) {
return a, nil
}
// IsDir implements Filesystem.
func (a *ArchiveFS) IsDir() bool {
return true
}
// Name implements Filesystem.
func (a *ArchiveFS) Name() string {
return a.name
}
// Type implements Filesystem.
func (a *ArchiveFS) Type() fs.FileMode {
return fs.ModeDir
}
var _ vfs.File = (*archiveFile)(nil)
func newArchiveFile(name string, size int64, rr *randomReaderFromLinear) *archiveFile {
return &archiveFile{
name: name,
size: size,
rr: rr,
}
}
type archiveFile struct {
name string
size int64
m sync.Mutex
offset int64
rr *randomReaderFromLinear
}
// Seek implements File.
func (d *archiveFile) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
d.offset = offset
case io.SeekCurrent:
d.offset += offset
case io.SeekEnd:
d.offset = d.size + offset
}
return d.offset, nil
}
// Name implements File.
func (d *archiveFile) Name() string {
return d.name
}
// Type implements File.
func (d *archiveFile) Type() fs.FileMode {
return vfs.ModeFileRO
}
func (d *archiveFile) Info() (fs.FileInfo, error) {
return vfs.NewFileInfo(d.name, d.size, time.Time{}), nil
}
func (d *archiveFile) Size() int64 {
return d.size
}
func (d *archiveFile) IsDir() bool {
return false
}
func (d *archiveFile) Read(ctx context.Context, p []byte) (n int, err error) {
ctx, span := tracer.Start(ctx, "archive.File.Read")
defer span.End()
n, err = d.rr.ReadAt(ctx, p, d.offset)
d.offset += int64(n)
return n, err
}
func (d *archiveFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
d.m.Lock()
defer d.m.Unlock()
return d.rr.ReadAt(ctx, p, off)
}
func (d *archiveFile) Close(ctx context.Context) error {
// FIXME close should do nothing as archive fs currently reuse the same file instances
return nil
}

plugins/archive/rar.go
@@ -0,0 +1,67 @@
package archive
import (
"context"
"fmt"
"io"
"git.kmsign.ru/royalcat/tstor/server/pkg/ioutils"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
"github.com/nwaples/rardecode/v2"
"github.com/royalcat/ctxio"
)
func (d *Daemon) RarLoader(ctx context.Context, archivePath string, f vfs.File, size int64) (map[string]fileEntry, error) {
hash, err := vfs.FileHash(ctx, f)
if err != nil {
return nil, err
}
reader := ioutils.WrapIoReadSeeker(ctx, f, size)
r, err := rardecode.NewReader(reader)
if err != nil {
return nil, err
}
out := make(map[string]fileEntry)
for {
header, err := r.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
name := header.Name
af := func(ctx context.Context) (ctxio.ReadCloser, error) {
reader := ioutils.WrapIoReadSeeker(ctx, f, size)
r, err := rardecode.NewReader(reader)
if err != nil {
return nil, err
}
for header, err := r.Next(); err != io.EOF; header, err = r.Next() {
if err != nil {
return nil, err
}
if header.Name == name {
return ctxio.NopCloser(ctxio.WrapIoReader(r)), nil
}
}
return nil, fmt.Errorf("file with name '%s' not found", name)
}
rr := newRandomReaderFromLinear(d.blockcache, archiveFileIndex{archiveHash: hash, filename: header.Name}, header.UnPackedSize, af)
out[vfs.AbsPath(header.Name)] = fileEntry{
FileInfo: vfs.NewFileInfo(header.Name, header.UnPackedSize, header.ModificationTime),
open: func(ctx context.Context) (vfs.File, error) {
return newArchiveFile(header.Name, header.UnPackedSize, rr), nil
},
}
}
return out, nil
}

@@ -0,0 +1,57 @@
package archive
import (
"context"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
"github.com/bodgit/sevenzip"
"github.com/royalcat/ctxio"
)
func (d *Daemon) SevenZipLoader(ctx context.Context, archivePath string, ctxreader vfs.File, size int64) (map[string]fileEntry, error) {
hash, err := vfs.FileHash(ctx, ctxreader)
if err != nil {
return nil, err
}
reader := ctxio.IoReaderAt(ctx, ctxreader)
r, err := sevenzip.NewReader(reader, size)
if err != nil {
return nil, err
}
out := make(map[string]fileEntry)
for i, f := range r.File {
if f.FileInfo().IsDir() {
continue
}
af := func(ctx context.Context) (ctxio.ReadCloser, error) {
reader := ctxio.IoReaderAt(ctx, ctxreader)
zr, err := sevenzip.NewReader(reader, size)
if err != nil {
return nil, err
}
rc, err := zr.File[i].Open()
if err != nil {
return nil, err
}
return ctxio.WrapIoReadCloser(rc), nil
}
info := f.FileInfo()
rr := newRandomReaderFromLinear(d.blockcache, archiveFileIndex{archiveHash: hash, filename: f.Name}, info.Size(), af)
out[vfs.AbsPath(f.Name)] = fileEntry{
FileInfo: f.FileInfo(),
open: func(ctx context.Context) (vfs.File, error) {
return newArchiveFile(info.Name(), info.Size(), rr), nil
},
}
}
return out, nil
}

plugins/archive/zip.go
@@ -0,0 +1,61 @@
package archive
import (
"archive/zip"
"context"
"fmt"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
"github.com/royalcat/ctxio"
)
func (d *Daemon) ZipLoader(ctx context.Context, archivePath string, f vfs.File, size int64) (map[string]fileEntry, error) {
hash, err := vfs.FileHash(ctx, f)
if err != nil {
return nil, err
}
reader := ctxio.IoReaderAt(ctx, f)
zr, err := zip.NewReader(reader, size)
if err != nil {
return nil, err
}
out := make(map[string]fileEntry)
for i := range zr.File {
zipFile := zr.File[i]
if zipFile.FileInfo().IsDir() {
continue
}
i := i
af := func(ctx context.Context) (ctxio.ReadCloser, error) {
reader := ctxio.IoReaderAt(ctx, f)
zr, err := zip.NewReader(reader, size)
if err != nil {
return nil, fmt.Errorf("failed to create zip reader: %w", err)
}
rc, err := zr.File[i].Open()
if err != nil {
return nil, fmt.Errorf("failed to open file in zip archive: %w", err)
}
return ctxio.WrapIoReadCloser(rc), nil
}
info := zipFile.FileInfo()
rr := newRandomReaderFromLinear(d.blockcache, archiveFileIndex{archiveHash: hash, filename: zipFile.Name}, info.Size(), af)
out[vfs.AbsPath(zipFile.Name)] = fileEntry{
FileInfo: info,
open: func(ctx context.Context) (vfs.File, error) {
return newArchiveFile(info.Name(), info.Size(), rr), nil
},
}
}
return out, nil
}
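
The loader relies on archive/zip from the standard library, which gives random access to entries through any io.ReaderAt plus the total size. A minimal self-contained sketch of that pattern (the archive name is a placeholder):

package main

import (
	"archive/zip"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("example.zip") // hypothetical archive
	if err != nil {
		panic(err)
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		panic(err)
	}

	// zip.NewReader only needs ReadAt plus the total size, so entries can be
	// opened in any order without rescanning the archive.
	zr, err := zip.NewReader(f, st.Size())
	if err != nil {
		panic(err)
	}
	for _, zf := range zr.File {
		rc, err := zf.Open()
		if err != nil {
			panic(err)
		}
		n, _ := io.Copy(io.Discard, rc)
		rc.Close()
		fmt.Println(zf.Name, n, "bytes")
	}
}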

@@ -0,0 +1,6 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/oapi-codegen/oapi-codegen/HEAD/configuration-schema.json
package: client
output: genclient/client.gen.go
generate:
models: true
client: true

plugins/kemono/daemon.go
@@ -0,0 +1,118 @@
package kemono
import (
"context"
"database/sql"
"fmt"
"io"
"path"
"github.com/thanos-io/objstore"
"github.com/thanos-io/objstore/providers/filesystem"
"golang.org/x/sync/errgroup"
"git.kmsign.ru/royalcat/tstor/plugins/kemono/kemonoapi"
)
const DaemonName = "kemono"
type Daemon struct {
coomerClient *kemonoapi.Client
kemonoClient *kemonoapi.Client
db *sql.DB
storage objstore.Bucket
}
type creator struct {
Service string
CreatorID string
}
func NewDaemon(dataDir string) (*Daemon, error) {
bucket, err := filesystem.NewBucket(dataDir)
if err != nil {
return nil, fmt.Errorf("failed to create filesystem bucket: %w", err)
}
return &Daemon{
coomerClient: kemonoapi.NewClient("https://coomer.su/"),
kemonoClient: kemonoapi.NewClient("https://kemono.su/"),
storage: bucket,
}, nil
}
func (d *Daemon) getClient(service string) *kemonoapi.Client {
switch service {
case "onlyfans", "fansly", "candfans":
return d.coomerClient
case "patreon", "fanbox", "fantia", "gumroad", "discord", "boosty", "subscribestar", "dlsite", "afdian":
return d.kemonoClient
}
return nil
}
func getCreatorPath(creator creator) string {
return path.Join(creator.Service, creator.CreatorID)
}
func (d *Daemon) scrapCreator(ctx context.Context, creator creator) error {
client := d.getClient(creator.Service)
if client == nil {
return fmt.Errorf("no site for service %s", creator.Service)
}
posts := client.FetchPosts(ctx, creator.Service, creator.CreatorID)
for post, err := range posts {
if err != nil {
return err
}
for _, att := range append([]kemonoapi.File{post.File}, post.Attachments...) {
err := d.downloadFile(ctx, client, att)
if err != nil {
return fmt.Errorf("failed to download file: %w", err)
}
}
}
return nil
}
func getStorageFilePath(file kemonoapi.File) string {
return path.Join("data", file.Path)
}
func (d *Daemon) downloadFile(ctx context.Context, client *kemonoapi.Client, file kemonoapi.File) error {
info, err := client.HeadFile(ctx, path.Join("data", file.Path))
if err != nil {
return fmt.Errorf("failed to get file info: %w", err)
}
storageFilePath := getStorageFilePath(file)
attrs, err := d.storage.Attributes(ctx, storageFilePath)
// skip the download only when the object already exists and is up to date
if err == nil && attrs.Size == info.Length && attrs.LastModified.After(info.LastModified) {
return nil
}
r, w := io.Pipe()
var g errgroup.Group
g.Go(func() error {
defer w.Close()
return client.DownloadFile(ctx, w, info.URL)
})
g.Go(func() error {
return d.storage.Upload(ctx, storageFilePath, r)
})
return g.Wait()
}
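
downloadFile streams the HTTP body into object storage through an io.Pipe so the file is never buffered in full. A standalone sketch of the same pipe-plus-errgroup pattern using only the standard library and x/sync (URL and output path are placeholders, os.Create stands in for the bucket upload):

package main

import (
	"io"
	"net/http"
	"os"

	"golang.org/x/sync/errgroup"
)

func main() {
	r, w := io.Pipe()

	var g errgroup.Group
	g.Go(func() error {
		defer w.Close() // closing the write end lets the consumer finish
		resp, err := http.Get("https://example.com/file.bin") // placeholder URL
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		_, err = io.Copy(w, resp.Body)
		return err
	})
	g.Go(func() error {
		defer r.Close() // unblocks the writer if the consumer fails early
		out, err := os.Create("file.bin") // stand-in for storage.Upload
		if err != nil {
			return err
		}
		defer out.Close()
		_, err = io.Copy(out, r)
		return err
	})
	if err := g.Wait(); err != nil {
		panic(err)
	}
}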

plugins/kemono/go.mod
@@ -0,0 +1,34 @@
module git.kmsign.ru/royalcat/tstor/plugins/kemono
go 1.23.5
require (
github.com/go-resty/resty/v2 v2.16.2
github.com/spf13/cast v1.7.0
github.com/thanos-io/objstore v0.0.0-20241212213936-d69df7208cba
golang.org/x/sync v0.12.0
golang.org/x/time v0.6.0
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.20.4 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.60.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/stretchr/testify v1.10.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
golang.org/x/net v0.37.0 // indirect
golang.org/x/sys v0.31.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

plugins/kemono/go.sum
@@ -0,0 +1,63 @@
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4 h1:rydBwnBoywKQMjWF0z8SriYtQ+uUcaFsxuijMjJr5PI=
github.com/efficientgo/core v1.0.0-rc.0.0.20221201130417-ba593f67d2a4/go.mod h1:kQa0V74HNYMfuJH6jiPiwNdpWXl4xd/K4tzlrcvYDQI=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-resty/resty/v2 v2.16.2 h1:CpRqTjIzq/rweXUt9+GxzzQdlkqMdt8Lm/fuK/CAbAg=
github.com/go-resty/resty/v2 v2.16.2/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/thanos-io/objstore v0.0.0-20241212213936-d69df7208cba h1:X5YtKhjFsMAgfaD1MxT+hYrP9QftK9iA+UYS3eQW0E0=
github.com/thanos-io/objstore v0.0.0-20241212213936-d69df7208cba/go.mod h1:vyzFrBXgP+fGNG2FopEGWOO/zrIuoy7zt3LpLeezRsw=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@@ -0,0 +1,25 @@
package kemonoapi_test
import (
"context"
"fmt"
"testing"
"git.kmsign.ru/royalcat/tstor/plugins/kemono/kemonoapi"
)
func TestScrapCreator(t *testing.T) {
k := kemonoapi.NewClient("https://coomer.su/")
ctx := context.Background()
posts := k.FetchPosts(ctx, "onlyfans", "bigtittygothegg")
for post, err := range posts {
if err != nil {
t.Fatal(err)
}
if post.ID == "" {
t.Fatal(fmt.Errorf("post id is empty"))
}
}
}

@@ -0,0 +1,64 @@
package kemonoapi
import (
"context"
"fmt"
"io"
"strconv"
"time"
)
type fileInfo struct {
URL string
Length int64
ContentType string
LastModified time.Time
}
func (c *Client) HeadFile(ctx context.Context, url string) (*fileInfo, error) {
resp, err := c.client.R().SetContext(ctx).Head(url)
if err != nil {
return nil, fmt.Errorf("failed to download url %s: %w", url, err)
}
loc := resp.Header().Get("Location")
if loc != "" {
return c.HeadFile(ctx, loc)
}
length, err := strconv.ParseInt(resp.Header().Get("Content-Length"), 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse Content-Length header: %w", err)
}
lastModified, err := time.Parse(time.RFC1123, resp.Header().Get("Last-Modified"))
if err != nil {
return nil, fmt.Errorf("failed to parse Last-Modified header: %w", err)
}
contentType := resp.Header().Get("Content-Type")
return &fileInfo{
URL: url,
Length: length,
ContentType: contentType,
LastModified: lastModified,
}, nil
}
func (c *Client) DownloadFile(ctx context.Context, out io.Writer, url string) error {
resp, err := c.client.R().SetContext(ctx).SetDoNotParseResponse(true).Get(url)
if err != nil {
return fmt.Errorf("failed to download url %s: %w", url, err)
}
body := resp.RawBody()
defer body.Close()
_, err = io.Copy(out, body)
if err != nil {
return fmt.Errorf("failed to download url %s: %w", url, err)
}
return nil
}

@@ -0,0 +1,91 @@
package kemonoapi
import (
"context"
"encoding/json"
"fmt"
"iter"
"log/slog"
"strconv"
)
// FetchCreators fetch Creator list
func (k *Client) FetchCreators() (creators []Creator, err error) {
// k.log.Print("fetching creator list...")
// url := fmt.Sprintf("https://%s/api/v1/creators", k.Site)
// resp, err := k.Downloader.Get(url)
// if err != nil {
// return nil, fmt.Errorf("fetch creator list error: %s", err)
// }
// reader, err := handleCompressedHTTPResponse(resp)
// if err != nil {
// return nil, err
// }
// data, err := ioutil.ReadAll(reader)
// if err != nil {
// return nil, fmt.Errorf("fetch creator list error: %s", err)
// }
// err = json.Unmarshal(data, &creators)
// if err != nil {
// return nil, fmt.Errorf("unmarshal creator list error: %s", err)
// }
return
}
// FetchPosts fetch post list
func (k *Client) FetchPosts(ctx context.Context, service, creator_id string) iter.Seq2[Post, error] {
const perUnit = 50
return func(yield func(Post, error) bool) {
page := 0
for {
k.log.Info("fetching post list", slog.Int("page", page))
if err := k.ratelimit.Wait(ctx); err != nil {
yield(Post{}, err)
return
}
posts, err := k.fetchPostsPage(ctx, service, creator_id, page*perUnit)
if err != nil {
yield(Post{}, err)
return
}
if len(posts) == 0 {
break
}
for _, post := range posts {
if !yield(post, nil) {
return
}
}
page++
}
}
}
func (k *Client) fetchPostsPage(ctx context.Context, service, creator_id string, offset int) ([]Post, error) {
resp, err := k.client.R().
SetContext(ctx).
SetQueryParam("o", strconv.Itoa(offset)).
SetPathParam("service", service).
SetPathParam("creator_id", creator_id).
Get("/{service}/user/{creator_id}")
if err != nil {
return nil, fmt.Errorf("fetch post list error: %s", err)
}
var posts []Post
err = json.Unmarshal(resp.Body(), &posts)
if err != nil {
return nil, fmt.Errorf("unmarshal post list error: %s", err)
}
return posts, nil
}
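
FetchPosts exposes the paginated API as an iter.Seq2, Go 1.23's range-over-func iterator, so callers read it with a plain for-range and can stop early by breaking. A self-contained sketch of the same pagination shape (the page source here is fake):

package main

import (
	"fmt"
	"iter"
)

// paginate yields items page by page until a page comes back empty,
// mirroring the structure of FetchPosts above.
func paginate(fetchPage func(offset int) []string) iter.Seq2[string, error] {
	const perPage = 50
	return func(yield func(string, error) bool) {
		for page := 0; ; page++ {
			items := fetchPage(page * perPage)
			if len(items) == 0 {
				return
			}
			for _, it := range items {
				if !yield(it, nil) {
					return // caller broke out of the range loop
				}
			}
		}
	}
}

func main() {
	fake := func(offset int) []string {
		if offset >= 100 {
			return nil
		}
		return []string{fmt.Sprintf("post-%d", offset)}
	}
	for item, err := range paginate(fake) {
		if err != nil {
			panic(err)
		}
		fmt.Println(item) // post-0, post-50
	}
}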

@@ -0,0 +1,40 @@
package kemonoapi
import (
"log/slog"
"net/http"
"time"
"github.com/go-resty/resty/v2"
"golang.org/x/time/rate"
)
type Downloader interface {
Download(<-chan FileWithIndex, Creator, Post) <-chan error
Get(url string) (resp *http.Response, err error)
WriteContent(Creator, Post, string) error
}
type Client struct {
client *resty.Client
ratelimit *rate.Limiter
log *slog.Logger
}
func NewClient(site string) *Client {
k := &Client{
ratelimit: rate.NewLimiter(rate.Every(time.Second), 3),
client: resty.New().
SetBaseURL(site + "/api/v1").
SetRetryCount(3).
SetRetryWaitTime(5 * time.Second).
AddRetryCondition(func(r *resty.Response, err error) bool {
return r != nil && r.StatusCode() == http.StatusTooManyRequests
}),
}
if k.log == nil {
k.log = slog.Default()
}
return k
}

@@ -0,0 +1,186 @@
package kemonoapi
import (
"encoding/json"
"fmt"
"net/url"
"path/filepath"
"time"
"github.com/spf13/cast"
)
type Timestamp struct {
Time time.Time
}
func (t *Timestamp) UnmarshalJSON(b []byte) error {
var timestamp float64
err := json.Unmarshal(b, &timestamp)
if err != nil {
return err
}
t.Time = time.Unix(int64(timestamp), int64((timestamp-float64(int64(timestamp)))*1e9))
return nil
}
type Creator struct {
Favorited int `json:"favorited"`
Id string `json:"id"`
Indexed Timestamp `json:"indexed"`
Name string `json:"name"`
Service string `json:"service"`
Updated Timestamp `json:"updated"`
}
// GetID get creator id
func (c Creator) GetID() string {
return c.Id
}
// GetService get creator Service
func (c Creator) GetService() string {
return c.Service
}
func (c Creator) PairString() string {
return fmt.Sprintf("%s:%s", c.Service, c.Id)
}
func NewCreator(service, id string) Creator {
return Creator{
Service: service,
Id: id,
}
}
// FindCreator Get the Creator by ID and Service
func FindCreator(creators []Creator, id, service string) (Creator, bool) {
for _, creator := range creators {
if creator.Id == id && creator.Service == service {
return creator, true
}
}
return Creator{}, false
}
type File struct {
Name string `json:"name"`
Path string `json:"path"`
}
// GetURL return the url
func (f File) GetURL() string {
ext := filepath.Ext(f.Name)
name := f.Name[:len(f.Name)-len(ext)]
return fmt.Sprintf("%s?f=%s%s", f.Path, url.QueryEscape(name), ext)
}
// GetHash get hash from file path
func (f File) GetHash() (string, error) {
return SplitHash(f.Path)
}
func (f File) Index(n int) FileWithIndex {
return FileWithIndex{
Index: n,
File: f,
}
}
type FileWithIndex struct {
Index int
File
}
type Attachment struct {
Name string `json:"name,omitempty"`
Path string `json:"path,omitempty"`
}
type Author struct {
ID string `json:"id,omitempty"`
Avatar string `json:"avatar,omitempty"`
Discriminator string `json:"discriminator,omitempty"`
PublicFlags int64 `json:"public_flags,omitempty"`
Username string `json:"username,omitempty"`
}
type Post struct {
ID string `json:"id,omitempty"`
User string `json:"user,omitempty"`
Service string `json:"service,omitempty"`
Title string `json:"title,omitempty"`
Content string `json:"content,omitempty"`
// Embed []any `json:"embed,omitempty"`
SharedFile bool `json:"shared_file,omitempty"`
Added Time `json:"added,omitempty"`
Published Time `json:"published,omitempty"`
Edited Time `json:"edited,omitempty"`
File File `json:"file,omitempty"`
Attachments []File `json:"attachments,omitempty"`
}
type Time time.Time
func (t *Time) UnmarshalJSON(b []byte) error {
var timestamp string
err := json.Unmarshal(b, &timestamp)
if err != nil {
return err
}
if timestamp == "" {
return nil
}
parsed, err := cast.StringToDate(timestamp)
if err != nil {
return err
}
*t = Time(parsed)
return nil
}
// User a creator according to the service and id
type User struct {
Service string `json:"service"`
Id string `json:"id"`
}
// GetID get user id
func (u User) GetID() string {
return u.Id
}
// GetService get user Service
func (u User) GetService() string {
return u.Service
}
type FavoriteCreator struct {
FavedSeq int `json:"faved_seq"`
Id string `json:"id"`
Index string `json:"index"`
Name string `json:"name"`
Service string `json:"service"`
Update string `json:"update"`
}
var SiteMap = map[string]string{
"patreon": "kemono",
"fanbox": "kemono",
"gumroad": "kemono",
"subscribestar": "kemono",
"dlsite": "kemono",
"discord": "kemono",
"fantia": "kemono",
"boosty": "kemono",
"afdian": "kemono",
"onlyfans": "coomer",
"fansly": "coomer",
}

@@ -0,0 +1,25 @@
package kemonoapi
import (
"path/filepath"
"strings"
)
func isImage(ext string) bool {
switch ext {
case ".apng", ".avif", ".bmp", ".gif", ".ico", ".cur", ".jpg", ".jpeg", ".jfif", ".pjpeg", ".pjp", ".png", ".svg", ".tif", ".tiff", ".webp", ".jpe":
return true
default:
return false
}
}
func SplitHash(str string) (string, error) {
parts := strings.Split(str, "/")
if len(parts) < 4 {
return "", nil
}
ext := filepath.Ext(parts[3])
name := parts[3][:len(parts[3])-len(ext)]
return name, nil
}

@@ -0,0 +1,10 @@
package main
import "git.kmsign.ru/royalcat/tstor/plugins/kemono"
func main() {
}
const DaemonName = kemono.DaemonName
var NewDaemon = kemono.NewDaemon

@@ -0,0 +1,27 @@
package qbittorrent
import (
"context"
"git.kmsign.ru/royalcat/tstor/plugins/qbittorrent/pkg/qbittorrent"
)
func (d *Daemon) ListTorrents(ctx context.Context) ([]*qbittorrent.TorrentInfo, error) {
return d.client.qb.Torrent().GetTorrents(ctx, &qbittorrent.TorrentOption{})
}
func (d *Daemon) SourceFiles(ctx context.Context, hash string) ([]string, error) {
d.sourceFilesMu.Lock()
defer d.sourceFilesMu.Unlock()
out := make([]string, 0, 1)
for k, h := range d.sourceFiles {
if h != hash {
continue
}
out = append(out, k)
}
return out, nil
}

@@ -0,0 +1,120 @@
package qbittorrent
import (
"context"
"errors"
"fmt"
"log/slog"
"os"
"path"
"slices"
"git.kmsign.ru/royalcat/tstor/plugins/qbittorrent/pkg/qbittorrent"
"git.kmsign.ru/royalcat/tstor/server/pkg/rlog"
)
func (d *Daemon) Cleanup(ctx context.Context, run bool) ([]string, error) {
d.log.Info(ctx, "cleanup started")
torrentInfos, err := d.client.qb.Torrent().GetTorrents(ctx, &qbittorrent.TorrentOption{})
if err != nil {
d.log.Error(ctx, "failed to get torrents", rlog.Error(err))
return nil, fmt.Errorf("failed to get torrents: %w", err)
}
daemonsHashes := []string{}
for _, info := range torrentInfos {
daemonsHashes = append(daemonsHashes, info.Hash)
}
dataDirs, err := os.ReadDir(d.dataDir)
if err != nil {
d.log.Error(ctx, "failed to read data directory", slog.String("path", d.dataDir), rlog.Error(err))
return nil, fmt.Errorf("failed to read data directory: %w", err)
}
dataHashes := []string{}
for _, entry := range dataDirs {
dataHashes = append(dataHashes, entry.Name())
}
hashToDelete := make([]string, 0, 5)
for _, v := range dataHashes {
if !slices.Contains(daemonsHashes, v) {
hashToDelete = append(hashToDelete, v)
}
}
d.log.Info(ctx, "marked torrents to delete",
slog.Int("count", len(hashToDelete)),
slog.Any("infohashes", hashToDelete),
)
if !run {
d.log.Info(ctx, "dry run, skipping deletion")
return hashToDelete, nil
}
for _, hash := range hashToDelete {
d.log.Info(ctx, "deleting stale torrent data", slog.String("infohash", hash))
err := os.RemoveAll(path.Join(d.dataDir, hash))
if err != nil {
d.log.Error(ctx, "failed to delete torrent data", slog.String("infohash", hash), rlog.Error(err))
return nil, fmt.Errorf("failed to delete torrent data: %w", err)
}
}
return hashToDelete, nil
}
func (d *Daemon) CleanupUnregistred(ctx context.Context, run bool) ([]string, error) {
d.log.Info(ctx, "cleanup started")
torrentInfos, err := d.client.qb.Torrent().GetTorrents(ctx, &qbittorrent.TorrentOption{})
if err != nil {
d.log.Error(ctx, "failed to get torrents", rlog.Error(err))
return nil, fmt.Errorf("failed to get torrents: %w", err)
}
torrentToDelete := make([]string, 0, 5)
for _, info := range torrentInfos {
if d.registeredTorrents.Contains(info.Hash) {
continue
}
d.log.Info(ctx, "torrent not found in registry", slog.String("infohash", info.Hash))
torrentToDelete = append(torrentToDelete, info.Hash)
}
d.log.Info(ctx, "marked torrents to delete",
slog.Int("count", len(torrentToDelete)),
slog.Any("infohashes", torrentToDelete),
)
if !run {
d.log.Info(ctx, "dry run, skipping deletion")
return torrentToDelete, nil
}
err = d.client.qb.Torrent().DeleteTorrents(ctx, torrentToDelete, true)
if err != nil {
d.log.Error(ctx, "failed to delete torrents", slog.Any("infohashes", torrentToDelete), rlog.Error(err))
return nil, fmt.Errorf("failed to delete torrents: %w", err)
}
d.log.Info(ctx, "torrents deleted from qbittorrent", slog.Int("count", len(torrentToDelete)))
for _, hash := range torrentToDelete {
torrentPath := path.Join(d.dataDir, hash)
_, err := os.Stat(torrentPath)
if errors.Is(err, os.ErrNotExist) {
continue
}
if err != nil {
d.log.Error(ctx, "failed to get torrent path", slog.String("path", torrentPath), rlog.Error(err))
continue
}
d.log.Warn(ctx, "leftover data for torrent detected, cleaning up", slog.String("infohash", hash), slog.String("path", torrentPath))
}
return torrentToDelete, nil
}

@@ -0,0 +1,273 @@
package qbittorrent
import (
"context"
"fmt"
"slices"
"time"
"git.kmsign.ru/royalcat/tstor/plugins/qbittorrent/pkg/qbittorrent"
"github.com/hashicorp/golang-lru/v2/expirable"
"github.com/royalcat/btrgo/btrsync"
"github.com/viccon/sturdyc"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
)
var meter = otel.Meter("git.kmsign.ru/royalcat/tstor/plugins/qbittorrent")
type cacheClient struct {
qb qbittorrent.Client
propertiesCache *expirable.LRU[string, qbittorrent.TorrentProperties]
pieceCache btrsync.MapOf[pieceKey, int]
infoClient *sturdyc.Client[*qbittorrent.TorrentInfo]
}
type pieceKey struct {
hash string
index int
}
func wrapClient(qb qbittorrent.Client) *cacheClient {
const (
cacheSize = 5000
cacheTTL = time.Minute
)
return &cacheClient{
qb: qb,
propertiesCache: expirable.NewLRU[string, qbittorrent.TorrentProperties](cacheSize, nil, cacheTTL),
infoClient: sturdyc.New[*qbittorrent.TorrentInfo](cacheSize, 1, cacheTTL, 10,
sturdyc.WithEarlyRefreshes(time.Minute, time.Minute*5, time.Second*10, time.Second*10),
sturdyc.WithRefreshCoalescing(100, time.Second/4),
sturdyc.WithMetrics(newSturdycMetrics()),
),
pieceCache: btrsync.MapOf[pieceKey, int]{},
}
}
func (f *cacheClient) getInfo(ctx context.Context, hash string) (*qbittorrent.TorrentInfo, error) {
out, err := f.infoClient.GetOrFetchBatch(ctx, []string{hash},
f.infoClient.BatchKeyFn(""),
func(ctx context.Context, ids []string) (map[string]*qbittorrent.TorrentInfo, error) {
infos, err := f.qb.Torrent().GetTorrents(ctx, &qbittorrent.TorrentOption{
Hashes: ids,
})
if err != nil {
return nil, fmt.Errorf("error to get torrents: %w", err)
}
out := make(map[string]*qbittorrent.TorrentInfo)
for _, info := range infos {
out[info.Hash] = info
}
return out, nil
},
)
if err != nil {
return nil, err
}
if out[hash] == nil {
return nil, nil
}
return out[hash], nil
}
func (f *cacheClient) getProperties(ctx context.Context, hash string) (*qbittorrent.TorrentProperties, error) {
if v, ok := f.propertiesCache.Get(hash); ok {
return &v, nil
}
info, err := f.qb.Torrent().GetProperties(ctx, hash)
if err != nil {
return nil, err
}
f.propertiesCache.Add(hash, *info)
return info, nil
}
func (f *cacheClient) listContent(ctx context.Context, hash string) ([]*qbittorrent.TorrentContent, error) {
contents, err := f.qb.Torrent().GetContents(ctx, hash)
if err != nil {
return nil, err
}
return contents, nil
}
func (f *cacheClient) getContent(ctx context.Context, hash string, contentIndex int) (*qbittorrent.TorrentContent, error) {
contents, err := f.qb.Torrent().GetContents(ctx, hash, contentIndex)
if err != nil {
return nil, err
}
contentI := slices.IndexFunc(contents, func(c *qbittorrent.TorrentContent) bool {
return c.Index == contentIndex
})
if contentI == -1 {
return nil, fmt.Errorf("content not found")
}
return contents[contentI], nil
}
func (f *cacheClient) isPieceComplete(ctx context.Context, hash string, pieceIndex int) (bool, error) {
cachedPieceState, ok := f.pieceCache.Load(pieceKey{hash: hash, index: pieceIndex})
if ok && cachedPieceState == 2 {
return true, nil
}
completion, err := f.qb.Torrent().GetPiecesStates(ctx, hash)
if err != nil {
return false, err
}
for i, v := range completion {
f.pieceCache.Store(pieceKey{hash: hash, index: i}, v)
}
if completion[pieceIndex] == 2 {
return true, nil
}
return false, nil
}
func (f *cacheClient) waitPieceToComplete(ctx context.Context, hash string, pieceIndex int) error {
const checkingInterval = 1 * time.Second
ok, err := f.isPieceComplete(ctx, hash, pieceIndex)
if err != nil {
return err
}
if ok {
return nil
}
if deadline, ok := ctx.Deadline(); ok && time.Until(deadline) < checkingInterval {
return context.DeadlineExceeded
}
ticker := time.NewTicker(checkingInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
ok, err := f.isPieceComplete(ctx, hash, pieceIndex)
if err != nil {
return err
}
if ok {
return nil
}
if deadline, ok := ctx.Deadline(); ok && time.Until(deadline) < checkingInterval {
return context.DeadlineExceeded
}
}
}
}
type sturdycMetrics struct {
ctx context.Context
cacheHit metric.Int64Counter
cacheMiss metric.Int64Counter
refresh metric.Int64Counter
missing metric.Int64Counter
forcedEviction metric.Int64Counter
entryEviction metric.Int64Counter
batchSize metric.Int64Histogram
observeCacheSize func() int
}
var _ sturdyc.MetricsRecorder = (*sturdycMetrics)(nil)
func newSturdycMetrics() *sturdycMetrics {
m := &sturdycMetrics{
ctx: context.Background(),
cacheHit: must(meter.Int64Counter("sturdyc_cache_hit")),
cacheMiss: must(meter.Int64Counter("sturdyc_cache_miss")),
refresh: must(meter.Int64Counter("sturdyc_cache_refresh")),
missing: must(meter.Int64Counter("sturdyc_cache_missing")),
forcedEviction: must(meter.Int64Counter("sturdyc_cache_forced_eviction")),
entryEviction: must(meter.Int64Counter("sturdyc_cache_entry_eviction")),
batchSize: must(meter.Int64Histogram("sturdyc_cache_batch_size")),
}
must(meter.Int64ObservableGauge("sturdyc_cache_size",
metric.WithInt64Callback(func(ctx context.Context, io metric.Int64Observer) error {
if m.observeCacheSize == nil {
return nil
}
io.Observe(int64(m.observeCacheSize()))
return nil
})))
return m
}
func (s *sturdycMetrics) CacheHit() {
s.cacheHit.Add(s.ctx, 1)
}
func (s *sturdycMetrics) CacheMiss() {
s.cacheMiss.Add(s.ctx, 1)
}
func (s *sturdycMetrics) Refresh() {
s.refresh.Add(s.ctx, 1)
}
func (s *sturdycMetrics) MissingRecord() {
s.missing.Add(s.ctx, 1)
}
func (s *sturdycMetrics) ForcedEviction() {
s.forcedEviction.Add(s.ctx, 1)
}
func (s *sturdycMetrics) CacheBatchRefreshSize(size int) {
s.batchSize.Record(s.ctx, int64(size))
}
func (s *sturdycMetrics) ObserveCacheSize(callback func() int) {
s.observeCacheSize = callback
}
func (s *sturdycMetrics) EntriesEvicted(evictd int) {
s.entryEviction.Add(s.ctx, int64(evictd))
}
// AsynchronousRefresh implements sturdyc.MetricsRecorder.
func (s *sturdycMetrics) AsynchronousRefresh() {}
// SynchronousRefresh implements sturdyc.MetricsRecorder.
func (s *sturdycMetrics) SynchronousRefresh() {}
// ShardIndex implements sturdyc.MetricsRecorder.
func (s *sturdycMetrics) ShardIndex(int) {}
func must[T any](v T, err error) T {
if err != nil {
panic(err)
}
return v
}

@@ -0,0 +1,74 @@
package qbittorrent
import (
"github.com/knadh/koanf/providers/structs"
"github.com/knadh/koanf/v2"
)
type Config struct {
DataDir string `koanf:"data_dir"`
MetadataDir string `koanf:"metadata_dir"`
}
var defaultConfig = Config{
DataDir: "/data/qbittorrent/data",
MetadataDir: "/data/qbittorrent/metadata",
}
func loadConfig(k *koanf.Koanf) (Config, error) {
koanf := koanf.New(".")
err := koanf.Load(structs.Provider(defaultConfig, "koanf"), nil)
if err != nil {
return Config{}, err
}
err = koanf.Merge(k)
if err != nil {
return Config{}, err
}
var config Config
if err := koanf.Unmarshal("", &config); err != nil { // unmarshal from the merged instance so defaults apply
return Config{}, err
}
return config, nil
}
// var defaultRoutes = []Route{
// {
// Name: "multimedia",
// Torrents: []Torrent{
// {
// MagnetURI: "magnet:?xt=urn:btih:c9e15763f722f23e98a29decdfae341b98d53056&dn=Cosmos+Laundromat&tr=udp%3A%2F%2Fexplodie.org%3A6969&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Ftracker.empire-js.us%3A1337&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337&tr=wss%3A%2F%2Ftracker.btorrent.xyz&tr=wss%3A%2F%2Ftracker.fastcast.nz&tr=wss%3A%2F%2Ftracker.openwebtorrent.com&ws=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2F&xs=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2Fcosmos-laundromat.torrent",
// },
// {
// MagnetURI: "magnet:?xt=urn:btih:dd8255ecdc7ca55fb0bbf81323d87062db1f6d1c&dn=Big+Buck+Bunny&tr=udp%3A%2F%2Fexplodie.org%3A6969&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Ftracker.empire-js.us%3A1337&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337&tr=wss%3A%2F%2Ftracker.btorrent.xyz&tr=wss%3A%2F%2Ftracker.fastcast.nz&tr=wss%3A%2F%2Ftracker.openwebtorrent.com&ws=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2F&xs=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2Fbig-buck-bunny.torrent",
// },
// {
// MagnetURI: "magnet:?xt=urn:btih:08ada5a7a6183aae1e09d831df6748d566095a10&dn=Sintel&tr=udp%3A%2F%2Fexplodie.org%3A6969&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Ftracker.empire-js.us%3A1337&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337&tr=wss%3A%2F%2Ftracker.btorrent.xyz&tr=wss%3A%2F%2Ftracker.fastcast.nz&tr=wss%3A%2F%2Ftracker.openwebtorrent.com&ws=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2F&xs=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2Fsintel.torrent",
// },
// {
// MagnetURI: "magnet:?xt=urn:btih:209c8226b299b308beaf2b9cd3fb49212dbd13ec&dn=Tears+of+Steel&tr=udp%3A%2F%2Fexplodie.org%3A6969&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Ftracker.empire-js.us%3A1337&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337&tr=wss%3A%2F%2Ftracker.btorrent.xyz&tr=wss%3A%2F%2Ftracker.fastcast.nz&tr=wss%3A%2F%2Ftracker.openwebtorrent.com&ws=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2F&xs=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2Ftears-of-steel.torrent",
// },
// {
// MagnetURI: "magnet:?xt=urn:btih:a88fda5954e89178c372716a6a78b8180ed4dad3&dn=The+WIRED+CD+-+Rip.+Sample.+Mash.+Share&tr=udp%3A%2F%2Fexplodie.org%3A6969&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Ftracker.empire-js.us%3A1337&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337&tr=wss%3A%2F%2Ftracker.btorrent.xyz&tr=wss%3A%2F%2Ftracker.fastcast.nz&tr=wss%3A%2F%2Ftracker.openwebtorrent.com&ws=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2F&xs=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2Fwired-cd.torrent",
// },
// },
// },
// }
// var defaultServers = []Server{
// {
// Name: "server",
// Path: "server",
// Trackers: []string{
// "wss://tracker.btorrent.xyz",
// "wss://tracker.openwebtorrent.com",
// "http://p4p.arenabg.com:1337/announce",
// "udp://tracker.opentrackr.org:1337/announce",
// "udp://open.tracker.cl:1337/announce",
// "http://openbittorrent.com:80/announce",
// },
// },
// }
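
A hypothetical usage sketch for the defaults merge above, assuming this package plus the koanf confmap provider (github.com/knadh/koanf/providers/confmap): keys supplied by the user override defaultConfig, anything left unset falls back to it.

func exampleLoadConfig() {
	k := koanf.New(".")
	// pretend the user set only data_dir; metadata_dir stays at its default
	_ = k.Load(confmap.Provider(map[string]interface{}{
		"data_dir": "/srv/qbittorrent/data",
	}, "."), nil)

	cfg, _ := loadConfig(k)
	// cfg.DataDir == "/srv/qbittorrent/data"
	// cfg.MetadataDir == "/data/qbittorrent/metadata" (default retained)
	_ = cfg
}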

@@ -0,0 +1,306 @@
package qbittorrent
import (
"context"
"errors"
"fmt"
"io"
"log/slog"
"os"
"path"
"path/filepath"
"sync"
"time"
"git.kmsign.ru/royalcat/tstor/plugins/qbittorrent/pkg/qbittorrent"
"git.kmsign.ru/royalcat/tstor/server/pkg/rlog"
"git.kmsign.ru/royalcat/tstor/server/src/daemon"
"git.kmsign.ru/royalcat/tstor/server/src/logwrap"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
"git.kmsign.ru/royalcat/tstor/server/tstor"
"github.com/anacrolix/torrent/metainfo"
"github.com/anacrolix/torrent/types/infohash"
infohash_v2 "github.com/anacrolix/torrent/types/infohash-v2"
mapset "github.com/deckarep/golang-set/v2"
"github.com/knadh/koanf/v2"
"github.com/royalcat/ctxio"
"go.opentelemetry.io/otel"
)
var trace = otel.Tracer("git.kmsign.ru/royalcat/tstor/plugins/qbittorrent")
type Daemon struct {
proc *os.Process
qb qbittorrent.Client
client *cacheClient
sourceFilesMu sync.Mutex
sourceFiles map[string]string // [sourcePath]infohash
registeredTorrents mapset.Set[string] // infohash list
dataDir string
log *rlog.Logger
}
const defaultConf = `
[LegalNotice]
Accepted=true
[Preferences]
WebUI\LocalHostAuth=false
WebUI\Password_PBKDF2="@ByteArray(qef5I4wZBkDG+PP6/5mQwA==:LoTmorQM/QM5RHI4+dOiu6xfAz9xak6fhR4ZGpRtJF3JNCGG081Yrtva4G71kXz//ODUuWQKTLlrZPuIDvzqUQ==)"
`
var Plugin = &tstor.Plugin{
Name: DaemonName,
DaemonConstructor: NewDaemon,
}
const DaemonName = "qbittorrent"
var _ daemon.DaemonConstructor = NewDaemon
func NewDaemon(ctx context.Context, koanf *koanf.Koanf) (daemon.Daemon, error) {
log := rlog.Component(DaemonName)
log.Debug(ctx, "QBittorrent plugin loaded. Starting qbittorrent-nox")
config, err := loadConfig(koanf)
if err != nil {
return nil, err
}
binPath := config.MetadataDir + "/qbittorrent-nox"
err = downloadLatestQbitRelease(ctx, binPath)
if err != nil {
return nil, err
}
daemonLog := log.WithComponent("process")
outLog := logwrap.NewSlogWriter(ctx, slog.LevelInfo, daemonLog.Slog())
errLog := logwrap.NewSlogWriter(ctx, slog.LevelError, daemonLog.Slog())
_, err = os.Stat(config.MetadataDir + "/profile/qBittorrent/config/qBittorrent.conf")
if errors.Is(err, os.ErrNotExist) {
err = os.MkdirAll(config.MetadataDir+"/profile/qBittorrent/config", 0744)
if err != nil {
return nil, err
}
err = os.WriteFile(config.MetadataDir+"/profile/qBittorrent/config/qBittorrent.conf", []byte(defaultConf), 0644)
if err != nil {
return nil, err
}
}
err = os.MkdirAll(config.DataDir, 0744)
if err != nil {
return nil, err
}
const port = 25436
proc, err := runQBittorrent(binPath, config.MetadataDir+"/profile", port, outLog, errLog)
if err != nil {
return nil, err
}
time.Sleep(time.Second)
qb, err := qbittorrent.NewClient(ctx, &qbittorrent.Config{
Address: fmt.Sprintf("http://localhost:%d", port),
})
if err != nil {
return nil, err
}
for { // wait for qbittorrent to start
ver, err := qb.Application().Version(ctx)
if err == nil {
log.Info(ctx, "qbittorrent started", slog.String("version", ver))
break
}
log.Warn(ctx, "waiting for qbittorrent to start", rlog.Error(err))
time.Sleep(time.Second)
}
dataDir, err := filepath.Abs(config.DataDir)
if err != nil {
return nil, err
}
err = qb.Application().SetPreferences(ctx, &qbittorrent.Preferences{
SavePath: dataDir,
})
if err != nil {
return nil, err
}
return &Daemon{
qb: qb,
proc: proc,
dataDir: config.DataDir,
sourceFiles: make(map[string]string),
registeredTorrents: mapset.NewSet[string](),
client: wrapClient(qb),
log: rlog.Component(DaemonName),
}, nil
}
func (d *Daemon) Name() string {
return DaemonName
}
func (d *Daemon) Extensions() []string {
return []string{".torrent"}
}
func (d *Daemon) Close(ctx context.Context) error {
err := d.proc.Signal(os.Interrupt)
if err != nil {
return err
}
_, err = d.proc.Wait()
if err != nil {
return err
}
return nil
}
func torrentDataPath(dataDir string, ih string) (string, error) {
return filepath.Abs(path.Join(dataDir, ih))
}
func (fs *Daemon) GetFS(ctx context.Context, sourcePath string, file vfs.File) (vfs.Filesystem, error) {
ctx, span := trace.Start(ctx, "GetTorrentFS")
defer span.End()
stat, err := file.Info()
if err != nil {
return nil, err
}
log := fs.log.With(slog.String("file", file.Name()))
ih, err := readInfoHash(ctx, file)
if err != nil {
return nil, err
}
log = log.With(slog.String("infohash", ih.HexString()))
torrentPath, err := torrentDataPath(fs.dataDir, ih.HexString())
if err != nil {
return nil, fmt.Errorf("error getting torrent path: %w", err)
}
log = log.With(slog.String("torrentPath", torrentPath))
log.Debug(ctx, "creating fs for torrent")
err = fs.syncTorrentState(ctx, file, ih, torrentPath)
if err != nil {
return nil, fmt.Errorf("error syncing torrent state: %w", err)
}
fs.sourceFilesMu.Lock()
fs.sourceFiles[sourcePath] = ih.HexString()
fs.sourceFilesMu.Unlock()
return newTorrentFS(ctx, fs.client, file.Name(), ih.HexString(), stat.ModTime(), torrentPath)
}
func (d *Daemon) syncTorrentState(ctx context.Context, file vfs.File, ih metainfo.Hash, torrentPath string) error {
ctx, span := trace.Start(ctx, "syncTorrentState")
defer span.End()
log := d.log.With(slog.String("file", file.Name()), slog.String("infohash", ih.HexString()))
info, err := d.client.getInfo(ctx, ih.HexString())
if err != nil {
return err
}
log = log.With(slog.String("torrentPath", torrentPath))
if info == nil {
_, err := file.Seek(0, io.SeekStart)
if err != nil {
return err
}
data, err := ctxio.ReadAll(ctx, file)
if err != nil {
return err
}
err = d.qb.Torrent().AddNewTorrent(ctx, &qbittorrent.TorrentAddOption{
Torrents: []*qbittorrent.TorrentAddFileMetadata{
{
Data: data,
},
},
SavePath: torrentPath,
// SequentialDownload: "true",
FirstLastPiecePrio: "true",
})
if err != nil {
d.log.Error(ctx, "error adding torrent", rlog.Error(err))
return fmt.Errorf("error adding torrent: %w", err)
}
var props *qbittorrent.TorrentProperties
for {
props, err = d.client.getProperties(ctx, ih.HexString())
if err == nil {
break
}
if errors.Is(err, context.DeadlineExceeded) {
return err
}
log.Error(ctx, "waiting for torrent to be added", rlog.Error(err))
time.Sleep(time.Millisecond * 15)
}
log.Info(ctx, "added torrent", slog.String("infohash", ih.HexString()))
d.registeredTorrents.Add(props.Hash)
return nil
} else {
// info := existing[0]
props, err := d.client.getProperties(ctx, ih.HexString())
if err != nil {
return fmt.Errorf("error getting torrent properties: %w for infohash: %s", err, ih.HexString())
}
d.registeredTorrents.Add(props.Hash)
if props.SavePath != torrentPath {
log.Info(ctx, "moving torrent to correct location", slog.String("oldPath", props.SavePath))
err = d.qb.Torrent().SetLocation(ctx, []string{ih.HexString()}, torrentPath)
if err != nil {
return fmt.Errorf("error moving torrent: %w", err)
}
}
return nil
}
}
// TODO caching
func readInfoHash(ctx context.Context, file vfs.File) (infohash.T, error) {
mi, err := metainfo.Load(ctxio.IoReader(ctx, file))
if err != nil {
return infohash.T{}, err
}
info, err := mi.UnmarshalInfo()
if err != nil {
return infohash.T{}, err
}
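// For v2 (or hybrid) torrents the 32-byte v2 infohash is truncated to its
// 20-byte short form, so the daemon always keys torrents by a v1-length hash.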
if info.HasV2() {
ih := infohash_v2.HashBytes(mi.InfoBytes)
return *(&ih).ToShort(), nil
}
return infohash.HashBytes(mi.InfoBytes), nil
}

410
plugins/qbittorrent/fs.go Normal file
View file

@ -0,0 +1,410 @@
package qbittorrent
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"log/slog"
"os"
"path"
"strings"
"sync"
"time"
"git.kmsign.ru/royalcat/tstor/plugins/qbittorrent/pkg/qbittorrent"
"git.kmsign.ru/royalcat/tstor/server/pkg/rlog"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
)
type FS struct {
mu sync.Mutex
client *cacheClient
name string
hash string
dataDir string // directory where torrent files are stored
modTime time.Time
entries map[string]fileEntry
log *rlog.Logger
vfs.FilesystemPrototype
}
type fileEntry struct {
fs.FileInfo
Content *qbittorrent.TorrentContent
}
var _ vfs.Filesystem = (*FS)(nil)
func newTorrentFS(ctx context.Context, client *cacheClient, name string, hash string, modTime time.Time, dataDir string) (*FS, error) {
ctx, span := trace.Start(ctx, "newTorrentFS")
defer span.End()
cnts, err := client.listContent(ctx, hash)
if err != nil {
return nil, fmt.Errorf("failed to list content for hash %s: %w", hash, err)
}
entries := make(map[string]fileEntry, len(cnts))
for _, cnt := range cnts {
if cnt.Priority == qbittorrent.PriorityDoNotDownload {
continue
}
entries[vfs.AbsPath(cnt.Name)] = fileEntry{
Content: cnt,
FileInfo: vfs.NewFileInfo(cnt.Name, cnt.Size, modTime),
}
}
return &FS{
client: client,
name: name,
hash: hash,
modTime: modTime,
dataDir: dataDir,
entries: entries,
log: rlog.Component("qbittorrent", "fs"),
FilesystemPrototype: vfs.FilesystemPrototype(name),
}, nil
}
// Open implements vfs.Filesystem.
func (f *FS) Open(ctx context.Context, name string) (vfs.File, error) {
if name == vfs.Separator {
return vfs.NewDirFile(name), nil
}
if entry, ok := f.entries[name]; ok {
return openFile(ctx, f.client, f.dataDir, f.hash, entry.Content)
}
for p := range f.entries {
if strings.HasPrefix(p, name) {
return vfs.NewDirFile(name), nil
}
}
return nil, vfs.ErrNotExist
}
// ReadDir implements vfs.Filesystem.
func (f *FS) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) {
infos := make(map[string]fs.FileInfo, len(f.entries))
for k, v := range f.entries {
infos[k] = v.FileInfo
}
return vfs.ListDirFromInfo(infos, name)
}
// Stat implements vfs.Filesystem.
func (f *FS) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
name = vfs.AbsPath(path.Clean(name))
if vfs.IsRoot(name) {
return vfs.NewDirInfo(f.name, f.modTime), nil
}
if entry, ok := f.entries[name]; ok {
return entry.FileInfo, nil
}
for p := range f.entries {
if strings.HasPrefix(p, name) {
return vfs.NewDirInfo(name, f.modTime), nil
}
}
return nil, vfs.ErrNotExist
}
// Unlink implements vfs.Filesystem.
func (f *FS) Unlink(ctx context.Context, filename string) error {
filename = vfs.AbsPath(path.Clean(filename))
// we cannot delete the torrent itself, because it would be re-added on the next source scan and all deleted files would be restored
if entry, ok := f.entries[filename]; ok {
return f.removeFile(ctx, f.hash, entry.Content)
}
for p, entry := range f.entries {
if strings.HasPrefix(p, filename) {
return f.removeFile(ctx, f.hash, entry.Content)
}
}
return vfs.ErrNotExist
}
func (f *FS) Rename(ctx context.Context, oldpath string, newpath string) error {
oldpath = vfs.AbsPath(path.Clean(oldpath))
newpath = vfs.AbsPath(path.Clean(newpath))
if _, ok := f.entries[oldpath]; ok {
err := f.client.qb.Torrent().RenameFile(ctx, f.hash, vfs.RelPath(oldpath), vfs.RelPath(newpath))
if err != nil {
return fmt.Errorf("failed to rename file %s to %s: %w", oldpath, newpath, err)
}
f.mu.Lock()
defer f.mu.Unlock()
if oldpath != newpath {
f.entries[newpath] = f.entries[oldpath]
delete(f.entries, oldpath)
}
return nil
}
return vfs.ErrNotExist
}
func (f *FS) removeFile(ctx context.Context, hash string, content *qbittorrent.TorrentContent) error {
log := f.log.With(slog.String("hash", hash), slog.String("file", content.Name))
f.mu.Lock()
defer f.mu.Unlock()
fpath := vfs.AbsPath(content.Name)
if _, ok := f.entries[fpath]; !ok {
return fmt.Errorf("file %s is does not found", fpath)
}
delete(f.entries, fpath)
err := f.client.qb.Torrent().SetFilePriority(ctx, f.hash, content.Index, qbittorrent.PriorityDoNotDownload)
if err != nil {
return fmt.Errorf("failed to set priority for torrent %s for file %s: %w", hash, content.Name, err)
}
err = os.Remove(path.Join(f.dataDir, vfs.RelPath(content.Name)))
if err != nil && !errors.Is(err, fs.ErrNotExist) {
log.Warn(ctx, "failed to remove file", rlog.Error(err))
return fmt.Errorf("failed to remove file %s: %w", content.Name, err)
}
return nil
}
func openFile(ctx context.Context, client *cacheClient, torrentDir string, hash string, content *qbittorrent.TorrentContent) (*File, error) {
props, err := client.getProperties(ctx, hash)
if err != nil {
return nil, err
}
// FIXME error when file not started downloading
file, err := os.OpenFile(path.Join(torrentDir, content.Name), os.O_RDONLY, 0)
if err != nil {
return nil, err
}
return &File{
client: client,
hash: hash,
torrentDir: torrentDir,
filePath: content.Name,
contentIndex: content.Index,
pieceSize: props.PieceSize,
fileSize: content.Size,
file: file,
offset: 0,
}, nil
}
type File struct {
client *cacheClient
hash string
torrentModTime time.Time
torrentDir string
filePath string // path inside a torrent directory
contentIndex int
pieceSize int
fileSize int64
mu sync.Mutex
file *os.File
offset int64
}
var _ vfs.File = (*File)(nil)
// Info implements vfs.File.
func (f *File) Info() (fs.FileInfo, error) {
return vfs.NewFileInfo(path.Base(f.filePath), f.fileSize, f.torrentModTime), nil
}
// IsDir implements vfs.File.
func (f *File) IsDir() bool {
return false
}
// Seek implements vfs.File.
func (f *File) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
f.offset = offset
case io.SeekCurrent:
f.offset += offset
case io.SeekEnd:
f.offset = f.fileSize + offset
}
return f.offset, nil
}
// Name implements vfs.File.
func (f *File) Name() string {
return path.Base(f.filePath)
}
func (f *File) canExpectSoon(ctx context.Context) (bool, error) {
info, err := f.client.getInfo(ctx, f.hash)
if err != nil {
return false, err
}
if info == nil {
return false, nil
}
return info.Completed == info.Size || info.State == qbittorrent.TorrentStateCheckingUP || info.State == qbittorrent.TorrentStateDownloading || info.State == qbittorrent.TorrentStateForcedDL, nil
}
func (f *File) isRangeComplete(ctx context.Context, offset int64, size int) (bool, error) {
if size <= 0 {
return true, nil
}
startPieceIndex := int(offset / int64(f.pieceSize))
endPieceIndex := int((offset + int64(size) - 1) / int64(f.pieceSize)) // last piece touched by the range
for pieceIndex := startPieceIndex; pieceIndex <= endPieceIndex; pieceIndex++ {
ok, err := f.client.isPieceComplete(ctx, f.hash, pieceIndex)
if err != nil {
return false, err
}
if !ok {
return false, nil
}
}
return true, nil
}
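// Worked example of the piece arithmetic above (hypothetical numbers): with
// pieceSize = 4 MiB, a 2 MiB read starting at offset 7 MiB spans bytes
// [7 MiB, 9 MiB), so startPieceIndex = 7/4 = 1 and endPieceIndex = (7+2-1)/4 = 2;
// pieces 1 and 2 must both be complete before the range is considered available.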
func (f *File) waitRangeAvailable(ctx context.Context, offset int64, size int) error {
complete, err := f.isRangeComplete(ctx, offset, size)
if err != nil {
return err
}
if complete {
return nil
}
canExpectSoon, err := f.canExpectSoon(ctx)
if err != nil {
return err
}
if !canExpectSoon {
return fmt.Errorf("torrent is not downloading")
}
const checkingInterval = 1 * time.Second
ticker := time.NewTicker(checkingInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
complete, err := f.isRangeComplete(ctx, offset, size)
if err != nil {
return err
}
if complete {
return nil
}
}
}
}
// Read implements vfs.File.
func (f *File) Read(ctx context.Context, p []byte) (int, error) {
f.mu.Lock()
defer f.mu.Unlock()
if err := f.waitRangeAvailable(ctx, f.offset, len(p)); err != nil {
return 0, err
}
n, err := f.file.ReadAt(p, f.offset)
f.offset += int64(n)
return n, err
}
// ReadAt implements vfs.File.
func (f *File) ReadAt(ctx context.Context, p []byte, off int64) (int, error) {
if err := f.waitRangeAvailable(ctx, off, len(p)); err != nil {
return 0, err
}
return f.file.ReadAt(p, off)
}
// Size implements vfs.File.
func (f *File) Size() int64 {
return f.fileSize
}
// Type implements vfs.File.
func (f *File) Type() fs.FileMode {
return 0 // regular file
}
// Close implements vfs.File.
func (f *File) Close(ctx context.Context) error {
return f.file.Close()
}
// type fileInfo struct {
// name string
// size int64
// modTime time.Time
// }
// var _ fs.FileInfo = (*fileInfo)(nil)
// // IsDir implements fs.FileInfo.
// func (f *fileInfo) IsDir() bool {
// return false
// }
// // ModTime implements fs.FileInfo.
// func (f *fileInfo) ModTime() time.Time {
// return f.modTime
// }
// // Mode implements fs.FileInfo.
// func (f *fileInfo) Mode() fs.FileMode {
// return vfs.ModeFileRO
// }
// // Name implements fs.FileInfo.
// func (f *fileInfo) Name() string {
// return f.name
// }
// // Size implements fs.FileInfo.
// func (f *fileInfo) Size() int64 {
// return f.size
// }
// // Sys implements fs.FileInfo.
// func (f *fileInfo) Sys() any {
// return nil
// }

56
plugins/qbittorrent/go.mod Normal file
View file

@ -0,0 +1,56 @@
module git.kmsign.ru/royalcat/tstor/plugins/qbittorrent
go 1.23.5
toolchain go1.24.1
replace github.com/iceber/iouring-go => github.com/royalcat/iouring-go v0.0.0-20240925200811-286062ac1b23
require (
github.com/anacrolix/torrent v1.58.1-0.20241228235504-75e6b6565845
github.com/deckarep/golang-set/v2 v2.7.0
github.com/google/go-github/v63 v63.0.0
github.com/gorilla/schema v1.4.1
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/knadh/koanf/providers/structs v0.1.0
github.com/knadh/koanf/v2 v2.1.2
github.com/royalcat/btrgo v0.0.0-20240318160410-19bd27154450
github.com/royalcat/ctxio v0.0.0-20240602084623-009bd79b3176
github.com/stretchr/testify v1.10.0
github.com/viccon/sturdyc v1.1.1
go.opentelemetry.io/otel v1.34.0
go.opentelemetry.io/otel/metric v1.34.0
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
golang.org/x/sys v0.31.0
)
require (
github.com/anacrolix/dht/v2 v2.22.0 // indirect
github.com/anacrolix/generics v0.0.3-0.20240902042256-7fb2702ef0ca // indirect
github.com/anacrolix/missinggo v1.3.0 // indirect
github.com/anacrolix/missinggo/v2 v2.7.4 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/knadh/koanf/maps v0.1.1 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.6 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.1.6 // indirect
)

332
plugins/qbittorrent/go.sum Normal file
View file

@ -0,0 +1,332 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oXPKINnZ8mcqnye1EkVkqsectk=
crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI=
github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/anacrolix/dht/v2 v2.22.0 h1:wat5FLdT25vltHsjX377GBrpK9o6L2QVn541bIguCYo=
github.com/anacrolix/dht/v2 v2.22.0/go.mod h1:shbBjhgvezqsJoE+hMo/ezHYQFF18V9jUllNIP5xV9k=
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
github.com/anacrolix/generics v0.0.3-0.20240902042256-7fb2702ef0ca h1:aiiGqSQWjtVNdi8zUMfA//IrM8fPkv2bWwZVPbDe0wg=
github.com/anacrolix/generics v0.0.3-0.20240902042256-7fb2702ef0ca/go.mod h1:MN3ve08Z3zSV/rTuX/ouI4lNdlfTxgdafQJiLzyNRB8=
github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
github.com/anacrolix/missinggo v1.1.2-0.20190815015349-b888af804467/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
github.com/anacrolix/missinggo v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y=
github.com/anacrolix/missinggo v1.3.0 h1:06HlMsudotL7BAELRZs0yDZ4yVXsHXGi323QBjAVASw=
github.com/anacrolix/missinggo v1.3.0/go.mod h1:bqHm8cE8xr+15uVfMG3BFui/TxyB6//H5fwlq/TeqMc=
github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ=
github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY=
github.com/anacrolix/missinggo/v2 v2.5.1/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA=
github.com/anacrolix/missinggo/v2 v2.7.4 h1:47h5OXoPV8JbA/ACA+FLwKdYbAinuDO8osc2Cu9xkxg=
github.com/anacrolix/missinggo/v2 v2.7.4/go.mod h1:vVO5FEziQm+NFmJesc7StpkquZk+WJFCaL0Wp//2sa0=
github.com/anacrolix/multiless v0.4.0 h1:lqSszHkliMsZd2hsyrDvHOw4AbYWa+ijQ66LzbjqWjM=
github.com/anacrolix/multiless v0.4.0/go.mod h1:zJv1JF9AqdZiHwxqPgjuOZDGWER6nyE48WBCi/OOrMM=
github.com/anacrolix/stm v0.2.0/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg=
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8=
github.com/anacrolix/torrent v1.58.1-0.20241228235504-75e6b6565845 h1:ZuYsqgbLCVJHHmYQKG6ImMtz+3hUOI1qvRJTuxTVEZY=
github.com/anacrolix/torrent v1.58.1-0.20241228235504-75e6b6565845/go.mod h1:n3SjHIE8oHXeH0Px0d5FXQ7cU4IgbEfTroen6B9KWJk=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set/v2 v2.7.0 h1:gIloKvD7yH2oip4VLhsv3JyLLFnC0Y2mlusgcvJYW5k=
github.com/deckarep/golang-set/v2 v2.7.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v63 v63.0.0 h1:13xwK/wk9alSokujB9lJkuzdmQuVn2QCPeck76wR3nE=
github.com/google/go-github/v63 v63.0.0/go.mod h1:IqbcrgUmIcEaioWrGYei/09o+ge5vhffGOcxrO0AfmA=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E=
github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/structs v0.1.0 h1:wJRteCNn1qvLtE5h8KQBvLJovidSdntfdyIbbCzEyE0=
github.com/knadh/koanf/providers/structs v0.1.0/go.mod h1:sw2YZ3txUcqA3Z27gPlmmBzWn1h8Nt9O6EP/91MkcWE=
github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY=
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/royalcat/btrgo v0.0.0-20240318160410-19bd27154450 h1:AZyZxXZLniAR0DaZhTS4RVcHtOvYMW8IunplqC9A0mk=
github.com/royalcat/btrgo v0.0.0-20240318160410-19bd27154450/go.mod h1:m3TPa9l/wMKpm/7WHrMs3dSFUxo7kLHaI8ap+SFGYhQ=
github.com/royalcat/ctxio v0.0.0-20240602084623-009bd79b3176 h1:2jCQJow6jRvhpdMJCo1Okd7tq5Rg4YXlUxqT0q0NWAg=
github.com/royalcat/ctxio v0.0.0-20240602084623-009bd79b3176/go.mod h1:81eB8eOH/UU7pzI7J1Rsg3KLpshF7BXg4+UHbex+27I=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/viccon/sturdyc v1.1.1 h1:ZrAdlQHDhkE2zwTLH/948vKCuaEOXGP3ezKDWh8xOHw=
github.com/viccon/sturdyc v1.1.1/go.mod h1:OCBEgG/i48uugKQ498UQlfMHmf5j8MYY8a4BApfVnMo=
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
lukechampine.com/blake3 v1.1.6 h1:H3cROdztr7RCfoaTpGZFQsrqvweFLrqS73j7L7cmR5c=
lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=

View file

@ -0,0 +1,150 @@
package qbittorrent
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"log/slog"
"net/http"
"os"
"os/exec"
"path"
"runtime"
"time"
"github.com/google/go-github/v63/github"
"golang.org/x/sys/cpu"
)
const (
repoOwner = "userdocs"
repoName = "qbittorrent-nox-static"
)
func runQBittorrent(binPath string, profileDir string, port int, stdout, stderr io.Writer) (*os.Process, error) {
err := os.Chmod(binPath, 0755)
if err != nil {
return nil, err
}
cmd := exec.Command(binPath, fmt.Sprintf("--profile=%s", profileDir), fmt.Sprintf("--webui-port=%d", port))
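// qbittorrent-nox asks to accept its legal notice on first interactive run;
// feeding "y" on stdin accepts it non-interactively.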
cmd.Stdin = bytes.NewReader([]byte("y\n"))
cmd.Stdout = stdout
cmd.Stderr = stderr
err = cmd.Start()
if err != nil {
return nil, err
}
return cmd.Process, nil
}
func downloadLatestQbitRelease(ctx context.Context, binPath string) error {
client := github.NewClient(nil)
rel, _, err := client.Repositories.GetLatestRelease(ctx, repoOwner, repoName)
if err != nil {
return err
}
arch := ""
switch runtime.GOARCH {
case "amd64":
arch = "x86_64"
case "arm":
arch = "armhf" // this is a safe version, go does not distinguish between armv6 and armv7
if cpu.ARM.HasNEON {
arch = "armv7"
}
case "arm64":
arch = "aarch64"
}
if arch == "" {
return errors.New("unsupported architecture")
}
binName := arch + "-qbittorrent-nox"
var targetRelease *github.ReleaseAsset
for _, v := range rel.Assets {
if v.GetName() == binName {
targetRelease = v
break
}
}
if targetRelease == nil {
return fmt.Errorf("target asset %s not found", binName)
}
downloadUrl := targetRelease.GetBrowserDownloadURL()
if downloadUrl == "" {
return errors.New("download url is empty")
}
err = os.MkdirAll(path.Dir(binPath), 0755)
if err != nil {
return err
}
slog.InfoContext(ctx, "downloading latest qbittorrent-nox release", slog.String("url", downloadUrl))
return downloadFile(ctx, binPath, downloadUrl)
}
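// For reference, the GOARCH switch above resolves to release asset names like:
//
//	amd64 -> "x86_64-qbittorrent-nox"
//	arm64 -> "aarch64-qbittorrent-nox"
//	arm   -> "armv7-qbittorrent-nox" (with NEON) or "armhf-qbittorrent-nox"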
func downloadFile(ctx context.Context, filepath string, webUrl string) error {
if stat, err := os.Stat(filepath); err == nil {
resp, err := http.Head(webUrl)
if err != nil {
return err
}
defer resp.Body.Close()
var lastModified time.Time
lastModifiedHeader := resp.Header.Get("Last-Modified")
if lastModifiedHeader != "" {
lastModified, err = time.Parse(http.TimeFormat, lastModifiedHeader)
if err != nil {
return err
}
}
if resp.ContentLength == stat.Size() && lastModified.Before(stat.ModTime()) {
slog.InfoContext(ctx, "there is already newest version of the file", slog.String("filepath", filepath))
return nil
}
}
// Create the file
out, err := os.Create(filepath)
if err != nil {
return err
}
defer out.Close()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, webUrl, nil)
if err != nil {
return err
}
// Get the data
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
// Check server response
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("bad status: %s", resp.Status)
}
// Write the body to the file
_, err = io.Copy(out, resp.Body)
if err != nil {
return err
}
return nil
}
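// Minimal usage sketch (hypothetical destination path): fetch or refresh the
// static qbittorrent-nox binary before starting the daemon.
//
//	if err := downloadLatestQbitRelease(ctx, "/srv/tstor/metadata/bin/qbittorrent-nox"); err != nil {
//		return err
//	}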

View file

@ -0,0 +1,18 @@
package qbittorrent
import (
"context"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
func TestDownloadQBittorrent(t *testing.T) {
ctx := context.Background()
require := require.New(t)
binPath := filepath.Join(t.TempDir(), "qbittorrent-nox")
err := downloadLatestQbitRelease(ctx, binPath)
require.NoError(err)
// a second call should find the already downloaded binary and still succeed
err = downloadLatestQbitRelease(ctx, binPath)
require.NoError(err)
}

View file

@ -0,0 +1,396 @@
package qbittorrent
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
)
type Application interface {
// Version returns the application version
Version(context.Context) (string, error)
// WebApiVersion returns the WebAPI version
WebApiVersion(context.Context) (string, error)
// BuildInfo returns build information
BuildInfo(context.Context) (*BuildInfo, error)
// Shutdown exits the application
Shutdown(context.Context) error
// GetPreferences returns the application preferences
GetPreferences(context.Context) (*Preferences, error)
// SetPreferences sets the application preferences
SetPreferences(context.Context, *Preferences) error
// DefaultSavePath returns the default save path
DefaultSavePath(context.Context) (string, error)
}
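// Usage sketch, mirroring the daemon above (qb is assumed to be a client
// returned by NewClient):
//
//	ver, err := qb.Application().Version(ctx)
//	if err != nil { /* handle */ }
//	err = qb.Application().SetPreferences(ctx, &Preferences{SavePath: "/srv/tstor/data"})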
type BuildInfo struct {
BitNess int `json:"bitness,omitempty"`
Boost string `json:"boost,omitempty"`
LibTorrent string `json:"libtorrent,omitempty"`
Openssl string `json:"openssl,omitempty"`
QT string `json:"qt,omitempty"`
Zlib string `json:"zlib,omitempty"`
}
type Preferences struct {
AddToTopOfQueue bool `json:"add_to_top_of_queue,omitempty"`
AddTrackers string `json:"add_trackers,omitempty"`
AddTrackersEnabled bool `json:"add_trackers_enabled,omitempty"`
AltDlLimit int `json:"alt_dl_limit,omitempty"`
AltUpLimit int `json:"alt_up_limit,omitempty"`
AlternativeWebuiEnabled bool `json:"alternative_webui_enabled,omitempty"`
AlternativeWebuiPath string `json:"alternative_webui_path,omitempty"`
AnnounceIP string `json:"announce_ip,omitempty"`
AnnounceToAllTiers bool `json:"announce_to_all_tiers,omitempty"`
AnnounceToAllTrackers bool `json:"announce_to_all_trackers,omitempty"`
AnonymousMode bool `json:"anonymous_mode,omitempty"`
AsyncIoThreads int `json:"async_io_threads,omitempty"`
AutoDeleteMode int `json:"auto_delete_mode,omitempty"`
AutoTmmEnabled bool `json:"auto_tmm_enabled,omitempty"`
AutorunEnabled bool `json:"autorun_enabled,omitempty"`
AutorunOnTorrentAddedEnabled bool `json:"autorun_on_torrent_added_enabled,omitempty"`
AutorunOnTorrentAddedProgram string `json:"autorun_on_torrent_added_program,omitempty"`
AutorunProgram string `json:"autorun_program,omitempty"`
BannedIPs string `json:"banned_IPs,omitempty"`
BdecodeDepthLimit int `json:"bdecode_depth_limit,omitempty"`
BdecodeTokenLimit int `json:"bdecode_token_limit,omitempty"`
BittorrentProtocol int `json:"bittorrent_protocol,omitempty"`
BlockPeersOnPrivilegedPorts bool `json:"block_peers_on_privileged_ports,omitempty"`
BypassAuthSubnetWhitelist string `json:"bypass_auth_subnet_whitelist,omitempty"`
BypassAuthSubnetWhitelistEnabled bool `json:"bypass_auth_subnet_whitelist_enabled,omitempty"`
BypassLocalAuth bool `json:"bypass_local_auth,omitempty"`
CategoryChangedTmmEnabled bool `json:"category_changed_tmm_enabled,omitempty"`
CheckingMemoryUse int `json:"checking_memory_use,omitempty"`
ConnectionSpeed int `json:"connection_speed,omitempty"`
CurrentInterfaceAddress string `json:"current_interface_address,omitempty"`
CurrentInterfaceName string `json:"current_interface_name,omitempty"`
CurrentNetworkInterface string `json:"current_network_interface,omitempty"`
Dht bool `json:"dht,omitempty"`
DiskCache int `json:"disk_cache,omitempty"`
DiskCacheTTL int `json:"disk_cache_ttl,omitempty"`
DiskIoReadMode int `json:"disk_io_read_mode,omitempty"`
DiskIoType int `json:"disk_io_type,omitempty"`
DiskIoWriteMode int `json:"disk_io_write_mode,omitempty"`
DiskQueueSize int `json:"disk_queue_size,omitempty"`
DlLimit int `json:"dl_limit,omitempty"`
DontCountSlowTorrents bool `json:"dont_count_slow_torrents,omitempty"`
DyndnsDomain string `json:"dyndns_domain,omitempty"`
DyndnsEnabled bool `json:"dyndns_enabled,omitempty"`
DyndnsPassword string `json:"dyndns_password,omitempty"`
DyndnsService int `json:"dyndns_service,omitempty"`
DyndnsUsername string `json:"dyndns_username,omitempty"`
EmbeddedTrackerPort int `json:"embedded_tracker_port,omitempty"`
EmbeddedTrackerPortForwarding bool `json:"embedded_tracker_port_forwarding,omitempty"`
EnableCoalesceReadWrite bool `json:"enable_coalesce_read_write,omitempty"`
EnableEmbeddedTracker bool `json:"enable_embedded_tracker,omitempty"`
EnableMultiConnectionsFromSameIP bool `json:"enable_multi_connections_from_same_ip,omitempty"`
EnablePieceExtentAffinity bool `json:"enable_piece_extent_affinity,omitempty"`
EnableUploadSuggestions bool `json:"enable_upload_suggestions,omitempty"`
Encryption int `json:"encryption,omitempty"`
ExcludedFileNames string `json:"excluded_file_names,omitempty"`
ExcludedFileNamesEnabled bool `json:"excluded_file_names_enabled,omitempty"`
ExportDir string `json:"export_dir,omitempty"`
ExportDirFin string `json:"export_dir_fin,omitempty"`
FileLogAge int `json:"file_log_age,omitempty"`
FileLogAgeType int `json:"file_log_age_type,omitempty"`
FileLogBackupEnabled bool `json:"file_log_backup_enabled,omitempty"`
FileLogDeleteOld bool `json:"file_log_delete_old,omitempty"`
FileLogEnabled bool `json:"file_log_enabled,omitempty"`
FileLogMaxSize int `json:"file_log_max_size,omitempty"`
FileLogPath string `json:"file_log_path,omitempty"`
FilePoolSize int `json:"file_pool_size,omitempty"`
HashingThreads int `json:"hashing_threads,omitempty"`
I2PAddress string `json:"i2p_address,omitempty"`
I2PEnabled bool `json:"i2p_enabled,omitempty"`
I2PInboundLength int `json:"i2p_inbound_length,omitempty"`
I2PInboundQuantity int `json:"i2p_inbound_quantity,omitempty"`
I2PMixedMode bool `json:"i2p_mixed_mode,omitempty"`
I2POutboundLength int `json:"i2p_outbound_length,omitempty"`
I2POutboundQuantity int `json:"i2p_outbound_quantity,omitempty"`
I2PPort int `json:"i2p_port,omitempty"`
IdnSupportEnabled bool `json:"idn_support_enabled,omitempty"`
IncompleteFilesExt bool `json:"incomplete_files_ext,omitempty"`
IPFilterEnabled bool `json:"ip_filter_enabled,omitempty"`
IPFilterPath string `json:"ip_filter_path,omitempty"`
IPFilterTrackers bool `json:"ip_filter_trackers,omitempty"`
LimitLanPeers bool `json:"limit_lan_peers,omitempty"`
LimitTCPOverhead bool `json:"limit_tcp_overhead,omitempty"`
LimitUtpRate bool `json:"limit_utp_rate,omitempty"`
ListenPort int `json:"listen_port,omitempty"`
Locale string `json:"locale,omitempty"`
Lsd bool `json:"lsd,omitempty"`
MailNotificationAuthEnabled bool `json:"mail_notification_auth_enabled,omitempty"`
MailNotificationEmail string `json:"mail_notification_email,omitempty"`
MailNotificationEnabled bool `json:"mail_notification_enabled,omitempty"`
MailNotificationPassword string `json:"mail_notification_password,omitempty"`
MailNotificationSender string `json:"mail_notification_sender,omitempty"`
MailNotificationSMTP string `json:"mail_notification_smtp,omitempty"`
MailNotificationSslEnabled bool `json:"mail_notification_ssl_enabled,omitempty"`
MailNotificationUsername string `json:"mail_notification_username,omitempty"`
MaxActiveCheckingTorrents int `json:"max_active_checking_torrents,omitempty"`
MaxActiveDownloads int `json:"max_active_downloads,omitempty"`
MaxActiveTorrents int `json:"max_active_torrents,omitempty"`
MaxActiveUploads int `json:"max_active_uploads,omitempty"`
MaxConcurrentHTTPAnnounces int `json:"max_concurrent_http_announces,omitempty"`
MaxConnec int `json:"max_connec,omitempty"`
MaxConnecPerTorrent int `json:"max_connec_per_torrent,omitempty"`
MaxInactiveSeedingTime int `json:"max_inactive_seeding_time,omitempty"`
MaxInactiveSeedingTimeEnabled bool `json:"max_inactive_seeding_time_enabled,omitempty"`
MaxRatio int `json:"max_ratio,omitempty"`
MaxRatioAct int `json:"max_ratio_act,omitempty"`
MaxRatioEnabled bool `json:"max_ratio_enabled,omitempty"`
MaxSeedingTime int `json:"max_seeding_time,omitempty"`
MaxSeedingTimeEnabled bool `json:"max_seeding_time_enabled,omitempty"`
MaxUploads int `json:"max_uploads,omitempty"`
MaxUploadsPerTorrent int `json:"max_uploads_per_torrent,omitempty"`
MemoryWorkingSetLimit int `json:"memory_working_set_limit,omitempty"`
MergeTrackers bool `json:"merge_trackers,omitempty"`
OutgoingPortsMax int `json:"outgoing_ports_max,omitempty"`
OutgoingPortsMin int `json:"outgoing_ports_min,omitempty"`
PeerTos int `json:"peer_tos,omitempty"`
PeerTurnover int `json:"peer_turnover,omitempty"`
PeerTurnoverCutoff int `json:"peer_turnover_cutoff,omitempty"`
PeerTurnoverInterval int `json:"peer_turnover_interval,omitempty"`
PerformanceWarning bool `json:"performance_warning,omitempty"`
Pex bool `json:"pex,omitempty"`
PreallocateAll bool `json:"preallocate_all,omitempty"`
ProxyAuthEnabled bool `json:"proxy_auth_enabled,omitempty"`
ProxyBittorrent bool `json:"proxy_bittorrent,omitempty"`
ProxyHostnameLookup bool `json:"proxy_hostname_lookup,omitempty"`
ProxyIP string `json:"proxy_ip,omitempty"`
ProxyMisc bool `json:"proxy_misc,omitempty"`
ProxyPassword string `json:"proxy_password,omitempty"`
ProxyPeerConnections bool `json:"proxy_peer_connections,omitempty"`
ProxyPort int `json:"proxy_port,omitempty"`
ProxyRss bool `json:"proxy_rss,omitempty"`
ProxyType string `json:"proxy_type,omitempty"`
ProxyUsername string `json:"proxy_username,omitempty"`
QueueingEnabled bool `json:"queueing_enabled,omitempty"`
RandomPort bool `json:"random_port,omitempty"`
ReannounceWhenAddressChanged bool `json:"reannounce_when_address_changed,omitempty"`
RecheckCompletedTorrents bool `json:"recheck_completed_torrents,omitempty"`
RefreshInterval int `json:"refresh_interval,omitempty"`
RequestQueueSize int `json:"request_queue_size,omitempty"`
ResolvePeerCountries bool `json:"resolve_peer_countries,omitempty"`
ResumeDataStorageType string `json:"resume_data_storage_type,omitempty"`
RssAutoDownloadingEnabled bool `json:"rss_auto_downloading_enabled,omitempty"`
RssDownloadRepackProperEpisodes bool `json:"rss_download_repack_proper_episodes,omitempty"`
RssMaxArticlesPerFeed int `json:"rss_max_articles_per_feed,omitempty"`
RssProcessingEnabled bool `json:"rss_processing_enabled,omitempty"`
RssRefreshInterval int `json:"rss_refresh_interval,omitempty"`
RssSmartEpisodeFilters string `json:"rss_smart_episode_filters,omitempty"`
SavePath string `json:"save_path,omitempty"`
SavePathChangedTmmEnabled bool `json:"save_path_changed_tmm_enabled,omitempty"`
SaveResumeDataInterval int `json:"save_resume_data_interval,omitempty"`
ScheduleFromHour int `json:"schedule_from_hour,omitempty"`
ScheduleFromMin int `json:"schedule_from_min,omitempty"`
ScheduleToHour int `json:"schedule_to_hour,omitempty"`
ScheduleToMin int `json:"schedule_to_min,omitempty"`
SchedulerDays int `json:"scheduler_days,omitempty"`
SchedulerEnabled bool `json:"scheduler_enabled,omitempty"`
SendBufferLowWatermark int `json:"send_buffer_low_watermark,omitempty"`
SendBufferWatermark int `json:"send_buffer_watermark,omitempty"`
SendBufferWatermarkFactor int `json:"send_buffer_watermark_factor,omitempty"`
SlowTorrentDlRateThreshold int `json:"slow_torrent_dl_rate_threshold,omitempty"`
SlowTorrentInactiveTimer int `json:"slow_torrent_inactive_timer,omitempty"`
SlowTorrentUlRateThreshold int `json:"slow_torrent_ul_rate_threshold,omitempty"`
SocketBacklogSize int `json:"socket_backlog_size,omitempty"`
SocketReceiveBufferSize int `json:"socket_receive_buffer_size,omitempty"`
SocketSendBufferSize int `json:"socket_send_buffer_size,omitempty"`
SsrfMitigation bool `json:"ssrf_mitigation,omitempty"`
StartPausedEnabled bool `json:"start_paused_enabled,omitempty"`
StopTrackerTimeout int `json:"stop_tracker_timeout,omitempty"`
TempPath string `json:"temp_path,omitempty"`
TempPathEnabled bool `json:"temp_path_enabled,omitempty"`
TorrentChangedTmmEnabled bool `json:"torrent_changed_tmm_enabled,omitempty"`
TorrentContentLayout string `json:"torrent_content_layout,omitempty"`
TorrentFileSizeLimit int `json:"torrent_file_size_limit,omitempty"`
TorrentStopCondition string `json:"torrent_stop_condition,omitempty"`
UpLimit int `json:"up_limit,omitempty"`
UploadChokingAlgorithm int `json:"upload_choking_algorithm,omitempty"`
UploadSlotsBehavior int `json:"upload_slots_behavior,omitempty"`
Upnp bool `json:"upnp,omitempty"`
UpnpLeaseDuration int `json:"upnp_lease_duration,omitempty"`
UseCategoryPathsInManualMode bool `json:"use_category_paths_in_manual_mode,omitempty"`
UseHTTPS bool `json:"use_https,omitempty"`
UseSubcategories bool `json:"use_subcategories,omitempty"`
UtpTCPMixedMode int `json:"utp_tcp_mixed_mode,omitempty"`
ValidateHTTPSTrackerCertificate bool `json:"validate_https_tracker_certificate,omitempty"`
WebUIAddress string `json:"web_ui_address,omitempty"`
WebUIBanDuration int `json:"web_ui_ban_duration,omitempty"`
WebUIClickjackingProtectionEnabled bool `json:"web_ui_clickjacking_protection_enabled,omitempty"`
WebUICsrfProtectionEnabled bool `json:"web_ui_csrf_protection_enabled,omitempty"`
WebUICustomHTTPHeaders string `json:"web_ui_custom_http_headers,omitempty"`
WebUIDomainList string `json:"web_ui_domain_list,omitempty"`
WebUIHostHeaderValidationEnabled bool `json:"web_ui_host_header_validation_enabled,omitempty"`
WebUIHTTPSCertPath string `json:"web_ui_https_cert_path,omitempty"`
WebUIHTTPSKeyPath string `json:"web_ui_https_key_path,omitempty"`
WebUIMaxAuthFailCount int `json:"web_ui_max_auth_fail_count,omitempty"`
WebUIPort int `json:"web_ui_port,omitempty"`
WebUIReverseProxiesList string `json:"web_ui_reverse_proxies_list,omitempty"`
WebUIReverseProxyEnabled bool `json:"web_ui_reverse_proxy_enabled,omitempty"`
WebUISecureCookieEnabled bool `json:"web_ui_secure_cookie_enabled,omitempty"`
WebUISessionTimeout int `json:"web_ui_session_timeout,omitempty"`
WebUIUpnp bool `json:"web_ui_upnp,omitempty"`
WebUIUseCustomHTTPHeadersEnabled bool `json:"web_ui_use_custom_http_headers_enabled,omitempty"`
WebUIUsername string `json:"web_ui_username,omitempty"`
}
func (c *client) Version(ctx context.Context) (string, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Application.Version")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/app/version", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return "", err
}
if result.code != 200 {
return "", errors.New("get version failed: " + string(result.body))
}
return string(result.body), nil
}
func (c *client) WebApiVersion(ctx context.Context) (string, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Application.WebApiVersion")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/app/webapiVersion", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return "", err
}
if result.code != 200 {
return "", errors.New("get version failed: " + string(result.body))
}
return string(result.body), nil
}
func (c *client) BuildInfo(ctx context.Context) (*BuildInfo, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Application.BuildInfo")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/app/buildInfo", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return nil, err
}
if result.code != 200 {
return nil, errors.New("get build info failed: " + string(result.body))
}
var build = new(BuildInfo)
if err := json.Unmarshal(result.body, build); err != nil {
return nil, err
}
return build, nil
}
func (c *client) Shutdown(ctx context.Context) error {
ctx, span := trace.Start(ctx, "qbittorrent.Application.Shutdown")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/app/shutdown", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
method: http.MethodPost,
url: apiUrl,
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("shutdown application failed: " + string(result.body))
}
return nil
}
func (c *client) GetPreferences(ctx context.Context) (*Preferences, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Application.GetPreferences")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/app/preferences", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return nil, err
}
if result.code != 200 {
return nil, errors.New("get preference failed: " + string(result.body))
}
var preferences = new(Preferences)
if err := json.Unmarshal(result.body, preferences); err != nil {
return nil, err
}
return preferences, nil
}
func (c *client) SetPreferences(ctx context.Context, prefs *Preferences) error {
ctx, span := trace.Start(ctx, "qbittorrent.Application.SetPreferences")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/app/setPreferences", c.config.Address)
data, err := json.Marshal(prefs)
if err != nil {
return err
}
var formData bytes.Buffer
formData.Write([]byte("json="))
formData.Write(data)
result, err := c.doRequest(ctx, &requestData{
method: http.MethodPost,
url: apiUrl,
contentType: ContentTypeFormUrlEncoded,
body: &formData,
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("set preference failed: " + string(result.body))
}
return nil
}
func (c *client) DefaultSavePath(ctx context.Context) (string, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Application.DefaultSavePath")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/app/defaultSavePath", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return "", err
}
if result.code != 200 {
return "", errors.New("get default save path failed: " + string(result.body))
}
return string(result.body), nil
}

View file

@ -0,0 +1,73 @@
package qbittorrent
import (
"context"
"testing"
)
func TestClient_Version(t *testing.T) {
ctx := context.Background()
version, err := c.Application().Version(ctx)
if err != nil {
t.Fatal(err)
}
t.Log(version)
}
func TestClient_WebApiVersion(t *testing.T) {
ctx := context.Background()
version, err := c.Application().WebApiVersion(ctx)
if err != nil {
t.Fatal(err)
}
t.Log(version)
}
func TestClient_BuildInfo(t *testing.T) {
ctx := context.Background()
info, err := c.Application().BuildInfo(ctx)
if err != nil {
t.Fatal(err)
}
t.Logf("build: %+v", info)
}
func TestClient_Shutdown(t *testing.T) {
ctx := context.Background()
if err := c.Application().Shutdown(ctx); err != nil {
t.Fatal(err)
}
t.Log("shutting down")
}
func TestClient_GetPreferences(t *testing.T) {
ctx := context.Background()
prefs, err := c.Application().GetPreferences(ctx)
if err != nil {
t.Fatal(err)
}
t.Logf("prefs: %+v", prefs)
}
func TestClient_SetPreferences(t *testing.T) {
ctx := context.Background()
prefs, err := c.Application().GetPreferences(ctx)
if err != nil {
t.Fatal(err)
}
prefs.FileLogAge = 301
if err := c.Application().SetPreferences(ctx, prefs); err != nil {
t.Fatal(err)
}
t.Logf("success")
}
func TestClient_DefaultSavePath(t *testing.T) {
ctx := context.Background()
path, err := c.Application().DefaultSavePath(ctx)
if err != nil {
t.Fatal(err)
}
t.Logf("path: %s", path)
}

View file

@ -0,0 +1,91 @@
package qbittorrent
import (
"context"
"errors"
"fmt"
"net/http"
"net/http/cookiejar"
"net/url"
"strings"
)
type Authentication interface {
// Login performs cookie-based authentication. LoginClient calls it automatically,
// so after constructing a client that way there is no need to call Login again.
Login(ctx context.Context) error
// Logout invalidates the current session cookie
Logout(ctx context.Context) error
}
func (c *client) Login(ctx context.Context) error {
ctx, span := trace.Start(ctx, "qbittorrent.Authentication.Login")
defer span.End()
if c.config.Username == "" || c.config.Password == "" {
return errors.New("username or password is empty")
}
formData := url.Values{}
formData.Set("username", c.config.Username)
formData.Set("password", c.config.Password)
encodedFormData := formData.Encode()
apiUrl := fmt.Sprintf("%s/api/v2/auth/login", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
method: http.MethodPost,
url: apiUrl,
body: strings.NewReader(encodedFormData),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("login failed: " + string(result.body))
}
if string(result.body) == "Fails." {
return ErrAuthFailed
}
if string(result.body) != "Ok." {
return errors.New("login failed: " + string(result.body))
}
if c.client.Jar == nil {
c.client.Jar, err = cookiejar.New(nil)
if err != nil {
return err
}
}
u, err := url.Parse(c.config.Address)
if err != nil {
return err
}
c.client.Jar.SetCookies(u, result.cookies)
return nil
}
func (c *client) Logout(ctx context.Context) error {
ctx, span := trace.Start(ctx, "qbittorrent.Authentication.Logout")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/auth/logout", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
method: http.MethodPost,
url: apiUrl,
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("logout failed: " + string(result.body))
}
return nil
}

View file

@ -0,0 +1,24 @@
package qbittorrent
import (
"context"
"testing"
)
func TestClient_Login(t *testing.T) {
ctx := context.Background()
if err := c.Authentication().Login(ctx); err != nil {
t.Fatal(err)
}
}
func TestClient_Logout(t *testing.T) {
ctx := context.Background()
if err := c.Authentication().Login(ctx); err != nil {
t.Fatal(err)
}
if err := c.Authentication().Logout(ctx); err != nil {
t.Fatal(err)
}
}

View file

@ -0,0 +1,72 @@
package qbittorrent
import (
"context"
"crypto/tls"
"net"
"net/http"
"time"
"go.opentelemetry.io/otel"
)
var trace = otel.Tracer("git.kmsign.ru/royalcat/tstor/server/pkg/qbittorrent")
// Client represents a qBittorrent client
type Client interface {
// Authentication auth qBittorrent client
Authentication() Authentication
// Application get qBittorrent application info
Application() Application
// Log get qBittorrent log
Log() Log
// Sync get qBittorrent events
Sync() Sync
// Transfer transfer manage
Transfer() Transfer
// Torrent manage for torrent
Torrent() Torrent
// Search api for search
Search() Search
// RSS api for rss
RSS() RSS
}
func NewClient(ctx context.Context, cfg *Config) (Client, error) {
var c = &client{config: cfg, client: newClient(cfg.ConnectionMaxIdles, cfg.ConnectionTimeout)}
return c, nil
}
func LoginClient(ctx context.Context, cfg *Config) (Client, error) {
var c = &client{config: cfg, client: newClient(cfg.ConnectionMaxIdles, cfg.ConnectionTimeout)}
if err := c.Authentication().Login(ctx); err != nil {
return nil, err
}
if cfg.RefreshCookie {
go c.refreshCookie()
}
return c, nil
}
// newClient creates an *http.Client with pooling and timeout defaults applied
func newClient(maxIdle int, timeout time.Duration) *http.Client {
if maxIdle == 0 {
maxIdle = 128
}
if timeout == 0 {
timeout = time.Second * 3
}
return &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
MaxIdleConns: maxIdle,
},
Timeout: timeout,
}
}
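
As a quick orientation for the constructors above, a minimal usage sketch (a hypothetical standalone file, not part of this commit; the endpoint and credentials are placeholders):

package main

import (
	"context"
	"fmt"

	"git.kmsign.ru/royalcat/tstor/plugins/qbittorrent"
)

// Sketch: build an authenticated client and read the application version.
func main() {
	ctx := context.Background()
	qb, err := qbittorrent.LoginClient(ctx, &qbittorrent.Config{
		Address:  "http://localhost:8080", // placeholder endpoint
		Username: "admin",                 // placeholder credentials
		Password: "adminadmin",
	})
	if err != nil {
		panic(err)
	}
	version, err := qb.Application().Version(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("qBittorrent version:", version)
}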

View file

@ -0,0 +1,128 @@
package qbittorrent
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
)
type responseResult struct {
code int
body []byte
cookies []*http.Cookie
}
type requestData struct {
method string
url string
contentType string
body io.Reader
}
var _ Client = (*client)(nil)
type client struct {
config *Config
client *http.Client
}
func (c *client) Authentication() Authentication {
return c
}
func (c *client) Application() Application {
return c
}
func (c *client) Log() Log {
return c
}
func (c *client) Sync() Sync {
return c
}
func (c *client) Transfer() Transfer {
return c
}
func (c *client) Torrent() Torrent {
return c
}
func (c *client) Search() Search {
return c
}
func (c *client) RSS() RSS {
return c
}
// doRequest send request
func (c *client) doRequest(ctx context.Context, data *requestData) (*responseResult, error) {
if data.method == "" {
data.method = "GET"
}
if data.contentType == "" {
data.contentType = ContentTypeFormUrlEncoded
}
request, err := http.NewRequestWithContext(ctx, data.method, data.url, data.body)
if err != nil {
return nil, err
}
request.Header.Set("Content-Type", data.contentType)
for key, value := range c.config.CustomHeaders {
request.Header.Set(key, value)
}
resp, err := c.client.Do(request)
if err != nil {
return nil, err
}
defer resp.Body.Close()
readAll, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return &responseResult{code: resp.StatusCode, body: readAll, cookies: resp.Cookies()}, nil
}
func (c *client) cookies() (string, error) {
if c.client.Jar == nil {
return "", ErrNotLogin
}
u, err := url.Parse(c.config.Address)
if err != nil {
return "", err
}
cookies := c.client.Jar.Cookies(u)
if len(cookies) == 0 {
return "", ErrNotLogin
}
var builder strings.Builder
for _, cookie := range cookies {
builder.WriteString(fmt.Sprintf("%s=%s; ", cookie.Name, cookie.Value))
}
return builder.String(), nil
}
func (c *client) refreshCookie() {
ctx := context.Background()
if c.config.RefreshIntervals == 0 {
c.config.RefreshIntervals = time.Hour
}
var ticker = time.NewTicker(c.config.RefreshIntervals)
for range ticker.C {
// refresh the session: drop the old cookie (best effort), then log in again
// to obtain a fresh one; on failure the next tick retries
_ = c.Authentication().Logout(ctx)
_ = c.Authentication().Login(ctx)
}
}

View file

@ -0,0 +1,56 @@
package qbittorrent
import (
"context"
"net/url"
"testing"
"time"
)
var (
c Client
)
func init() {
ctx := context.Background()
var err error
c, err = LoginClient(ctx, &Config{
Address: "http://192.168.3.33:38080",
Username: "admin",
Password: "J0710cz5",
RefreshIntervals: time.Hour,
ConnectionTimeout: time.Second * 3,
CustomHeaders: map[string]string{
//"Origin": "http://192.168.3.33:8080",
//"Referer": "http://192.168.3.33:8080",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
},
})
if err != nil {
panic(err)
}
}
func TestFormEncoder(t *testing.T) {
var option = LogOption{
Normal: true,
Info: true,
Warning: false,
Critical: false,
LastKnownId: 0,
}
var form = url.Values{}
err := encoder.Encode(option, form)
if err != nil {
t.Fatal(err)
}
t.Log(form)
}
func TestFormEncode(t *testing.T) {
var form = url.Values{}
form.Add("username", "admin hahaha")
form.Add("password", "J0710c?//&z5")
fe := form.Encode()
t.Log(fe)
}

View file

@ -0,0 +1,10 @@
package qbittorrent
import "github.com/gorilla/schema"
const (
ContentTypeJSON = "application/json"
ContentTypeFormUrlEncoded = "application/x-www-form-urlencoded"
)
var encoder = schema.NewEncoder()

View file

@ -0,0 +1,25 @@
package qbittorrent
import "time"
type Config struct {
// Address qBittorrent endpoint
Address string
// Username used to access the WebUI
Username string
// Password used to access the WebUI
Password string
// HTTP configuration
// CustomHeaders custom headers
CustomHeaders map[string]string
// ConnectionTimeout request timeout, default 3 seconds
ConnectionTimeout time.Duration
// ConnectionMaxIdles maximum idle connections kept in the http client pool, default 128
ConnectionMaxIdles int
// RefreshCookie whether to automatically refresh cookies
RefreshCookie bool
// RefreshIntervals interval between cookie refreshes, default 1 hour
RefreshIntervals time.Duration
}
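
To make the defaults above concrete, an illustrative, fully populated Config (a sketch, not part of this commit; zero values for the timeout, pool size and refresh interval fall back to 3 s, 128 and 1 h as noted in the comments):

package qbittorrent

import "time"

// exampleConfig is an illustrative Config; all values are placeholders.
var exampleConfig = &Config{
	Address:            "http://localhost:8080",
	Username:           "admin",
	Password:           "adminadmin",
	CustomHeaders:      map[string]string{"User-Agent": "tstor"},
	ConnectionTimeout:  5 * time.Second,  // 0 would mean the 3 second default
	ConnectionMaxIdles: 64,               // 0 would mean the 128 connection default
	RefreshCookie:      true,             // enables the background cookie refresh
	RefreshIntervals:   30 * time.Minute, // 0 would mean the 1 hour default
}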

View file

@ -0,0 +1,8 @@
package qbittorrent
import "errors"
var (
ErrNotLogin = errors.New("not login")
ErrAuthFailed = errors.New("auth failed")
)

View file

@ -0,0 +1,95 @@
package qbittorrent
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
)
type LogOption struct {
Normal bool `schema:"normal,omitempty"` // include normal messages
Info bool `schema:"info,omitempty"` // include info messages
Warning bool `schema:"warning,omitempty"` // include warning messages
Critical bool `schema:"critical,omitempty"` // include critical messages
LastKnownId int64 `schema:"last_known_id,omitempty"` // exclude messages with "message id" <= last_known_id (default: -1)
}
type LogEntry struct {
Id int `json:"id,omitempty"` // id of the message or peer
Timestamp int `json:"timestamp,omitempty"` // seconds since epoch
Type int `json:"type,omitempty"` // type of the message, Log::NORMAL: 1, Log::INFO: 2, Log::WARNING: 4, Log::CRITICAL: 8
Message string `json:"message,omitempty"` // text of the message
IP string `json:"ip"` // ip of the peer
Blocked bool `json:"blocked,omitempty"` // whether the peer was blocked
Reason string `json:"reason,omitempty"` // Reason of the block
}
type Log interface {
// GetLog get log
GetLog(ctx context.Context, option *LogOption) ([]*LogEntry, error)
// GetPeerLog get peer log
GetPeerLog(ctx context.Context, lastKnownId int) ([]*LogEntry, error)
}
func (c *client) GetLog(ctx context.Context, option *LogOption) ([]*LogEntry, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Log.GetLog")
defer span.End()
var form = url.Values{}
err := encoder.Encode(option, form)
if err != nil {
return nil, err
}
apiUrl := fmt.Sprintf("%s/api/v2/log/main?%s", c.config.Address, form.Encode())
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
body: strings.NewReader(form.Encode()),
})
if err != nil {
return nil, err
}
if result.code != 200 {
return nil, errors.New("get log failed: " + string(result.body))
}
var logs []*LogEntry
if err := json.Unmarshal(result.body, &logs); err != nil {
return nil, err
}
return logs, nil
}
func (c *client) GetPeerLog(ctx context.Context, lastKnownId int) ([]*LogEntry, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Log.GetPeerLog")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/log/peers", c.config.Address)
var form = url.Values{}
form.Add("last_known_id", strconv.Itoa(lastKnownId))
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
body: strings.NewReader(form.Encode()),
})
if err != nil {
return nil, err
}
if result.code != 200 {
return nil, errors.New("get peer log failed: " + string(result.body))
}
var logs []*LogEntry
if err := json.Unmarshal(result.body, &logs); err != nil {
return nil, err
}
return logs, nil
}

View file

@ -0,0 +1,39 @@
package qbittorrent
import (
"context"
"encoding/json"
"testing"
)
func TestClient_GetLog(t *testing.T) {
ctx := context.Background()
entries, err := c.Log().GetLog(ctx, &LogOption{
Normal: true,
Info: true,
Warning: true,
Critical: true,
LastKnownId: 0,
})
if err != nil {
t.Fatal(err)
}
bytes, err := json.Marshal(entries)
if err != nil {
t.Fatal(err)
}
t.Log(string(bytes))
}
func TestClient_GetPeerLog(t *testing.T) {
ctx := context.Background()
entries, err := c.Log().GetPeerLog(ctx, -1)
if err != nil {
t.Fatal(err)
}
bytes, err := json.Marshal(entries)
if err != nil {
t.Fatal(err)
}
t.Log(string(bytes))
}

View file

@ -0,0 +1,359 @@
package qbittorrent
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
)
type RSS interface {
// AddFolder creates a new RSS folder; path is the full path of the folder to add, e.g. "The Pirate Bay\Top100"
AddFolder(ctx context.Context, path string) error
// AddFeed add feed
AddFeed(ctx context.Context, option *RssAddFeedOption) error
// RemoveItem remove folder or feed
RemoveItem(ctx context.Context, path string) error
// MoveItem move or rename folder or feed
MoveItem(ctx context.Context, srcPath, destPath string) error
// GetItems list all items, if withData is true, will return all data
GetItems(ctx context.Context, withData bool) (map[string]interface{}, error)
// MarkAsRead if articleId is provided only the article is marked as read otherwise the whole feed
// is going to be marked as read.
MarkAsRead(ctx context.Context, option *RssMarkAsReadOption) error
// RefreshItem refresh folder or feed
RefreshItem(ctx context.Context, itemPath string) error
// SetAutoDownloadingRule set auto-downloading rule
SetAutoDownloadingRule(ctx context.Context, ruleName string, ruleDef *RssAutoDownloadingRuleDef) error
// RenameAutoDownloadingRule rename auto-downloading rule
RenameAutoDownloadingRule(ctx context.Context, ruleName, newRuleName string) error
// RemoveAutoDownloadingRule remove auto-downloading rule
RemoveAutoDownloadingRule(ctx context.Context, ruleName string) error
// GetAllAutoDownloadingRules get all auto-downloading rules
GetAllAutoDownloadingRules(ctx context.Context) (map[string]*RssAutoDownloadingRuleDef, error)
// GetAllArticlesMatchingRule get all articles matching a rule
GetAllArticlesMatchingRule(ctx context.Context, ruleName string) (map[string][]string, error)
}
type RssAddFeedOption struct {
// URL feed of rss such as http://thepiratebay.org/rss//top100/200
URL string `schema:"url"`
// Folder full path of added folder, optional
Folder string `schema:"path,omitempty"`
}
type RssMarkAsReadOption struct {
// ItemPath current full path of item
ItemPath string `schema:"itemPath"`
// ArticleId id of article, optional
ArticleId string `schema:"articleId,omitempty"`
}
type RssAutoDownloadingRuleDefTorrentParams struct {
Category string `json:"category,omitempty"`
DownloadLimit int `json:"download_limit,omitempty"`
DownloadPath string `json:"download_path,omitempty"`
InactiveSeedingTimeLimit int `json:"inactive_seeding_time_limit,omitempty"`
OperatingMode string `json:"operating_mode,omitempty"`
RatioLimit int `json:"ratio_limit,omitempty"`
SavePath string `json:"save_path,omitempty"`
SeedingTimeLimit int `json:"seeding_time_limit,omitempty"`
SkipChecking bool `json:"skip_checking,omitempty"`
Tags []string `json:"tags,omitempty"`
UploadLimit int `json:"upload_limit,omitempty"`
Stopped bool `json:"stopped,omitempty"`
UseAutoTMM bool `json:"use_auto_tmm,omitempty"`
}
type RssAutoDownloadingRuleDef struct {
AddPaused bool `json:"addPaused,omitempty"`
AffectedFeeds []string `json:"affectedFeeds,omitempty"`
AssignedCategory string `json:"assignedCategory,omitempty"`
Enabled bool `json:"enabled,omitempty"`
EpisodeFilter string `json:"episodeFilter,omitempty"`
IgnoreDays int `json:"ignoreDays,omitempty"`
LastMatch string `json:"lastMatch,omitempty"`
MustContain string `json:"mustContain,omitempty"`
MustNotContain string `json:"mustNotContain,omitempty"`
PreviouslyMatchedEpisodes []string `json:"previouslyMatchedEpisodes,omitempty"`
Priority int `json:"priority,omitempty"`
SavePath string `json:"savePath,omitempty"`
SmartFilter bool `json:"smartFilter,omitempty"`
TorrentParams *RssAutoDownloadingRuleDefTorrentParams `json:"torrentParams,omitempty"`
UseRegex bool `json:"useRegex,omitempty"`
}
func (c *client) AddFolder(ctx context.Context, path string) error {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.AddFolder")
defer span.End()
var formData = url.Values{}
formData.Add("path", path)
var apiUrl = fmt.Sprintf("%s/api/v2/rss/addFolder", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
body: strings.NewReader(formData.Encode()),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("add rss folder failed: " + string(result.body))
}
return nil
}
func (c *client) AddFeed(ctx context.Context, opt *RssAddFeedOption) error {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.AddFeed")
defer span.End()
var formData = url.Values{}
err := encoder.Encode(opt, formData)
if err != nil {
return err
}
var apiUrl = fmt.Sprintf("%s/api/v2/rss/addFolder", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
body: strings.NewReader(formData.Encode()),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("add rss feed failed: " + string(result.body))
}
return nil
}
func (c *client) RemoveItem(ctx context.Context, path string) error {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.RemoveItem")
defer span.End()
var formData = url.Values{}
formData.Add("path", path)
var apiUrl = fmt.Sprintf("%s/api/v2/rss/removeItem", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
body: strings.NewReader(formData.Encode()),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("remove rss item failed: " + string(result.body))
}
return nil
}
func (c *client) MoveItem(ctx context.Context, srcPath, destPath string) error {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.MoveItem")
defer span.End()
var formData = url.Values{}
formData.Add("itemPath", srcPath)
formData.Add("destPath", destPath)
var apiUrl = fmt.Sprintf("%s/api/v2/rss/moveItem", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
body: strings.NewReader(formData.Encode()),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("move rss item failed: " + string(result.body))
}
return nil
}
func (c *client) GetItems(ctx context.Context, withData bool) (map[string]interface{}, error) {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.GetItems")
defer span.End()
var apiUrl = fmt.Sprintf("%s/api/v2/rss/items?withData=%t", c.config.Address, withData)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodGet,
})
if err != nil {
return nil, err
}
if result.code != 200 {
return nil, errors.New("get rss items failed: " + string(result.body))
}
var data = make(map[string]interface{})
err = json.Unmarshal(result.body, &data)
return data, err
}
func (c *client) MarkAsRead(ctx context.Context, opt *RssMarkAsReadOption) error {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.MarkAsRead")
defer span.End()
var formData = url.Values{}
err := encoder.Encode(opt, formData)
if err != nil {
return err
}
var apiUrl = fmt.Sprintf("%s/api/v2/rss/markAsRead", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
body: strings.NewReader(formData.Encode()),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("mark as read rss item failed: " + string(result.body))
}
return nil
}
func (c *client) RefreshItem(ctx context.Context, itemPath string) error {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.RefreshItem")
defer span.End()
var formData = url.Values{}
formData.Add("itemPath", itemPath)
var apiUrl = fmt.Sprintf("%s/api/v2/rss/refreshItem", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
body: strings.NewReader(formData.Encode()),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("refresh rss item failed: " + string(result.body))
}
return nil
}
func (c *client) SetAutoDownloadingRule(ctx context.Context, ruleName string, ruleDef *RssAutoDownloadingRuleDef) error {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.SetAutoDownloadingRule")
defer span.End()
var formData = url.Values{}
formData.Add("ruleName", ruleName)
ruleDefBytes, err := json.Marshal(ruleDef)
if err != nil {
return err
}
formData.Add("ruleDef", string(ruleDefBytes))
var apiUrl = fmt.Sprintf("%s/api/v2/rss/setRule", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
body: strings.NewReader(formData.Encode()),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("set auto downloading rule failed: " + string(result.body))
}
return nil
}
func (c *client) RenameAutoDownloadingRule(ctx context.Context, ruleName, newRuleName string) error {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.RenameAutoDownloadingRule")
defer span.End()
var formData = url.Values{}
formData.Add("ruleName", ruleName)
formData.Add("newRuleName", newRuleName)
var apiUrl = fmt.Sprintf("%s/api/v2/rss/renameRule", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
body: strings.NewReader(formData.Encode()),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("rename auto downloading rule failed: " + string(result.body))
}
return nil
}
func (c *client) RemoveAutoDownloadingRule(ctx context.Context, ruleName string) error {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.RemoveAutoDownloadingRule")
defer span.End()
var formData = url.Values{}
formData.Add("ruleName", ruleName)
var apiUrl = fmt.Sprintf("%s/api/v2/rss/removeRule", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
body: strings.NewReader(formData.Encode()),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("remove auto downloading rule failed: " + string(result.body))
}
return nil
}
func (c *client) GetAllAutoDownloadingRules(ctx context.Context) (map[string]*RssAutoDownloadingRuleDef, error) {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.GetAllAutoDownloadingRules")
defer span.End()
var apiUrl = fmt.Sprintf("%s/api/v2/rss/matchingArticles", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return nil, err
}
if result.code != 200 {
return nil, errors.New("get rss rules failed: " + string(result.body))
}
var data = make(map[string]*RssAutoDownloadingRuleDef)
err = json.Unmarshal(result.body, &data)
return data, err
}
func (c *client) GetAllArticlesMatchingRule(ctx context.Context, ruleName string) (map[string][]string, error) {
ctx, span := trace.Start(ctx, "qbittorrent.RSS.GetAllArticlesMatchingRule")
defer span.End()
var formData = url.Values{}
formData.Add("ruleName", ruleName)
var apiUrl = fmt.Sprintf("%s/api/v2/rss/matchingArticles?%s", c.config.Address, formData.Encode())
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return nil, err
}
if result.code != 200 {
return nil, errors.New("get rss rule match articles failed: " + string(result.body))
}
var data = make(map[string][]string)
err = json.Unmarshal(result.body, &data)
return data, err
}
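
A sketch of how the rule types above combine with SetAutoDownloadingRule (hypothetical helper and values, assumes an already authenticated Client):

package example

import (
	"context"

	"git.kmsign.ru/royalcat/tstor/plugins/qbittorrent"
)

// addMovieRule registers an illustrative auto-downloading rule for a feed.
func addMovieRule(ctx context.Context, qb qbittorrent.Client) error {
	rule := &qbittorrent.RssAutoDownloadingRuleDef{
		Enabled:          true,
		AffectedFeeds:    []string{"http://thepiratebay.org/rss//top100/200"},
		MustContain:      "1080p",
		MustNotContain:   "CAM",
		AssignedCategory: "movies",
		SavePath:         "/downloads/movies",
	}
	return qb.RSS().SetAutoDownloadingRule(ctx, "movies-1080p", rule)
}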

View file

@ -0,0 +1,64 @@
package qbittorrent
type Search interface {
Start()
Stop()
Status()
Results()
Delete()
Plugins()
InstallPlugins()
UninstallPlugins()
EnableSearchPlugins()
UpdateSearchPlugins()
}
func (c *client) Start() {
//TODO implement me
panic("implement me")
}
func (c *client) Stop() {
//TODO implement me
panic("implement me")
}
func (c *client) Status() {
//TODO implement me
panic("implement me")
}
func (c *client) Results() {
//TODO implement me
panic("implement me")
}
func (c *client) Delete() {
//TODO implement me
panic("implement me")
}
func (c *client) Plugins() {
//TODO implement me
panic("implement me")
}
func (c *client) InstallPlugins() {
//TODO implement me
panic("implement me")
}
func (c *client) UninstallPlugins() {
//TODO implement me
panic("implement me")
}
func (c *client) EnableSearchPlugins() {
//TODO implement me
panic("implement me")
}
func (c *client) UpdateSearchPlugins() {
//TODO implement me
panic("implement me")
}

View file

@ -0,0 +1,127 @@
package qbittorrent
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/url"
"strconv"
)
type Sync interface {
// MainData get sync main data. rid is the response ID of the previous reply; pass 0 to request a full update.
// If the given rid differs from the one of the last server reply, the server answers with a full update instead of a delta.
MainData(ctx context.Context, rid int) (*SyncMainData, error)
// TorrentPeersData get sync torrent peer data, hash is torrent hash, rid is response id
TorrentPeersData(ctx context.Context, hash string, rid int) (*SyncTorrentPeers, error)
}
type SyncMainData struct {
Rid int `json:"rid,omitempty"`
FullUpdate bool `json:"full_update,omitempty"`
ServerState ServerState `json:"server_state,omitempty"`
Torrents map[string]SyncTorrentInfo `json:"torrents,omitempty"`
}
type ServerState struct {
AllTimeDl int64 `json:"alltime_dl,omitempty"`
AllTimeUl int64 `json:"alltime_ul,omitempty"`
AverageTimeQueue int `json:"average_time_queue,omitempty"`
DlInfoData int64 `json:"dl_info_data,omitempty"`
DlInfoSpeed int `json:"dl_info_speed,omitempty"`
QueuedIoJobs int `json:"queued_io_jobs,omitempty"`
TotalBuffersSize int `json:"total_buffers_size,omitempty"`
UpInfoData int64 `json:"up_info_data,omitempty"`
UpInfoSpeed int `json:"up_info_speed,omitempty"`
WriteCacheOverload string `json:"write_cache_overload,omitempty"`
}
type SyncTorrentInfo struct {
AmountLeft int64 `json:"amount_left,omitempty"`
Completed int `json:"completed,omitempty"`
DlSpeed int `json:"dlspeed,omitempty"`
Downloaded int `json:"downloaded,omitempty"`
DownloadedSession int `json:"downloaded_session,omitempty"`
Eta int `json:"eta,omitempty"`
Progress float64 `json:"progress,omitempty"`
SeenComplete int `json:"seen_complete,omitempty"`
TimeActive int `json:"time_active,omitempty"`
}
type SyncTorrentPeers struct {
Rid int `json:"rid,omitempty"`
FullUpdate bool `json:"full_update,omitempty"`
ShowFlags bool `json:"show_flags,omitempty"`
Peers map[string]SyncTorrentPeer `json:"peers,omitempty"`
}
type SyncTorrentPeer struct {
Client string `json:"client,omitempty"`
Connection string `json:"connection,omitempty"`
Country string `json:"country,omitempty"`
CountryCode string `json:"country_code,omitempty"`
DlSpeed int `json:"dl_speed,omitempty"`
Downloaded int `json:"downloaded,omitempty"`
Files string `json:"files,omitempty"`
Flags string `json:"flags,omitempty"`
FlagsDesc string `json:"flags_desc,omitempty"`
IP string `json:"ip,omitempty"`
PeerIDClient string `json:"peer_id_client,omitempty"`
Port int `json:"port,omitempty"`
Progress float64 `json:"progress,omitempty"`
Relevance float64 `json:"relevance,omitempty"`
UpSpeed int `json:"up_speed,omitempty"`
Uploaded int `json:"uploaded,omitempty"`
}
func (c *client) MainData(ctx context.Context, rid int) (*SyncMainData, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Sync.MainData")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/sync/maindata?rid=%d", c.config.Address, rid)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return nil, err
}
if result.code != 200 {
return nil, errors.New("get main data failed: " + string(result.body))
}
var mainData = new(SyncMainData)
if err := json.Unmarshal(result.body, mainData); err != nil {
return nil, err
}
return mainData, nil
}
func (c *client) TorrentPeersData(ctx context.Context, hash string, rid int) (*SyncTorrentPeers, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Sync.TorrentPeersData")
defer span.End()
var formData = url.Values{}
formData.Add("hash", hash)
formData.Add("rid", strconv.Itoa(rid))
apiUrl := fmt.Sprintf("%s/api/v2/sync/torrentPeers?%s", c.config.Address, formData.Encode())
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return nil, err
}
if result.code != 200 {
return nil, errors.New("get torrent peers data failed: " + string(result.body))
}
var mainData = new(SyncTorrentPeers)
if err := json.Unmarshal(result.body, mainData); err != nil {
return nil, err
}
return mainData, nil
}
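
The rid round-trip described above is easiest to see in a polling loop; a sketch with a hypothetical helper (assumes an authenticated Client):

package example

import (
	"context"
	"log"
	"time"

	"git.kmsign.ru/royalcat/tstor/plugins/qbittorrent"
)

// pollMainData asks for a full update first (rid 0), then feeds the returned
// rid back so the server only sends what changed since the previous reply.
func pollMainData(ctx context.Context, qb qbittorrent.Client) error {
	rid := 0
	for i := 0; i < 3; i++ {
		data, err := qb.Sync().MainData(ctx, rid)
		if err != nil {
			return err
		}
		log.Printf("full_update=%v torrents_changed=%d", data.FullUpdate, len(data.Torrents))
		rid = data.Rid
		time.Sleep(time.Second)
	}
	return nil
}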

View file

@ -0,0 +1,37 @@
package qbittorrent
import (
"context"
"encoding/json"
"testing"
"time"
)
func TestClient_MainData(t *testing.T) {
ctx := context.Background()
syncMainData, err := c.Sync().MainData(ctx, 0)
if err != nil {
t.Fatal(err)
}
t.Logf("sync main data: %+v", syncMainData)
time.Sleep(time.Second)
syncMainData, err = c.Sync().MainData(ctx, 0)
if err != nil {
t.Fatal(err)
}
t.Logf("sync main data: %+v", syncMainData)
}
func TestClient_TorrentPeersData(t *testing.T) {
ctx := context.Background()
peersData, err := c.Sync().TorrentPeersData(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc", 0)
if err != nil {
t.Fatal(err)
}
bytes, err := json.Marshal(peersData)
if err != nil {
t.Fatal(err)
}
t.Log(string(bytes))
}

File diff suppressed because it is too large

View file

@ -0,0 +1,299 @@
package qbittorrent
import (
"context"
"encoding/json"
"os"
"testing"
)
func TestClient_GetTorrents(t *testing.T) {
ctx := context.Background()
torrents, err := c.Torrent().GetTorrents(ctx, &TorrentOption{
Filter: "",
Category: "movies",
Tag: "hdtime",
Sort: "",
Reverse: false,
Limit: 0,
Offset: 0,
Hashes: nil,
})
if err != nil {
t.Fatal(err)
}
bytes, err := json.Marshal(torrents)
if err != nil {
t.Fatal(err)
}
t.Log(string(bytes))
}
func TestClient_GetProperties(t *testing.T) {
ctx := context.Background()
properties, err := c.Torrent().GetProperties(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
if err != nil {
t.Fatal(err)
}
bytes, err := json.Marshal(properties)
if err != nil {
t.Fatal(err)
}
t.Log(string(bytes))
}
func TestClient_GetTrackers(t *testing.T) {
ctx := context.Background()
trackers, err := c.Torrent().GetTrackers(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
if err != nil {
t.Fatal(err)
}
bytes, err := json.Marshal(trackers)
if err != nil {
t.Fatal(err)
}
t.Log(string(bytes))
}
func TestClient_GetWebSeeds(t *testing.T) {
ctx := context.Background()
webSeeds, err := c.Torrent().GetWebSeeds(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
if err != nil {
t.Fatal(err)
}
bytes, err := json.Marshal(webSeeds)
if err != nil {
t.Fatal(err)
}
t.Log(string(bytes))
}
func TestClient_GetContents(t *testing.T) {
ctx := context.Background()
contents, err := c.Torrent().GetContents(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
if err != nil {
t.Fatal(err)
}
bytes, err := json.Marshal(contents)
if err != nil {
t.Fatal(err)
}
t.Log(string(bytes))
}
func TestClient_GetPiecesStates(t *testing.T) {
ctx := context.Background()
states, err := c.Torrent().GetPiecesStates(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
if err != nil {
t.Fatal(err)
}
t.Log(states)
}
func TestClient_GetPiecesHashes(t *testing.T) {
ctx := context.Background()
hashes, err := c.Torrent().GetPiecesHashes(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
if err != nil {
t.Fatal(err)
}
t.Log(hashes)
}
func TestClient_PauseTorrents(t *testing.T) {
ctx := context.Background()
err := c.Torrent().PauseTorrents(ctx, []string{"202382999be6a4fab395cd9c2c9d294177587904"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent paused")
}
func TestClient_ResumeTorrents(t *testing.T) {
ctx := context.Background()
err := c.Torrent().ResumeTorrents(ctx, []string{"fd3b4bf1937c59a8fd1a240cddc07172e0b979a2"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent resumed")
}
func TestClient_DeleteTorrents(t *testing.T) {
ctx := context.Background()
err := c.Torrent().DeleteTorrents(ctx, []string{"202382999be6a4fab395cd9c2c9d294177587904"}, true)
if err != nil {
t.Fatal(err)
}
t.Log("torrent deleted")
}
func TestClient_RecheckTorrents(t *testing.T) {
ctx := context.Background()
err := c.Torrent().RecheckTorrents(ctx, []string{"fd3b4bf1937c59a8fd1a240cddc07172e0b979a2"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent rechecked")
}
func TestClient_ReAnnounceTorrents(t *testing.T) {
ctx := context.Background()
err := c.Torrent().ReAnnounceTorrents(ctx, []string{"fd3b4bf1937c59a8fd1a240cddc07172e0b979a2"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent reannounceed")
}
func TestClient_AddNewTorrent(t *testing.T) {
ctx := context.Background()
fileContent, err := os.ReadFile("C:\\Users\\xuthu\\Downloads\\bbbbb.torrent")
if err != nil {
t.Fatal(err)
}
err = c.Torrent().AddNewTorrent(ctx, &TorrentAddOption{
Torrents: []*TorrentAddFileMetadata{
{
//Filename: "ttttt.torrent",
Data: fileContent,
},
},
Category: "movies",
Tags: []string{"d", "e", "f"},
SkipChecking: false,
Paused: false,
RootFolder: false,
Rename: "",
UpLimit: 0,
DlLimit: 0,
RatioLimit: 0,
SeedingTimeLimit: 0,
AutoTMM: false,
SequentialDownload: "",
FirstLastPiecePrio: "",
})
if err != nil {
t.Fatal(err)
}
t.Log("torrent added")
}
func TestClient_AddTrackers(t *testing.T) {
ctx := context.Background()
err := c.Torrent().AddTrackers(ctx, "ca4523a3db9c6c3a13d7d7f3a545f97b75083032", []string{"https://hddtime.org/announce"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent trackers added")
}
func TestClient_EditTrackers(t *testing.T) {
ctx := context.Background()
err := c.Torrent().EditTrackers(ctx, "ca4523a3db9c6c3a13d7d7f3a545f97b75083032", "https://hddtime.org/announce", "https://hdctime.org/announce")
if err != nil {
t.Fatal(err)
}
t.Log("torrent trackers edited")
}
func TestClient_RemoveTrackers(t *testing.T) {
ctx := context.Background()
err := c.Torrent().RemoveTrackers(ctx, "ca4523a3db9c6c3a13d7d7f3a545f97b75083032", []string{"https://hdctime.org/announce"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent trackers removed")
}
func TestClient_AddPeers(t *testing.T) {
// todo no test
//c.Torrent().AddPeers([]string{"ca4523a3db9c6c3a13d7d7f3a545f97b75083032"}, []string{"10.0.0.1:38080"})
}
func TestClient_IncreasePriority(t *testing.T) {
ctx := context.Background()
err := c.Torrent().IncreasePriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent priority increased")
}
func TestClient_DecreasePriority(t *testing.T) {
ctx := context.Background()
err := c.Torrent().DecreasePriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent priority decreased")
}
func TestClient_MaxPriority(t *testing.T) {
ctx := context.Background()
err := c.Torrent().MaxPriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent priority maxed")
}
func TestClient_MinPriority(t *testing.T) {
ctx := context.Background()
err := c.Torrent().MinPriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent priority mined")
}
func TestClient_SetFilePriority(t *testing.T) {
// todo no test
}
func TestClient_GetDownloadLimit(t *testing.T) {
ctx := context.Background()
downloadLimit, err := c.Torrent().GetDownloadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent download limit", downloadLimit)
}
func TestClient_SetDownloadLimit(t *testing.T) {
ctx := context.Background()
err := c.Torrent().SetDownloadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}, 0)
if err != nil {
t.Fatal(err)
}
t.Log("torrent download limit setted")
}
func TestClient_SetShareLimit(t *testing.T) {
ctx := context.Background()
err := c.Torrent().SetShareLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}, -2, -2, -2)
if err != nil {
t.Fatal(err)
}
t.Log("torrent share limit setted")
}
func TestClient_GetUploadLimit(t *testing.T) {
ctx := context.Background()
limit, err := c.Torrent().GetUploadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
if err != nil {
t.Fatal(err)
}
t.Log("torrent upload limit", limit)
}
func TestClient_SetUploadLimit(t *testing.T) {
ctx := context.Background()
err := c.Torrent().SetUploadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}, 0)
if err != nil {
t.Fatal(err)
}
t.Log("torrent upload limit setted")
}
func TestClient_SetLocation(t *testing.T) {
// todo test
}

View file

@ -0,0 +1,209 @@
package qbittorrent
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
)
type TransferStatusBar struct {
ConnectionStatus string `json:"connection_status,omitempty"`
DhtNodes int `json:"dht_nodes,omitempty"`
DlInfoData int64 `json:"dl_info_data,omitempty"`
DlInfoSpeed int `json:"dl_info_speed,omitempty"`
DlRateLimit int `json:"dl_rate_limit,omitempty"`
UpInfoData int `json:"up_info_data,omitempty"`
UpInfoSpeed int `json:"up_info_speed,omitempty"`
UpRateLimit int `json:"up_rate_limit,omitempty"`
Queueing bool `json:"queueing,omitempty"`
UseAltSpeedLimits bool `json:"use_alt_speed_limits,omitempty"`
RefreshInterval int `json:"refresh_interval,omitempty"`
}
type Transfer interface {
// GlobalStatusBar returns the global transfer info usually shown in the qBittorrent status bar
GlobalStatusBar(ctx context.Context) (*TransferStatusBar, error)
// BanPeers bans one or more peers; each peer is given as host:port
// (multiple peers are joined with a pipe separator for the API call)
BanPeers(ctx context.Context, peers []string) error
// GetSpeedLimitsMode get alternative speed limits state
GetSpeedLimitsMode(ctx context.Context) (string, error)
// ToggleSpeedLimitsMode toggle alternative speed limits
ToggleSpeedLimitsMode(ctx context.Context) error
// GetGlobalUploadLimit get global upload limit, the response is the value of the current global upload speed
// limit in bytes/second; this value will be zero if no limit is applied.
GetGlobalUploadLimit(ctx context.Context) (string, error)
// SetGlobalUploadLimit set global upload limit, set in bytes/second
SetGlobalUploadLimit(ctx context.Context, limit int) error
// GetGlobalDownloadLimit get global download limit, the response is the value of current global download speed
// limit in bytes/second; this value will be zero if no limit is applied.
GetGlobalDownloadLimit(ctx context.Context) (string, error)
// SetGlobalDownloadLimit set global download limit, set in bytes/second
SetGlobalDownloadLimit(ctx context.Context, limit int) error
}
func (c *client) GlobalStatusBar(ctx context.Context) (*TransferStatusBar, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Transfer.GlobalStatusBar")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/transfer/info", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return nil, err
}
if result.code != 200 {
return nil, errors.New("get global transfer status bar failed: " + string(result.body))
}
var data = new(TransferStatusBar)
if err := json.Unmarshal(result.body, data); err != nil {
return nil, err
}
return data, nil
}
func (c *client) BanPeers(ctx context.Context, peers []string) error {
ctx, span := trace.Start(ctx, "qbittorrent.Transfer.BanPeers")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/transfer/banPeers", c.config.Address)
var form = url.Values{}
form.Add("peers", strings.Join(peers, "|"))
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
body: strings.NewReader(form.Encode()),
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("ban peers failed: " + string(result.body))
}
return nil
}
func (c *client) GetSpeedLimitsMode(ctx context.Context) (string, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Transfer.GetSpeedLimitsMode")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/transfer/speedLimitsMode", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return "", err
}
if result.code != 200 {
return "", errors.New("ban peers failed: " + string(result.body))
}
return string(result.body), nil
}
func (c *client) ToggleSpeedLimitsMode(ctx context.Context) error {
ctx, span := trace.Start(ctx, "qbittorrent.Transfer.ToggleSpeedLimitsMode")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/transfer/toggleSpeedLimitsMode", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
method: http.MethodPost,
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("ban peers failed: " + string(result.body))
}
return nil
}
func (c *client) GetGlobalUploadLimit(ctx context.Context) (string, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Transfer.GetGlobalUploadLimit")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/transfer/uploadLimit", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return "", err
}
if result.code != 200 {
return "", errors.New("get global upload limit failed: " + string(result.body))
}
return string(result.body), nil
}
func (c *client) SetGlobalUploadLimit(ctx context.Context, limit int) error {
ctx, span := trace.Start(ctx, "qbittorrent.Transfer.SetGlobalUploadLimit")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/transfer/setUploadLimit?limit=%d", c.config.Address, limit)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("set global upload limit failed: " + string(result.body))
}
return nil
}
func (c *client) GetGlobalDownloadLimit(ctx context.Context) (string, error) {
ctx, span := trace.Start(ctx, "qbittorrent.Transfer.GetGlobalDownloadLimit")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/transfer/downloadLimit", c.config.Address)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return "", err
}
if result.code != 200 {
return "", errors.New("get global download limit failed: " + string(result.body))
}
return string(result.body), nil
}
func (c *client) SetGlobalDownloadLimit(ctx context.Context, limit int) error {
ctx, span := trace.Start(ctx, "qbittorrent.Transfer.SetGlobalDownloadLimit")
defer span.End()
apiUrl := fmt.Sprintf("%s/api/v2/transfer/setDownloadLimit?limit=%d", c.config.Address, limit)
result, err := c.doRequest(ctx, &requestData{
url: apiUrl,
})
if err != nil {
return err
}
if result.code != 200 {
return errors.New("set global download limit failed: " + string(result.body))
}
return nil
}
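
A brief sketch tying the Transfer methods together; limits are in bytes per second and a returned "0" means unlimited (hypothetical helper, assumes an authenticated Client):

package example

import (
	"context"
	"fmt"

	"git.kmsign.ru/royalcat/tstor/plugins/qbittorrent"
)

// capDownloadSpeed sets a 1 MiB/s global download limit and reads it back.
func capDownloadSpeed(ctx context.Context, qb qbittorrent.Client) error {
	if err := qb.Transfer().SetGlobalDownloadLimit(ctx, 1<<20); err != nil {
		return err
	}
	limit, err := qb.Transfer().GetGlobalDownloadLimit(ctx)
	if err != nil {
		return err
	}
	fmt.Println("global download limit (bytes/s):", limit) // "0" would mean unlimited
	return nil
}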

View file

@ -0,0 +1,13 @@
package main
import (
"git.kmsign.ru/royalcat/tstor/plugins/qbittorrent"
"git.kmsign.ru/royalcat/tstor/server/src/daemon"
)
func main() {
}
var DaemonName = qbittorrent.DaemonName
var NewDaemon daemon.DaemonConstructor = qbittorrent.NewDaemon

13
plugins/rclone/go.mod Normal file
View file

@ -0,0 +1,13 @@
module git.kmsign.ru/royalcat/tstor/plugins/rclone
go 1.23.5
require github.com/rclone/rclone v1.68.1
require (
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/stretchr/testify v1.10.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/time v0.6.0 // indirect
)

27
plugins/rclone/go.sum Normal file
View file

@ -0,0 +1,27 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 h1:G+9t9cEtnC9jFiTxyptEKuNIAbiN5ZCQzX2a74lj3xg=
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rclone/rclone v1.68.1 h1:vlEOAuPv4gGxWECM0NIaCwBNUt3ZQY7mCsyBtZjY+68=
github.com/rclone/rclone v1.68.1/go.mod h1:T8XKOt/2Fb9INROUtFH9eF9q9o9rI1W2qTrW2bw2cYU=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

124
plugins/rclone/rclone.go Normal file
View file

@ -0,0 +1,124 @@
package rclone
import (
"context"
"io/fs"
"time"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
rclonefs "github.com/rclone/rclone/fs"
)
type fsWrapper struct {
vfs.DefaultFS
fs rclonefs.Fs
}
// Open implements vfs.Filesystem.
func (w *fsWrapper) Open(ctx context.Context, filename string) (vfs.File, error) {
obj, err := w.fs.NewObject(ctx, filename)
if err != nil {
return nil, err
}
return &fileWrapper{name: filename, obj: obj}, nil
}
// ReadDir implements vfs.Filesystem.
func (w *fsWrapper) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
panic("unimplemented")
// path = vfs.RemoveTrailingSlash(path)
// entries, err := w.fs.List(ctx, path)
// if err != nil {
// return nil, err
// }
// out := make([]fs.DirEntry, 0, len(entries))
// for _, e := range entries {
// }
}
// Rename implements vfs.Filesystem.
func (f *fsWrapper) Rename(ctx context.Context, oldpath string, newpath string) error {
panic("unimplemented")
}
// Stat implements vfs.Filesystem.
func (f *fsWrapper) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
panic("unimplemented")
}
// Type implements vfs.Filesystem.
// Subtle: this method shadows the method (DefaultFS).Type of fsWrapper.DefaultFS.
func (f *fsWrapper) Type() fs.FileMode {
panic("unimplemented")
}
// Unlink implements vfs.Filesystem.
func (f *fsWrapper) Unlink(ctx context.Context, filename string) error {
panic("unimplemented")
}
var _ vfs.Filesystem = (*fsWrapper)(nil)
type fileWrapper struct {
name string
obj rclonefs.Object
}
var _ vfs.File = (*fileWrapper)(nil)
// Close implements vfs.File.
func (f *fileWrapper) Close(ctx context.Context) error {
return f.Close(ctx)
}
// Info implements vfs.File.
func (f *fileWrapper) Info() (fs.FileInfo, error) {
return vfs.NewFileInfo(f.name, f.Size(), time.Time{}), nil
}
// IsDir implements vfs.File.
func (f *fileWrapper) IsDir() bool {
return false
}
// Name implements vfs.File.
func (f *fileWrapper) Name() string {
return f.name
}
// Read implements vfs.File.
func (f *fileWrapper) Read(ctx context.Context, p []byte) (n int, err error) {
panic("unimplemented")
}
// Seek implements vfs.File.
func (f *fileWrapper) Seek(offset int64, whence int) (int64, error) {
panic("unimplemented")
}
// ReadAt implements vfs.File.
func (f *fileWrapper) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
	// RangeOption.End is the last byte offset to fetch (inclusive).
	rc, err := f.obj.Open(ctx, &rclonefs.RangeOption{Start: off, End: off + int64(len(p)) - 1})
	if err != nil {
		return 0, err
	}
	defer rc.Close()

	// A single Read may return fewer bytes than requested; ReadFull keeps
	// reading until the buffer is filled or the stream ends.
	return io.ReadFull(rc, p)
}
// Size implements vfs.File.
func (f *fileWrapper) Size() int64 {
	return f.obj.Size()
}
// Type implements vfs.File.
func (f *fileWrapper) Type() fs.FileMode {
return vfs.ModeFileRO
}
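A minimal usage sketch for the wrapper, assuming a configured rclone remote; rclonefs.NewFs is the standard rclone entry point, while building fsWrapper by struct literal is only illustrative since this file exports no constructor:

	remote, err := rclonefs.NewFs(ctx, "myremote:media")
	if err != nil {
		return err
	}
	fsys := &fsWrapper{fs: remote}

	f, err := fsys.Open(ctx, "movie.mkv")
	if err != nil {
		return err
	}
	buf := make([]byte, 1<<20)
	// ReadAt translates into a ranged request against the remote object,
	// so only the requested window is fetched.
	n, err := f.ReadAt(ctx, buf, 0)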

111
plugins/ytdlp/controller.go Normal file
View file

@ -0,0 +1,111 @@
package ytdlp
import (
	"context"
	"errors"
	"io/fs"
	"os"

	"git.kmsign.ru/royalcat/tstor/server/pkg/ctxbilly"
	"git.kmsign.ru/royalcat/tstor/server/pkg/kvsingle"
	"git.kmsign.ru/royalcat/tstor/server/pkg/rlog"
	"git.kmsign.ru/royalcat/tstor/server/pkg/ytdlp"
	"git.kmsign.ru/royalcat/tstor/server/src/tasks"
	"github.com/royalcat/ctxio"
	"github.com/royalcat/ctxprogress"
	"github.com/royalcat/kv"
)
type Controller struct {
datafs ctxbilly.Filesystem
source Source
client *ytdlp.Client
cachedinfo *kvsingle.Value[string, ytdlp.Info]
}
func newYtdlpController(datafs ctxbilly.Filesystem, source Source, client *ytdlp.Client) *Controller {
	// TODO: initialize cachedinfo here; Info below expects it to be non-nil.
	return &Controller{
		datafs: datafs,
		source: source,
		client: client,
	}
}
func (c *Controller) Source() Source {
return c.source
}
// sizeApprox is the tolerance (1 GiB) allowed between the on-disk file size
// and the FilesizeApprox estimate reported by yt-dlp.
const sizeApprox = 1024 * 1024 * 1024
func (c *Controller) Update(ctx context.Context, updater tasks.Updater) error {
log := updater.Logger()
ctxprogress.New(ctx)
ctxprogress.Set(ctx, ctxprogress.RangeProgress{Current: 0, Total: 10})
	plst, err := c.client.Playlist(ctx, c.source.Url)
	if err != nil {
		return err
	}
	ctxprogress.Set(ctx, ctxprogress.RangeProgress{Current: 1, Total: 10})

	ctxprogress.Range(ctx, plst, func(ctx context.Context, _ int, e ytdlp.Entry) bool {
if e.OriginalURL == "" {
log.Error("no URL in entry", rlog.Error(err))
return true
}
info, err := c.Info(ctx)
if err != nil {
log.Error("error getting info", rlog.Error(err))
return true
}
		if len(info.RequestedDownloads) == 0 {
			log.Error("no requested downloads in info")
			return true
		}
		dwl := info.RequestedDownloads[0]

		fileinfo, err := c.datafs.Stat(ctx, dwl.Filename)
		if err != nil && !errors.Is(err, fs.ErrNotExist) {
			log.Error("error getting file info", rlog.Error(err))
			return true
		}

		// Skip the download if an existing file is already within sizeApprox
		// of the size reported by yt-dlp (FilesizeApprox is only an estimate).
		if fileinfo != nil &&
			fileinfo.Size()+sizeApprox > dwl.FilesizeApprox &&
			fileinfo.Size()-sizeApprox < dwl.FilesizeApprox {
			log.Debug("file already downloaded", "filename", dwl.Filename)
			return true
		}
		file, err := c.datafs.OpenFile(ctx, dwl.Filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
		if err != nil {
			log.Error("error opening destination file", rlog.Error(err))
			return true
		}
		// TODO: close file once the download finishes

		err = c.client.Download(ctx, info.OriginalURL, ctxio.IoWriter(ctx, file))
		if err != nil {
			log.Error("error downloading entry", rlog.Error(err))
			return false
		}
		return true
	})
	ctxprogress.Set(ctx, ctxprogress.RangeProgress{Current: 10, Total: 10})

	return nil
}
// Info returns the yt-dlp metadata for the source, caching it after the first lookup.
func (c *Controller) Info(ctx context.Context) (ytdlp.Info, error) {
	info, err := c.cachedinfo.Get(ctx)
	if err == nil {
		return info, nil
	}
	if !errors.Is(err, kv.ErrKeyNotFound) {
		return info, err
	}

	// On a cache miss, fetch fresh metadata for the source URL.
	// NOTE: c.client.Info(ctx, url) is assumed here and may not match the
	// actual ytdlp client API.
	info, err = c.client.Info(ctx, c.source.Url)
	if err != nil {
		return info, err
	}

	err = c.cachedinfo.Set(ctx, info)
	if err != nil {
		return info, err
	}
	return info, nil
}
func (c *Controller) Downloaded() error {
return nil
}

71
plugins/ytdlp/daemon.go Normal file
View file

@ -0,0 +1,71 @@
package ytdlp
import (
"context"
"encoding/json"
"fmt"
"path"
"sync"
"git.kmsign.ru/royalcat/tstor/server/pkg/ctxbilly"
"git.kmsign.ru/royalcat/tstor/server/pkg/ytdlp"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
"github.com/go-git/go-billy/v5/osfs"
"github.com/royalcat/ctxio"
)
func NewService(dataDir string) (*Daemon, error) {
client, err := ytdlp.New()
if err != nil {
return nil, err
}
s := &Daemon{
mu: sync.Mutex{},
client: client,
dataDir: dataDir,
controllers: make(map[string]*Controller),
}
return s, nil
}
type Daemon struct {
mu sync.Mutex
dataDir string
client *ytdlp.Client
controllers map[string]*Controller
}
func (c *Daemon) addSource(s Source) {
c.mu.Lock()
defer c.mu.Unlock()
ctl := newYtdlpController(ctxbilly.WrapFileSystem(osfs.New(c.sourceDir(s))), s, c.client)
c.controllers[s.Name()] = ctl
}
func (c *Daemon) sourceDir(s Source) string {
return path.Join(c.dataDir, s.Name())
}
func (c *Daemon) BuildFS(ctx context.Context, sourcePath string, f vfs.File) (vfs.Filesystem, error) {
data, err := ctxio.ReadAll(ctx, f)
if err != nil {
return nil, fmt.Errorf("failed to read source file: %w", err)
}
var s Source
err = json.Unmarshal(data, &s)
if err != nil {
return nil, err
}
c.addSource(s)
downloadFS := ctxbilly.WrapFileSystem(osfs.New(c.sourceDir(s)))
return newSourceFS(path.Base(f.Name()), downloadFS, c, s), nil
}
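For context, BuildFS expects the mounted source file to be a small JSON blob that unmarshals into Source; a hedged sketch of producing one (the playlist URL and file name are illustrative):

	src := Source{Url: "https://www.youtube.com/playlist?list=EXAMPLE"}
	data, err := json.Marshal(src) // {"url":"https://www.youtube.com/playlist?list=EXAMPLE"}
	if err != nil {
		return err
	}
	// Saving this blob as e.g. "music.ytdlp" and letting BuildFS open it yields
	// a SourceFS rooted at the per-source download directory under dataDir.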

76
plugins/ytdlp/fs.go Normal file
View file

@ -0,0 +1,76 @@
package ytdlp
import (
"context"
"io/fs"
"os"
"time"
"git.kmsign.ru/royalcat/tstor/server/pkg/ctxbilly"
"git.kmsign.ru/royalcat/tstor/server/src/vfs"
)
type SourceFS struct {
service *Daemon
source Source
fs ctxbilly.Filesystem
vfs.DefaultFS
}
var _ vfs.Filesystem = (*SourceFS)(nil)
func newSourceFS(name string, fs ctxbilly.Filesystem, service *Daemon, source Source) *SourceFS {
return &SourceFS{
fs: fs,
service: service,
source: source,
DefaultFS: vfs.DefaultFS(name),
}
}
// Open implements vfs.Filesystem.
func (s *SourceFS) Open(ctx context.Context, filename string) (vfs.File, error) {
info, err := s.fs.Stat(ctx, filename)
if err != nil {
return nil, err
}
f, err := s.fs.OpenFile(ctx, filename, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
return vfs.NewCtxBillyFile(info, f), nil
}
// ReadDir implements vfs.Filesystem.
func (s *SourceFS) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
infos, err := s.fs.ReadDir(ctx, path)
if err != nil {
return nil, err
}
entries := make([]fs.DirEntry, 0, len(infos))
for _, info := range infos {
entries = append(entries, vfs.NewFileInfo(info.Name(), info.Size(), time.Time{}))
}
return entries, nil
}
// Stat implements vfs.Filesystem.
func (s *SourceFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
return s.fs.Stat(ctx, filename)
}
// Unlink implements vfs.Filesystem.
func (s *SourceFS) Unlink(ctx context.Context, filename string) error {
return vfs.ErrNotImplemented
}
// Rename implements vfs.Filesystem.
func (s *SourceFS) Rename(ctx context.Context, oldpath string, newpath string) error {
return vfs.ErrNotImplemented
}
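A short read-through sketch, assuming a *SourceFS obtained from Daemon.BuildFS; the path is illustrative:

	entries, err := sfs.ReadDir(ctx, "/")
	if err != nil {
		return err
	}
	for _, e := range entries {
		// every entry mirrors a file yt-dlp has written into this source's download dir
		fmt.Println(e.Name())
	}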

37
plugins/ytdlp/tasks.go Normal file
View file

@ -0,0 +1,37 @@
package ytdlp
import (
"context"
"fmt"
"git.kmsign.ru/royalcat/tstor/server/src/tasks"
)
const executorName = "ytdlp"
type DownloadTask struct {
Name string
}
var _ tasks.Task = (*DownloadTask)(nil)
// Executor implements tasks.Task.
func (d *DownloadTask) Executor() string {
return executorName
}
var _ tasks.TaskExecutor = (*Daemon)(nil)
// ExecutorName implements tasks.TaskExecutor.
func (c *Daemon) ExecutorName() string {
return executorName
}
func (c *Daemon) RunTask(ctx context.Context, upd tasks.Updater, task tasks.Task) error {
	switch t := task.(type) {
	case *DownloadTask:
		c.mu.Lock()
		ctl, ok := c.controllers[t.Name]
		c.mu.Unlock()
		if !ok {
			return fmt.Errorf("no controller registered for source: %s", t.Name)
		}
		return ctl.Update(ctx, upd)
	default:
		return fmt.Errorf("unknown task type: %T", task)
	}
}
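A hedged dispatch sketch, assuming a tasks.Updater from the task runner and an already registered source; variable names are illustrative:

	task := &DownloadTask{Name: src.Name()}
	// The daemon satisfies tasks.TaskExecutor under the "ytdlp" executor name,
	// so the task runner routes DownloadTask here; a direct call works the same way.
	if err := daemon.RunTask(ctx, updater, task); err != nil {
		return err
	}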

29
plugins/ytdlp/ytdlp.go Normal file
View file

@ -0,0 +1,29 @@
package ytdlp
import (
"crypto/sha1"
"encoding/base64"
"strings"
)
type Source struct {
Url string `json:"url"`
}
var prefixCutset = [...]string{
	"https://", "http://", "www.",
}

// urlHash derives a stable directory name for a source URL: the scheme and
// "www." prefixes are stripped so equivalent URLs collapse to the same name,
// then the remainder is SHA-1 hashed and base64url-encoded.
func urlHash(url string) string {
	for _, v := range prefixCutset {
		url = strings.TrimPrefix(url, v)
	}
	sum := sha1.Sum([]byte(url))
	return base64.URLEncoding.EncodeToString(sum[:])
}
func (s *Source) Name() string {
return urlHash(s.Url)
}
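A quick illustration of the name derivation above; only the equality matters, so the concrete hash value is omitted:

	a := Source{Url: "https://www.youtube.com/@example"}
	b := Source{Url: "http://youtube.com/@example"}
	// Both spellings reduce to "youtube.com/@example" after prefix trimming,
	// so they resolve to the same per-source directory name.
	fmt.Println(a.Name() == b.Name()) // true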