qbittorrent fs
All checks were successful
docker / build-docker (push) Successful in 4m11s

This commit is contained in:
royalcat 2024-09-01 02:00:13 +03:00
parent ae4501ae21
commit f75188b412
51 changed files with 4048 additions and 231 deletions

View file

@ -20,7 +20,7 @@ import (
"git.kmsign.ru/royalcat/tstor/src/config" "git.kmsign.ru/royalcat/tstor/src/config"
"git.kmsign.ru/royalcat/tstor/src/delivery" "git.kmsign.ru/royalcat/tstor/src/delivery"
"git.kmsign.ru/royalcat/tstor/src/sources" "git.kmsign.ru/royalcat/tstor/src/sources"
"git.kmsign.ru/royalcat/tstor/src/sources/torrent" "git.kmsign.ru/royalcat/tstor/src/sources/qbittorrent"
"git.kmsign.ru/royalcat/tstor/src/sources/ytdlp" "git.kmsign.ru/royalcat/tstor/src/sources/ytdlp"
"git.kmsign.ru/royalcat/tstor/src/telemetry" "git.kmsign.ru/royalcat/tstor/src/telemetry"
"git.kmsign.ru/royalcat/tstor/src/vfs" "git.kmsign.ru/royalcat/tstor/src/vfs"
@ -91,10 +91,10 @@ func run(configPath string) error {
} }
sourceFs := osfs.New(conf.SourceDir, osfs.WithBoundOS()) sourceFs := osfs.New(conf.SourceDir, osfs.WithBoundOS())
tsrv, err := torrent.NewDaemon(sourceFs, conf.TorrentClient) // tsrv, err := torrent.NewDaemon(sourceFs, conf.Sources.TorrentClient)
if err != nil { // if err != nil {
return fmt.Errorf("error creating service: %w", err) // return fmt.Errorf("error creating service: %w", err)
} // }
err = os.MkdirAll("./ytdlp", 0744) err = os.MkdirAll("./ytdlp", 0744)
if err != nil { if err != nil {
@ -105,9 +105,14 @@ func run(configPath string) error {
return err return err
} }
qtsrv, err := qbittorrent.NewDaemon(conf.Sources.QBittorrent)
if err != nil {
return fmt.Errorf("error creating qbittorrent daemon: %w", err)
}
sfs := sources.NewHostedFS( sfs := sources.NewHostedFS(
vfs.NewCtxBillyFs("/", ctxbilly.WrapFileSystem(sourceFs)), vfs.NewCtxBillyFs("/", ctxbilly.WrapFileSystem(sourceFs)),
tsrv, ytdlpsrv, qtsrv, ytdlpsrv,
) )
sfs, err = vfs.WrapLogFS(sfs) sfs, err = vfs.WrapLogFS(sfs)
if err != nil { if err != nil {
@ -187,7 +192,7 @@ func run(configPath string) error {
go func() { go func() {
logFilename := filepath.Join(conf.Log.Path, "logs") logFilename := filepath.Join(conf.Log.Path, "logs")
err := delivery.Run(tsrv, sfs, logFilename, conf) err := delivery.Run(nil, sfs, logFilename, conf)
if err != nil { if err != nil {
log.Error(ctx, "error initializing HTTP server", rlog.Error(err)) log.Error(ctx, "error initializing HTTP server", rlog.Error(err))
} }
@ -198,6 +203,7 @@ func run(configPath string) error {
<-sigChan <-sigChan
return errors.Join( return errors.Join(
tsrv.Close(ctx), // tsrv.Close(ctx),
qtsrv.Close(ctx),
) )
} }

5
go.mod
View file

@ -2,8 +2,6 @@ module git.kmsign.ru/royalcat/tstor
go 1.22.3 go 1.22.3
replace github.com/bytedance/sonic v1.11.9 => github.com/bytedance/sonic v1.12.1
require ( require (
github.com/99designs/gqlgen v0.17.49 github.com/99designs/gqlgen v0.17.49
github.com/agoda-com/opentelemetry-go/otelslog v0.1.1 github.com/agoda-com/opentelemetry-go/otelslog v0.1.1
@ -20,6 +18,7 @@ require (
github.com/gofrs/uuid/v5 v5.1.0 github.com/gofrs/uuid/v5 v5.1.0
github.com/google/go-github/v63 v63.0.0 github.com/google/go-github/v63 v63.0.0
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/gorilla/schema v1.4.1
github.com/grafana/otel-profiling-go v0.5.1 github.com/grafana/otel-profiling-go v0.5.1
github.com/grafana/pyroscope-go v1.1.2 github.com/grafana/pyroscope-go v1.1.2
github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/hashicorp/golang-lru/v2 v2.0.7
@ -47,7 +46,6 @@ require (
github.com/vektah/gqlparser/v2 v2.5.16 github.com/vektah/gqlparser/v2 v2.5.16
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00
github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e
github.com/xuthus5/qbittorrent-client-go v0.0.0-20240710140754-51c95559ea0a
go.opentelemetry.io/otel v1.28.0 go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
@ -122,7 +120,6 @@ require (
github.com/google/btree v1.1.2 // indirect github.com/google/btree v1.1.2 // indirect
github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/google/flatbuffers v24.3.25+incompatible // indirect
github.com/google/go-querystring v1.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect
github.com/gorilla/schema v1.4.1 // indirect
github.com/gorilla/websocket v1.5.1 // indirect github.com/gorilla/websocket v1.5.1 // indirect
github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect

8
go.sum
View file

@ -136,8 +136,8 @@ github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2w
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8= github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og= github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og=
github.com/bytedance/sonic v1.12.1 h1:jWl5Qz1fy7X1ioY74WqO0KjAMtAGQs4sYnjiEBiyX24= github.com/bytedance/sonic v1.11.9 h1:LFHENlIY/SLzDWverzdOvgMztTxcfcF+cqNsz9pK5zg=
github.com/bytedance/sonic v1.12.1/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk= github.com/bytedance/sonic v1.11.9/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.0 h1:zNprn+lsIP06C/IqCHs3gPQIvnvpKbbxyXQP1iU4kWM= github.com/bytedance/sonic/loader v0.2.0 h1:zNprn+lsIP06C/IqCHs3gPQIvnvpKbbxyXQP1iU4kWM=
github.com/bytedance/sonic/loader v0.2.0/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.2.0/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
@ -653,8 +653,6 @@ github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e h1:1eHCP4w7tMmpf
github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e/go.mod h1:59vHBW4EpjiL5oiqgCrBp1Tc9JXRzKCNMEOaGmNfSHo= github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e/go.mod h1:59vHBW4EpjiL5oiqgCrBp1Tc9JXRzKCNMEOaGmNfSHo=
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
github.com/xuthus5/qbittorrent-client-go v0.0.0-20240710140754-51c95559ea0a h1:/3NF320wvXk5nm9Ng02eKTiWSYf20r4acufqecGLpfo=
github.com/xuthus5/qbittorrent-client-go v0.0.0-20240710140754-51c95559ea0a/go.mod h1:lP2yxMU6WGTmHqI9T+SrEw3wo7k5kUyiA9FBOK9NKMQ=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@ -699,6 +697,7 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -1019,6 +1018,7 @@ modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
zombiezen.com/go/sqlite v1.3.0 h1:98g1gnCm+CNz6AuQHu0gqyw7gR2WU3O3PJufDOStpUs= zombiezen.com/go/sqlite v1.3.0 h1:98g1gnCm+CNz6AuQHu0gqyw7gR2WU3O3PJufDOStpUs=

View file

@ -255,6 +255,8 @@ func (s *SetFileAttributes) Apply(ctx context.Context, changer Change, fs Filesy
} else if err != nil { } else if err != nil {
return err return err
} }
defer fp.Close(ctx)
if *s.SetSize > math.MaxInt64 { if *s.SetSize > math.MaxInt64 {
return &NFSStatusError{NFSStatusInval, os.ErrInvalid} return &NFSStatusError{NFSStatusInval, os.ErrInvalid}
} }

View file

@ -0,0 +1,375 @@
package qbittorrent
import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"net/url"
)
// Application exposes the qBittorrent Web API "app" endpoints
// (/api/v2/app/*): version queries, build information, preference
// management and remote shutdown.
type Application interface {
	// Version get application version
	Version(context.Context) (string, error)
	// WebApiVersion get webapi version
	WebApiVersion(context.Context) (string, error)
	// BuildInfo get build info
	BuildInfo(context.Context) (*BuildInfo, error)
	// Shutdown exit application
	Shutdown(context.Context) error
	// GetPreferences get application preferences
	GetPreferences(context.Context) (*Preferences, error)
	// SetPreferences set application preferences
	SetPreferences(context.Context, *Preferences) error
	// DefaultSavePath get default save path
	DefaultSavePath(context.Context) (string, error)
}
// BuildInfo mirrors the JSON returned by GET /api/v2/app/buildInfo:
// the library and toolchain versions the running qBittorrent instance
// was built against.
type BuildInfo struct {
	BitNess    int    `json:"bitness,omitempty"`
	Boost      string `json:"boost,omitempty"`
	LibTorrent string `json:"libtorrent,omitempty"`
	Openssl    string `json:"openssl,omitempty"`
	QT         string `json:"qt,omitempty"`
	Zlib       string `json:"zlib,omitempty"`
}
// Preferences mirrors the JSON document exchanged with
// GET /api/v2/app/preferences and POST /api/v2/app/setPreferences.
// Every field is optional (`omitempty`): when marshalling a partial
// update, zero-valued fields are omitted rather than sent as explicit
// resets. NOTE(review): because of omitempty, a preference cannot be
// set back to its zero value (false/0/"") through this struct — confirm
// whether that is acceptable for callers.
type Preferences struct {
	AddToTopOfQueue                    bool   `json:"add_to_top_of_queue,omitempty"`
	AddTrackers                        string `json:"add_trackers,omitempty"`
	AddTrackersEnabled                 bool   `json:"add_trackers_enabled,omitempty"`
	AltDlLimit                         int    `json:"alt_dl_limit,omitempty"`
	AltUpLimit                         int    `json:"alt_up_limit,omitempty"`
	AlternativeWebuiEnabled            bool   `json:"alternative_webui_enabled,omitempty"`
	AlternativeWebuiPath               string `json:"alternative_webui_path,omitempty"`
	AnnounceIP                         string `json:"announce_ip,omitempty"`
	AnnounceToAllTiers                 bool   `json:"announce_to_all_tiers,omitempty"`
	AnnounceToAllTrackers              bool   `json:"announce_to_all_trackers,omitempty"`
	AnonymousMode                      bool   `json:"anonymous_mode,omitempty"`
	AsyncIoThreads                     int    `json:"async_io_threads,omitempty"`
	AutoDeleteMode                     int    `json:"auto_delete_mode,omitempty"`
	AutoTmmEnabled                     bool   `json:"auto_tmm_enabled,omitempty"`
	AutorunEnabled                     bool   `json:"autorun_enabled,omitempty"`
	AutorunOnTorrentAddedEnabled       bool   `json:"autorun_on_torrent_added_enabled,omitempty"`
	AutorunOnTorrentAddedProgram       string `json:"autorun_on_torrent_added_program,omitempty"`
	AutorunProgram                     string `json:"autorun_program,omitempty"`
	BannedIPs                          string `json:"banned_IPs,omitempty"`
	BdecodeDepthLimit                  int    `json:"bdecode_depth_limit,omitempty"`
	BdecodeTokenLimit                  int    `json:"bdecode_token_limit,omitempty"`
	BittorrentProtocol                 int    `json:"bittorrent_protocol,omitempty"`
	BlockPeersOnPrivilegedPorts        bool   `json:"block_peers_on_privileged_ports,omitempty"`
	BypassAuthSubnetWhitelist          string `json:"bypass_auth_subnet_whitelist,omitempty"`
	BypassAuthSubnetWhitelistEnabled   bool   `json:"bypass_auth_subnet_whitelist_enabled,omitempty"`
	BypassLocalAuth                    bool   `json:"bypass_local_auth,omitempty"`
	CategoryChangedTmmEnabled          bool   `json:"category_changed_tmm_enabled,omitempty"`
	CheckingMemoryUse                  int    `json:"checking_memory_use,omitempty"`
	ConnectionSpeed                    int    `json:"connection_speed,omitempty"`
	CurrentInterfaceAddress            string `json:"current_interface_address,omitempty"`
	CurrentInterfaceName               string `json:"current_interface_name,omitempty"`
	CurrentNetworkInterface            string `json:"current_network_interface,omitempty"`
	Dht                                bool   `json:"dht,omitempty"`
	DiskCache                          int    `json:"disk_cache,omitempty"`
	DiskCacheTTL                       int    `json:"disk_cache_ttl,omitempty"`
	DiskIoReadMode                     int    `json:"disk_io_read_mode,omitempty"`
	DiskIoType                         int    `json:"disk_io_type,omitempty"`
	DiskIoWriteMode                    int    `json:"disk_io_write_mode,omitempty"`
	DiskQueueSize                      int    `json:"disk_queue_size,omitempty"`
	DlLimit                            int    `json:"dl_limit,omitempty"`
	DontCountSlowTorrents              bool   `json:"dont_count_slow_torrents,omitempty"`
	DyndnsDomain                       string `json:"dyndns_domain,omitempty"`
	DyndnsEnabled                      bool   `json:"dyndns_enabled,omitempty"`
	DyndnsPassword                     string `json:"dyndns_password,omitempty"`
	DyndnsService                      int    `json:"dyndns_service,omitempty"`
	DyndnsUsername                     string `json:"dyndns_username,omitempty"`
	EmbeddedTrackerPort                int    `json:"embedded_tracker_port,omitempty"`
	EmbeddedTrackerPortForwarding      bool   `json:"embedded_tracker_port_forwarding,omitempty"`
	EnableCoalesceReadWrite            bool   `json:"enable_coalesce_read_write,omitempty"`
	EnableEmbeddedTracker              bool   `json:"enable_embedded_tracker,omitempty"`
	EnableMultiConnectionsFromSameIP   bool   `json:"enable_multi_connections_from_same_ip,omitempty"`
	EnablePieceExtentAffinity          bool   `json:"enable_piece_extent_affinity,omitempty"`
	EnableUploadSuggestions            bool   `json:"enable_upload_suggestions,omitempty"`
	Encryption                         int    `json:"encryption,omitempty"`
	ExcludedFileNames                  string `json:"excluded_file_names,omitempty"`
	ExcludedFileNamesEnabled           bool   `json:"excluded_file_names_enabled,omitempty"`
	ExportDir                          string `json:"export_dir,omitempty"`
	ExportDirFin                       string `json:"export_dir_fin,omitempty"`
	FileLogAge                         int    `json:"file_log_age,omitempty"`
	FileLogAgeType                     int    `json:"file_log_age_type,omitempty"`
	FileLogBackupEnabled               bool   `json:"file_log_backup_enabled,omitempty"`
	FileLogDeleteOld                   bool   `json:"file_log_delete_old,omitempty"`
	FileLogEnabled                     bool   `json:"file_log_enabled,omitempty"`
	FileLogMaxSize                     int    `json:"file_log_max_size,omitempty"`
	FileLogPath                        string `json:"file_log_path,omitempty"`
	FilePoolSize                       int    `json:"file_pool_size,omitempty"`
	HashingThreads                     int    `json:"hashing_threads,omitempty"`
	I2PAddress                         string `json:"i2p_address,omitempty"`
	I2PEnabled                         bool   `json:"i2p_enabled,omitempty"`
	I2PInboundLength                   int    `json:"i2p_inbound_length,omitempty"`
	I2PInboundQuantity                 int    `json:"i2p_inbound_quantity,omitempty"`
	I2PMixedMode                       bool   `json:"i2p_mixed_mode,omitempty"`
	I2POutboundLength                  int    `json:"i2p_outbound_length,omitempty"`
	I2POutboundQuantity                int    `json:"i2p_outbound_quantity,omitempty"`
	I2PPort                            int    `json:"i2p_port,omitempty"`
	IdnSupportEnabled                  bool   `json:"idn_support_enabled,omitempty"`
	IncompleteFilesExt                 bool   `json:"incomplete_files_ext,omitempty"`
	IPFilterEnabled                    bool   `json:"ip_filter_enabled,omitempty"`
	IPFilterPath                       string `json:"ip_filter_path,omitempty"`
	IPFilterTrackers                   bool   `json:"ip_filter_trackers,omitempty"`
	LimitLanPeers                      bool   `json:"limit_lan_peers,omitempty"`
	LimitTCPOverhead                   bool   `json:"limit_tcp_overhead,omitempty"`
	LimitUtpRate                       bool   `json:"limit_utp_rate,omitempty"`
	ListenPort                         int    `json:"listen_port,omitempty"`
	Locale                             string `json:"locale,omitempty"`
	Lsd                                bool   `json:"lsd,omitempty"`
	MailNotificationAuthEnabled        bool   `json:"mail_notification_auth_enabled,omitempty"`
	MailNotificationEmail              string `json:"mail_notification_email,omitempty"`
	MailNotificationEnabled            bool   `json:"mail_notification_enabled,omitempty"`
	MailNotificationPassword           string `json:"mail_notification_password,omitempty"`
	MailNotificationSender             string `json:"mail_notification_sender,omitempty"`
	MailNotificationSMTP               string `json:"mail_notification_smtp,omitempty"`
	MailNotificationSslEnabled         bool   `json:"mail_notification_ssl_enabled,omitempty"`
	MailNotificationUsername           string `json:"mail_notification_username,omitempty"`
	MaxActiveCheckingTorrents          int    `json:"max_active_checking_torrents,omitempty"`
	MaxActiveDownloads                 int    `json:"max_active_downloads,omitempty"`
	MaxActiveTorrents                  int    `json:"max_active_torrents,omitempty"`
	MaxActiveUploads                   int    `json:"max_active_uploads,omitempty"`
	MaxConcurrentHTTPAnnounces         int    `json:"max_concurrent_http_announces,omitempty"`
	MaxConnec                          int    `json:"max_connec,omitempty"`
	MaxConnecPerTorrent                int    `json:"max_connec_per_torrent,omitempty"`
	MaxInactiveSeedingTime             int    `json:"max_inactive_seeding_time,omitempty"`
	MaxInactiveSeedingTimeEnabled      bool   `json:"max_inactive_seeding_time_enabled,omitempty"`
	MaxRatio                           int    `json:"max_ratio,omitempty"`
	MaxRatioAct                        int    `json:"max_ratio_act,omitempty"`
	MaxRatioEnabled                    bool   `json:"max_ratio_enabled,omitempty"`
	MaxSeedingTime                     int    `json:"max_seeding_time,omitempty"`
	MaxSeedingTimeEnabled              bool   `json:"max_seeding_time_enabled,omitempty"`
	MaxUploads                         int    `json:"max_uploads,omitempty"`
	MaxUploadsPerTorrent               int    `json:"max_uploads_per_torrent,omitempty"`
	MemoryWorkingSetLimit              int    `json:"memory_working_set_limit,omitempty"`
	MergeTrackers                      bool   `json:"merge_trackers,omitempty"`
	OutgoingPortsMax                   int    `json:"outgoing_ports_max,omitempty"`
	OutgoingPortsMin                   int    `json:"outgoing_ports_min,omitempty"`
	PeerTos                            int    `json:"peer_tos,omitempty"`
	PeerTurnover                       int    `json:"peer_turnover,omitempty"`
	PeerTurnoverCutoff                 int    `json:"peer_turnover_cutoff,omitempty"`
	PeerTurnoverInterval               int    `json:"peer_turnover_interval,omitempty"`
	PerformanceWarning                 bool   `json:"performance_warning,omitempty"`
	Pex                                bool   `json:"pex,omitempty"`
	PreallocateAll                     bool   `json:"preallocate_all,omitempty"`
	ProxyAuthEnabled                   bool   `json:"proxy_auth_enabled,omitempty"`
	ProxyBittorrent                    bool   `json:"proxy_bittorrent,omitempty"`
	ProxyHostnameLookup                bool   `json:"proxy_hostname_lookup,omitempty"`
	ProxyIP                            string `json:"proxy_ip,omitempty"`
	ProxyMisc                          bool   `json:"proxy_misc,omitempty"`
	ProxyPassword                      string `json:"proxy_password,omitempty"`
	ProxyPeerConnections               bool   `json:"proxy_peer_connections,omitempty"`
	ProxyPort                          int    `json:"proxy_port,omitempty"`
	ProxyRss                           bool   `json:"proxy_rss,omitempty"`
	ProxyType                          string `json:"proxy_type,omitempty"`
	ProxyUsername                      string `json:"proxy_username,omitempty"`
	QueueingEnabled                    bool   `json:"queueing_enabled,omitempty"`
	RandomPort                         bool   `json:"random_port,omitempty"`
	ReannounceWhenAddressChanged       bool   `json:"reannounce_when_address_changed,omitempty"`
	RecheckCompletedTorrents           bool   `json:"recheck_completed_torrents,omitempty"`
	RefreshInterval                    int    `json:"refresh_interval,omitempty"`
	RequestQueueSize                   int    `json:"request_queue_size,omitempty"`
	ResolvePeerCountries               bool   `json:"resolve_peer_countries,omitempty"`
	ResumeDataStorageType              string `json:"resume_data_storage_type,omitempty"`
	RssAutoDownloadingEnabled          bool   `json:"rss_auto_downloading_enabled,omitempty"`
	RssDownloadRepackProperEpisodes    bool   `json:"rss_download_repack_proper_episodes,omitempty"`
	RssMaxArticlesPerFeed              int    `json:"rss_max_articles_per_feed,omitempty"`
	RssProcessingEnabled               bool   `json:"rss_processing_enabled,omitempty"`
	RssRefreshInterval                 int    `json:"rss_refresh_interval,omitempty"`
	RssSmartEpisodeFilters             string `json:"rss_smart_episode_filters,omitempty"`
	SavePath                           string `json:"save_path,omitempty"`
	SavePathChangedTmmEnabled          bool   `json:"save_path_changed_tmm_enabled,omitempty"`
	SaveResumeDataInterval             int    `json:"save_resume_data_interval,omitempty"`
	ScheduleFromHour                   int    `json:"schedule_from_hour,omitempty"`
	ScheduleFromMin                    int    `json:"schedule_from_min,omitempty"`
	ScheduleToHour                     int    `json:"schedule_to_hour,omitempty"`
	ScheduleToMin                      int    `json:"schedule_to_min,omitempty"`
	SchedulerDays                      int    `json:"scheduler_days,omitempty"`
	SchedulerEnabled                   bool   `json:"scheduler_enabled,omitempty"`
	SendBufferLowWatermark             int    `json:"send_buffer_low_watermark,omitempty"`
	SendBufferWatermark                int    `json:"send_buffer_watermark,omitempty"`
	SendBufferWatermarkFactor          int    `json:"send_buffer_watermark_factor,omitempty"`
	SlowTorrentDlRateThreshold         int    `json:"slow_torrent_dl_rate_threshold,omitempty"`
	SlowTorrentInactiveTimer           int    `json:"slow_torrent_inactive_timer,omitempty"`
	SlowTorrentUlRateThreshold         int    `json:"slow_torrent_ul_rate_threshold,omitempty"`
	SocketBacklogSize                  int    `json:"socket_backlog_size,omitempty"`
	SocketReceiveBufferSize            int    `json:"socket_receive_buffer_size,omitempty"`
	SocketSendBufferSize               int    `json:"socket_send_buffer_size,omitempty"`
	SsrfMitigation                     bool   `json:"ssrf_mitigation,omitempty"`
	StartPausedEnabled                 bool   `json:"start_paused_enabled,omitempty"`
	StopTrackerTimeout                 int    `json:"stop_tracker_timeout,omitempty"`
	TempPath                           string `json:"temp_path,omitempty"`
	TempPathEnabled                    bool   `json:"temp_path_enabled,omitempty"`
	TorrentChangedTmmEnabled           bool   `json:"torrent_changed_tmm_enabled,omitempty"`
	TorrentContentLayout               string `json:"torrent_content_layout,omitempty"`
	TorrentFileSizeLimit               int    `json:"torrent_file_size_limit,omitempty"`
	TorrentStopCondition               string `json:"torrent_stop_condition,omitempty"`
	UpLimit                            int    `json:"up_limit,omitempty"`
	UploadChokingAlgorithm             int    `json:"upload_choking_algorithm,omitempty"`
	UploadSlotsBehavior                int    `json:"upload_slots_behavior,omitempty"`
	Upnp                               bool   `json:"upnp,omitempty"`
	UpnpLeaseDuration                  int    `json:"upnp_lease_duration,omitempty"`
	UseCategoryPathsInManualMode       bool   `json:"use_category_paths_in_manual_mode,omitempty"`
	UseHTTPS                           bool   `json:"use_https,omitempty"`
	UseSubcategories                   bool   `json:"use_subcategories,omitempty"`
	UtpTCPMixedMode                    int    `json:"utp_tcp_mixed_mode,omitempty"`
	ValidateHTTPSTrackerCertificate    bool   `json:"validate_https_tracker_certificate,omitempty"`
	WebUIAddress                       string `json:"web_ui_address,omitempty"`
	WebUIBanDuration                   int    `json:"web_ui_ban_duration,omitempty"`
	WebUIClickjackingProtectionEnabled bool   `json:"web_ui_clickjacking_protection_enabled,omitempty"`
	WebUICsrfProtectionEnabled         bool   `json:"web_ui_csrf_protection_enabled,omitempty"`
	WebUICustomHTTPHeaders             string `json:"web_ui_custom_http_headers,omitempty"`
	WebUIDomainList                    string `json:"web_ui_domain_list,omitempty"`
	WebUIHostHeaderValidationEnabled   bool   `json:"web_ui_host_header_validation_enabled,omitempty"`
	WebUIHTTPSCertPath                 string `json:"web_ui_https_cert_path,omitempty"`
	WebUIHTTPSKeyPath                  string `json:"web_ui_https_key_path,omitempty"`
	WebUIMaxAuthFailCount              int    `json:"web_ui_max_auth_fail_count,omitempty"`
	WebUIPort                          int    `json:"web_ui_port,omitempty"`
	WebUIReverseProxiesList            string `json:"web_ui_reverse_proxies_list,omitempty"`
	WebUIReverseProxyEnabled           bool   `json:"web_ui_reverse_proxy_enabled,omitempty"`
	WebUISecureCookieEnabled           bool   `json:"web_ui_secure_cookie_enabled,omitempty"`
	WebUISessionTimeout                int    `json:"web_ui_session_timeout,omitempty"`
	WebUIUpnp                          bool   `json:"web_ui_upnp,omitempty"`
	WebUIUseCustomHTTPHeadersEnabled   bool   `json:"web_ui_use_custom_http_headers_enabled,omitempty"`
	WebUIUsername                      string `json:"web_ui_username,omitempty"`
}
// Version returns the application version string reported by
// GET /api/v2/app/version (e.g. "v4.6.0").
func (c *client) Version(ctx context.Context) (string, error) {
	res, err := c.doRequest(ctx, &requestData{
		url: fmt.Sprintf("%s/api/v2/app/version", c.config.Address),
	})
	if err != nil {
		return "", err
	}
	if res.code != http.StatusOK {
		return "", errors.New("get version failed: " + string(res.body))
	}
	return string(res.body), nil
}
// WebApiVersion returns the Web API version string reported by
// GET /api/v2/app/webapiVersion.
func (c *client) WebApiVersion(ctx context.Context) (string, error) {
	res, err := c.doRequest(ctx, &requestData{
		url: fmt.Sprintf("%s/api/v2/app/webapiVersion", c.config.Address),
	})
	if err != nil {
		return "", err
	}
	if res.code != http.StatusOK {
		return "", errors.New("get version failed: " + string(res.body))
	}
	return string(res.body), nil
}
// BuildInfo fetches GET /api/v2/app/buildInfo and decodes the JSON
// response into a BuildInfo value.
func (c *client) BuildInfo(ctx context.Context) (*BuildInfo, error) {
	res, err := c.doRequest(ctx, &requestData{
		url: fmt.Sprintf("%s/api/v2/app/buildInfo", c.config.Address),
	})
	if err != nil {
		return nil, err
	}
	if res.code != http.StatusOK {
		return nil, errors.New("get build info failed: " + string(res.body))
	}
	info := &BuildInfo{}
	if err := json.Unmarshal(res.body, info); err != nil {
		return nil, err
	}
	return info, nil
}
// Shutdown asks the remote qBittorrent instance to exit via
// POST /api/v2/app/shutdown. After a successful call the server is
// gone, so subsequent requests on this client will fail.
func (c *client) Shutdown(ctx context.Context) error {
	res, err := c.doRequest(ctx, &requestData{
		method: http.MethodPost,
		url:    fmt.Sprintf("%s/api/v2/app/shutdown", c.config.Address),
	})
	if err != nil {
		return err
	}
	if res.code != http.StatusOK {
		return errors.New("shutdown application failed: " + string(res.body))
	}
	return nil
}
// GetPreferences fetches GET /api/v2/app/preferences and decodes the
// JSON response into a Preferences value.
func (c *client) GetPreferences(ctx context.Context) (*Preferences, error) {
	res, err := c.doRequest(ctx, &requestData{
		url: fmt.Sprintf("%s/api/v2/app/preferences", c.config.Address),
	})
	if err != nil {
		return nil, err
	}
	if res.code != http.StatusOK {
		return nil, errors.New("get preference failed: " + string(res.body))
	}
	prefs := &Preferences{}
	if err := json.Unmarshal(res.body, prefs); err != nil {
		return nil, err
	}
	return prefs, nil
}
// SetPreferences pushes the given preferences to the server via
// POST /api/v2/app/setPreferences. The API expects a form-urlencoded
// body of the shape "json=<encoded JSON document>".
func (c *client) SetPreferences(ctx context.Context, prefs *Preferences) error {
	apiUrl := fmt.Sprintf("%s/api/v2/app/setPreferences", c.config.Address)
	data, err := json.Marshal(prefs)
	if err != nil {
		return err
	}
	// The request declares Content-Type application/x-www-form-urlencoded,
	// so the JSON value must be percent-encoded. Writing the raw JSON
	// would corrupt the form body for any preference containing reserved
	// characters such as '&', '+', '=', '%' or spaces (e.g. trackers
	// lists, autorun commands, custom headers).
	var formData bytes.Buffer
	formData.WriteString("json=")
	formData.WriteString(url.QueryEscape(string(data)))
	result, err := c.doRequest(ctx, &requestData{
		method:      http.MethodPost,
		url:         apiUrl,
		contentType: ContentTypeFormUrlEncoded,
		body:        &formData,
	})
	if err != nil {
		return err
	}
	if result.code != 200 {
		return errors.New("set preference failed: " + string(result.body))
	}
	return nil
}
// DefaultSavePath returns the default download directory reported by
// GET /api/v2/app/defaultSavePath.
func (c *client) DefaultSavePath(ctx context.Context) (string, error) {
	res, err := c.doRequest(ctx, &requestData{
		url: fmt.Sprintf("%s/api/v2/app/defaultSavePath", c.config.Address),
	})
	if err != nil {
		return "", err
	}
	if res.code != http.StatusOK {
		return "", errors.New("get default save path failed: " + string(res.body))
	}
	return string(res.body), nil
}

View file

@ -0,0 +1,73 @@
package qbittorrent
import (
"context"
"testing"
)
// NOTE(review): these are integration tests — they rely on a package-level
// client `c` initialized elsewhere (presumably in TestMain against a live
// qBittorrent instance; not visible in this file) — confirm. They will fail
// without a reachable server.

// TestClient_Version checks that the application version can be fetched.
func TestClient_Version(t *testing.T) {
	ctx := context.Background()
	version, err := c.Application().Version(ctx)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(version)
}

// TestClient_WebApiVersion checks that the Web API version can be fetched.
func TestClient_WebApiVersion(t *testing.T) {
	ctx := context.Background()
	version, err := c.Application().WebApiVersion(ctx)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(version)
}

// TestClient_BuildInfo checks that build information decodes.
func TestClient_BuildInfo(t *testing.T) {
	ctx := context.Background()
	info, err := c.Application().BuildInfo(ctx)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("build: %+v", info)
}

// TestClient_Shutdown shuts the remote server down.
// NOTE(review): this kills the instance the other tests depend on if run
// in the same session — confirm intended ordering.
func TestClient_Shutdown(t *testing.T) {
	ctx := context.Background()
	if err := c.Application().Shutdown(ctx); err != nil {
		t.Fatal(err)
	}
	t.Log("shutting down")
}

// TestClient_GetPreferences checks that preferences decode.
func TestClient_GetPreferences(t *testing.T) {
	ctx := context.Background()
	prefs, err := c.Application().GetPreferences(ctx)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("prefs: %+v", prefs)
}

// TestClient_SetPreferences round-trips preferences: fetch, tweak one
// field (file_log_age), push back.
func TestClient_SetPreferences(t *testing.T) {
	ctx := context.Background()
	prefs, err := c.Application().GetPreferences(ctx)
	if err != nil {
		t.Fatal(err)
	}
	prefs.FileLogAge = 301
	if err := c.Application().SetPreferences(ctx, prefs); err != nil {
		t.Fatal(err)
	}
	t.Logf("success")
}

// TestClient_DefaultSavePath checks that the default save path can be fetched.
func TestClient_DefaultSavePath(t *testing.T) {
	ctx := context.Background()
	path, err := c.Application().DefaultSavePath(ctx)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("path: %s", path)
}

View file

@ -0,0 +1,85 @@
package qbittorrent
import (
"context"
"errors"
"fmt"
"net/http"
"net/http/cookiejar"
"net/url"
"strings"
)
// Authentication exposes the qBittorrent Web API auth endpoints
// (/api/v2/auth/*), which use cookie-based sessions.
type Authentication interface {
	// Login cookie-based authentication, after calling NewClient, do not need to call Login again,
	// it is the default behavior
	Login(ctx context.Context) error
	// Logout deactivate cookies
	Logout(ctx context.Context) error
}
// Login authenticates against POST /api/v2/auth/login with the
// configured username/password and stores the returned session cookie
// in the client's cookie jar (created lazily on first success).
// The endpoint signals bad credentials with a 200 body of "Fails.".
func (c *client) Login(ctx context.Context) error {
	if c.config.Username == "" || c.config.Password == "" {
		return errors.New("username or password is empty")
	}

	form := url.Values{}
	form.Set("username", c.config.Username)
	form.Set("password", c.config.Password)

	res, err := c.doRequest(ctx, &requestData{
		method: http.MethodPost,
		url:    fmt.Sprintf("%s/api/v2/auth/login", c.config.Address),
		body:   strings.NewReader(form.Encode()),
	})
	if err != nil {
		return err
	}
	if res.code != http.StatusOK {
		return errors.New("login failed: " + string(res.body))
	}

	switch string(res.body) {
	case "Ok.":
		// authenticated — fall through to cookie handling
	case "Fails.":
		return ErrAuthFailed
	default:
		return errors.New("login failed: " + string(res.body))
	}

	if c.cookieJar == nil {
		jar, err := cookiejar.New(nil)
		if err != nil {
			return err
		}
		c.cookieJar = jar
	}
	u, err := url.Parse(c.config.Address)
	if err != nil {
		return err
	}
	c.cookieJar.SetCookies(u, res.cookies)
	return nil
}
// Logout invalidates the current session via POST /api/v2/auth/logout.
// The local cookie jar is left in place; the server-side session is the
// one deactivated.
func (c *client) Logout(ctx context.Context) error {
	res, err := c.doRequest(ctx, &requestData{
		method: http.MethodPost,
		url:    fmt.Sprintf("%s/api/v2/auth/logout", c.config.Address),
	})
	if err != nil {
		return err
	}
	if res.code != http.StatusOK {
		return errors.New("logout failed: " + string(res.body))
	}
	return nil
}

View file

@ -0,0 +1,24 @@
package qbittorrent
import (
"context"
"testing"
)
// NOTE(review): like the application tests, these rely on a package-level
// client `c` initialized elsewhere (not visible here) and a reachable
// qBittorrent instance — confirm.

// TestClient_Login verifies that login with the configured credentials succeeds.
func TestClient_Login(t *testing.T) {
	ctx := context.Background()
	if err := c.Authentication().Login(ctx); err != nil {
		t.Fatal(err)
	}
}

// TestClient_Logout verifies the login/logout round trip.
func TestClient_Logout(t *testing.T) {
	ctx := context.Background()
	if err := c.Authentication().Login(ctx); err != nil {
		t.Fatal(err)
	}
	if err := c.Authentication().Logout(ctx); err != nil {
		t.Fatal(err)
	}
}

39
pkg/qbittorrent/client.go Normal file
View file

@ -0,0 +1,39 @@
package qbittorrent
import "context"
// Client represents a qBittorrent client. It is an umbrella over the
// per-area sub-APIs of the qBittorrent Web API; each accessor returns a
// narrowly-scoped interface backed by the same underlying connection.
type Client interface {
	// Authentication auth qBittorrent client
	Authentication() Authentication
	// Application get qBittorrent application info
	Application() Application
	// Log get qBittorrent log
	Log() Log
	// Sync get qBittorrent events
	Sync() Sync
	// Transfer transfer manage
	Transfer() Transfer
	// Torrent manage for torrent
	Torrent() Torrent
	// Search api for search
	Search() Search
	// RSS api for rss
	RSS() RSS
}
// NewClient builds a Client from cfg without authenticating; call
// Authentication().Login before using endpoints that need a session.
// ctx is currently unused and the error is always nil; both are kept
// for interface symmetry with LoginClient.
func NewClient(ctx context.Context, cfg *Config) (Client, error) {
	return &client{
		config:     cfg,
		clientPool: newClientPool(cfg.ConnectionMaxIdles, cfg.ConnectionTimeout),
	}, nil
}
// LoginClient builds a Client from cfg and immediately authenticates.
// When cfg.RefreshCookie is set it also starts a background goroutine
// that keeps the session cookie fresh.
// NOTE(review): the refreshCookie goroutine has no stop mechanism tied
// to ctx — it lives for the process lifetime; confirm that is intended.
func LoginClient(ctx context.Context, cfg *Config) (Client, error) {
	c := &client{
		config:     cfg,
		clientPool: newClientPool(cfg.ConnectionMaxIdles, cfg.ConnectionTimeout),
	}
	if err := c.Authentication().Login(ctx); err != nil {
		return nil, err
	}
	if cfg.RefreshCookie {
		go c.refreshCookie()
	}
	return c, nil
}

View file

@ -0,0 +1,135 @@
package qbittorrent
import (
"context"
"fmt"
"io"
"net/http"
"net/http/cookiejar"
"net/url"
"strings"
"time"
)
// responseResult is the decoded outcome of one HTTP call: status code, the
// fully-read body, and any cookies the server set.
type responseResult struct {
	code int
	body []byte
	cookies []*http.Cookie
}
// requestData describes one HTTP request to the WebUI. method defaults to
// GET and contentType to application/x-www-form-urlencoded (see doRequest).
type requestData struct {
	method string
	url string
	contentType string
	body io.Reader
}
// Compile-time check that *client satisfies the full Client interface.
var _ Client = (*client)(nil)
// client is the concrete implementation of Client. It holds the user config,
// a pool of HTTP clients, and the cookie jar populated by Login.
type client struct {
	config *Config
	clientPool *clientPool
	cookieJar *cookiejar.Jar
}
// Authentication returns the auth facet (implemented by client itself).
func (c *client) Authentication() Authentication {
	return c
}
// Application returns the application facet.
func (c *client) Application() Application {
	return c
}
// Log returns the log facet.
func (c *client) Log() Log {
	return c
}
// Sync returns the sync facet.
func (c *client) Sync() Sync {
	return c
}
// Transfer returns the transfer facet.
func (c *client) Transfer() Transfer {
	return c
}
// Torrent returns the torrent-management facet.
func (c *client) Torrent() Torrent {
	return c
}
// Search returns the search facet.
func (c *client) Search() Search {
	return c
}
// RSS returns the RSS facet.
func (c *client) RSS() RSS {
	return c
}
// doRequest performs one HTTP request against the qBittorrent WebUI.
//
// Defaults: method falls back to GET and Content-Type to
// application/x-www-form-urlencoded when unset on data. Configured custom
// headers are applied on top. The response body is read in full and returned
// with the status code and response cookies; HTTP status checking is left to
// the caller.
func (c *client) doRequest(ctx context.Context, data *requestData) (*responseResult, error) {
	if data.method == "" {
		data.method = http.MethodGet
	}
	if data.contentType == "" {
		data.contentType = ContentTypeFormUrlEncoded
	}
	request, err := http.NewRequestWithContext(ctx, data.method, data.url, data.body)
	if err != nil {
		return nil, err
	}
	request.Header.Set("Content-Type", data.contentType)
	for key, value := range c.config.CustomHeaders {
		request.Header.Set(key, value)
	}
	hc := c.clientPool.GetClient()
	defer c.clientPool.ReleaseClient(hc)
	if c.cookieJar != nil {
		hc.Jar = c.cookieJar
	} else {
		// Fix: pooled clients are reused; clear any jar left over from a
		// previous use so stale session cookies never leak into
		// unauthenticated requests.
		hc.Jar = nil
	}
	resp, err := hc.Do(request)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	readAll, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return &responseResult{code: resp.StatusCode, body: readAll, cookies: resp.Cookies()}, nil
}
// cookies renders the stored session cookies for the configured address as a
// "name=value; " header fragment. It returns ErrNotLogin when no session has
// been established yet.
func (c *client) cookies() (string, error) {
	if c.cookieJar == nil {
		return "", ErrNotLogin
	}
	target, err := url.Parse(c.config.Address)
	if err != nil {
		return "", err
	}
	stored := c.cookieJar.Cookies(target)
	if len(stored) == 0 {
		return "", ErrNotLogin
	}
	var sb strings.Builder
	for _, ck := range stored {
		sb.WriteString(ck.Name)
		sb.WriteString("=")
		sb.WriteString(ck.Value)
		sb.WriteString("; ")
	}
	return sb.String(), nil
}
// refreshCookie periodically renews the WebUI session so that the cookie in
// c.cookieJar does not expire. It is started by LoginClient when
// Config.RefreshCookie is enabled and runs for the life of the process.
//
// Fix: the previous implementation only called Logout on each tick, which
// destroyed the session and never re-authenticated — a refresh must log back
// in, since Login is what stores a fresh cookie. The ticker is also stopped
// on return now.
func (c *client) refreshCookie() {
	ctx := context.Background()
	interval := c.config.RefreshIntervals
	if interval == 0 {
		interval = time.Hour
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		// Best effort: the old session may already have expired server-side.
		_ = c.Logout(ctx)
		if err := c.Login(ctx); err != nil {
			// No logger is available here; keep retrying on the next tick.
			continue
		}
	}
}

View file

@ -0,0 +1,53 @@
package qbittorrent
import (
"crypto/tls"
"net"
"net/http"
"sync"
"time"
)
// clientPool is a reusable pool of *http.Client values; pooling amortizes
// client construction across concurrent requests.
type clientPool struct {
	// the embedded sync.Pool stores *http.Client instances
	*sync.Pool
}
// newClientPool creates and returns a new clientPool. maxIdle defaults to
// 128 idle connections and timeout to 3 seconds when zero values are passed.
// NOTE(review): InsecureSkipVerify is hard-coded to true, so server TLS
// certificates are never validated — confirm this is intentional and
// consider making it configurable.
func newClientPool(maxIdle int, timeout time.Duration) *clientPool {
	if maxIdle == 0 {
		maxIdle = 128
	}
	if timeout == 0 {
		timeout = time.Second * 3
	}
	return &clientPool{
		Pool: &sync.Pool{
			New: func() any {
				return &http.Client{
					Transport: &http.Transport{
						Proxy: http.ProxyFromEnvironment,
						DialContext: (&net.Dialer{
							Timeout: 30 * time.Second,
							KeepAlive: 30 * time.Second,
						}).DialContext,
						TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
						MaxIdleConns: maxIdle,
					},
					Timeout: timeout,
				}
			},
		},
	}
}
// GetClient retrieves a *http.Client from the pool (creating one via the
// pool's New func when empty).
func (p *clientPool) GetClient() *http.Client {
	return p.Get().(*http.Client)
}
// ReleaseClient returns a *http.Client to the pool for reuse.
func (p *clientPool) ReleaseClient(client *http.Client) {
	p.Put(client)
}

View file

@ -0,0 +1,56 @@
package qbittorrent
import (
"context"
"net/url"
"testing"
"time"
)
var (
c Client
)
// init builds the shared, authenticated client used by every test in this
// package. It contacts a live qBittorrent instance and panics on failure,
// aborting the whole test binary.
// NOTE(review): the address and credentials are hard-coded — these tests run
// only against that private instance, and the password is committed to the
// repository; move them to environment variables.
func init() {
	ctx := context.Background()
	var err error
	c, err = LoginClient(ctx, &Config{
		Address: "http://192.168.3.33:38080",
		Username: "admin",
		Password: "J0710cz5",
		RefreshIntervals: time.Hour,
		ConnectionTimeout: time.Second * 3,
		CustomHeaders: map[string]string{
			//"Origin": "http://192.168.3.33:8080",
			//"Referer": "http://192.168.3.33:8080",
			"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
		},
	})
	if err != nil {
		panic(err)
	}
}
// TestFormEncoder checks that a LogOption serializes into url.Values via the
// shared schema encoder without error.
func TestFormEncoder(t *testing.T) {
	option := LogOption{
		Normal:      true,
		Info:        true,
		Warning:     false,
		Critical:    false,
		LastKnownId: 0,
	}
	form := url.Values{}
	if err := encoder.Encode(option, form); err != nil {
		t.Fatal(err)
	}
	t.Log(form)
}
// TestFormEncode logs how url.Values percent-escapes spaces and reserved
// characters in credentials.
func TestFormEncode(t *testing.T) {
	form := url.Values{}
	form.Add("username", "admin hahaha")
	form.Add("password", "J0710c?//&z5")
	t.Log(form.Encode())
}

10
pkg/qbittorrent/common.go Normal file
View file

@ -0,0 +1,10 @@
package qbittorrent
import "github.com/gorilla/schema"
// Content-Type values used for WebUI requests.
const (
	ContentTypeJSON = "application/json"
	ContentTypeFormUrlEncoded = "application/x-www-form-urlencoded"
)
// encoder is the shared gorilla/schema encoder used to serialize option
// structs into application/x-www-form-urlencoded form values.
var encoder = schema.NewEncoder()

25
pkg/qbittorrent/config.go Normal file
View file

@ -0,0 +1,25 @@
package qbittorrent
import "time"
// Config carries the connection and session settings for a qBittorrent
// WebUI client.
type Config struct {
	// Address is the qBittorrent WebUI endpoint, e.g. "http://host:8080".
	Address string
	// Username used to access the WebUI.
	Username string
	// Password used to access the WebUI.
	Password string
	// -- HTTP configuration --
	// CustomHeaders are extra headers set on every request.
	CustomHeaders map[string]string
	// ConnectionTimeout is the per-request timeout; defaults to 3 seconds.
	ConnectionTimeout time.Duration
	// ConnectionMaxIdles caps idle connections in the HTTP client pool; defaults to 128.
	ConnectionMaxIdles int
	// RefreshCookie enables a background goroutine that periodically
	// refreshes the session cookie.
	RefreshCookie bool
	// RefreshIntervals is the interval between cookie refreshes; defaults
	// to 1 hour.
	RefreshIntervals time.Duration
}

View file

@ -0,0 +1,8 @@
package qbittorrent
import "errors"
var (
	// ErrNotLogin is returned when an operation needs a session but no
	// login cookie is stored.
	ErrNotLogin = errors.New("not login")
	// ErrAuthFailed is returned when the WebUI rejects the credentials.
	ErrAuthFailed = errors.New("auth failed")
)

89
pkg/qbittorrent/log.go Normal file
View file

@ -0,0 +1,89 @@
package qbittorrent
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
)
// LogOption filters which entries GetLog returns.
type LogOption struct {
	Normal bool `schema:"normal,omitempty"` // include normal messages
	Info bool `schema:"info,omitempty"` // include info messages
	Warning bool `schema:"warning,omitempty"` // include warning messages
	Critical bool `schema:"critical,omitempty"` // include critical messages
	LastKnownId int64 `schema:"last_known_id,omitempty"` // exclude messages with "message id" <= (default: last_known_id-1)
}
// LogEntry is one record from the main or peer log; peer-log entries use the
// IP/Blocked/Reason fields, main-log entries use Type/Message.
type LogEntry struct {
	Id int `json:"id,omitempty"` // id of the message or peer
	Timestamp int `json:"timestamp,omitempty"` // seconds since epoch
	Type int `json:"type,omitempty"` // type of the message, Log::NORMAL: 1, Log::INFO: 2, Log::WARNING: 4, Log::CRITICAL: 8
	Message string `json:"message,omitempty"` // text of the message
	IP string `json:"ip"` // ip of the peer
	Blocked bool `json:"blocked,omitempty"` // whether the peer was blocked
	Reason string `json:"reason,omitempty"` // Reason of the block
}
// Log is the facet covering the qBittorrent log endpoints.
type Log interface {
	// GetLog fetches entries from the main log, filtered by option.
	GetLog(ctx context.Context, option *LogOption) ([]*LogEntry, error)
	// GetPeerLog fetches entries from the peer log with id > lastKnownId.
	GetPeerLog(ctx context.Context, lastKnownId int) ([]*LogEntry, error)
}
// GetLog fetches entries from the main qBittorrent log, filtered by option.
//
// Fix: the filter was previously encoded both into the query string and into
// a request body on what defaults to a GET request; the API takes these
// parameters in the query string, so the redundant body has been dropped.
func (c *client) GetLog(ctx context.Context, option *LogOption) ([]*LogEntry, error) {
	var form = url.Values{}
	if err := encoder.Encode(option, form); err != nil {
		return nil, err
	}
	apiUrl := fmt.Sprintf("%s/api/v2/log/main?%s", c.config.Address, form.Encode())
	result, err := c.doRequest(ctx, &requestData{
		url: apiUrl,
	})
	if err != nil {
		return nil, err
	}
	if result.code != 200 {
		return nil, errors.New("get log failed: " + string(result.body))
	}
	var logs []*LogEntry
	if err := json.Unmarshal(result.body, &logs); err != nil {
		return nil, err
	}
	return logs, nil
}
// GetPeerLog fetches entries from the peer log, returning only entries with
// id greater than lastKnownId.
//
// Fix: last_known_id was previously sent only as the body of what defaults
// to a GET request; it now travels in the query string, consistent with
// GetLog and the WebUI API.
func (c *client) GetPeerLog(ctx context.Context, lastKnownId int) ([]*LogEntry, error) {
	var form = url.Values{}
	form.Add("last_known_id", strconv.Itoa(lastKnownId))
	apiUrl := fmt.Sprintf("%s/api/v2/log/peers?%s", c.config.Address, form.Encode())
	result, err := c.doRequest(ctx, &requestData{
		url: apiUrl,
	})
	if err != nil {
		return nil, err
	}
	if result.code != 200 {
		return nil, errors.New("get peer log failed: " + string(result.body))
	}
	var logs []*LogEntry
	if err := json.Unmarshal(result.body, &logs); err != nil {
		return nil, err
	}
	return logs, nil
}

View file

@ -0,0 +1,39 @@
package qbittorrent
import (
"context"
"encoding/json"
"testing"
)
func TestClient_GetLog(t *testing.T) {
ctx := context.Background()
entries, err := c.Log().GetLog(ctx, &LogOption{
Normal: true,
Info: true,
Warning: true,
Critical: true,
LastKnownId: 0,
})
if err != nil {
t.Fatal(err)
}
bytes, err := json.Marshal(entries)
if err != nil {
t.Fatal(err)
}
t.Log(string(bytes))
}
func TestClient_GetPeerLog(t *testing.T) {
ctx := context.Background()
entries, err := c.Log().GetPeerLog(ctx, -1)
if err != nil {
t.Fatal(err)
}
bytes, err := json.Marshal(entries)
if err != nil {
t.Fatal(err)
}
t.Log(string(bytes))
}

323
pkg/qbittorrent/rss.go Normal file
View file

@ -0,0 +1,323 @@
package qbittorrent
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
)
// RSS is the facet covering the qBittorrent RSS endpoints: feed/folder
// management, read-state, and auto-downloading rules.
type RSS interface {
	// AddFolder create new folder for rss, full path of added folder such as "The Pirate Bay\Top100"
	AddFolder(ctx context.Context, path string) error
	// AddFeed add feed
	AddFeed(ctx context.Context, option *RssAddFeedOption) error
	// RemoveItem remove folder or feed
	RemoveItem(ctx context.Context, path string) error
	// MoveItem move or rename folder or feed
	MoveItem(ctx context.Context, srcPath, destPath string) error
	// GetItems list all items, if withData is true, will return all data
	GetItems(ctx context.Context, withData bool) (map[string]interface{}, error)
	// MarkAsRead if articleId is provided only the article is marked as read otherwise the whole feed
	// is going to be marked as read.
	MarkAsRead(ctx context.Context, option *RssMarkAsReadOption) error
	// RefreshItem refresh folder or feed
	RefreshItem(ctx context.Context, itemPath string) error
	// SetAutoDownloadingRule set auto-downloading rule
	SetAutoDownloadingRule(ctx context.Context, ruleName string, ruleDef *RssAutoDownloadingRuleDef) error
	// RenameAutoDownloadingRule rename auto-downloading rule
	RenameAutoDownloadingRule(ctx context.Context, ruleName, newRuleName string) error
	// RemoveAutoDownloadingRule remove auto-downloading rule
	RemoveAutoDownloadingRule(ctx context.Context, ruleName string) error
	// GetAllAutoDownloadingRules get all auto-downloading rules
	GetAllAutoDownloadingRules(ctx context.Context) (map[string]*RssAutoDownloadingRuleDef, error)
	// GetAllArticlesMatchingRule get all articles matching a rule
	GetAllArticlesMatchingRule(ctx context.Context, ruleName string) (map[string][]string, error)
}
// RssAddFeedOption is the form payload for AddFeed.
type RssAddFeedOption struct {
	// URL feed of rss such as http://thepiratebay.org/rss//top100/200
	URL string `schema:"url"`
	// Folder full path of added folder, optional
	Folder string `schema:"path,omitempty"`
}
// RssMarkAsReadOption is the form payload for MarkAsRead.
type RssMarkAsReadOption struct {
	// ItemPath current full path of item
	ItemPath string `schema:"itemPath"`
	// ArticleId id of article, optional; when set only that article is
	// marked as read instead of the whole feed
	ArticleId string `schema:"articleId,omitempty"`
}
// RssAutoDownloadingRuleDefTorrentParams mirrors the torrentParams object of
// a rule definition: how matched torrents are added.
type RssAutoDownloadingRuleDefTorrentParams struct {
	Category string `json:"category,omitempty"`
	DownloadLimit int `json:"download_limit,omitempty"`
	// NOTE(review): download_path is a filesystem path in the WebUI API — an
	// int type here looks wrong; confirm against the qBittorrent docs.
	DownloadPath int `json:"download_path,omitempty"`
	InactiveSeedingTimeLimit int `json:"inactive_seeding_time_limit,omitempty"`
	OperatingMode string `json:"operating_mode,omitempty"`
	RatioLimit int `json:"ratio_limit,omitempty"`
	SavePath string `json:"save_path,omitempty"`
	SeedingTimeLimit int `json:"seeding_time_limit,omitempty"`
	SkipChecking bool `json:"skip_checking,omitempty"`
	Tags []string `json:"tags,omitempty"`
	UploadLimit int `json:"upload_limit,omitempty"`
	Stopped bool `json:"stopped,omitempty"`
	UseAutoTMM bool `json:"use_auto_tmm,omitempty"`
}
// RssAutoDownloadingRuleDef is one auto-downloading rule definition as
// accepted by rss/setRule and returned by the rules listing.
type RssAutoDownloadingRuleDef struct {
	AddPaused bool `json:"addPaused,omitempty"`
	AffectedFeeds []string `json:"affectedFeeds,omitempty"`
	AssignedCategory string `json:"assignedCategory,omitempty"`
	Enabled bool `json:"enabled,omitempty"`
	EpisodeFilter string `json:"episodeFilter,omitempty"`
	IgnoreDays int `json:"ignoreDays,omitempty"`
	LastMatch string `json:"lastMatch,omitempty"`
	MustContain string `json:"mustContain,omitempty"`
	MustNotContain string `json:"mustNotContain,omitempty"`
	PreviouslyMatchedEpisodes []string `json:"previouslyMatchedEpisodes,omitempty"`
	Priority int `json:"priority,omitempty"`
	SavePath string `json:"savePath,omitempty"`
	SmartFilter bool `json:"smartFilter,omitempty"`
	TorrentParams *RssAutoDownloadingRuleDefTorrentParams `json:"torrentParams,omitempty"`
	UseRegex bool `json:"useRegex,omitempty"`
}
// AddFolder creates a new RSS folder at the given full path
// (e.g. "The Pirate Bay\Top100").
func (c *client) AddFolder(ctx context.Context, path string) error {
	form := url.Values{}
	form.Add("path", path)
	endpoint := fmt.Sprintf("%s/api/v2/rss/addFolder", c.config.Address)
	res, err := c.doRequest(ctx, &requestData{
		url:    endpoint,
		method: http.MethodPost,
		body:   strings.NewReader(form.Encode()),
	})
	if err != nil {
		return err
	}
	if res.code != 200 {
		return errors.New("add rss folder failed: " + string(res.body))
	}
	return nil
}
// AddFeed subscribes to a new RSS feed, optionally placing it under an
// existing folder (see RssAddFeedOption).
//
// Fix: this previously posted to /api/v2/rss/addFolder (copy-paste from
// AddFolder), so the feed was never added; the correct endpoint is
// /api/v2/rss/addFeed.
func (c *client) AddFeed(ctx context.Context, opt *RssAddFeedOption) error {
	var formData = url.Values{}
	err := encoder.Encode(opt, formData)
	if err != nil {
		return err
	}
	var apiUrl = fmt.Sprintf("%s/api/v2/rss/addFeed", c.config.Address)
	result, err := c.doRequest(ctx, &requestData{
		url:    apiUrl,
		method: http.MethodPost,
		body:   strings.NewReader(formData.Encode()),
	})
	if err != nil {
		return err
	}
	if result.code != 200 {
		return errors.New("add rss feed failed: " + string(result.body))
	}
	return nil
}
// RemoveItem deletes the RSS folder or feed at the given full path.
func (c *client) RemoveItem(ctx context.Context, path string) error {
	form := url.Values{}
	form.Add("path", path)
	endpoint := fmt.Sprintf("%s/api/v2/rss/removeItem", c.config.Address)
	res, err := c.doRequest(ctx, &requestData{
		url:    endpoint,
		method: http.MethodPost,
		body:   strings.NewReader(form.Encode()),
	})
	if err != nil {
		return err
	}
	if res.code != 200 {
		return errors.New("remove rss item failed: " + string(res.body))
	}
	return nil
}
// MoveItem moves or renames an RSS folder or feed from srcPath to destPath.
func (c *client) MoveItem(ctx context.Context, srcPath, destPath string) error {
	form := url.Values{}
	form.Add("itemPath", srcPath)
	form.Add("destPath", destPath)
	endpoint := fmt.Sprintf("%s/api/v2/rss/moveItem", c.config.Address)
	res, err := c.doRequest(ctx, &requestData{
		url:    endpoint,
		method: http.MethodPost,
		body:   strings.NewReader(form.Encode()),
	})
	if err != nil {
		return err
	}
	if res.code != 200 {
		return errors.New("move rss item failed: " + string(res.body))
	}
	return nil
}
// GetItems lists all RSS items; when withData is true the response also
// carries the feeds' article data.
func (c *client) GetItems(ctx context.Context, withData bool) (map[string]interface{}, error) {
	endpoint := fmt.Sprintf("%s/api/v2/rss/items?withData=%t", c.config.Address, withData)
	res, err := c.doRequest(ctx, &requestData{
		url:    endpoint,
		method: http.MethodGet,
	})
	if err != nil {
		return nil, err
	}
	if res.code != 200 {
		return nil, errors.New("get rss items failed: " + string(res.body))
	}
	items := make(map[string]interface{})
	err = json.Unmarshal(res.body, &items)
	return items, err
}
// MarkAsRead marks a whole feed — or a single article when opt.ArticleId is
// set — as read.
func (c *client) MarkAsRead(ctx context.Context, opt *RssMarkAsReadOption) error {
	form := url.Values{}
	if err := encoder.Encode(opt, form); err != nil {
		return err
	}
	endpoint := fmt.Sprintf("%s/api/v2/rss/markAsRead", c.config.Address)
	res, err := c.doRequest(ctx, &requestData{
		url:    endpoint,
		method: http.MethodPost,
		body:   strings.NewReader(form.Encode()),
	})
	if err != nil {
		return err
	}
	if res.code != 200 {
		return errors.New("mark as read rss item failed: " + string(res.body))
	}
	return nil
}
// RefreshItem triggers a refresh of the RSS folder or feed at itemPath.
func (c *client) RefreshItem(ctx context.Context, itemPath string) error {
	form := url.Values{}
	form.Add("itemPath", itemPath)
	endpoint := fmt.Sprintf("%s/api/v2/rss/refreshItem", c.config.Address)
	res, err := c.doRequest(ctx, &requestData{
		url:    endpoint,
		method: http.MethodPost,
		body:   strings.NewReader(form.Encode()),
	})
	if err != nil {
		return err
	}
	if res.code != 200 {
		return errors.New("refresh rss item failed: " + string(res.body))
	}
	return nil
}
// SetAutoDownloadingRule creates or replaces the auto-downloading rule named
// ruleName with the JSON-encoded definition ruleDef.
func (c *client) SetAutoDownloadingRule(ctx context.Context, ruleName string, ruleDef *RssAutoDownloadingRuleDef) error {
	defJSON, err := json.Marshal(ruleDef)
	if err != nil {
		return err
	}
	form := url.Values{}
	form.Add("ruleName", ruleName)
	form.Add("ruleDef", string(defJSON))
	endpoint := fmt.Sprintf("%s/api/v2/rss/setRule", c.config.Address)
	res, err := c.doRequest(ctx, &requestData{
		url:    endpoint,
		method: http.MethodPost,
		body:   strings.NewReader(form.Encode()),
	})
	if err != nil {
		return err
	}
	if res.code != 200 {
		return errors.New("set auto downloading rule failed: " + string(res.body))
	}
	return nil
}
// RenameAutoDownloadingRule renames the rule ruleName to newRuleName.
func (c *client) RenameAutoDownloadingRule(ctx context.Context, ruleName, newRuleName string) error {
	form := url.Values{}
	form.Add("ruleName", ruleName)
	form.Add("newRuleName", newRuleName)
	endpoint := fmt.Sprintf("%s/api/v2/rss/renameRule", c.config.Address)
	res, err := c.doRequest(ctx, &requestData{
		url:    endpoint,
		method: http.MethodPost,
		body:   strings.NewReader(form.Encode()),
	})
	if err != nil {
		return err
	}
	if res.code != 200 {
		return errors.New("rename auto downloading rule failed: " + string(res.body))
	}
	return nil
}
// RemoveAutoDownloadingRule deletes the rule named ruleName.
func (c *client) RemoveAutoDownloadingRule(ctx context.Context, ruleName string) error {
	form := url.Values{}
	form.Add("ruleName", ruleName)
	endpoint := fmt.Sprintf("%s/api/v2/rss/removeRule", c.config.Address)
	res, err := c.doRequest(ctx, &requestData{
		url:    endpoint,
		method: http.MethodPost,
		body:   strings.NewReader(form.Encode()),
	})
	if err != nil {
		return err
	}
	if res.code != 200 {
		return errors.New("remove auto downloading rule failed: " + string(res.body))
	}
	return nil
}
// GetAllAutoDownloadingRules returns every RSS auto-downloading rule keyed
// by rule name.
//
// Fix: this previously queried /api/v2/rss/matchingArticles (the endpoint
// used by GetAllArticlesMatchingRule); the rule listing lives at
// /api/v2/rss/rules.
func (c *client) GetAllAutoDownloadingRules(ctx context.Context) (map[string]*RssAutoDownloadingRuleDef, error) {
	var apiUrl = fmt.Sprintf("%s/api/v2/rss/rules", c.config.Address)
	result, err := c.doRequest(ctx, &requestData{
		url: apiUrl,
	})
	if err != nil {
		return nil, err
	}
	if result.code != 200 {
		return nil, errors.New("get rss rules failed: " + string(result.body))
	}
	var data = make(map[string]*RssAutoDownloadingRuleDef)
	err = json.Unmarshal(result.body, &data)
	return data, err
}
// GetAllArticlesMatchingRule returns, per feed, the article titles that
// match the rule named ruleName.
func (c *client) GetAllArticlesMatchingRule(ctx context.Context, ruleName string) (map[string][]string, error) {
	query := url.Values{}
	query.Add("ruleName", ruleName)
	endpoint := fmt.Sprintf("%s/api/v2/rss/matchingArticles?%s", c.config.Address, query.Encode())
	res, err := c.doRequest(ctx, &requestData{
		url: endpoint,
	})
	if err != nil {
		return nil, err
	}
	if res.code != 200 {
		return nil, errors.New("get rss rule match articles failed: " + string(res.body))
	}
	articles := make(map[string][]string)
	err = json.Unmarshal(res.body, &articles)
	return articles, err
}

64
pkg/qbittorrent/search.go Normal file
View file

@ -0,0 +1,64 @@
package qbittorrent
// Search is the facet covering the qBittorrent search-plugin endpoints.
// NOTE(review): every method is currently an unimplemented stub with no
// parameters or results; the signatures will need to change when the
// endpoints are actually wired up.
type Search interface {
	Start()
	Stop()
	Status()
	Results()
	Delete()
	Plugins()
	InstallPlugins()
	UninstallPlugins()
	EnableSearchPlugins()
	UpdateSearchPlugins()
}
// Start is an unimplemented stub; calling it panics.
func (c *client) Start() {
	//TODO implement me
	panic("implement me")
}
// Stop is an unimplemented stub; calling it panics.
func (c *client) Stop() {
	//TODO implement me
	panic("implement me")
}
// Status is an unimplemented stub; calling it panics.
func (c *client) Status() {
	//TODO implement me
	panic("implement me")
}
// Results is an unimplemented stub; calling it panics.
func (c *client) Results() {
	//TODO implement me
	panic("implement me")
}
// Delete is an unimplemented stub; calling it panics.
func (c *client) Delete() {
	//TODO implement me
	panic("implement me")
}
// Plugins is an unimplemented stub; calling it panics.
func (c *client) Plugins() {
	//TODO implement me
	panic("implement me")
}
// InstallPlugins is an unimplemented stub; calling it panics.
func (c *client) InstallPlugins() {
	//TODO implement me
	panic("implement me")
}
// UninstallPlugins is an unimplemented stub; calling it panics.
func (c *client) UninstallPlugins() {
	//TODO implement me
	panic("implement me")
}
// EnableSearchPlugins is an unimplemented stub; calling it panics.
func (c *client) EnableSearchPlugins() {
	//TODO implement me
	panic("implement me")
}
// UpdateSearchPlugins is an unimplemented stub; calling it panics.
func (c *client) UpdateSearchPlugins() {
	//TODO implement me
	panic("implement me")
}

121
pkg/qbittorrent/sync.go Normal file
View file

@ -0,0 +1,121 @@
package qbittorrent
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/url"
"strconv"
)
// Sync is the facet covering the qBittorrent incremental-sync endpoints.
type Sync interface {
	// MainData get sync main data, rid is Response ID. if not provided, will be assumed.
	// if the given is different from the one of last server reply, will be (see the server reply details for more info)
	MainData(ctx context.Context, rid int) (*SyncMainData, error)
	// TorrentPeersData get sync torrent peer data, hash is torrent hash, rid is response id
	TorrentPeersData(ctx context.Context, hash string, rid int) (*SyncTorrentPeers, error)
}
// SyncMainData is the payload of /sync/maindata: global server state plus
// per-torrent deltas keyed by torrent hash.
type SyncMainData struct {
	Rid int `json:"rid,omitempty"`
	FullUpdate bool `json:"full_update,omitempty"`
	ServerState ServerState `json:"server_state,omitempty"`
	Torrents map[string]SyncTorrentInfo `json:"torrents,omitempty"`
}
// ServerState carries the global transfer statistics of the server.
type ServerState struct {
	AllTimeDl int64 `json:"alltime_dl,omitempty"`
	AllTimeUl int64 `json:"alltime_ul,omitempty"`
	AverageTimeQueue int `json:"average_time_queue,omitempty"`
	DlInfoData int64 `json:"dl_info_data,omitempty"`
	DlInfoSpeed int `json:"dl_info_speed,omitempty"`
	QueuedIoJobs int `json:"queued_io_jobs,omitempty"`
	TotalBuffersSize int `json:"total_buffers_size,omitempty"`
	UpInfoData int64 `json:"up_info_data,omitempty"`
	UpInfoSpeed int `json:"up_info_speed,omitempty"`
	WriteCacheOverload string `json:"write_cache_overload,omitempty"`
}
// SyncTorrentInfo is the per-torrent delta within SyncMainData.
type SyncTorrentInfo struct {
	AmountLeft int64 `json:"amount_left,omitempty"`
	Completed int `json:"completed,omitempty"`
	DlSpeed int `json:"dlspeed,omitempty"`
	Downloaded int `json:"downloaded,omitempty"`
	DownloadedSession int `json:"downloaded_session,omitempty"`
	Eta int `json:"eta,omitempty"`
	Progress float64 `json:"progress,omitempty"`
	SeenComplete int `json:"seen_complete,omitempty"`
	TimeActive int `json:"time_active,omitempty"`
}
// SyncTorrentPeers is the payload of /sync/torrentPeers: the peer list for
// one torrent, keyed by "ip:port".
type SyncTorrentPeers struct {
	Rid int `json:"rid,omitempty"`
	FullUpdate bool `json:"full_update,omitempty"`
	ShowFlags bool `json:"show_flags,omitempty"`
	Peers map[string]SyncTorrentPeer `json:"peers,omitempty"`
}
// SyncTorrentPeer describes one connected peer of a torrent.
type SyncTorrentPeer struct {
	Client string `json:"client,omitempty"`
	Connection string `json:"connection,omitempty"`
	Country string `json:"country,omitempty"`
	CountryCode string `json:"country_code,omitempty"`
	DlSpeed int `json:"dl_speed,omitempty"`
	Downloaded int `json:"downloaded,omitempty"`
	Files string `json:"files,omitempty"`
	Flags string `json:"flags,omitempty"`
	FlagsDesc string `json:"flags_desc,omitempty"`
	IP string `json:"ip,omitempty"`
	PeerIDClient string `json:"peer_id_client,omitempty"`
	Port int `json:"port,omitempty"`
	Progress float64 `json:"progress,omitempty"`
	Relevance float64 `json:"relevance,omitempty"`
	UpSpeed int `json:"up_speed,omitempty"`
	Uploaded int `json:"uploaded,omitempty"`
}
// MainData fetches the incremental sync payload. rid is the response ID of
// the previous reply (0 requests a full update).
func (c *client) MainData(ctx context.Context, rid int) (*SyncMainData, error) {
	endpoint := fmt.Sprintf("%s/api/v2/sync/maindata?rid=%d", c.config.Address, rid)
	res, err := c.doRequest(ctx, &requestData{
		url: endpoint,
	})
	if err != nil {
		return nil, err
	}
	if res.code != 200 {
		return nil, errors.New("get main data failed: " + string(res.body))
	}
	data := new(SyncMainData)
	if err := json.Unmarshal(res.body, data); err != nil {
		return nil, err
	}
	return data, nil
}
// TorrentPeersData fetches the incremental peer list for the torrent
// identified by hash. rid is the response ID of the previous reply.
func (c *client) TorrentPeersData(ctx context.Context, hash string, rid int) (*SyncTorrentPeers, error) {
	query := url.Values{}
	query.Add("hash", hash)
	query.Add("rid", strconv.Itoa(rid))
	endpoint := fmt.Sprintf("%s/api/v2/sync/torrentPeers?%s", c.config.Address, query.Encode())
	res, err := c.doRequest(ctx, &requestData{
		url: endpoint,
	})
	if err != nil {
		return nil, err
	}
	if res.code != 200 {
		return nil, errors.New("get torrent peers data failed: " + string(res.body))
	}
	peers := new(SyncTorrentPeers)
	if err := json.Unmarshal(res.body, peers); err != nil {
		return nil, err
	}
	return peers, nil
}

View file

@ -0,0 +1,37 @@
package qbittorrent
import (
"context"
"encoding/json"
"testing"
"time"
)
// TestClient_MainData fetches maindata twice, one second apart, to observe
// the sync payload changing over time.
func TestClient_MainData(t *testing.T) {
	ctx := context.Background()
	first, err := c.Sync().MainData(ctx, 0)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("sync main data: %+v", first)
	time.Sleep(time.Second)
	second, err := c.Sync().MainData(ctx, 0)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("sync main data: %+v", second)
}
// TestClient_TorrentPeersData fetches the peer list for a known torrent hash
// and logs it as JSON.
func TestClient_TorrentPeersData(t *testing.T) {
	ctx := context.Background()
	peers, err := c.Sync().TorrentPeersData(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc", 0)
	if err != nil {
		t.Fatal(err)
	}
	encoded, err := json.Marshal(peers)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(string(encoded))
}

1362
pkg/qbittorrent/torrent.go Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,299 @@
package qbittorrent
import (
"context"
"encoding/json"
"os"
"testing"
)
// TestClient_GetTorrents lists torrents filtered by category and tag.
// NOTE(review): like every test in this file it runs against the live
// instance configured in init() and depends on pre-existing torrents/hashes.
func TestClient_GetTorrents(t *testing.T) {
	ctx := context.Background()
	torrents, err := c.Torrent().GetTorrents(ctx, &TorrentOption{
		Filter: "",
		Category: "movies",
		Tag: "hdtime",
		Sort: "",
		Reverse: false,
		Limit: 0,
		Offset: 0,
		Hashes: nil,
	})
	if err != nil {
		t.Fatal(err)
	}
	bytes, err := json.Marshal(torrents)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(string(bytes))
}
// TestClient_GetProperties fetches the generic properties of one torrent.
func TestClient_GetProperties(t *testing.T) {
	ctx := context.Background()
	properties, err := c.Torrent().GetProperties(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
	if err != nil {
		t.Fatal(err)
	}
	bytes, err := json.Marshal(properties)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(string(bytes))
}
// TestClient_GetTrackers fetches a torrent's tracker list.
func TestClient_GetTrackers(t *testing.T) {
	ctx := context.Background()
	trackers, err := c.Torrent().GetTrackers(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
	if err != nil {
		t.Fatal(err)
	}
	bytes, err := json.Marshal(trackers)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(string(bytes))
}
// TestClient_GetWebSeeds fetches a torrent's web seeds.
func TestClient_GetWebSeeds(t *testing.T) {
	ctx := context.Background()
	webSeeds, err := c.Torrent().GetWebSeeds(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
	if err != nil {
		t.Fatal(err)
	}
	bytes, err := json.Marshal(webSeeds)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(string(bytes))
}
// TestClient_GetContents fetches a torrent's file list.
func TestClient_GetContents(t *testing.T) {
	ctx := context.Background()
	contents, err := c.Torrent().GetContents(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
	if err != nil {
		t.Fatal(err)
	}
	bytes, err := json.Marshal(contents)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(string(bytes))
}
// TestClient_GetPiecesStates fetches per-piece download states.
func TestClient_GetPiecesStates(t *testing.T) {
	ctx := context.Background()
	states, err := c.Torrent().GetPiecesStates(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
	if err != nil {
		t.Fatal(err)
	}
	t.Log(states)
}
// TestClient_GetPiecesHashes fetches per-piece hashes.
func TestClient_GetPiecesHashes(t *testing.T) {
	ctx := context.Background()
	hashes, err := c.Torrent().GetPiecesHashes(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc")
	if err != nil {
		t.Fatal(err)
	}
	t.Log(hashes)
}
// TestClient_PauseTorrents pauses one torrent by hash.
func TestClient_PauseTorrents(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().PauseTorrents(ctx, []string{"202382999be6a4fab395cd9c2c9d294177587904"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent paused")
}
// TestClient_ResumeTorrents resumes one torrent by hash.
func TestClient_ResumeTorrents(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().ResumeTorrents(ctx, []string{"fd3b4bf1937c59a8fd1a240cddc07172e0b979a2"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent resumed")
}
// TestClient_DeleteTorrents deletes one torrent including its files.
func TestClient_DeleteTorrents(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().DeleteTorrents(ctx, []string{"202382999be6a4fab395cd9c2c9d294177587904"}, true)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent deleted")
}
// TestClient_RecheckTorrents forces a recheck of one torrent.
func TestClient_RecheckTorrents(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().RecheckTorrents(ctx, []string{"fd3b4bf1937c59a8fd1a240cddc07172e0b979a2"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent rechecked")
}
// TestClient_ReAnnounceTorrents forces a tracker re-announce of one torrent.
func TestClient_ReAnnounceTorrents(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().ReAnnounceTorrents(ctx, []string{"fd3b4bf1937c59a8fd1a240cddc07172e0b979a2"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent reannounceed")
}
// TestClient_AddNewTorrent uploads a .torrent file read from a local,
// machine-specific path with category/tag options.
// NOTE(review): the Windows path only exists on the author's machine; use a
// testdata fixture instead.
func TestClient_AddNewTorrent(t *testing.T) {
	ctx := context.Background()
	fileContent, err := os.ReadFile("C:\\Users\\xuthu\\Downloads\\bbbbb.torrent")
	if err != nil {
		t.Fatal(err)
	}
	err = c.Torrent().AddNewTorrent(ctx, &TorrentAddOption{
		Torrents: []*TorrentAddFileMetadata{
			{
				//Filename: "ttttt.torrent",
				Data: fileContent,
			},
		},
		Category: "movies",
		Tags: []string{"d", "e", "f"},
		SkipChecking: false,
		Paused: false,
		RootFolder: false,
		Rename: "",
		UpLimit: 0,
		DlLimit: 0,
		RatioLimit: 0,
		SeedingTimeLimit: 0,
		AutoTMM: false,
		SequentialDownload: "",
		FirstLastPiecePrio: "",
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent added")
}
// TestClient_AddTrackers adds a tracker URL to one torrent.
func TestClient_AddTrackers(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().AddTrackers(ctx, "ca4523a3db9c6c3a13d7d7f3a545f97b75083032", []string{"https://hddtime.org/announce"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent trackers added")
}
// TestClient_EditTrackers replaces one tracker URL with another.
func TestClient_EditTrackers(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().EditTrackers(ctx, "ca4523a3db9c6c3a13d7d7f3a545f97b75083032", "https://hddtime.org/announce", "https://hdctime.org/announce")
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent trackers edited")
}
// TestClient_RemoveTrackers removes a tracker URL from one torrent.
func TestClient_RemoveTrackers(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().RemoveTrackers(ctx, "ca4523a3db9c6c3a13d7d7f3a545f97b75083032", []string{"https://hdctime.org/announce"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent trackers removed")
}
// TestClient_AddPeers is a placeholder — AddPeers is not exercised yet.
func TestClient_AddPeers(t *testing.T) {
	// todo no test
	//c.Torrent().AddPeers([]string{"ca4523a3db9c6c3a13d7d7f3a545f97b75083032"}, []string{"10.0.0.1:38080"})
}
// TestClient_IncreasePriority bumps one torrent up in the queue.
func TestClient_IncreasePriority(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().IncreasePriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent priority increased")
}
// TestClient_DecreasePriority moves one torrent down in the queue.
func TestClient_DecreasePriority(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().DecreasePriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent priority decreased")
}
// TestClient_MaxPriority moves one torrent to the top of the queue.
func TestClient_MaxPriority(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().MaxPriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent priority maxed")
}
// TestClient_MinPriority moves one torrent to the bottom of the queue.
func TestClient_MinPriority(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().MinPriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent priority mined")
}
// TestClient_SetFilePriority is a placeholder — SetFilePriority is not
// exercised yet.
func TestClient_SetFilePriority(t *testing.T) {
	// todo no test
}
// TestClient_GetDownloadLimit reads one torrent's download limit.
func TestClient_GetDownloadLimit(t *testing.T) {
	ctx := context.Background()
	downloadLimit, err := c.Torrent().GetDownloadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent download limit", downloadLimit)
}
// TestClient_SetDownloadLimit clears one torrent's download limit (0).
func TestClient_SetDownloadLimit(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().SetDownloadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}, 0)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent download limit setted")
}
// TestClient_SetShareLimit resets share limits to the global defaults (-2).
func TestClient_SetShareLimit(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().SetShareLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}, -2, -2, -2)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent share limit setted")
}
// TestClient_GetUploadLimit reads one torrent's upload limit.
func TestClient_GetUploadLimit(t *testing.T) {
	ctx := context.Background()
	limit, err := c.Torrent().GetUploadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent upload limit", limit)
}
// TestClient_SetUploadLimit clears one torrent's upload limit (0).
func TestClient_SetUploadLimit(t *testing.T) {
	ctx := context.Background()
	err := c.Torrent().SetUploadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}, 0)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("torrent upload limit setted")
}
// TestClient_SetLocation is a placeholder — SetLocation is not exercised yet.
func TestClient_SetLocation(t *testing.T) {
	// todo test
}

185
pkg/qbittorrent/transfer.go Normal file
View file

@ -0,0 +1,185 @@
package qbittorrent
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
)
// TransferStatusBar mirrors the JSON returned by the qBittorrent WebUI
// /api/v2/transfer/info endpoint — the figures usually shown in the
// qBittorrent status bar (connection state, DHT nodes, global up/down
// data totals, speeds and rate limits).
//
// NOTE(review): DlInfoData is int64 but UpInfoData is int — presumably
// both are byte counters of the same magnitude; confirm against the API
// and consider widening UpInfoData to int64 to avoid overflow on 32-bit
// builds.
type TransferStatusBar struct {
	ConnectionStatus  string `json:"connection_status,omitempty"`
	DhtNodes          int    `json:"dht_nodes,omitempty"`
	DlInfoData        int64  `json:"dl_info_data,omitempty"`
	DlInfoSpeed       int    `json:"dl_info_speed,omitempty"`
	DlRateLimit       int    `json:"dl_rate_limit,omitempty"`
	UpInfoData        int    `json:"up_info_data,omitempty"`
	UpInfoSpeed       int    `json:"up_info_speed,omitempty"`
	UpRateLimit       int    `json:"up_rate_limit,omitempty"`
	Queueing          bool   `json:"queueing,omitempty"`
	UseAltSpeedLimits bool   `json:"use_alt_speed_limits,omitempty"`
	RefreshInterval   int    `json:"refresh_interval,omitempty"`
}
// Transfer groups the qBittorrent WebUI endpoints under
// /api/v2/transfer/*: global status, peer bans, and global speed limits.
type Transfer interface {
	// GlobalStatusBar returns the data usually seen in the qBittorrent status bar.
	GlobalStatusBar(ctx context.Context) (*TransferStatusBar, error)
	// BanPeers bans the given peers; each peer is a host:port pair.
	// Multiple peers are joined with a pipe by the implementation.
	BanPeers(ctx context.Context, peers []string) error
	// GetSpeedLimitsMode returns the alternative speed limits state as the
	// raw response body (presumably "1" enabled / "0" disabled — confirm
	// against the qBittorrent WebUI API docs).
	GetSpeedLimitsMode(ctx context.Context) (string, error)
	// ToggleSpeedLimitsMode toggles alternative speed limits on or off.
	ToggleSpeedLimitsMode(ctx context.Context) error
	// GetGlobalUploadLimit returns the current global upload speed
	// limit in bytes/second; the value is zero if no limit is applied.
	GetGlobalUploadLimit(ctx context.Context) (string, error)
	// SetGlobalUploadLimit sets the global upload limit in bytes/second.
	SetGlobalUploadLimit(ctx context.Context, limit int) error
	// GetGlobalDownloadLimit returns the current global download speed
	// limit in bytes/second; the value is zero if no limit is applied.
	GetGlobalDownloadLimit(ctx context.Context) (string, error)
	// SetGlobalDownloadLimit sets the global download limit in bytes/second.
	SetGlobalDownloadLimit(ctx context.Context, limit int) error
}
// GlobalStatusBar fetches the global transfer info shown in the
// qBittorrent status bar via /api/v2/transfer/info.
func (c *client) GlobalStatusBar(ctx context.Context) (*TransferStatusBar, error) {
	resp, err := c.doRequest(ctx, &requestData{
		url: fmt.Sprintf("%s/api/v2/transfer/info", c.config.Address),
	})
	if err != nil {
		return nil, err
	}
	if resp.code != http.StatusOK {
		return nil, errors.New("get global transfer status bar failed: " + string(resp.body))
	}
	status := new(TransferStatusBar)
	if err := json.Unmarshal(resp.body, status); err != nil {
		return nil, err
	}
	return status, nil
}
// BanPeers bans the given peers (each a host:port pair) via a POST to
// /api/v2/transfer/banPeers; multiple peers are pipe-joined as the API expects.
func (c *client) BanPeers(ctx context.Context, peers []string) error {
	form := url.Values{}
	form.Set("peers", strings.Join(peers, "|"))
	resp, err := c.doRequest(ctx, &requestData{
		url:    fmt.Sprintf("%s/api/v2/transfer/banPeers", c.config.Address),
		method: http.MethodPost,
		body:   strings.NewReader(form.Encode()),
	})
	if err != nil {
		return err
	}
	if resp.code != http.StatusOK {
		return errors.New("ban peers failed: " + string(resp.body))
	}
	return nil
}
// GetSpeedLimitsMode returns the alternative speed limits state as the raw
// response body of /api/v2/transfer/speedLimitsMode (presumably "1"
// enabled / "0" disabled — confirm against the qBittorrent WebUI API docs).
func (c *client) GetSpeedLimitsMode(ctx context.Context) (string, error) {
	apiUrl := fmt.Sprintf("%s/api/v2/transfer/speedLimitsMode", c.config.Address)
	result, err := c.doRequest(ctx, &requestData{
		url: apiUrl,
	})
	if err != nil {
		return "", err
	}
	if result.code != 200 {
		// Fixed: the message previously said "ban peers failed" — a
		// copy-paste from BanPeers that made failures here misleading.
		return "", errors.New("get speed limits mode failed: " + string(result.body))
	}
	return string(result.body), nil
}
// ToggleSpeedLimitsMode toggles alternative speed limits on or off via a
// POST to /api/v2/transfer/toggleSpeedLimitsMode.
func (c *client) ToggleSpeedLimitsMode(ctx context.Context) error {
	apiUrl := fmt.Sprintf("%s/api/v2/transfer/toggleSpeedLimitsMode", c.config.Address)
	result, err := c.doRequest(ctx, &requestData{
		url:    apiUrl,
		method: http.MethodPost,
	})
	if err != nil {
		return err
	}
	if result.code != 200 {
		// Fixed: the message previously said "ban peers failed" — a
		// copy-paste from BanPeers that made failures here misleading.
		return errors.New("toggle speed limits mode failed: " + string(result.body))
	}
	return nil
}
// GetGlobalUploadLimit returns the current global upload speed limit in
// bytes/second as the raw response body; "0" means no limit is applied.
func (c *client) GetGlobalUploadLimit(ctx context.Context) (string, error) {
	resp, err := c.doRequest(ctx, &requestData{
		url: fmt.Sprintf("%s/api/v2/transfer/uploadLimit", c.config.Address),
	})
	if err != nil {
		return "", err
	}
	if resp.code != http.StatusOK {
		return "", errors.New("get global upload limit failed: " + string(resp.body))
	}
	return string(resp.body), nil
}
// SetGlobalUploadLimit sets the global upload limit in bytes/second.
//
// NOTE(review): the limit is passed as a URL query parameter with the
// default request method; the qBittorrent WebUI API docs describe this
// endpoint as a POST form — confirm GET-style query is accepted by the
// targeted server version.
func (c *client) SetGlobalUploadLimit(ctx context.Context, limit int) error {
	resp, err := c.doRequest(ctx, &requestData{
		url: fmt.Sprintf("%s/api/v2/transfer/setUploadLimit?limit=%d", c.config.Address, limit),
	})
	if err != nil {
		return err
	}
	if resp.code != http.StatusOK {
		return errors.New("set global upload limit failed: " + string(resp.body))
	}
	return nil
}
// GetGlobalDownloadLimit returns the current global download speed limit
// in bytes/second as the raw response body; "0" means no limit is applied.
func (c *client) GetGlobalDownloadLimit(ctx context.Context) (string, error) {
	resp, err := c.doRequest(ctx, &requestData{
		url: fmt.Sprintf("%s/api/v2/transfer/downloadLimit", c.config.Address),
	})
	if err != nil {
		return "", err
	}
	if resp.code != http.StatusOK {
		return "", errors.New("get global download limit failed: " + string(resp.body))
	}
	return string(resp.body), nil
}
// SetGlobalDownloadLimit sets the global download limit in bytes/second.
//
// NOTE(review): like SetGlobalUploadLimit, the limit rides in the URL
// query with the default request method; the qBittorrent WebUI API docs
// describe a POST form — confirm against the targeted server version.
func (c *client) SetGlobalDownloadLimit(ctx context.Context, limit int) error {
	resp, err := c.doRequest(ctx, &requestData{
		url: fmt.Sprintf("%s/api/v2/transfer/setDownloadLimit?limit=%d", c.config.Address, limit),
	})
	if err != nil {
		return err
	}
	if resp.code != http.StatusOK {
		return errors.New("set global download limit failed: " + string(resp.body))
	}
	return nil
}

View file

@ -6,6 +6,16 @@ var defaultConfig = Settings{
Port: 4444, Port: 4444,
IP: "0.0.0.0", IP: "0.0.0.0",
}, },
Sources: Sources{
QBittorrent: QBittorrent{
DataFolder: "./qbittorrent/data",
MetadataFolder: "./qbittorrent/metadata",
},
TorrentClient: TorrentClient{
DataFolder: "./torrent/data",
MetadataFolder: "./torrent/metadata",
},
},
Mounts: Mounts{ Mounts: Mounts{
HttpFs: HttpFs{ HttpFs: HttpFs{
Enabled: true, Enabled: true,
@ -27,17 +37,6 @@ var defaultConfig = Settings{
}, },
}, },
TorrentClient: TorrentClient{
DataFolder: "./torrent/data",
MetadataFolder: "./torrent/metadata",
DHTNodes: []string{},
// GlobalCacheSize: 2048,
// AddTimeout: 60,
// ReadTimeout: 120,
},
Log: Log{ Log: Log{
Path: "/tmp/tstor", Path: "/tmp/tstor",
MaxBackups: 2, MaxBackups: 2,

View file

@ -3,7 +3,9 @@ package config
// Config is the main config object // Config is the main config object
type Settings struct { type Settings struct {
WebUi WebUi `koanf:"webUi"` WebUi WebUi `koanf:"webUi"`
TorrentClient TorrentClient `koanf:"torrent"`
Sources Sources `koanf:"sources"`
Mounts Mounts `koanf:"mounts"` Mounts Mounts `koanf:"mounts"`
Log Log `koanf:"log"` Log Log `koanf:"log"`
@ -12,6 +14,11 @@ type Settings struct {
OtelHttp string `koanf:"otel_http"` OtelHttp string `koanf:"otel_http"`
} }
type Sources struct {
TorrentClient TorrentClient `koanf:"torrent"`
QBittorrent QBittorrent `koanf:"qbittorrent"`
}
type WebUi struct { type WebUi struct {
Port int `koanf:"port"` Port int `koanf:"port"`
IP string `koanf:"ip"` IP string `koanf:"ip"`
@ -25,6 +32,11 @@ type Log struct {
Path string `koanf:"path"` Path string `koanf:"path"`
} }
type QBittorrent struct {
DataFolder string `koanf:"data_folder,omitempty"`
MetadataFolder string `koanf:"metadata_folder,omitempty"`
}
type TorrentClient struct { type TorrentClient struct {
// ReadTimeout int `koanf:"read_timeout,omitempty"` // ReadTimeout int `koanf:"read_timeout,omitempty"`
// AddTimeout int `koanf:"add_timeout,omitempty"` // AddTimeout int `koanf:"add_timeout,omitempty"`

View file

@ -7,13 +7,13 @@ import (
nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs" nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers" nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
"git.kmsign.ru/royalcat/tstor/src/config" "git.kmsign.ru/royalcat/tstor/src/config"
"git.kmsign.ru/royalcat/tstor/src/log" "git.kmsign.ru/royalcat/tstor/src/logwrap"
"git.kmsign.ru/royalcat/tstor/src/vfs" "git.kmsign.ru/royalcat/tstor/src/vfs"
) )
func NewNFSv3Handler(fs vfs.Filesystem, config config.NFS) (nfs.Handler, error) { func NewNFSv3Handler(fs vfs.Filesystem, config config.NFS) (nfs.Handler, error) {
nfslog := slog.With("component", "nfs") nfslog := slog.With("component", "nfs")
nfs.SetLogger(log.NewNFSLog(nfslog)) nfs.SetLogger(logwrap.NewNFSLog(nfslog))
nfs.Log.SetLevel(nfs.InfoLevel) nfs.Log.SetLevel(nfs.InfoLevel)
bfs := &fsWrapper{fs: fs, log: nfslog, timeout: time.Minute} bfs := &fsWrapper{fs: fs, log: nfslog, timeout: time.Minute}

View file

@ -10,7 +10,7 @@ import (
"git.kmsign.ru/royalcat/tstor/pkg/go-nfs" "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
"git.kmsign.ru/royalcat/tstor/src/config" "git.kmsign.ru/royalcat/tstor/src/config"
"git.kmsign.ru/royalcat/tstor/src/log" "git.kmsign.ru/royalcat/tstor/src/logwrap"
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric"
@ -51,7 +51,7 @@ var kvhandlerMeter = otel.Meter("git.kmsign.ru/royalcat/tstor/src/export/nfs.kvh
func NewKvHandler(h nfs.Handler, fs nfs.Filesystem, config config.NFS) (nfs.Handler, error) { func NewKvHandler(h nfs.Handler, fs nfs.Filesystem, config config.NFS) (nfs.Handler, error) {
opts := kvbadger.DefaultOptions[handle](path.Join(config.CachePath, "handlers")) opts := kvbadger.DefaultOptions[handle](path.Join(config.CachePath, "handlers"))
opts.Codec = kv.CodecBinary[handle, *handle]{} opts.Codec = kv.CodecBinary[handle, *handle]{}
opts.BadgerOptions.Logger = log.BadgerLogger("nfs", "kvhandler") opts.BadgerOptions.Logger = logwrap.BadgerLogger("nfs", "kvhandler")
activeHandles, err := kvbadger.NewBagerKVBinaryKey[uuid.UUID, handle](opts) activeHandles, err := kvbadger.NewBagerKVBinaryKey[uuid.UUID, handle](opts)
if err != nil { if err != nil {

View file

@ -1,4 +1,4 @@
package log package logwrap
import ( import (
"context" "context"

View file

@ -1,4 +1,4 @@
package log package logwrap
const FileName = "tstor.log" const FileName = "tstor.log"

View file

@ -1,4 +1,4 @@
package log package logwrap
import ( import (
"fmt" "fmt"

View file

@ -1,4 +1,4 @@
package log package logwrap
import ( import (
"context" "context"

48
src/logwrap/writer.go Normal file
View file

@ -0,0 +1,48 @@
package logwrap
import (
"bufio"
"bytes"
"context"
"sync"
"log/slog"
)
type SlogWriter struct {
ctx context.Context
level slog.Level
log *slog.Logger
mu sync.Mutex
buffer *bytes.Buffer
scanner *bufio.Scanner
}
func NewSlogWriter(ctx context.Context, level slog.Level, log *slog.Logger) *SlogWriter {
buf := &bytes.Buffer{}
return &SlogWriter{
ctx: ctx,
level: level,
log: log,
buffer: buf,
scanner: bufio.NewScanner(buf),
}
}
func (sw *SlogWriter) Write(p []byte) (n int, err error) {
sw.mu.Lock()
defer sw.mu.Unlock()
n, err = sw.buffer.Write(p)
if err != nil {
return n, err
}
for sw.scanner.Scan() {
sw.log.Log(sw.ctx, sw.level, sw.scanner.Text())
}
return n, err
}

View file

@ -6,35 +6,54 @@ import (
"slices" "slices"
"time" "time"
"github.com/xuthus5/qbittorrent-client-go/qbittorrent" "git.kmsign.ru/royalcat/tstor/pkg/qbittorrent"
) )
type client struct { type cacheClient struct {
qb qbittorrent.Client qb qbittorrent.Client
} }
func wrapClient(qb qbittorrent.Client) *client { func wrapClient(qb qbittorrent.Client) *cacheClient {
return &client{qb: qb} return &cacheClient{qb: qb}
} }
func (f *client) getFileContent(ctx context.Context, hash string, contextIndex int) (*qbittorrent.TorrentContent, error) { var errNotFound = fmt.Errorf("not found")
contents, err := f.qb.Torrent().GetContents(hash)
func (f *cacheClient) getProperties(ctx context.Context, hash string) (*qbittorrent.TorrentProperties, error) {
info, err := f.qb.Torrent().GetProperties(ctx, hash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
contentIndex := slices.IndexFunc(contents, func(c *qbittorrent.TorrentContent) bool { return info, nil
return c.Index == contextIndex }
func (f *cacheClient) listContent(ctx context.Context, hash string) ([]*qbittorrent.TorrentContent, error) {
contents, err := f.qb.Torrent().GetContents(ctx, hash)
if err != nil {
return nil, err
}
return contents, nil
}
func (f *cacheClient) getContent(ctx context.Context, hash string, contentIndex int) (*qbittorrent.TorrentContent, error) {
contents, err := f.qb.Torrent().GetContents(ctx, hash, contentIndex)
if err != nil {
return nil, err
}
contentI := slices.IndexFunc(contents, func(c *qbittorrent.TorrentContent) bool {
return c.Index == contentIndex
}) })
if contentIndex == -1 { if contentI == -1 {
return nil, fmt.Errorf("content not found") return nil, fmt.Errorf("content not found")
} }
return contents[contentIndex], nil return contents[contentI], nil
} }
func (f *client) isPieceComplete(ctx context.Context, hash string, pieceIndex int) (bool, error) { func (f *cacheClient) isPieceComplete(ctx context.Context, hash string, pieceIndex int) (bool, error) {
completion, err := f.qb.Torrent().GetPiecesStates(hash) completion, err := f.qb.Torrent().GetPiecesStates(ctx, hash)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -46,7 +65,7 @@ func (f *client) isPieceComplete(ctx context.Context, hash string, pieceIndex in
return false, nil return false, nil
} }
func (f *client) waitPieceToComplete(ctx context.Context, hash string, pieceIndex int) error { func (f *cacheClient) waitPieceToComplete(ctx context.Context, hash string, pieceIndex int) error {
const checkingInterval = 1 * time.Second const checkingInterval = 1 * time.Second
ok, err := f.isPieceComplete(ctx, hash, pieceIndex) ok, err := f.isPieceComplete(ctx, hash, pieceIndex)

View file

@ -1,28 +1,107 @@
package qbittorrent package qbittorrent
import ( import (
"bytes"
"context" "context"
"errors"
"fmt"
"io"
"log/slog"
"os"
"path" "path"
"path/filepath"
"time"
"git.kmsign.ru/royalcat/tstor/pkg/qbittorrent"
"git.kmsign.ru/royalcat/tstor/pkg/rlog"
"git.kmsign.ru/royalcat/tstor/src/config"
"git.kmsign.ru/royalcat/tstor/src/logwrap"
"git.kmsign.ru/royalcat/tstor/src/vfs" "git.kmsign.ru/royalcat/tstor/src/vfs"
"github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/metainfo"
"github.com/anacrolix/torrent/types/infohash" "github.com/anacrolix/torrent/types/infohash"
infohash_v2 "github.com/anacrolix/torrent/types/infohash-v2"
"github.com/royalcat/ctxio" "github.com/royalcat/ctxio"
"github.com/xuthus5/qbittorrent-client-go/qbittorrent"
) )
type Daemon struct { type Daemon struct {
proc *os.Process
qb qbittorrent.Client qb qbittorrent.Client
client *client client *cacheClient
dataDir string dataDir string
log *rlog.Logger
} }
func NewDaemon(dir string) (*Daemon, error) { const defaultConf = `
[LegalNotice]
Accepted=true
dataDir := dir + "/data" [Preferences]
qb, err := qbittorrent.NewClient(&qbittorrent.Config{ WebUI\LocalHostAuth=false
Address: "localhost:8080", WebUI\Password_PBKDF2="@ByteArray(qef5I4wZBkDG+PP6/5mQwA==:LoTmorQM/QM5RHI4+dOiu6xfAz9xak6fhR4ZGpRtJF3JNCGG081Yrtva4G71kXz//ODUuWQKTLlrZPuIDvzqUQ==)"
`
func NewDaemon(conf config.QBittorrent) (*Daemon, error) {
ctx := context.Background()
log := rlog.Component("qbittorrent")
binPath := conf.MetadataFolder + "/qbittorrent-nox"
err := downloadLatestQbitRelease(ctx, binPath)
if err != nil {
return nil, err
}
daemonLog := log.WithComponent("process")
outLog := logwrap.NewSlogWriter(ctx, slog.LevelInfo, daemonLog.Slog())
errLog := logwrap.NewSlogWriter(ctx, slog.LevelError, daemonLog.Slog())
_, err = os.Stat(conf.MetadataFolder + "/profile/qBittorrent/config/qBittorrent.conf")
if errors.Is(err, os.ErrNotExist) {
err = os.MkdirAll(conf.MetadataFolder+"/profile/qBittorrent/config", 0744)
if err != nil {
return nil, err
}
err = os.WriteFile(conf.MetadataFolder+"/profile/qBittorrent/config/qBittorrent.conf", []byte(defaultConf), 0644)
if err != nil {
return nil, err
}
}
err = os.MkdirAll(conf.DataFolder, 0744)
if err != nil {
return nil, err
}
const port = 25436
proc, err := runQBittorrent(binPath, conf.MetadataFolder+"/profile", port, outLog, errLog)
if err != nil {
return nil, err
}
time.Sleep(time.Second)
qb, err := qbittorrent.NewClient(ctx, &qbittorrent.Config{
Address: fmt.Sprintf("http://localhost:%d", port),
})
if err != nil {
return nil, err
}
for { // wait for qbittorrent to start
_, err = qb.Application().Version(ctx)
if err == nil {
break
}
log.Warn(ctx, "waiting for qbittorrent to start", rlog.Error(err))
time.Sleep(time.Second)
}
dataDir, err := filepath.Abs(conf.DataFolder)
if err != nil {
return nil, err
}
err = qb.Application().SetPreferences(ctx, &qbittorrent.Preferences{
SavePath: dataDir,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@ -30,37 +109,20 @@ func NewDaemon(dir string) (*Daemon, error) {
return &Daemon{ return &Daemon{
qb: qb, qb: qb,
dataDir: dataDir, proc: proc,
dataDir: conf.DataFolder,
client: wrapClient(qb), client: wrapClient(qb),
log: rlog.Component("qbittorrent"),
}, nil }, nil
} }
func (fs *Daemon) torrentPath(ih infohash.T) string { func (d *Daemon) Close(ctx context.Context) error {
return path.Join(fs.dataDir, ih.HexString()) err := d.proc.Signal(os.Interrupt)
}
func (fs *Daemon) addTorrent(ctx context.Context, f vfs.File) error {
file, err := ctxio.ReadAll(ctx, f)
if err != nil { if err != nil {
return err return err
} }
mi, err := metainfo.Load(bytes.NewBuffer(file)) _, err = d.proc.Wait()
if err != nil {
return err
}
ih := mi.HashInfoBytes()
err = fs.qb.Torrent().AddNewTorrent(&qbittorrent.TorrentAddOption{
Torrents: []*qbittorrent.TorrentAddFileMetadata{
{
Data: file,
},
},
SavePath: fs.torrentPath(ih),
// SequentialDownload: "true",
// FirstLastPiecePrio: "true",
})
if err != nil { if err != nil {
return err return err
} }
@ -68,27 +130,107 @@ func (fs *Daemon) addTorrent(ctx context.Context, f vfs.File) error {
return nil return nil
} }
func (fs *Daemon) TorrentFS(ctx context.Context, file vfs.File) (*FS, error) { func (d *Daemon) torrentPath(ih infohash.T) (string, error) {
return filepath.Abs(path.Join(d.dataDir, ih.HexString()))
}
func (fs *Daemon) TorrentFS(ctx context.Context, file vfs.File) (vfs.Filesystem, error) {
log := fs.log.With(slog.String("file", file.Name()))
ih, err := readInfoHash(ctx, file) ih, err := readInfoHash(ctx, file)
if err != nil { if err != nil {
return nil, err return nil, err
} }
log = log.With(slog.String("infohash", ih.HexString()))
existing, err := fs.qb.Torrent().GetTorrents(&qbittorrent.TorrentOption{ torrentPath, err := fs.torrentPath(ih)
if err != nil {
return nil, fmt.Errorf("error getting torrent path: %w", err)
}
log = log.With(slog.String("torrentPath", torrentPath))
log.Debug(ctx, "creating fs for torrent")
err = fs.syncTorrentState(ctx, file, ih, torrentPath)
if err != nil {
return nil, fmt.Errorf("error syncing torrent state: %w", err)
}
return newTorrentFS(ctx, fs.client, file.Name(), ih.HexString(), torrentPath)
}
func (d *Daemon) syncTorrentState(ctx context.Context, file vfs.File, ih metainfo.Hash, torrentPath string) error {
log := d.log.With(slog.String("file", file.Name()), slog.String("infohash", ih.HexString()))
existing, err := d.qb.Torrent().GetTorrents(ctx, &qbittorrent.TorrentOption{
Hashes: []string{ih.HexString()}, Hashes: []string{ih.HexString()},
}) })
if err != nil { if err != nil {
return nil, err return fmt.Errorf("error to check torrent existence: %w", err)
} }
log = log.With(slog.String("torrentPath", torrentPath))
if len(existing) == 0 { if len(existing) == 0 {
err := fs.addTorrent(ctx, file) _, err := file.Seek(0, io.SeekStart)
if err != nil { if err != nil {
return nil, err return err
}
data, err := ctxio.ReadAll(ctx, file)
if err != nil {
return err
}
err = d.qb.Torrent().AddNewTorrent(ctx, &qbittorrent.TorrentAddOption{
Torrents: []*qbittorrent.TorrentAddFileMetadata{
{
Data: data,
},
},
SavePath: torrentPath,
// SequentialDownload: "true",
FirstLastPiecePrio: "true",
})
if err != nil {
return err
}
for {
_, err := d.qb.Torrent().GetProperties(ctx, ih.HexString())
if err == nil {
break
}
log.Error(ctx, "waiting for torrent to be added", rlog.Error(err))
time.Sleep(time.Millisecond * 15)
}
log.Info(ctx, "added torrent", slog.String("infohash", ih.HexString()))
if err != nil {
d.log.Error(ctx, "error adding torrent", rlog.Error(err))
return err
}
return nil
} else if len(existing) == 1 {
// info := existing[0]
props, err := d.qb.Torrent().GetProperties(ctx, ih.HexString())
if err != nil {
return err
}
if props.SavePath != torrentPath {
log.Info(ctx, "moving torrent to correct location", slog.String("oldPath", props.SavePath))
err = d.qb.Torrent().SetLocation(ctx, []string{ih.HexString()}, torrentPath)
if err != nil {
return err
} }
} }
return newTorrentFS(fs.client, file.Name(), ih.HexString(), fs.torrentPath(ih)) return nil
}
return fmt.Errorf("multiple torrents with the same infohash")
} }
// TODO caching // TODO caching
@ -97,5 +239,15 @@ func readInfoHash(ctx context.Context, file vfs.File) (infohash.T, error) {
if err != nil { if err != nil {
return infohash.T{}, err return infohash.T{}, err
} }
return mi.HashInfoBytes(), nil info, err := mi.UnmarshalInfo()
if err != nil {
return infohash.T{}, err
}
if info.HasV2() {
ih := infohash_v2.HashBytes(mi.InfoBytes)
return *(&ih).ToShort(), nil
}
return infohash.HashBytes(mi.InfoBytes), nil
} }

View file

@ -2,87 +2,123 @@ package qbittorrent
import ( import (
"context" "context"
"fmt"
"io" "io"
"io/fs" "io/fs"
"os" "os"
"path" "path"
"strings"
"time" "time"
"git.kmsign.ru/royalcat/tstor/pkg/qbittorrent"
"git.kmsign.ru/royalcat/tstor/src/vfs" "git.kmsign.ru/royalcat/tstor/src/vfs"
) )
type FS struct { type FS struct {
client *client client *cacheClient
name string name string
hash string hash string
dataDir string dataDir string
content map[string]*qbittorrent.TorrentContent
files map[string]fs.FileInfo
vfs.FilesystemPrototype
} }
var _ vfs.Filesystem = (*FS)(nil) var _ vfs.Filesystem = (*FS)(nil)
func newTorrentFS(client *client, name string, hash string, dataDir string) (*FS, error) { func newTorrentFS(ctx context.Context, client *cacheClient, name string, hash string, dataDir string) (*FS, error) {
cnts, err := client.listContent(ctx, hash)
if err != nil {
return nil, fmt.Errorf("failed to list content for hash %s: %w", hash, err)
}
content := make(map[string]*qbittorrent.TorrentContent, len(cnts))
files := make(map[string]fs.FileInfo, len(cnts))
for _, cnt := range cnts {
path := vfs.AbsPath(cnt.Name)
files[path] = vfs.NewFileInfo(cnt.Name, cnt.Size)
content[path] = cnt
}
return &FS{ return &FS{
client: client, client: client,
name: name, name: name,
hash: hash, hash: hash,
dataDir: dataDir, dataDir: dataDir,
content: content,
files: files,
FilesystemPrototype: vfs.FilesystemPrototype(name),
}, nil }, nil
} }
// Info implements vfs.Filesystem.
func (f *FS) Info() (fs.FileInfo, error) {
return vfs.NewDirInfo(f.name), nil
}
// IsDir implements vfs.Filesystem.
func (f *FS) IsDir() bool {
return true
}
// Name implements vfs.Filesystem.
func (f *FS) Name() string {
return path.Base(f.dataDir)
}
// Open implements vfs.Filesystem. // Open implements vfs.Filesystem.
func (f *FS) Open(ctx context.Context, filename string) (vfs.File, error) { func (f *FS) Open(ctx context.Context, name string) (vfs.File, error) {
panic("unimplemented") if name == vfs.Separator {
return vfs.NewDirFile(name), nil
}
cnt, ok := f.content[name]
if ok {
return openFile(ctx, f.client, f.dataDir, f.hash, cnt)
}
for p := range f.content {
if strings.HasPrefix(p, name) {
return vfs.NewDirFile(name), nil
}
}
return nil, vfs.ErrNotExist
} }
// ReadDir implements vfs.Filesystem. // ReadDir implements vfs.Filesystem.
func (f *FS) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) { func (fs *FS) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) {
panic("unimplemented") return vfs.ListDirFromInfo(fs.files, name)
} }
// Stat implements vfs.Filesystem. // Stat implements vfs.Filesystem.
func (f *FS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) { func (f *FS) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
return vfs.NewDirInfo(f.name), nil info, ok := f.files[name]
if !ok {
return nil, vfs.ErrNotExist
} }
return info, nil
// Type implements vfs.Filesystem.
func (f *FS) Type() fs.FileMode {
return vfs.ROMode
} }
// Unlink implements vfs.Filesystem. // Unlink implements vfs.Filesystem.
func (f *FS) Unlink(ctx context.Context, filename string) error { func (f *FS) Unlink(ctx context.Context, filename string) error {
panic("unimplemented") return vfs.ErrNotImplemented
} }
func openFile(ctx context.Context, client client, hash, filePath string) *File { func openFile(ctx context.Context, client *cacheClient, torrentDir string, hash string, content *qbittorrent.TorrentContent) (*File, error) {
client.getFileContent(ctx, hash, 0) props, err := client.getProperties(ctx, hash)
if err != nil {
return nil, err
}
return &File{ return &File{
client: client, client: client,
hash: hash, hash: hash,
filePath: filePath, torrentDir: torrentDir,
}
filePath: content.Name,
contentIndex: content.Index,
pieceSize: props.PieceSize,
fileSize: content.Size,
offset: 0,
}, nil
} }
type File struct { type File struct {
client client client *cacheClient
hash string hash string
dataDir string torrentDir string
filePath string // path inside a torrent directory filePath string // path inside a torrent directory
contentIndex int contentIndex int
pieceSize int pieceSize int
@ -94,16 +130,6 @@ type File struct {
var _ vfs.File = (*File)(nil) var _ vfs.File = (*File)(nil)
// Close implements vfs.File.
func (f *File) Close(ctx context.Context) error {
if f.osfile != nil {
err := f.osfile.Close()
f.osfile = nil
return err
}
return nil
}
// Info implements vfs.File. // Info implements vfs.File.
func (f *File) Info() (fs.FileInfo, error) { func (f *File) Info() (fs.FileInfo, error) {
return &fileInfo{name: path.Base(f.filePath), size: f.fileSize}, nil return &fileInfo{name: path.Base(f.filePath), size: f.fileSize}, nil
@ -173,7 +199,7 @@ func (f *File) Size() int64 {
// Type implements vfs.File. // Type implements vfs.File.
func (f *File) Type() fs.FileMode { func (f *File) Type() fs.FileMode {
return vfs.ROMode return fs.ModeDir
} }
func (f *File) descriptor() (*os.File, error) { func (f *File) descriptor() (*os.File, error) {
@ -181,7 +207,7 @@ func (f *File) descriptor() (*os.File, error) {
return f.osfile, nil return f.osfile, nil
} }
osfile, err := os.Open(path.Join(f.dataDir, f.filePath)) osfile, err := os.Open(path.Join(f.torrentDir, f.filePath))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -190,6 +216,16 @@ func (f *File) descriptor() (*os.File, error) {
return f.osfile, nil return f.osfile, nil
} }
// Close implements vfs.File.
func (f *File) Close(ctx context.Context) error {
if f.osfile != nil {
err := f.osfile.Close()
f.osfile = nil
return err
}
return nil
}
type fileInfo struct { type fileInfo struct {
name string name string
size int64 size int64

View file

@ -20,25 +20,26 @@ import (
const ( const (
repoOwner = "userdocs" repoOwner = "userdocs"
repoName = "qbittorrent-nox-static" repoName = "qbittorrent-nox-static"
binName = "qbittorrent-nox"
) )
func runQBittorrent(binDir string, profileDir string, stdout, stderr io.Writer) (*os.Process, error) { func runQBittorrent(binPath string, profileDir string, port int, stdout, stderr io.Writer) (*os.Process, error) {
cmd := exec.Command( err := os.Chmod(binPath, 0755)
path.Join(binDir, binName), if err != nil {
fmt.Sprintf("--profile=%s", profileDir), return nil, err
) }
cmd := exec.Command(binPath, fmt.Sprintf("--profile=%s", profileDir), fmt.Sprintf("--webui-port=%d", port))
cmd.Stdin = bytes.NewReader([]byte("y\n")) cmd.Stdin = bytes.NewReader([]byte("y\n"))
cmd.Stdout = stdout cmd.Stdout = stdout
cmd.Stderr = stderr cmd.Stderr = stderr
err := cmd.Start() err = cmd.Start()
if err != nil { if err != nil {
return nil, err return nil, err
} }
return cmd.Process, nil return cmd.Process, nil
} }
func downloadLatestRelease(ctx context.Context, binPath string) error { func downloadLatestQbitRelease(ctx context.Context, binPath string) error {
client := github.NewClient(nil) client := github.NewClient(nil)
rel, _, err := client.Repositories.GetLatestRelease(ctx, repoOwner, repoName) rel, _, err := client.Repositories.GetLatestRelease(ctx, repoOwner, repoName)
if err != nil { if err != nil {

View file

@ -11,8 +11,8 @@ func TestDownloadQBittorent(t *testing.T) {
ctx := context.Background() ctx := context.Background()
tempDir := t.TempDir() tempDir := t.TempDir()
require := require.New(t) require := require.New(t)
err := downloadLatestRelease(ctx, tempDir) err := downloadLatestQbitRelease(ctx, tempDir)
require.NoError(err) require.NoError(err)
err = downloadLatestRelease(ctx, tempDir) err = downloadLatestQbitRelease(ctx, tempDir)
require.NoError(err) require.NoError(err)
} }

View file

@ -1,14 +1,14 @@
package sources package sources
import ( import (
"git.kmsign.ru/royalcat/tstor/src/sources/torrent" "git.kmsign.ru/royalcat/tstor/src/sources/qbittorrent"
"git.kmsign.ru/royalcat/tstor/src/sources/ytdlp" "git.kmsign.ru/royalcat/tstor/src/sources/ytdlp"
"git.kmsign.ru/royalcat/tstor/src/vfs" "git.kmsign.ru/royalcat/tstor/src/vfs"
) )
func NewHostedFS(sourceFS vfs.Filesystem, tsrv *torrent.Daemon, ytdlpsrv *ytdlp.Daemon) vfs.Filesystem { func NewHostedFS(sourceFS vfs.Filesystem, tsrv *qbittorrent.Daemon, ytdlpsrv *ytdlp.Daemon) vfs.Filesystem {
factories := map[string]vfs.FsFactory{ factories := map[string]vfs.FsFactory{
".torrent": tsrv.NewTorrentFs, ".torrent": tsrv.TorrentFS,
".ts-ytdlp": ytdlpsrv.BuildFS, ".ts-ytdlp": ytdlpsrv.BuildFS,
} }

View file

@ -6,7 +6,7 @@ import (
"os" "os"
"git.kmsign.ru/royalcat/tstor/src/config" "git.kmsign.ru/royalcat/tstor/src/config"
dlog "git.kmsign.ru/royalcat/tstor/src/log" "git.kmsign.ru/royalcat/tstor/src/logwrap"
"github.com/anacrolix/dht/v2/bep44" "github.com/anacrolix/dht/v2/bep44"
tlog "github.com/anacrolix/log" tlog "github.com/anacrolix/log"
"github.com/anacrolix/torrent" "github.com/anacrolix/torrent"
@ -37,7 +37,7 @@ func newClientConfig(st storage.ClientImpl, fis bep44.Store, cfg *config.Torrent
// } // }
tl := tlog.NewLogger("torrent-client") tl := tlog.NewLogger("torrent-client")
tl.SetHandlers(&dlog.Torrent{L: l}) tl.SetHandlers(&logwrap.Torrent{L: l})
torrentCfg.Logger = tl torrentCfg.Logger = tl
torrentCfg.Callbacks.NewPeer = append(torrentCfg.Callbacks.NewPeer, func(p *torrent.Peer) { torrentCfg.Callbacks.NewPeer = append(torrentCfg.Callbacks.NewPeer, func(p *torrent.Peer) {

View file

@ -121,13 +121,13 @@ func NewDaemon(sourceFs billy.Filesystem, conf config.TorrentClient) (*Daemon, e
return nil, err return nil, err
} }
go func() { // go func() {
ctx := context.Background() // ctx := context.Background()
err := s.backgroudFileLoad(ctx) // err := s.backgroudFileLoad(ctx)
if err != nil { // if err != nil {
s.log.Error(ctx, "initial torrent load failed", rlog.Error(err)) // s.log.Error(ctx, "initial torrent load failed", rlog.Error(err))
} // }
}() // }()
go func() { go func() {
ctx := context.Background() ctx := context.Background()

View file

@ -5,7 +5,7 @@ import (
"encoding/gob" "encoding/gob"
"time" "time"
dlog "git.kmsign.ru/royalcat/tstor/src/log" "git.kmsign.ru/royalcat/tstor/src/logwrap"
"github.com/anacrolix/dht/v2/bep44" "github.com/anacrolix/dht/v2/bep44"
"github.com/dgraph-io/badger/v4" "github.com/dgraph-io/badger/v4"
) )
@ -19,7 +19,7 @@ type dhtFileItemStore struct {
func newDHTStore(path string, itemsTTL time.Duration) (*dhtFileItemStore, error) { func newDHTStore(path string, itemsTTL time.Duration) (*dhtFileItemStore, error) {
opts := badger.DefaultOptions(path). opts := badger.DefaultOptions(path).
WithLogger(dlog.BadgerLogger("torrent-client", "dht-item-store")). WithLogger(logwrap.BadgerLogger("torrent-client", "dht-item-store")).
WithValueLogFileSize(1<<26 - 1) WithValueLogFileSize(1<<26 - 1)
db, err := badger.Open(opts) db, err := badger.Open(opts)

View file

@ -138,26 +138,26 @@ func (fs *TorrentFS) files(ctx context.Context) (map[string]vfs.File, error) {
} }
// TODO optional // TODO optional
if len(fs.filesCache) == 1 && fs.resolver.IsNestedFs(fs.Torrent.Name()) { // if len(fs.filesCache) == 1 && fs.resolver.IsNestedFs(fs.Torrent.Name()) {
filepath := "/" + fs.Torrent.Name() // filepath := "/" + fs.Torrent.Name()
if file, ok := fs.filesCache[filepath]; ok { // if file, ok := fs.filesCache[filepath]; ok {
nestedFs, err := fs.resolver.NestedFs(ctx, filepath, file) // nestedFs, err := fs.resolver.NestedFs(ctx, filepath, file)
if err != nil { // if err != nil {
return nil, err // return nil, err
} // }
if nestedFs == nil { // if nestedFs == nil {
goto DEFAULT_DIR // FIXME // goto DEFAULT_DIR // FIXME
} // }
fs.filesCache, err = listFilesRecursive(ctx, nestedFs, "/") // fs.filesCache, err = listFilesRecursive(ctx, nestedFs, "/")
if err != nil { // if err != nil {
return nil, err // return nil, err
} // }
return fs.filesCache, nil // return fs.filesCache, nil
} // }
} // }
// DEFAULT_DIR:
DEFAULT_DIR:
rootDir := "/" + fs.Torrent.Name() + "/" rootDir := "/" + fs.Torrent.Name() + "/"
singleDir := true singleDir := true
for k, _ := range fs.filesCache { for k, _ := range fs.filesCache {
@ -315,7 +315,6 @@ func (tfs *TorrentFS) Open(ctx context.Context, filename string) (file vfs.File,
return nil, err return nil, err
} }
if nestedFs != nil { if nestedFs != nil {
return nestedFs.Open(ctx, nestedFsPath) return nestedFs.Open(ctx, nestedFsPath)
} }

View file

@ -6,7 +6,7 @@ import (
"fmt" "fmt"
"path/filepath" "path/filepath"
dlog "git.kmsign.ru/royalcat/tstor/src/log" "git.kmsign.ru/royalcat/tstor/src/logwrap"
"github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/metainfo"
"github.com/anacrolix/torrent/types/infohash" "github.com/anacrolix/torrent/types/infohash"
"github.com/dgraph-io/badger/v4" "github.com/dgraph-io/badger/v4"
@ -21,7 +21,7 @@ type infoBytesStore struct {
func newInfoBytesStore(metaDir string) (*infoBytesStore, error) { func newInfoBytesStore(metaDir string) (*infoBytesStore, error) {
opts := badger. opts := badger.
DefaultOptions(filepath.Join(metaDir, "infobytes")). DefaultOptions(filepath.Join(metaDir, "infobytes")).
WithLogger(dlog.BadgerLogger("torrent-client", "infobytes")) WithLogger(logwrap.BadgerLogger("torrent-client", "infobytes"))
db, err := badger.Open(opts) db, err := badger.Open(opts)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -5,7 +5,7 @@ import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
dlog "git.kmsign.ru/royalcat/tstor/src/log" "git.kmsign.ru/royalcat/tstor/src/logwrap"
"github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/metainfo"
"github.com/anacrolix/torrent/storage" "github.com/anacrolix/torrent/storage"
"github.com/royalcat/kv" "github.com/royalcat/kv"
@ -86,7 +86,7 @@ var _ storage.PieceCompletion = (*badgerPieceCompletion)(nil)
func newPieceCompletion(dir string) (storage.PieceCompletion, error) { func newPieceCompletion(dir string) (storage.PieceCompletion, error) {
opts := kvbadger.DefaultOptions[PieceCompletionState](dir) opts := kvbadger.DefaultOptions[PieceCompletionState](dir)
opts.Codec = kv.CodecBinary[PieceCompletionState, *PieceCompletionState]{} opts.Codec = kv.CodecBinary[PieceCompletionState, *PieceCompletionState]{}
opts.BadgerOptions = opts.BadgerOptions.WithLogger(dlog.BadgerLogger("torrent-client", "piece-completion")) opts.BadgerOptions = opts.BadgerOptions.WithLogger(logwrap.BadgerLogger("torrent-client", "piece-completion"))
db, err := kvbadger.NewBagerKVBinaryKey[pieceKey, PieceCompletionState](opts) db, err := kvbadger.NewBagerKVBinaryKey[pieceKey, PieceCompletionState](opts)
if err != nil { if err != nil {

View file

@ -7,7 +7,7 @@ import (
"slices" "slices"
"time" "time"
"git.kmsign.ru/royalcat/tstor/src/log" "git.kmsign.ru/royalcat/tstor/src/logwrap"
"github.com/anacrolix/torrent/types/infohash" "github.com/anacrolix/torrent/types/infohash"
"github.com/dgraph-io/badger/v4" "github.com/dgraph-io/badger/v4"
) )
@ -17,7 +17,7 @@ func newStatsStore(metaDir string, lifetime time.Duration) (*statsStore, error)
badger. badger.
DefaultOptions(path.Join(metaDir, "stats")). DefaultOptions(path.Join(metaDir, "stats")).
WithNumVersionsToKeep(int(^uint(0) >> 1)). WithNumVersionsToKeep(int(^uint(0) >> 1)).
WithLogger(log.BadgerLogger("stats")), // Infinity WithLogger(logwrap.BadgerLogger("stats")), // Infinity
) )
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -3,14 +3,14 @@ package tkv
import ( import (
"path" "path"
tlog "git.kmsign.ru/royalcat/tstor/src/log" "git.kmsign.ru/royalcat/tstor/src/logwrap"
"github.com/royalcat/kv" "github.com/royalcat/kv"
"github.com/royalcat/kv/kvbadger" "github.com/royalcat/kv/kvbadger"
) )
func NewKV[K kv.Bytes, V any](dbdir, name string) (store kv.Store[K, V], err error) { func NewKV[K kv.Bytes, V any](dbdir, name string) (store kv.Store[K, V], err error) {
opts := kvbadger.DefaultOptions[V](path.Join(dbdir, name)) opts := kvbadger.DefaultOptions[V](path.Join(dbdir, name))
opts.BadgerOptions.Logger = tlog.BadgerLogger(name, "badger") opts.BadgerOptions.Logger = logwrap.BadgerLogger(name, "badger")
store, err = kvbadger.NewBadgerKVBytesKey[K, V](opts) store, err = kvbadger.NewBadgerKVBytesKey[K, V](opts)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -109,3 +109,25 @@ func (fi *fileInfo) IsDir() bool {
func (fi *fileInfo) Sys() interface{} { func (fi *fileInfo) Sys() interface{} {
return nil return nil
} }
// FilesystemPrototype is a minimal Filesystem placeholder identified only by
// its name; it always presents itself as an (empty) directory.
type FilesystemPrototype string

// Info implements Filesystem.
// It returns directory metadata named after the prototype itself.
func (p FilesystemPrototype) Info() (fs.FileInfo, error) {
	return NewDirInfo(string(p)), nil
}

// IsDir implements Filesystem. A prototype is always a directory.
func (p FilesystemPrototype) IsDir() bool {
	return true
}

// Name implements Filesystem. The prototype's string value is its name.
func (p FilesystemPrototype) Name() string {
	return string(p)
}

// Type implements Filesystem. Reports the directory mode bit.
func (p FilesystemPrototype) Type() fs.FileMode {
	return fs.ModeDir
}

86
src/vfs/hash.go Normal file
View file

@ -0,0 +1,86 @@
package vfs
import (
"context"
"encoding/binary"
"errors"
"fmt"
"io"
)
// chunkSize is the number of bytes read from the head and from the tail of a
// file when computing its oshash (64 KiB).
const chunkSize int64 = 64 * 1024

// ErrOsHashLen is returned when a buffer cannot be summed as 8-byte words
// because its length is not a multiple of 8.
var ErrOsHashLen = errors.New("oshash: buffer length must be a multiple of 8")

// Hash is a hex-encoded file hash value.
type Hash string
// FileHash computes the oshash of f: the wrapping uint64 sum of the first and
// last chunkSize bytes of the file plus the file size, hex-encoded.
//
// The file is rewound to the start before hashing and (best effort) left at
// the start afterwards. Files of 8 bytes or fewer are rejected, since at
// least one full 8-byte word is required from each end.
func FileHash(ctx context.Context, f File) (Hash, error) {
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return "", fmt.Errorf("error seeking file: %w", err)
	}
	// Best effort: leave the file positioned at the start for the caller.
	defer f.Seek(0, io.SeekStart)

	fileSize := f.Size()
	if fileSize <= 8 {
		return "", fmt.Errorf("cannot calculate oshash where size <= 8 (%d)", fileSize)
	}

	fileChunkSize := chunkSize
	if fileSize < fileChunkSize {
		// Shrink to the largest multiple of 8 that fits inside the file.
		fileChunkSize = (fileSize / 8) * 8
	}

	head := make([]byte, fileChunkSize)
	tail := make([]byte, fileChunkSize)

	// Read the head of the file, retrying on short reads so the buffer is
	// fully populated before it is summed.
	if err := readFull(ctx, f, head); err != nil {
		return "", fmt.Errorf("error reading file head: %w", err)
	}

	// Seek to chunk-size bytes before the end of the file.
	if _, err := f.Seek(-fileChunkSize, io.SeekEnd); err != nil {
		return "", fmt.Errorf("error seeking to file tail: %w", err)
	}

	// Read the tail of the file.
	if err := readFull(ctx, f, tail); err != nil {
		return "", fmt.Errorf("error reading file tail: %w", err)
	}

	return oshash(fileSize, head, tail)
}

// readFull reads exactly len(buf) bytes from f. It assumes File.Read follows
// io.Reader short-read semantics (may return fewer bytes than requested) —
// TODO confirm against the File interface contract.
func readFull(ctx context.Context, f File, buf []byte) error {
	for read := 0; read < len(buf); {
		n, err := f.Read(ctx, buf[read:])
		read += n
		switch {
		case err == io.EOF:
			if read < len(buf) {
				return io.ErrUnexpectedEOF
			}
			return nil
		case err != nil:
			return err
		case n == 0:
			// Guard against a misbehaving reader looping forever.
			return io.ErrNoProgress
		}
	}
	return nil
}
// sumBytes folds buf into a single uint64 by adding every 8-byte
// little-endian word, wrapping on overflow. The buffer length must be a
// multiple of 8, otherwise ErrOsHashLen is returned.
func sumBytes(buf []byte) (uint64, error) {
	if len(buf)%8 != 0 {
		return 0, ErrOsHashLen
	}

	var sum uint64
	for rest := buf; len(rest) > 0; rest = rest[8:] {
		sum += binary.LittleEndian.Uint64(rest[:8])
	}
	return sum, nil
}
// oshash combines the word-sums of the head and tail chunks with the file
// size and renders the result as a fixed-width 16-digit hex string.
func oshash(size int64, head []byte, tail []byte) (Hash, error) {
	// Start from the file size; the chunk sums are folded in below with
	// ordinary wrapping uint64 addition.
	total := uint64(size)

	chunks := []struct {
		label string
		data  []byte
	}{
		{"head", head},
		{"tail", tail},
	}
	for _, chunk := range chunks {
		sum, err := sumBytes(chunk.data)
		if err != nil {
			return "", fmt.Errorf("oshash %s: %w", chunk.label, err)
		}
		total += sum
	}

	// Output as hex.
	return Hash(fmt.Sprintf("%016x", total)), nil
}

View file

@ -111,6 +111,7 @@ func (fs *LogFS) Open(ctx context.Context, filename string) (file File, err erro
ctx, span := tracer.Start(ctx, "Open", ctx, span := tracer.Start(ctx, "Open",
fs.traceAttrs(attribute.String("filename", filename)), fs.traceAttrs(attribute.String("filename", filename)),
) )
log := fs.log.With(slog.String("filename", filename))
defer func() { defer func() {
if err != nil { if err != nil {
span.RecordError(err) span.RecordError(err)
@ -120,7 +121,7 @@ func (fs *LogFS) Open(ctx context.Context, filename string) (file File, err erro
file, err = fs.fs.Open(ctx, filename) file, err = fs.fs.Open(ctx, filename)
if isLoggableError(err) { if isLoggableError(err) {
fs.log.Error(ctx, "Failed to open file") log.Error(ctx, "Failed to open file", rlog.Error(err))
} }
file = WrapLogFile(file, filename, fs.log, fs.readTimeout, fs.tel) file = WrapLogFile(file, filename, fs.log, fs.readTimeout, fs.tel)

View file

@ -92,13 +92,14 @@ func (r *ResolverFS) Open(ctx context.Context, filename string) (File, error) {
} }
// ReadDir implements Filesystem. // ReadDir implements Filesystem.
func (r *ResolverFS) ReadDir(ctx context.Context, dir string) ([]fs.DirEntry, error) { func (r *ResolverFS) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) {
log := r.log.With(slog.String("name", name))
ctx, span := tracer.Start(ctx, "ReadDir", ctx, span := tracer.Start(ctx, "ReadDir",
r.traceAttrs(attribute.String("name", dir)), r.traceAttrs(attribute.String("name", name)),
) )
defer span.End() defer span.End()
fsPath, nestedFs, nestedFsPath, err := r.resolver.ResolvePath(ctx, dir, r.rootFS.Open) fsPath, nestedFs, nestedFsPath, err := r.resolver.ResolvePath(ctx, name, r.rootFS.Open)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -113,34 +114,22 @@ func (r *ResolverFS) ReadDir(ctx context.Context, dir string) ([]fs.DirEntry, er
out := make([]fs.DirEntry, 0, len(entries)) out := make([]fs.DirEntry, 0, len(entries))
for _, e := range entries { for _, e := range entries {
if r.resolver.IsNestedFs(e.Name()) { if r.resolver.IsNestedFs(e.Name()) {
filepath := path.Join("/", dir, e.Name()) filepath := path.Join("/", name, e.Name())
file, err := r.Open(ctx, filepath) file, err := r.rootFS.Open(ctx, filepath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// it is factory responsibility to close file then needed nestedfs, err := r.resolver.nestedFs(ctx, filepath, file)
err = func() error {
factoryCtx, cancel := subTimeout(ctx)
defer cancel()
nestedfs, err := r.resolver.NestedFs(factoryCtx, filepath, file)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) { if errors.Is(err, context.DeadlineExceeded) {
r.log.Error(ctx, "creating fs timed out",
slog.String("filename", e.Name()),
)
return nil
}
return err
}
out = append(out, nestedfs)
return nil
}()
if err != nil {
return nil, err return nil, err
} }
if err != nil {
log.Error(ctx, "error creating nested fs", rlog.Error(err))
out = append(out, e)
continue
}
out = append(out, nestedfs)
} else { } else {
out = append(out, e) out = append(out, e)
} }
@ -214,14 +203,14 @@ type FsFactory func(ctx context.Context, f File) (Filesystem, error)
func NewResolver(factories map[string]FsFactory) *Resolver { func NewResolver(factories map[string]FsFactory) *Resolver {
return &Resolver{ return &Resolver{
factories: factories, factories: factories,
fsmap: map[string]Filesystem{}, fsmap: map[Hash]Filesystem{},
} }
} }
type Resolver struct { type Resolver struct {
m sync.Mutex m sync.Mutex
factories map[string]FsFactory factories map[string]FsFactory
fsmap map[string]Filesystem // filesystem cache fsmap map[Hash]Filesystem // filesystem cache
// TODO: add fsmap clean // TODO: add fsmap clean
} }
@ -236,26 +225,35 @@ func (r *Resolver) IsNestedFs(f string) bool {
return false return false
} }
func (r *Resolver) NestedFs(ctx context.Context, fsPath string, file File) (Filesystem, error) { func (r *Resolver) nestedFs(ctx context.Context, fsPath string, file File) (Filesystem, error) {
if file.IsDir() {
return nil, file.Close(ctx)
}
fileHash, err := FileHash(ctx, file)
if err != nil {
return nil, fmt.Errorf("error calculating file hash: %w", err)
}
if nestedFs, ok := r.fsmap[fileHash]; ok {
return nestedFs, file.Close(ctx)
}
for ext, nestFactory := range r.factories { for ext, nestFactory := range r.factories {
if !strings.HasSuffix(fsPath, ext) { if !strings.HasSuffix(fsPath, ext) {
continue continue
} }
if nestedFs, ok := r.fsmap[fsPath]; ok {
return nestedFs, nil
}
nestedFs, err := nestFactory(ctx, file) nestedFs, err := nestFactory(ctx, file)
if err != nil { if err != nil {
return nil, fmt.Errorf("error creating filesystem from file: %s with error: %w", fsPath, err) return nil, fmt.Errorf("error calling nest factory: %s with error: %w", fsPath, err)
} }
r.fsmap[fsPath] = nestedFs r.fsmap[fileHash] = nestedFs
return nestedFs, nil return nestedFs, nil
} }
return nil, nil return nil, file.Close(ctx)
} }
// open requeue raw open, without resolver call // open requeue raw open, without resolver call
@ -289,6 +287,19 @@ PARTS_LOOP:
nestedFsPath = AbsPath(path.Join(parts[nestOn:]...)) nestedFsPath = AbsPath(path.Join(parts[nestOn:]...))
file, err := rawOpen(ctx, fsPath)
if err != nil {
return "", nil, "", fmt.Errorf("error opening filesystem file: %s with error: %w", fsPath, err)
}
fileHash, err := FileHash(ctx, file)
if err != nil {
return "", nil, "", fmt.Errorf("error calculating file hash: %w", err)
}
err = file.Close(ctx)
if err != nil {
return "", nil, "", fmt.Errorf("error closing file: %w", err)
}
// we dont need lock until now // we dont need lock until now
// it must be before fsmap read to exclude race condition: // it must be before fsmap read to exclude race condition:
// read -> write // read -> write
@ -296,7 +307,7 @@ PARTS_LOOP:
r.m.Lock() r.m.Lock()
defer r.m.Unlock() defer r.m.Unlock()
if nestedFs, ok := r.fsmap[fsPath]; ok { if nestedFs, ok := r.fsmap[fileHash]; ok {
span.AddEvent("fs loaded from cache", trace.WithAttributes(attribute.String("nestedFs", reflect.TypeOf(nestedFs).Name()))) span.AddEvent("fs loaded from cache", trace.WithAttributes(attribute.String("nestedFs", reflect.TypeOf(nestedFs).Name())))
return fsPath, nestedFs, nestedFsPath, nil return fsPath, nestedFs, nestedFsPath, nil
} else { } else {
@ -307,13 +318,13 @@ PARTS_LOOP:
if err != nil { if err != nil {
return "", nil, "", fmt.Errorf("error opening filesystem file: %s with error: %w", fsPath, err) return "", nil, "", fmt.Errorf("error opening filesystem file: %s with error: %w", fsPath, err)
} }
// it is factory responsibility to close file then needed // it is factory responsibility to close file handler then needed
nestedFs, err := nestFactory(ctx, fsFile) nestedFs, err := nestFactory(ctx, fsFile)
if err != nil { if err != nil {
return "", nil, "", fmt.Errorf("error creating filesystem from file: %s with error: %w", fsPath, err) return "", nil, "", fmt.Errorf("error creating filesystem from file: %s with error: %w", fsPath, err)
} }
r.fsmap[fsPath] = nestedFs r.fsmap[fileHash] = nestedFs
span.AddEvent("fs created", trace.WithAttributes(attribute.String("nestedFs", reflect.TypeOf(nestedFs).Name()))) span.AddEvent("fs created", trace.WithAttributes(attribute.String("nestedFs", reflect.TypeOf(nestedFs).Name())))
@ -366,3 +377,27 @@ func ListDirFromFiles[F File](m map[string]F, name string) ([]fs.DirEntry, error
return out, nil return out, nil
} }
// ListDirFromInfo builds the directory listing for name from a map of file
// paths to their fs.FileInfo. Immediate children become file entries with
// their real size, deeper descendants collapse into directory entries, and
// the result is sorted and deduplicated by entry name.
func ListDirFromInfo(m map[string]fs.FileInfo, name string) ([]fs.DirEntry, error) {
	prefix := AddTrailSlash(path.Clean(name))

	entries := make([]fs.DirEntry, 0, len(m))
	for fullPath, info := range m {
		if !strings.HasPrefix(fullPath, prefix) {
			continue
		}
		segments := strings.Split(trimRelPath(fullPath, prefix), Separator)
		if len(segments) == 1 {
			// Direct child: a file entry carrying the reported size.
			entries = append(entries, NewFileInfo(segments[0], info.Size()))
		} else {
			// Nested path: only its first segment appears, as a directory.
			entries = append(entries, NewDirInfo(segments[0]))
		}
	}

	slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
		return strings.Compare(a.Name(), b.Name())
	})
	return slices.CompactFunc(entries, func(a, b fs.DirEntry) bool {
		return a.Name() == b.Name()
	}), nil
}