diff --git a/cmd/tstor/main.go b/cmd/tstor/main.go index 6673f89..0a5765a 100644 --- a/cmd/tstor/main.go +++ b/cmd/tstor/main.go @@ -20,7 +20,7 @@ import ( "git.kmsign.ru/royalcat/tstor/src/config" "git.kmsign.ru/royalcat/tstor/src/delivery" "git.kmsign.ru/royalcat/tstor/src/sources" - "git.kmsign.ru/royalcat/tstor/src/sources/torrent" + "git.kmsign.ru/royalcat/tstor/src/sources/qbittorrent" "git.kmsign.ru/royalcat/tstor/src/sources/ytdlp" "git.kmsign.ru/royalcat/tstor/src/telemetry" "git.kmsign.ru/royalcat/tstor/src/vfs" @@ -91,10 +91,10 @@ func run(configPath string) error { } sourceFs := osfs.New(conf.SourceDir, osfs.WithBoundOS()) - tsrv, err := torrent.NewDaemon(sourceFs, conf.TorrentClient) - if err != nil { - return fmt.Errorf("error creating service: %w", err) - } + // tsrv, err := torrent.NewDaemon(sourceFs, conf.Sources.TorrentClient) + // if err != nil { + // return fmt.Errorf("error creating service: %w", err) + // } err = os.MkdirAll("./ytdlp", 0744) if err != nil { @@ -105,9 +105,14 @@ func run(configPath string) error { return err } + qtsrv, err := qbittorrent.NewDaemon(conf.Sources.QBittorrent) + if err != nil { + return fmt.Errorf("error creating qbittorrent daemon: %w", err) + } + sfs := sources.NewHostedFS( vfs.NewCtxBillyFs("/", ctxbilly.WrapFileSystem(sourceFs)), - tsrv, ytdlpsrv, + qtsrv, ytdlpsrv, ) sfs, err = vfs.WrapLogFS(sfs) if err != nil { @@ -187,7 +192,7 @@ func run(configPath string) error { go func() { logFilename := filepath.Join(conf.Log.Path, "logs") - err := delivery.Run(tsrv, sfs, logFilename, conf) + err := delivery.Run(nil, sfs, logFilename, conf) if err != nil { log.Error(ctx, "error initializing HTTP server", rlog.Error(err)) } @@ -198,6 +203,7 @@ func run(configPath string) error { <-sigChan return errors.Join( - tsrv.Close(ctx), + // tsrv.Close(ctx), + qtsrv.Close(ctx), ) } diff --git a/go.mod b/go.mod index e949487..8517af4 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,6 @@ module git.kmsign.ru/royalcat/tstor go 1.22.3 
-replace github.com/bytedance/sonic v1.11.9 => github.com/bytedance/sonic v1.12.1 - require ( github.com/99designs/gqlgen v0.17.49 github.com/agoda-com/opentelemetry-go/otelslog v0.1.1 @@ -20,6 +18,7 @@ require ( github.com/gofrs/uuid/v5 v5.1.0 github.com/google/go-github/v63 v63.0.0 github.com/google/uuid v1.6.0 + github.com/gorilla/schema v1.4.1 github.com/grafana/otel-profiling-go v0.5.1 github.com/grafana/pyroscope-go v1.1.2 github.com/hashicorp/golang-lru/v2 v2.0.7 @@ -47,7 +46,6 @@ require ( github.com/vektah/gqlparser/v2 v2.5.16 github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e - github.com/xuthus5/qbittorrent-client-go v0.0.0-20240710140754-51c95559ea0a go.opentelemetry.io/otel v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 @@ -122,7 +120,6 @@ require ( github.com/google/btree v1.1.2 // indirect github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/gorilla/schema v1.4.1 // indirect github.com/gorilla/websocket v1.5.1 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect diff --git a/go.sum b/go.sum index a7f7ce9..b7203a6 100644 --- a/go.sum +++ b/go.sum @@ -136,8 +136,8 @@ github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2w github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8= github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og= -github.com/bytedance/sonic v1.12.1 h1:jWl5Qz1fy7X1ioY74WqO0KjAMtAGQs4sYnjiEBiyX24= -github.com/bytedance/sonic v1.12.1/go.mod 
h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk= +github.com/bytedance/sonic v1.11.9 h1:LFHENlIY/SLzDWverzdOvgMztTxcfcF+cqNsz9pK5zg= +github.com/bytedance/sonic v1.11.9/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.2.0 h1:zNprn+lsIP06C/IqCHs3gPQIvnvpKbbxyXQP1iU4kWM= github.com/bytedance/sonic/loader v0.2.0/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= @@ -653,8 +653,6 @@ github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e h1:1eHCP4w7tMmpf github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e/go.mod h1:59vHBW4EpjiL5oiqgCrBp1Tc9JXRzKCNMEOaGmNfSHo= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= -github.com/xuthus5/qbittorrent-client-go v0.0.0-20240710140754-51c95559ea0a h1:/3NF320wvXk5nm9Ng02eKTiWSYf20r4acufqecGLpfo= -github.com/xuthus5/qbittorrent-client-go v0.0.0-20240710140754-51c95559ea0a/go.mod h1:lP2yxMU6WGTmHqI9T+SrEw3wo7k5kUyiA9FBOK9NKMQ= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -699,6 +697,7 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.8.0 
h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1019,6 +1018,7 @@ modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= zombiezen.com/go/sqlite v1.3.0 h1:98g1gnCm+CNz6AuQHu0gqyw7gR2WU3O3PJufDOStpUs= diff --git a/pkg/go-nfs/file.go b/pkg/go-nfs/file.go index 2a108b1..6bdc2a3 100644 --- a/pkg/go-nfs/file.go +++ b/pkg/go-nfs/file.go @@ -255,6 +255,8 @@ func (s *SetFileAttributes) Apply(ctx context.Context, changer Change, fs Filesy } else if err != nil { return err } + defer fp.Close(ctx) + if *s.SetSize > math.MaxInt64 { return &NFSStatusError{NFSStatusInval, os.ErrInvalid} } diff --git a/pkg/qbittorrent/application.go b/pkg/qbittorrent/application.go new file mode 100644 index 0000000..31daefd --- /dev/null +++ b/pkg/qbittorrent/application.go @@ -0,0 +1,375 @@ +package qbittorrent + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" +) + +type Application interface { + // Version get application version + Version(context.Context) (string, error) + // WebApiVersion get webapi version + WebApiVersion(context.Context) (string, error) + // BuildInfo get build info + BuildInfo(context.Context) (*BuildInfo, error) + // Shutdown exit application + Shutdown(context.Context) error + // GetPreferences get application preferences + GetPreferences(context.Context) 
(*Preferences, error) + // SetPreferences set application preferences + SetPreferences(context.Context, *Preferences) error + // DefaultSavePath get default save path + DefaultSavePath(context.Context) (string, error) +} + +type BuildInfo struct { + BitNess int `json:"bitness,omitempty"` + Boost string `json:"boost,omitempty"` + LibTorrent string `json:"libtorrent,omitempty"` + Openssl string `json:"openssl,omitempty"` + QT string `json:"qt,omitempty"` + Zlib string `json:"zlib,omitempty"` +} + +type Preferences struct { + AddToTopOfQueue bool `json:"add_to_top_of_queue,omitempty"` + AddTrackers string `json:"add_trackers,omitempty"` + AddTrackersEnabled bool `json:"add_trackers_enabled,omitempty"` + AltDlLimit int `json:"alt_dl_limit,omitempty"` + AltUpLimit int `json:"alt_up_limit,omitempty"` + AlternativeWebuiEnabled bool `json:"alternative_webui_enabled,omitempty"` + AlternativeWebuiPath string `json:"alternative_webui_path,omitempty"` + AnnounceIP string `json:"announce_ip,omitempty"` + AnnounceToAllTiers bool `json:"announce_to_all_tiers,omitempty"` + AnnounceToAllTrackers bool `json:"announce_to_all_trackers,omitempty"` + AnonymousMode bool `json:"anonymous_mode,omitempty"` + AsyncIoThreads int `json:"async_io_threads,omitempty"` + AutoDeleteMode int `json:"auto_delete_mode,omitempty"` + AutoTmmEnabled bool `json:"auto_tmm_enabled,omitempty"` + AutorunEnabled bool `json:"autorun_enabled,omitempty"` + AutorunOnTorrentAddedEnabled bool `json:"autorun_on_torrent_added_enabled,omitempty"` + AutorunOnTorrentAddedProgram string `json:"autorun_on_torrent_added_program,omitempty"` + AutorunProgram string `json:"autorun_program,omitempty"` + BannedIPs string `json:"banned_IPs,omitempty"` + BdecodeDepthLimit int `json:"bdecode_depth_limit,omitempty"` + BdecodeTokenLimit int `json:"bdecode_token_limit,omitempty"` + BittorrentProtocol int `json:"bittorrent_protocol,omitempty"` + BlockPeersOnPrivilegedPorts bool `json:"block_peers_on_privileged_ports,omitempty"` + 
BypassAuthSubnetWhitelist string `json:"bypass_auth_subnet_whitelist,omitempty"` + BypassAuthSubnetWhitelistEnabled bool `json:"bypass_auth_subnet_whitelist_enabled,omitempty"` + BypassLocalAuth bool `json:"bypass_local_auth,omitempty"` + CategoryChangedTmmEnabled bool `json:"category_changed_tmm_enabled,omitempty"` + CheckingMemoryUse int `json:"checking_memory_use,omitempty"` + ConnectionSpeed int `json:"connection_speed,omitempty"` + CurrentInterfaceAddress string `json:"current_interface_address,omitempty"` + CurrentInterfaceName string `json:"current_interface_name,omitempty"` + CurrentNetworkInterface string `json:"current_network_interface,omitempty"` + Dht bool `json:"dht,omitempty"` + DiskCache int `json:"disk_cache,omitempty"` + DiskCacheTTL int `json:"disk_cache_ttl,omitempty"` + DiskIoReadMode int `json:"disk_io_read_mode,omitempty"` + DiskIoType int `json:"disk_io_type,omitempty"` + DiskIoWriteMode int `json:"disk_io_write_mode,omitempty"` + DiskQueueSize int `json:"disk_queue_size,omitempty"` + DlLimit int `json:"dl_limit,omitempty"` + DontCountSlowTorrents bool `json:"dont_count_slow_torrents,omitempty"` + DyndnsDomain string `json:"dyndns_domain,omitempty"` + DyndnsEnabled bool `json:"dyndns_enabled,omitempty"` + DyndnsPassword string `json:"dyndns_password,omitempty"` + DyndnsService int `json:"dyndns_service,omitempty"` + DyndnsUsername string `json:"dyndns_username,omitempty"` + EmbeddedTrackerPort int `json:"embedded_tracker_port,omitempty"` + EmbeddedTrackerPortForwarding bool `json:"embedded_tracker_port_forwarding,omitempty"` + EnableCoalesceReadWrite bool `json:"enable_coalesce_read_write,omitempty"` + EnableEmbeddedTracker bool `json:"enable_embedded_tracker,omitempty"` + EnableMultiConnectionsFromSameIP bool `json:"enable_multi_connections_from_same_ip,omitempty"` + EnablePieceExtentAffinity bool `json:"enable_piece_extent_affinity,omitempty"` + EnableUploadSuggestions bool `json:"enable_upload_suggestions,omitempty"` + Encryption int 
`json:"encryption,omitempty"` + ExcludedFileNames string `json:"excluded_file_names,omitempty"` + ExcludedFileNamesEnabled bool `json:"excluded_file_names_enabled,omitempty"` + ExportDir string `json:"export_dir,omitempty"` + ExportDirFin string `json:"export_dir_fin,omitempty"` + FileLogAge int `json:"file_log_age,omitempty"` + FileLogAgeType int `json:"file_log_age_type,omitempty"` + FileLogBackupEnabled bool `json:"file_log_backup_enabled,omitempty"` + FileLogDeleteOld bool `json:"file_log_delete_old,omitempty"` + FileLogEnabled bool `json:"file_log_enabled,omitempty"` + FileLogMaxSize int `json:"file_log_max_size,omitempty"` + FileLogPath string `json:"file_log_path,omitempty"` + FilePoolSize int `json:"file_pool_size,omitempty"` + HashingThreads int `json:"hashing_threads,omitempty"` + I2PAddress string `json:"i2p_address,omitempty"` + I2PEnabled bool `json:"i2p_enabled,omitempty"` + I2PInboundLength int `json:"i2p_inbound_length,omitempty"` + I2PInboundQuantity int `json:"i2p_inbound_quantity,omitempty"` + I2PMixedMode bool `json:"i2p_mixed_mode,omitempty"` + I2POutboundLength int `json:"i2p_outbound_length,omitempty"` + I2POutboundQuantity int `json:"i2p_outbound_quantity,omitempty"` + I2PPort int `json:"i2p_port,omitempty"` + IdnSupportEnabled bool `json:"idn_support_enabled,omitempty"` + IncompleteFilesExt bool `json:"incomplete_files_ext,omitempty"` + IPFilterEnabled bool `json:"ip_filter_enabled,omitempty"` + IPFilterPath string `json:"ip_filter_path,omitempty"` + IPFilterTrackers bool `json:"ip_filter_trackers,omitempty"` + LimitLanPeers bool `json:"limit_lan_peers,omitempty"` + LimitTCPOverhead bool `json:"limit_tcp_overhead,omitempty"` + LimitUtpRate bool `json:"limit_utp_rate,omitempty"` + ListenPort int `json:"listen_port,omitempty"` + Locale string `json:"locale,omitempty"` + Lsd bool `json:"lsd,omitempty"` + MailNotificationAuthEnabled bool `json:"mail_notification_auth_enabled,omitempty"` + MailNotificationEmail string 
`json:"mail_notification_email,omitempty"` + MailNotificationEnabled bool `json:"mail_notification_enabled,omitempty"` + MailNotificationPassword string `json:"mail_notification_password,omitempty"` + MailNotificationSender string `json:"mail_notification_sender,omitempty"` + MailNotificationSMTP string `json:"mail_notification_smtp,omitempty"` + MailNotificationSslEnabled bool `json:"mail_notification_ssl_enabled,omitempty"` + MailNotificationUsername string `json:"mail_notification_username,omitempty"` + MaxActiveCheckingTorrents int `json:"max_active_checking_torrents,omitempty"` + MaxActiveDownloads int `json:"max_active_downloads,omitempty"` + MaxActiveTorrents int `json:"max_active_torrents,omitempty"` + MaxActiveUploads int `json:"max_active_uploads,omitempty"` + MaxConcurrentHTTPAnnounces int `json:"max_concurrent_http_announces,omitempty"` + MaxConnec int `json:"max_connec,omitempty"` + MaxConnecPerTorrent int `json:"max_connec_per_torrent,omitempty"` + MaxInactiveSeedingTime int `json:"max_inactive_seeding_time,omitempty"` + MaxInactiveSeedingTimeEnabled bool `json:"max_inactive_seeding_time_enabled,omitempty"` + MaxRatio int `json:"max_ratio,omitempty"` + MaxRatioAct int `json:"max_ratio_act,omitempty"` + MaxRatioEnabled bool `json:"max_ratio_enabled,omitempty"` + MaxSeedingTime int `json:"max_seeding_time,omitempty"` + MaxSeedingTimeEnabled bool `json:"max_seeding_time_enabled,omitempty"` + MaxUploads int `json:"max_uploads,omitempty"` + MaxUploadsPerTorrent int `json:"max_uploads_per_torrent,omitempty"` + MemoryWorkingSetLimit int `json:"memory_working_set_limit,omitempty"` + MergeTrackers bool `json:"merge_trackers,omitempty"` + OutgoingPortsMax int `json:"outgoing_ports_max,omitempty"` + OutgoingPortsMin int `json:"outgoing_ports_min,omitempty"` + PeerTos int `json:"peer_tos,omitempty"` + PeerTurnover int `json:"peer_turnover,omitempty"` + PeerTurnoverCutoff int `json:"peer_turnover_cutoff,omitempty"` + PeerTurnoverInterval int 
`json:"peer_turnover_interval,omitempty"` + PerformanceWarning bool `json:"performance_warning,omitempty"` + Pex bool `json:"pex,omitempty"` + PreallocateAll bool `json:"preallocate_all,omitempty"` + ProxyAuthEnabled bool `json:"proxy_auth_enabled,omitempty"` + ProxyBittorrent bool `json:"proxy_bittorrent,omitempty"` + ProxyHostnameLookup bool `json:"proxy_hostname_lookup,omitempty"` + ProxyIP string `json:"proxy_ip,omitempty"` + ProxyMisc bool `json:"proxy_misc,omitempty"` + ProxyPassword string `json:"proxy_password,omitempty"` + ProxyPeerConnections bool `json:"proxy_peer_connections,omitempty"` + ProxyPort int `json:"proxy_port,omitempty"` + ProxyRss bool `json:"proxy_rss,omitempty"` + ProxyType string `json:"proxy_type,omitempty"` + ProxyUsername string `json:"proxy_username,omitempty"` + QueueingEnabled bool `json:"queueing_enabled,omitempty"` + RandomPort bool `json:"random_port,omitempty"` + ReannounceWhenAddressChanged bool `json:"reannounce_when_address_changed,omitempty"` + RecheckCompletedTorrents bool `json:"recheck_completed_torrents,omitempty"` + RefreshInterval int `json:"refresh_interval,omitempty"` + RequestQueueSize int `json:"request_queue_size,omitempty"` + ResolvePeerCountries bool `json:"resolve_peer_countries,omitempty"` + ResumeDataStorageType string `json:"resume_data_storage_type,omitempty"` + RssAutoDownloadingEnabled bool `json:"rss_auto_downloading_enabled,omitempty"` + RssDownloadRepackProperEpisodes bool `json:"rss_download_repack_proper_episodes,omitempty"` + RssMaxArticlesPerFeed int `json:"rss_max_articles_per_feed,omitempty"` + RssProcessingEnabled bool `json:"rss_processing_enabled,omitempty"` + RssRefreshInterval int `json:"rss_refresh_interval,omitempty"` + RssSmartEpisodeFilters string `json:"rss_smart_episode_filters,omitempty"` + SavePath string `json:"save_path,omitempty"` + SavePathChangedTmmEnabled bool `json:"save_path_changed_tmm_enabled,omitempty"` + SaveResumeDataInterval int 
`json:"save_resume_data_interval,omitempty"` + ScheduleFromHour int `json:"schedule_from_hour,omitempty"` + ScheduleFromMin int `json:"schedule_from_min,omitempty"` + ScheduleToHour int `json:"schedule_to_hour,omitempty"` + ScheduleToMin int `json:"schedule_to_min,omitempty"` + SchedulerDays int `json:"scheduler_days,omitempty"` + SchedulerEnabled bool `json:"scheduler_enabled,omitempty"` + SendBufferLowWatermark int `json:"send_buffer_low_watermark,omitempty"` + SendBufferWatermark int `json:"send_buffer_watermark,omitempty"` + SendBufferWatermarkFactor int `json:"send_buffer_watermark_factor,omitempty"` + SlowTorrentDlRateThreshold int `json:"slow_torrent_dl_rate_threshold,omitempty"` + SlowTorrentInactiveTimer int `json:"slow_torrent_inactive_timer,omitempty"` + SlowTorrentUlRateThreshold int `json:"slow_torrent_ul_rate_threshold,omitempty"` + SocketBacklogSize int `json:"socket_backlog_size,omitempty"` + SocketReceiveBufferSize int `json:"socket_receive_buffer_size,omitempty"` + SocketSendBufferSize int `json:"socket_send_buffer_size,omitempty"` + SsrfMitigation bool `json:"ssrf_mitigation,omitempty"` + StartPausedEnabled bool `json:"start_paused_enabled,omitempty"` + StopTrackerTimeout int `json:"stop_tracker_timeout,omitempty"` + TempPath string `json:"temp_path,omitempty"` + TempPathEnabled bool `json:"temp_path_enabled,omitempty"` + TorrentChangedTmmEnabled bool `json:"torrent_changed_tmm_enabled,omitempty"` + TorrentContentLayout string `json:"torrent_content_layout,omitempty"` + TorrentFileSizeLimit int `json:"torrent_file_size_limit,omitempty"` + TorrentStopCondition string `json:"torrent_stop_condition,omitempty"` + UpLimit int `json:"up_limit,omitempty"` + UploadChokingAlgorithm int `json:"upload_choking_algorithm,omitempty"` + UploadSlotsBehavior int `json:"upload_slots_behavior,omitempty"` + Upnp bool `json:"upnp,omitempty"` + UpnpLeaseDuration int `json:"upnp_lease_duration,omitempty"` + UseCategoryPathsInManualMode bool 
`json:"use_category_paths_in_manual_mode,omitempty"` + UseHTTPS bool `json:"use_https,omitempty"` + UseSubcategories bool `json:"use_subcategories,omitempty"` + UtpTCPMixedMode int `json:"utp_tcp_mixed_mode,omitempty"` + ValidateHTTPSTrackerCertificate bool `json:"validate_https_tracker_certificate,omitempty"` + WebUIAddress string `json:"web_ui_address,omitempty"` + WebUIBanDuration int `json:"web_ui_ban_duration,omitempty"` + WebUIClickjackingProtectionEnabled bool `json:"web_ui_clickjacking_protection_enabled,omitempty"` + WebUICsrfProtectionEnabled bool `json:"web_ui_csrf_protection_enabled,omitempty"` + WebUICustomHTTPHeaders string `json:"web_ui_custom_http_headers,omitempty"` + WebUIDomainList string `json:"web_ui_domain_list,omitempty"` + WebUIHostHeaderValidationEnabled bool `json:"web_ui_host_header_validation_enabled,omitempty"` + WebUIHTTPSCertPath string `json:"web_ui_https_cert_path,omitempty"` + WebUIHTTPSKeyPath string `json:"web_ui_https_key_path,omitempty"` + WebUIMaxAuthFailCount int `json:"web_ui_max_auth_fail_count,omitempty"` + WebUIPort int `json:"web_ui_port,omitempty"` + WebUIReverseProxiesList string `json:"web_ui_reverse_proxies_list,omitempty"` + WebUIReverseProxyEnabled bool `json:"web_ui_reverse_proxy_enabled,omitempty"` + WebUISecureCookieEnabled bool `json:"web_ui_secure_cookie_enabled,omitempty"` + WebUISessionTimeout int `json:"web_ui_session_timeout,omitempty"` + WebUIUpnp bool `json:"web_ui_upnp,omitempty"` + WebUIUseCustomHTTPHeadersEnabled bool `json:"web_ui_use_custom_http_headers_enabled,omitempty"` + WebUIUsername string `json:"web_ui_username,omitempty"` +} + +func (c *client) Version(ctx context.Context) (string, error) { + apiUrl := fmt.Sprintf("%s/api/v2/app/version", c.config.Address) + + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return "", err + } + + if result.code != 200 { + return "", errors.New("get version failed: " + string(result.body)) + } + + return 
string(result.body), nil +} + +func (c *client) WebApiVersion(ctx context.Context) (string, error) { + apiUrl := fmt.Sprintf("%s/api/v2/app/webapiVersion", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return "", err + } + + if result.code != 200 { + return "", errors.New("get version failed: " + string(result.body)) + } + + return string(result.body), nil +} + +func (c *client) BuildInfo(ctx context.Context) (*BuildInfo, error) { + apiUrl := fmt.Sprintf("%s/api/v2/app/buildInfo", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get build info failed: " + string(result.body)) + } + + var build = new(BuildInfo) + if err := json.Unmarshal(result.body, build); err != nil { + return nil, err + } + + return build, nil +} + +func (c *client) Shutdown(ctx context.Context) error { + apiUrl := fmt.Sprintf("%s/api/v2/app/shutdown", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + method: http.MethodPost, + url: apiUrl, + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("shutdown application failed: " + string(result.body)) + } + + return nil +} + +func (c *client) GetPreferences(ctx context.Context) (*Preferences, error) { + apiUrl := fmt.Sprintf("%s/api/v2/app/preferences", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get preference failed: " + string(result.body)) + } + + var preferences = new(Preferences) + if err := json.Unmarshal(result.body, preferences); err != nil { + return nil, err + } + + return preferences, nil +} + +func (c *client) SetPreferences(ctx context.Context, prefs *Preferences) error { + apiUrl := fmt.Sprintf("%s/api/v2/app/setPreferences", c.config.Address) + data, err := 
json.Marshal(prefs) + if err != nil { + return err + } + var formData bytes.Buffer + formData.Write([]byte("json=")) + formData.Write(data) + + result, err := c.doRequest(ctx, &requestData{ + method: http.MethodPost, + url: apiUrl, + contentType: ContentTypeFormUrlEncoded, + body: &formData, + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set preference failed: " + string(result.body)) + } + + return nil +} + +func (c *client) DefaultSavePath(ctx context.Context) (string, error) { + apiUrl := fmt.Sprintf("%s/api/v2/app/defaultSavePath", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return "", err + } + + if result.code != 200 { + return "", errors.New("get default save path failed: " + string(result.body)) + } + + return string(result.body), nil +} diff --git a/pkg/qbittorrent/application_test.go b/pkg/qbittorrent/application_test.go new file mode 100644 index 0000000..dfdad0a --- /dev/null +++ b/pkg/qbittorrent/application_test.go @@ -0,0 +1,73 @@ +package qbittorrent + +import ( + "context" + "testing" +) + +func TestClient_Version(t *testing.T) { + ctx := context.Background() + version, err := c.Application().Version(ctx) + if err != nil { + t.Fatal(err) + } + t.Log(version) +} + +func TestClient_WebApiVersion(t *testing.T) { + ctx := context.Background() + version, err := c.Application().WebApiVersion(ctx) + if err != nil { + t.Fatal(err) + } + t.Log(version) +} + +func TestClient_BuildInfo(t *testing.T) { + ctx := context.Background() + info, err := c.Application().BuildInfo(ctx) + if err != nil { + t.Fatal(err) + } + t.Logf("build: %+v", info) +} + +func TestClient_Shutdown(t *testing.T) { + ctx := context.Background() + if err := c.Application().Shutdown(ctx); err != nil { + t.Fatal(err) + } + t.Log("shutting down") +} + +func TestClient_GetPreferences(t *testing.T) { + ctx := context.Background() + prefs, err := c.Application().GetPreferences(ctx) + if err 
!= nil { + t.Fatal(err) + } + t.Logf("prefs: %+v", prefs) +} + +func TestClient_SetPreferences(t *testing.T) { + ctx := context.Background() + prefs, err := c.Application().GetPreferences(ctx) + if err != nil { + t.Fatal(err) + } + + prefs.FileLogAge = 301 + if err := c.Application().SetPreferences(ctx, prefs); err != nil { + t.Fatal(err) + } + t.Logf("success") +} + +func TestClient_DefaultSavePath(t *testing.T) { + ctx := context.Background() + path, err := c.Application().DefaultSavePath(ctx) + if err != nil { + t.Fatal(err) + } + t.Logf("path: %s", path) +} diff --git a/pkg/qbittorrent/authentication.go b/pkg/qbittorrent/authentication.go new file mode 100644 index 0000000..eb91b2c --- /dev/null +++ b/pkg/qbittorrent/authentication.go @@ -0,0 +1,85 @@ +package qbittorrent + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/http/cookiejar" + "net/url" + "strings" +) + +type Authentication interface { + // Login cookie-based authentication, after calling NewClient, do not need to call Login again, + // it is the default behavior + Login(ctx context.Context) error + // Logout deactivate cookies + Logout(ctx context.Context) error +} + +func (c *client) Login(ctx context.Context) error { + if c.config.Username == "" || c.config.Password == "" { + return errors.New("username or password is empty") + } + + formData := url.Values{} + formData.Set("username", c.config.Username) + formData.Set("password", c.config.Password) + encodedFormData := formData.Encode() + + apiUrl := fmt.Sprintf("%s/api/v2/auth/login", c.config.Address) + + result, err := c.doRequest(ctx, &requestData{ + method: http.MethodPost, + url: apiUrl, + body: strings.NewReader(encodedFormData), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("login failed: " + string(result.body)) + } + + if string(result.body) == "Fails." { + return ErrAuthFailed + } + + if string(result.body) != "Ok." 
{ + return errors.New("login failed: " + string(result.body)) + } + + if c.cookieJar == nil { + c.cookieJar, err = cookiejar.New(nil) + if err != nil { + return err + } + } + + u, err := url.Parse(c.config.Address) + if err != nil { + return err + } + c.cookieJar.SetCookies(u, result.cookies) + + return nil +} + +func (c *client) Logout(ctx context.Context) error { + apiUrl := fmt.Sprintf("%s/api/v2/auth/logout", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + method: http.MethodPost, + url: apiUrl, + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("logout failed: " + string(result.body)) + } + + return nil +} diff --git a/pkg/qbittorrent/authentication_test.go b/pkg/qbittorrent/authentication_test.go new file mode 100644 index 0000000..538dac3 --- /dev/null +++ b/pkg/qbittorrent/authentication_test.go @@ -0,0 +1,24 @@ +package qbittorrent + +import ( + "context" + "testing" +) + +func TestClient_Login(t *testing.T) { + ctx := context.Background() + if err := c.Authentication().Login(ctx); err != nil { + t.Fatal(err) + } +} + +func TestClient_Logout(t *testing.T) { + ctx := context.Background() + if err := c.Authentication().Login(ctx); err != nil { + t.Fatal(err) + } + + if err := c.Authentication().Logout(ctx); err != nil { + t.Fatal(err) + } +} diff --git a/pkg/qbittorrent/client.go b/pkg/qbittorrent/client.go new file mode 100644 index 0000000..2ec6fba --- /dev/null +++ b/pkg/qbittorrent/client.go @@ -0,0 +1,39 @@ +package qbittorrent + +import "context" + +// Client represents a qBittorrent client +type Client interface { + // Authentication auth qBittorrent client + Authentication() Authentication + // Application get qBittorrent application info + Application() Application + // Log get qBittorrent log + Log() Log + // Sync get qBittorrent events + Sync() Sync + // Transfer transfer manage + Transfer() Transfer + // Torrent manage for torrent + Torrent() Torrent + // Search api for search + Search() 
Search + // RSS api for rss + RSS() RSS +} + +func NewClient(ctx context.Context, cfg *Config) (Client, error) { + var c = &client{config: cfg, clientPool: newClientPool(cfg.ConnectionMaxIdles, cfg.ConnectionTimeout)} + return c, nil +} + +func LoginClient(ctx context.Context, cfg *Config) (Client, error) { + var c = &client{config: cfg, clientPool: newClientPool(cfg.ConnectionMaxIdles, cfg.ConnectionTimeout)} + if err := c.Authentication().Login(ctx); err != nil { + return nil, err + } + if cfg.RefreshCookie { + go c.refreshCookie() + } + return c, nil +} diff --git a/pkg/qbittorrent/client_impl.go b/pkg/qbittorrent/client_impl.go new file mode 100644 index 0000000..5151fd5 --- /dev/null +++ b/pkg/qbittorrent/client_impl.go @@ -0,0 +1,135 @@ +package qbittorrent + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/cookiejar" + "net/url" + "strings" + "time" +) + +type responseResult struct { + code int + body []byte + cookies []*http.Cookie +} + +type requestData struct { + method string + url string + contentType string + body io.Reader +} + +var _ Client = (*client)(nil) + +type client struct { + config *Config + clientPool *clientPool + cookieJar *cookiejar.Jar +} + +func (c *client) Authentication() Authentication { + return c +} + +func (c *client) Application() Application { + return c +} + +func (c *client) Log() Log { + return c +} + +func (c *client) Sync() Sync { + return c +} + +func (c *client) Transfer() Transfer { + return c +} + +func (c *client) Torrent() Torrent { + return c +} + +func (c *client) Search() Search { + return c +} + +func (c *client) RSS() RSS { + return c +} + +// doRequest send request +func (c *client) doRequest(ctx context.Context, data *requestData) (*responseResult, error) { + if data.method == "" { + data.method = "GET" + } + if data.contentType == "" { + data.contentType = ContentTypeFormUrlEncoded + } + request, err := http.NewRequestWithContext(ctx, data.method, data.url, data.body) + if err != nil { + return 
nil, err + } + + request.Header.Set("Content-Type", data.contentType) + for key, value := range c.config.CustomHeaders { + request.Header.Set(key, value) + } + hc := c.clientPool.GetClient() + defer c.clientPool.ReleaseClient(hc) + if c.cookieJar != nil { + hc.Jar = c.cookieJar + } + + resp, err := hc.Do(request) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + readAll, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return &responseResult{code: resp.StatusCode, body: readAll, cookies: resp.Cookies()}, nil +} + +func (c *client) cookies() (string, error) { + if c.cookieJar == nil { + return "", ErrNotLogin + } + u, err := url.Parse(c.config.Address) + if err != nil { + return "", err + } + cookies := c.cookieJar.Cookies(u) + if len(cookies) == 0 { + return "", ErrNotLogin + } + var builder strings.Builder + for _, cookie := range cookies { + builder.WriteString(fmt.Sprintf("%s=%s; ", cookie.Name, cookie.Value)) + } + + return builder.String(), nil +} + +func (c *client) refreshCookie() { + ctx := context.Background() + if c.config.RefreshIntervals == 0 { + c.config.RefreshIntervals = time.Hour + } + var ticker = time.NewTicker(c.config.RefreshIntervals) + for range ticker.C { + if err := c.Authentication().Login(ctx); err != nil { + // best-effort re-login to refresh the session cookie; retry on next tick + } + } +} diff --git a/pkg/qbittorrent/client_pool.go b/pkg/qbittorrent/client_pool.go new file mode 100644 index 0000000..d390665 --- /dev/null +++ b/pkg/qbittorrent/client_pool.go @@ -0,0 +1,53 @@ +package qbittorrent + +import ( + "crypto/tls" + "net" + "net/http" + "sync" + "time" +) + +// clientPool defines a pool of HTTP clients +type clientPool struct { + // pool store http.Client instances + *sync.Pool +} + +// newClientPool creates and returns a new clientPool +func newClientPool(maxIdle int, timeout time.Duration) *clientPool { + if maxIdle == 0 { + maxIdle = 128 + } + if timeout == 0 { + timeout = time.Second * 3 + } + return &clientPool{ + Pool: &sync.Pool{ + New: 
func() any { + return &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + MaxIdleConns: maxIdle, + }, + Timeout: timeout, + } + }, + }, + } +} + +// GetClient retrieves a http.Client from the pool +func (p *clientPool) GetClient() *http.Client { + return p.Get().(*http.Client) +} + +// ReleaseClient returns a http.Client back to the pool +func (p *clientPool) ReleaseClient(client *http.Client) { + p.Put(client) +} diff --git a/pkg/qbittorrent/client_test.go b/pkg/qbittorrent/client_test.go new file mode 100644 index 0000000..03bdee7 --- /dev/null +++ b/pkg/qbittorrent/client_test.go @@ -0,0 +1,56 @@ +package qbittorrent + +import ( + "context" + "net/url" + "testing" + "time" +) + +var ( + c Client +) + +func init() { + ctx := context.Background() + var err error + c, err = LoginClient(ctx, &Config{ + Address: "http://192.168.3.33:38080", + Username: "admin", + Password: "J0710cz5", + RefreshIntervals: time.Hour, + ConnectionTimeout: time.Second * 3, + CustomHeaders: map[string]string{ + //"Origin": "http://192.168.3.33:8080", + //"Referer": "http://192.168.3.33:8080", + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0", + }, + }) + if err != nil { + panic(err) + } +} + +func TestFormEncoder(t *testing.T) { + var option = LogOption{ + Normal: true, + Info: true, + Warning: false, + Critical: false, + LastKnownId: 0, + } + var form = url.Values{} + err := encoder.Encode(option, form) + if err != nil { + t.Fatal(err) + } + t.Log(form) +} + +func TestFormEncode(t *testing.T) { + var form = url.Values{} + form.Add("username", "admin hahaha") + form.Add("password", "J0710c?//&z5") + fe := form.Encode() + t.Log(fe) +} diff --git a/pkg/qbittorrent/common.go b/pkg/qbittorrent/common.go new 
file mode 100644 index 0000000..d4fb846 --- /dev/null +++ b/pkg/qbittorrent/common.go @@ -0,0 +1,10 @@ +package qbittorrent + +import "github.com/gorilla/schema" + +const ( + ContentTypeJSON = "application/json" + ContentTypeFormUrlEncoded = "application/x-www-form-urlencoded" +) + +var encoder = schema.NewEncoder() diff --git a/pkg/qbittorrent/config.go b/pkg/qbittorrent/config.go new file mode 100644 index 0000000..bd8798f --- /dev/null +++ b/pkg/qbittorrent/config.go @@ -0,0 +1,25 @@ +package qbittorrent + +import "time" + +type Config struct { + // Address qBittorrent endpoint + Address string + // Username used to access the WebUI + Username string + // Password used to access the WebUI + Password string + + // HTTP configuration + + // CustomHeaders custom headers + CustomHeaders map[string]string + // ConnectionTimeout request timeout, default 3 seconds + ConnectionTimeout time.Duration + // ConnectionMaxIdles http client pool, default 128 + ConnectionMaxIdles int + // RefreshCookie whether to automatically refresh cookies + RefreshCookie bool + // SessionTimeout interval for refreshing cookies, default 1 hour + RefreshIntervals time.Duration +} diff --git a/pkg/qbittorrent/error_code.go b/pkg/qbittorrent/error_code.go new file mode 100644 index 0000000..827d74b --- /dev/null +++ b/pkg/qbittorrent/error_code.go @@ -0,0 +1,8 @@ +package qbittorrent + +import "errors" + +var ( + ErrNotLogin = errors.New("not login") + ErrAuthFailed = errors.New("auth failed") +) diff --git a/pkg/qbittorrent/log.go b/pkg/qbittorrent/log.go new file mode 100644 index 0000000..d81fbef --- /dev/null +++ b/pkg/qbittorrent/log.go @@ -0,0 +1,89 @@ +package qbittorrent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strconv" + "strings" +) + +type LogOption struct { + Normal bool `schema:"normal,omitempty"` // include normal messages + Info bool `schema:"info,omitempty"` // include info messages + Warning bool `schema:"warning,omitempty"` // include warning 
messages + Critical bool `schema:"critical,omitempty"` // include critical messages + LastKnownId int64 `schema:"last_known_id,omitempty"` // exclude messages with "message id" <= (default: last_known_id-1) +} + +type LogEntry struct { + Id int `json:"id,omitempty"` // id of the message or peer + Timestamp int `json:"timestamp,omitempty"` // seconds since epoch + Type int `json:"type,omitempty"` // type of the message, Log::NORMAL: 1, Log::INFO: 2, Log::WARNING: 4, Log::CRITICAL: 8 + Message string `json:"message,omitempty"` // text of the message + IP string `json:"ip"` // ip of the peer + Blocked bool `json:"blocked,omitempty"` // whether the peer was blocked + Reason string `json:"reason,omitempty"` // Reason of the block +} + +type Log interface { + // GetLog get log + GetLog(ctx context.Context, option *LogOption) ([]*LogEntry, error) + // GetPeerLog get peer log + GetPeerLog(ctx context.Context, lastKnownId int) ([]*LogEntry, error) +} + +func (c *client) GetLog(ctx context.Context, option *LogOption) ([]*LogEntry, error) { + var form = url.Values{} + err := encoder.Encode(option, form) + if err != nil { + return nil, err + } + apiUrl := fmt.Sprintf("%s/api/v2/log/main?%s", c.config.Address, form.Encode()) + + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + body: strings.NewReader(form.Encode()), + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get log failed: " + string(result.body)) + } + + var logs []*LogEntry + if err := json.Unmarshal(result.body, &logs); err != nil { + return nil, err + } + + return logs, nil +} + +func (c *client) GetPeerLog(ctx context.Context, lastKnownId int) ([]*LogEntry, error) { + apiUrl := fmt.Sprintf("%s/api/v2/log/peers", c.config.Address) + var form = url.Values{} + form.Add("last_known_id", strconv.Itoa(lastKnownId)) + + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + body: strings.NewReader(form.Encode()), + }) + if err != nil { + return nil, 
err + } + + if result.code != 200 { + return nil, errors.New("get peer log failed: " + string(result.body)) + } + + var logs []*LogEntry + if err := json.Unmarshal(result.body, &logs); err != nil { + return nil, err + } + + return logs, nil +} diff --git a/pkg/qbittorrent/log_test.go b/pkg/qbittorrent/log_test.go new file mode 100644 index 0000000..fb5aece --- /dev/null +++ b/pkg/qbittorrent/log_test.go @@ -0,0 +1,39 @@ +package qbittorrent + +import ( + "context" + "encoding/json" + "testing" +) + +func TestClient_GetLog(t *testing.T) { + ctx := context.Background() + entries, err := c.Log().GetLog(ctx, &LogOption{ + Normal: true, + Info: true, + Warning: true, + Critical: true, + LastKnownId: 0, + }) + if err != nil { + t.Fatal(err) + } + bytes, err := json.Marshal(entries) + if err != nil { + t.Fatal(err) + } + t.Log(string(bytes)) +} + +func TestClient_GetPeerLog(t *testing.T) { + ctx := context.Background() + entries, err := c.Log().GetPeerLog(ctx, -1) + if err != nil { + t.Fatal(err) + } + bytes, err := json.Marshal(entries) + if err != nil { + t.Fatal(err) + } + t.Log(string(bytes)) +} diff --git a/pkg/qbittorrent/rss.go b/pkg/qbittorrent/rss.go new file mode 100644 index 0000000..2e5da65 --- /dev/null +++ b/pkg/qbittorrent/rss.go @@ -0,0 +1,323 @@ +package qbittorrent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" +) + +type RSS interface { + // AddFolder create new folder for rss, full path of added folder such as "The Pirate Bay\Top100" + AddFolder(ctx context.Context, path string) error + // AddFeed add feed + AddFeed(ctx context.Context, option *RssAddFeedOption) error + // RemoveItem remove folder or feed + RemoveItem(ctx context.Context, path string) error + // MoveItem move or rename folder or feed + MoveItem(ctx context.Context, srcPath, destPath string) error + // GetItems list all items, if withData is true, will return all data + GetItems(ctx context.Context, withData bool) 
(map[string]interface{}, error) + // MarkAsRead if articleId is provided only the article is marked as read otherwise the whole feed + // is going to be marked as read. + MarkAsRead(ctx context.Context, option *RssMarkAsReadOption) error + // RefreshItem refresh folder or feed + RefreshItem(ctx context.Context, itemPath string) error + // SetAutoDownloadingRule set auto-downloading rule + SetAutoDownloadingRule(ctx context.Context, ruleName string, ruleDef *RssAutoDownloadingRuleDef) error + // RenameAutoDownloadingRule rename auto-downloading rule + RenameAutoDownloadingRule(ctx context.Context, ruleName, newRuleName string) error + // RemoveAutoDownloadingRule remove auto-downloading rule + RemoveAutoDownloadingRule(ctx context.Context, ruleName string) error + // GetAllAutoDownloadingRules get all auto-downloading rules + GetAllAutoDownloadingRules(ctx context.Context) (map[string]*RssAutoDownloadingRuleDef, error) + // GetAllArticlesMatchingRule get all articles matching a rule + GetAllArticlesMatchingRule(ctx context.Context, ruleName string) (map[string][]string, error) +} + +type RssAddFeedOption struct { + // URL feed of rss such as http://thepiratebay.org/rss//top100/200 + URL string `schema:"url"` + // Folder full path of added folder, optional + Folder string `schema:"path,omitempty"` +} + +type RssMarkAsReadOption struct { + // ItemPath current full path of item + ItemPath string `schema:"itemPath"` + // ArticleId id of article, optional + ArticleId string `schema:"articleId,omitempty"` +} + +type RssAutoDownloadingRuleDefTorrentParams struct { + Category string `json:"category,omitempty"` + DownloadLimit int `json:"download_limit,omitempty"` + DownloadPath int `json:"download_path,omitempty"` + InactiveSeedingTimeLimit int `json:"inactive_seeding_time_limit,omitempty"` + OperatingMode string `json:"operating_mode,omitempty"` + RatioLimit int `json:"ratio_limit,omitempty"` + SavePath string `json:"save_path,omitempty"` + SeedingTimeLimit int 
`json:"seeding_time_limit,omitempty"` + SkipChecking bool `json:"skip_checking,omitempty"` + Tags []string `json:"tags,omitempty"` + UploadLimit int `json:"upload_limit,omitempty"` + Stopped bool `json:"stopped,omitempty"` + UseAutoTMM bool `json:"use_auto_tmm,omitempty"` +} + +type RssAutoDownloadingRuleDef struct { + AddPaused bool `json:"addPaused,omitempty"` + AffectedFeeds []string `json:"affectedFeeds,omitempty"` + AssignedCategory string `json:"assignedCategory,omitempty"` + Enabled bool `json:"enabled,omitempty"` + EpisodeFilter string `json:"episodeFilter,omitempty"` + IgnoreDays int `json:"ignoreDays,omitempty"` + LastMatch string `json:"lastMatch,omitempty"` + MustContain string `json:"mustContain,omitempty"` + MustNotContain string `json:"mustNotContain,omitempty"` + PreviouslyMatchedEpisodes []string `json:"previouslyMatchedEpisodes,omitempty"` + Priority int `json:"priority,omitempty"` + SavePath string `json:"savePath,omitempty"` + SmartFilter bool `json:"smartFilter,omitempty"` + TorrentParams *RssAutoDownloadingRuleDefTorrentParams `json:"torrentParams,omitempty"` + UseRegex bool `json:"useRegex,omitempty"` +} + +func (c *client) AddFolder(ctx context.Context, path string) error { + var formData = url.Values{} + formData.Add("path", path) + var apiUrl = fmt.Sprintf("%s/api/v2/rss/addFolder", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("add rss folder failed: " + string(result.body)) + } + return nil +} + +func (c *client) AddFeed(ctx context.Context, opt *RssAddFeedOption) error { + var formData = url.Values{} + err := encoder.Encode(opt, formData) + if err != nil { + return err + } + var apiUrl = fmt.Sprintf("%s/api/v2/rss/addFolder", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: 
strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("add rss feed failed: " + string(result.body)) + } + return nil +} + +func (c *client) RemoveItem(ctx context.Context, path string) error { + var formData = url.Values{} + formData.Add("path", path) + var apiUrl = fmt.Sprintf("%s/api/v2/rss/removeItem", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("remove rss item failed: " + string(result.body)) + } + return nil +} + +func (c *client) MoveItem(ctx context.Context, srcPath, destPath string) error { + var formData = url.Values{} + formData.Add("itemPath", srcPath) + formData.Add("destPath", destPath) + var apiUrl = fmt.Sprintf("%s/api/v2/rss/moveItem", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("move rss item failed: " + string(result.body)) + } + return nil +} + +func (c *client) GetItems(ctx context.Context, withData bool) (map[string]interface{}, error) { + var apiUrl = fmt.Sprintf("%s/api/v2/rss/items?withData=%t", c.config.Address, withData) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodGet, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get rss items failed: " + string(result.body)) + } + var data = make(map[string]interface{}) + err = json.Unmarshal(result.body, &data) + return data, err +} + +func (c *client) MarkAsRead(ctx context.Context, opt *RssMarkAsReadOption) error { + var formData = url.Values{} + err := encoder.Encode(opt, formData) + if err != nil { + return err + } + var apiUrl = 
fmt.Sprintf("%s/api/v2/rss/markAsRead", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("mark as read rss item failed: " + string(result.body)) + } + return nil +} + +func (c *client) RefreshItem(ctx context.Context, itemPath string) error { + var formData = url.Values{} + formData.Add("itemPath", itemPath) + var apiUrl = fmt.Sprintf("%s/api/v2/rss/refreshItem", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("refresh rss item failed: " + string(result.body)) + } + return nil +} + +func (c *client) SetAutoDownloadingRule(ctx context.Context, ruleName string, ruleDef *RssAutoDownloadingRuleDef) error { + var formData = url.Values{} + formData.Add("ruleName", ruleName) + ruleDefBytes, err := json.Marshal(ruleDef) + if err != nil { + return err + } + formData.Add("ruleDef", string(ruleDefBytes)) + var apiUrl = fmt.Sprintf("%s/api/v2/rss/setRule", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set auto downloading rule failed: " + string(result.body)) + } + return nil +} + +func (c *client) RenameAutoDownloadingRule(ctx context.Context, ruleName, newRuleName string) error { + var formData = url.Values{} + formData.Add("ruleName", ruleName) + formData.Add("newRuleName", newRuleName) + var apiUrl = fmt.Sprintf("%s/api/v2/rss/renameRule", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if 
err != nil { + return err + } + + if result.code != 200 { + return errors.New("rename auto downloading rule failed: " + string(result.body)) + } + return nil +} + +func (c *client) RemoveAutoDownloadingRule(ctx context.Context, ruleName string) error { + var formData = url.Values{} + formData.Add("ruleName", ruleName) + var apiUrl = fmt.Sprintf("%s/api/v2/rss/removeRule", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("remove auto downloading rule failed: " + string(result.body)) + } + return nil +} + +func (c *client) GetAllAutoDownloadingRules(ctx context.Context) (map[string]*RssAutoDownloadingRuleDef, error) { + var apiUrl = fmt.Sprintf("%s/api/v2/rss/matchingArticles", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + if result.code != 200 { + return nil, errors.New("get rss rules failed: " + string(result.body)) + } + var data = make(map[string]*RssAutoDownloadingRuleDef) + err = json.Unmarshal(result.body, &data) + return data, err +} + +func (c *client) GetAllArticlesMatchingRule(ctx context.Context, ruleName string) (map[string][]string, error) { + var formData = url.Values{} + formData.Add("ruleName", ruleName) + var apiUrl = fmt.Sprintf("%s/api/v2/rss/matchingArticles?%s", c.config.Address, formData.Encode()) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + if result.code != 200 { + return nil, errors.New("get rss rule match articles failed: " + string(result.body)) + } + var data = make(map[string][]string) + err = json.Unmarshal(result.body, &data) + return data, err +} diff --git a/pkg/qbittorrent/search.go b/pkg/qbittorrent/search.go new file mode 100644 index 0000000..fa4db8a --- /dev/null +++ b/pkg/qbittorrent/search.go 
@@ -0,0 +1,64 @@ +package qbittorrent + +type Search interface { + Start() + Stop() + Status() + Results() + Delete() + Plugins() + InstallPlugins() + UninstallPlugins() + EnableSearchPlugins() + UpdateSearchPlugins() +} + +func (c *client) Start() { + //TODO implement me + panic("implement me") +} + +func (c *client) Stop() { + //TODO implement me + panic("implement me") +} + +func (c *client) Status() { + //TODO implement me + panic("implement me") +} + +func (c *client) Results() { + //TODO implement me + panic("implement me") +} + +func (c *client) Delete() { + //TODO implement me + panic("implement me") +} + +func (c *client) Plugins() { + //TODO implement me + panic("implement me") +} + +func (c *client) InstallPlugins() { + //TODO implement me + panic("implement me") +} + +func (c *client) UninstallPlugins() { + //TODO implement me + panic("implement me") +} + +func (c *client) EnableSearchPlugins() { + //TODO implement me + panic("implement me") +} + +func (c *client) UpdateSearchPlugins() { + //TODO implement me + panic("implement me") +} diff --git a/pkg/qbittorrent/sync.go b/pkg/qbittorrent/sync.go new file mode 100644 index 0000000..cfd5abc --- /dev/null +++ b/pkg/qbittorrent/sync.go @@ -0,0 +1,121 @@ +package qbittorrent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strconv" +) + +type Sync interface { + // MainData get sync main data, rid is Response ID. if not provided, will be assumed. 
+ // if the given is different from the one of last server reply, will be (see the server reply details for more info) + MainData(ctx context.Context, rid int) (*SyncMainData, error) + // TorrentPeersData get sync torrent peer data, hash is torrent hash, rid is response id + TorrentPeersData(ctx context.Context, hash string, rid int) (*SyncTorrentPeers, error) +} + +type SyncMainData struct { + Rid int `json:"rid,omitempty"` + FullUpdate bool `json:"full_update,omitempty"` + ServerState ServerState `json:"server_state,omitempty"` + Torrents map[string]SyncTorrentInfo `json:"torrents,omitempty"` +} + +type ServerState struct { + AllTimeDl int64 `json:"alltime_dl,omitempty"` + AllTimeUl int64 `json:"alltime_ul,omitempty"` + AverageTimeQueue int `json:"average_time_queue,omitempty"` + DlInfoData int64 `json:"dl_info_data,omitempty"` + DlInfoSpeed int `json:"dl_info_speed,omitempty"` + QueuedIoJobs int `json:"queued_io_jobs,omitempty"` + TotalBuffersSize int `json:"total_buffers_size,omitempty"` + UpInfoData int64 `json:"up_info_data,omitempty"` + UpInfoSpeed int `json:"up_info_speed,omitempty"` + WriteCacheOverload string `json:"write_cache_overload,omitempty"` +} + +type SyncTorrentInfo struct { + AmountLeft int64 `json:"amount_left,omitempty"` + Completed int `json:"completed,omitempty"` + DlSpeed int `json:"dlspeed,omitempty"` + Downloaded int `json:"downloaded,omitempty"` + DownloadedSession int `json:"downloaded_session,omitempty"` + Eta int `json:"eta,omitempty"` + Progress float64 `json:"progress,omitempty"` + SeenComplete int `json:"seen_complete,omitempty"` + TimeActive int `json:"time_active,omitempty"` +} + +type SyncTorrentPeers struct { + Rid int `json:"rid,omitempty"` + FullUpdate bool `json:"full_update,omitempty"` + ShowFlags bool `json:"show_flags,omitempty"` + Peers map[string]SyncTorrentPeer `json:"peers,omitempty"` +} + +type SyncTorrentPeer struct { + Client string `json:"client,omitempty"` + Connection string `json:"connection,omitempty"` + 
Country string `json:"country,omitempty"` + CountryCode string `json:"country_code,omitempty"` + DlSpeed int `json:"dl_speed,omitempty"` + Downloaded int `json:"downloaded,omitempty"` + Files string `json:"files,omitempty"` + Flags string `json:"flags,omitempty"` + FlagsDesc string `json:"flags_desc,omitempty"` + IP string `json:"ip,omitempty"` + PeerIDClient string `json:"peer_id_client,omitempty"` + Port int `json:"port,omitempty"` + Progress float64 `json:"progress,omitempty"` + Relevance float64 `json:"relevance,omitempty"` + UpSpeed int `json:"up_speed,omitempty"` + Uploaded int `json:"uploaded,omitempty"` +} + +func (c *client) MainData(ctx context.Context, rid int) (*SyncMainData, error) { + apiUrl := fmt.Sprintf("%s/api/v2/sync/maindata?rid=%d", c.config.Address, rid) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get main data failed: " + string(result.body)) + } + + var mainData = new(SyncMainData) + if err := json.Unmarshal(result.body, mainData); err != nil { + return nil, err + } + + return mainData, nil +} + +func (c *client) TorrentPeersData(ctx context.Context, hash string, rid int) (*SyncTorrentPeers, error) { + var formData = url.Values{} + formData.Add("hash", hash) + formData.Add("rid", strconv.Itoa(rid)) + apiUrl := fmt.Sprintf("%s/api/v2/sync/torrentPeers?%s", c.config.Address, formData.Encode()) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get torrent peers data failed: " + string(result.body)) + } + + var mainData = new(SyncTorrentPeers) + if err := json.Unmarshal(result.body, mainData); err != nil { + return nil, err + } + + return mainData, nil +} diff --git a/pkg/qbittorrent/sync_test.go b/pkg/qbittorrent/sync_test.go new file mode 100644 index 0000000..58b867c --- /dev/null +++ b/pkg/qbittorrent/sync_test.go 
@@ -0,0 +1,37 @@ +package qbittorrent + +import ( + "context" + "encoding/json" + "testing" + "time" +) + +func TestClient_MainData(t *testing.T) { + ctx := context.Background() + syncMainData, err := c.Sync().MainData(ctx, 0) + if err != nil { + t.Fatal(err) + } + t.Logf("sync main data: %+v", syncMainData) + + time.Sleep(time.Second) + syncMainData, err = c.Sync().MainData(ctx, 0) + if err != nil { + t.Fatal(err) + } + t.Logf("sync main data: %+v", syncMainData) +} + +func TestClient_TorrentPeersData(t *testing.T) { + ctx := context.Background() + peersData, err := c.Sync().TorrentPeersData(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc", 0) + if err != nil { + t.Fatal(err) + } + bytes, err := json.Marshal(peersData) + if err != nil { + t.Fatal(err) + } + t.Log(string(bytes)) +} diff --git a/pkg/qbittorrent/torrent.go b/pkg/qbittorrent/torrent.go new file mode 100644 index 0000000..298955e --- /dev/null +++ b/pkg/qbittorrent/torrent.go @@ -0,0 +1,1362 @@ +package qbittorrent + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/url" + "strconv" + "strings" + + "golang.org/x/exp/constraints" +) + +type Torrent interface { + // GetTorrents get torrent list + GetTorrents(ctx context.Context, opt *TorrentOption) ([]*TorrentInfo, error) + // GetProperties get torrent generic properties + GetProperties(ctx context.Context, hash string) (*TorrentProperties, error) + // GetTrackers get torrent trackers + GetTrackers(ctx context.Context, hash string) ([]*TorrentTracker, error) + // GetWebSeeds get torrent web seeds + GetWebSeeds(ctx context.Context, hash string) ([]*TorrentWebSeed, error) + // GetContents get torrent contents, indexes(optional) of the files you want to retrieve + GetContents(ctx context.Context, hash string, indexes ...int) ([]*TorrentContent, error) + // GetPiecesStates get torrent pieces states + GetPiecesStates(ctx context.Context, hash string) ([]int, error) + // GetPiecesHashes get 
torrent pieces hashes + GetPiecesHashes(ctx context.Context, hash string) ([]string, error) + // PauseTorrents the hashes of the torrents you want to pause + PauseTorrents(ctx context.Context, hashes []string) error + // ResumeTorrents the hashes of the torrents you want to resume + ResumeTorrents(ctx context.Context, hashes []string) error + // DeleteTorrents the hashes of the torrents you want to delete, if set deleteFile to true, + // the downloaded data will also be deleted, otherwise has no effect. + DeleteTorrents(ctx context.Context, hashes []string, deleteFile bool) error + // RecheckTorrents the hashes of the torrents you want to recheck + RecheckTorrents(ctx context.Context, hashes []string) error + // ReAnnounceTorrents the hashes of the torrents you want to reannounce + ReAnnounceTorrents(ctx context.Context, hashes []string) error + // AddNewTorrent add torrents from server local file or from URLs. http://, https://, + // magnet: and bc://bt/ links are supported, but only one onetime + AddNewTorrent(ctx context.Context, opt *TorrentAddOption) error + // AddTrackers add trackers to torrent + AddTrackers(ctx context.Context, hash string, urls []string) error + // EditTrackers edit trackers + EditTrackers(ctx context.Context, hash, origUrl, newUrl string) error + // RemoveTrackers remove trackers + RemoveTrackers(ctx context.Context, hash string, urls []string) error + // AddPeers add peers for torrent, each peer is host:port + AddPeers(ctx context.Context, hashes []string, peers []string) error + // IncreasePriority increase torrent priority + IncreasePriority(ctx context.Context, hashes []string) error + // DecreasePriority decrease torrent priority + DecreasePriority(ctx context.Context, hashes []string) error + // MaxPriority maximal torrent priority + MaxPriority(ctx context.Context, hashes []string) error + // MinPriority minimal torrent priority + MinPriority(ctx context.Context, hashes []string) error + // SetFilePriority set file priority + 
SetFilePriority(ctx context.Context, hash string, id string, priority int) error + // GetDownloadLimit get torrent download limit + GetDownloadLimit(ctx context.Context, hashes []string) (map[string]int, error) + // SetDownloadLimit set torrent download limit, limit in bytes per second, if no limit please set value zero + SetDownloadLimit(ctx context.Context, hashes []string, limit int) error + // SetShareLimit set torrent share limit, ratioLimit: the maximum seeding ratio for the torrent, -2 means the + // global limit should be used, -1 means no limit; seedingTimeLimit: the maximum seeding time (minutes) for the + // torrent, -2 means the global limit should be used, -1 means no limit; inactiveSeedingTimeLimit: the maximum + // amount of time (minutes) the torrent is allowed to seed while being inactive, -2 means the global limit should + // be used, -1 means no limit. + SetShareLimit(ctx context.Context, hashes []string, ratioLimit float64, seedingTimeLimit, inactiveSeedingTimeLimit int) error + // GetUploadLimit get torrent upload limit + GetUploadLimit(ctx context.Context, hashes []string) (map[string]int, error) + // SetUploadLimit set torrent upload limit + SetUploadLimit(ctx context.Context, hashes []string, limit int) error + // SetLocation set torrent location + SetLocation(ctx context.Context, hashes []string, location string) error + // SetName set torrent name + SetName(ctx context.Context, hash string, name string) error + // SetCategory set torrent category + SetCategory(ctx context.Context, hashes []string, category string) error + // GetCategories get all categories + GetCategories(ctx context.Context) (map[string]*TorrentCategory, error) + // AddNewCategory add new category + AddNewCategory(ctx context.Context, category, savePath string) error + // EditCategory edit category + EditCategory(ctx context.Context, category, savePath string) error + // RemoveCategories remove categories + RemoveCategories(ctx context.Context, categories []string) error 
+ // AddTags add torrent tags + AddTags(ctx context.Context, hashes []string, tags []string) error + // RemoveTags remove torrent tags + RemoveTags(ctx context.Context, hashes []string, tags []string) error + // GetTags get all tags + GetTags(ctx context.Context) ([]string, error) + // CreateTags create tags + CreateTags(ctx context.Context, tags []string) error + // DeleteTags delete tags + DeleteTags(ctx context.Context, tags []string) error + // SetAutomaticManagement set automatic torrent management + SetAutomaticManagement(ctx context.Context, hashes []string, enable bool) error + // ToggleSequentialDownload toggle sequential download + ToggleSequentialDownload(ctx context.Context, hashes []string) error + // SetFirstLastPiecePriority set first/last piece priority + SetFirstLastPiecePriority(ctx context.Context, hashes []string) error + // SetForceStart set force start + SetForceStart(ctx context.Context, hashes []string, force bool) error + // SetSuperSeeding set super seeding + SetSuperSeeding(ctx context.Context, hashes []string, enable bool) error + // RenameFile rename file + RenameFile(ctx context.Context, hash, oldPath, newPath string) error + // RenameFolder rename folder + RenameFolder(ctx context.Context, hash, oldPath, newPath string) error +} + +type TorrentOption struct { + // Filter torrent list by state. 
Allowed state filters: all,downloading,seeding,completed,paused, + // active,inactive,resumed,stalled,stalled_uploading,stalled_downloading,errored + Filter string `schema:"filter,omitempty"` + // Category get torrents with the given category, empty string means "without category"; no "category" + // parameter means "any category" + Category string `schema:"category,omitempty"` + // Tag get torrents with the given tag, empty string means "without tag"; no "tag" parameter means "any tag" + Tag string `schema:"tag,omitempty"` + // Sort torrents by given key, they can be sorted using any field of the response's JSON array (which are documented below) as the sort key. + Sort string `schema:"sort,omitempty"` + // Reverse enable reverse sorting. Defaults to false + Reverse bool `schema:"reverse,omitempty"` + // Limit the number of torrents returned + Limit int `schema:"limit,omitempty"` + // Offset set offset (if less than 0, offset from end) + Offset int `schema:"offset,omitempty"` + // Hashes filter by hashes + Hashes []string `schema:"-"` +} + +type TorrentInfo struct { + AddedOn int `json:"added_on"` + AmountLeft int `json:"amount_left"` + AutoTmm bool `json:"auto_tmm"` + Availability float64 `json:"availability"` + Category string `json:"category"` + Completed int `json:"completed"` + CompletionOn int `json:"completion_on"` + ContentPath string `json:"content_path"` + DlLimit int `json:"dl_limit"` + Dlspeed int `json:"dlspeed"` + DownloadPath string `json:"download_path"` + Downloaded int `json:"downloaded"` + DownloadedSession int `json:"downloaded_session"` + Eta int `json:"eta"` + FLPiecePrio bool `json:"f_l_piece_prio"` + ForceStart bool `json:"force_start"` + Hash string `json:"hash"` + InactiveSeedingTimeLimit int `json:"inactive_seeding_time_limit"` + InfohashV1 string `json:"infohash_v1"` + InfohashV2 string `json:"infohash_v2"` + LastActivity int `json:"last_activity"` + MagnetURI string `json:"magnet_uri"` + MaxInactiveSeedingTime int 
`json:"max_inactive_seeding_time"` + MaxRatio int `json:"max_ratio"` + MaxSeedingTime int `json:"max_seeding_time"` + Name string `json:"name"` + NumComplete int `json:"num_complete"` + NumIncomplete int `json:"num_incomplete"` + NumLeechs int `json:"num_leechs"` + NumSeeds int `json:"num_seeds"` + Priority int `json:"priority"` + Progress float64 `json:"progress"` + Ratio float64 `json:"ratio"` + RatioLimit int `json:"ratio_limit"` + SavePath string `json:"save_path"` + SeedingTime int `json:"seeding_time"` + SeedingTimeLimit int `json:"seeding_time_limit"` + SeenComplete int `json:"seen_complete"` + SeqDl bool `json:"seq_dl"` + Size int `json:"size"` + State string `json:"state"` + SuperSeeding bool `json:"super_seeding"` + Tags string `json:"tags"` + TimeActive int `json:"time_active"` + TotalSize int `json:"total_size"` + Tracker string `json:"tracker"` + TrackersCount int `json:"trackers_count"` + UpLimit int `json:"up_limit"` + Uploaded int `json:"uploaded"` + UploadedSession int `json:"uploaded_session"` + Upspeed int `json:"upspeed"` +} + +type TorrentProperties struct { + AdditionDate int `json:"addition_date,omitempty"` + Comment string `json:"comment,omitempty"` + CompletionDate int `json:"completion_date,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + CreationDate int `json:"creation_date,omitempty"` + DlLimit int `json:"dl_limit,omitempty"` + DlSpeed int `json:"dl_speed,omitempty"` + DlSpeedAvg int `json:"dl_speed_avg,omitempty"` + DownloadPath string `json:"download_path,omitempty"` + Eta int `json:"eta,omitempty"` + Hash string `json:"hash,omitempty"` + InfohashV1 string `json:"infohash_v1,omitempty"` + InfohashV2 string `json:"infohash_v2,omitempty"` + IsPrivate bool `json:"is_private,omitempty"` + LastSeen int `json:"last_seen,omitempty"` + Name string `json:"name,omitempty"` + NbConnections int `json:"nb_connections,omitempty"` + NbConnectionsLimit int `json:"nb_connections_limit,omitempty"` + Peers int `json:"peers,omitempty"` + 
PeersTotal int `json:"peers_total,omitempty"` + PieceSize int `json:"piece_size,omitempty"` + PiecesHave int `json:"pieces_have,omitempty"` + PiecesNum int `json:"pieces_num,omitempty"` + Reannounce int `json:"reannounce,omitempty"` + SavePath string `json:"save_path,omitempty"` + SeedingTime int `json:"seeding_time,omitempty"` + Seeds int `json:"seeds,omitempty"` + SeedsTotal int `json:"seeds_total,omitempty"` + ShareRatio float64 `json:"share_ratio,omitempty"` + TimeElapsed int `json:"time_elapsed,omitempty"` + TotalDownloaded int64 `json:"total_downloaded,omitempty"` + TotalDownloadedSession int64 `json:"total_downloaded_session,omitempty"` + TotalSize int64 `json:"total_size,omitempty"` + TotalUploaded int64 `json:"total_uploaded,omitempty"` + TotalUploadedSession int64 `json:"total_uploaded_session,omitempty"` + TotalWasted int `json:"total_wasted,omitempty"` + UpLimit int `json:"up_limit,omitempty"` + UpSpeed int `json:"up_speed,omitempty"` + UpSpeedAvg int `json:"up_speed_avg,omitempty"` +} + +type TorrentTracker struct { + Msg string `json:"msg,omitempty"` + NumDownloaded int `json:"num_downloaded,omitempty"` + NumLeeches int `json:"num_leeches,omitempty"` + NumPeers int `json:"num_peers,omitempty"` + NumSeeds int `json:"num_seeds,omitempty"` + Status int `json:"status,omitempty"` + Tier int `json:"tier,omitempty"` + URL string `json:"url,omitempty"` +} + +type TorrentWebSeed struct { + URL string `json:"url"` +} + +type TorrentContent struct { + Availability float64 `json:"availability,omitempty"` + Index int `json:"index,omitempty"` + IsSeed bool `json:"is_seed,omitempty"` + Name string `json:"name,omitempty"` + PieceRange []int `json:"piece_range,omitempty"` + Priority int `json:"priority,omitempty"` + Progress float64 `json:"progress,omitempty"` + Size int64 `json:"size,omitempty"` +} + +type TorrentAddFileMetadata struct { + // Filename only used to distinguish two files in form-data, does not work on the server side, + // for different files, please 
give different identification names + Filename string + // Data read torrent file content and set to here + Data []byte +} + +type TorrentAddOption struct { + URLs []string `schema:"-"` // torrents url + Torrents []*TorrentAddFileMetadata `schema:"-"` // raw data of torrent file + SavePath string `schema:"savepath,omitempty"` // download folder, optional + Cookies string `schema:"cookie,omitempty"` // cookie sent to download torrent file, optional + Category string `schema:"category,omitempty"` // category for the torrent, optional + Tags []string `schema:"-"` // tags for the torrent, optional + SkipChecking bool `schema:"skip_checking,omitempty"` // skip hash checking, optional + Paused bool `schema:"paused,omitempty"` // add torrent in the pause state, optional + RootFolder bool `schema:"root_folder,omitempty"` // create the root folder, optional + Rename string `schema:"rename,omitempty"` // rename torrent, optional + UpLimit int `schema:"upLimit,omitempty"` // set torrent upload speed, Unit in bytes/second, optional + DlLimit int `schema:"dlLimit,omitempty"` // set torrent download speed, Unit in bytes/second, optional + RatioLimit float64 `schema:"ratioLimit,omitempty"` // set torrent share ratio limit, optional + SeedingTimeLimit int `schema:"seedingTimeLimit,omitempty"` // set torrent seeding torrent limit, Unit in minutes, optional + AutoTMM bool `schema:"autoTMM,omitempty"` // whether Automatic Torrent Management should be used, optional + SequentialDownload string `schema:"sequentialDownload,omitempty"` // enable sequential download, optional + FirstLastPiecePrio string `schema:"firstLastPiecePrio,omitempty"` // prioritize download first last piece, optional +} + +type TorrentCategory struct { + Name string `json:"name,omitempty"` + SavePath string `json:"savePath,omitempty"` +} + +func (c *client) GetTorrents(ctx context.Context, opt *TorrentOption) ([]*TorrentInfo, error) { + var formData = url.Values{} + err := encoder.Encode(opt, formData) + if err != 
nil { + return nil, err + } + if len(opt.Hashes) != 0 { + formData.Add("hashes", strings.Join(opt.Hashes, "|")) + } + + apiUrl := fmt.Sprintf("%s/api/v2/torrents/info?%s", c.config.Address, formData.Encode()) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get torrents failed: " + string(result.body)) + } + + var mainData []*TorrentInfo + if err := json.Unmarshal(result.body, &mainData); err != nil { + return nil, err + } + + return mainData, nil +} + +func (c *client) GetProperties(ctx context.Context, hash string) (*TorrentProperties, error) { + apiUrl := fmt.Sprintf("%s/api/v2/torrents/properties?hash=%s", c.config.Address, hash) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get torrent properties failed: " + string(result.body)) + } + + var mainData = new(TorrentProperties) + if err := json.Unmarshal(result.body, mainData); err != nil { + return nil, err + } + + return mainData, nil +} + +func (c *client) GetTrackers(ctx context.Context, hash string) ([]*TorrentTracker, error) { + apiUrl := fmt.Sprintf("%s/api/v2/torrents/trackers?hash=%s", c.config.Address, hash) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get torrent trackers failed: " + string(result.body)) + } + + var mainData []*TorrentTracker + if err := json.Unmarshal(result.body, &mainData); err != nil { + return nil, err + } + + return mainData, nil +} + +func (c *client) GetWebSeeds(ctx context.Context, hash string) ([]*TorrentWebSeed, error) { + apiUrl := fmt.Sprintf("%s/api/v2/torrents/webseeds?hash=%s", c.config.Address, hash) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 
200 { + return nil, errors.New("get torrent web seeds failed: " + string(result.body)) + } + + var mainData []*TorrentWebSeed + if err := json.Unmarshal(result.body, &mainData); err != nil { + return nil, err + } + + return mainData, nil +} + +func sliceItoa[E constraints.Integer](in []E) []string { + out := make([]string, 0, len(in)) + for _, v := range in { + out = append(out, strconv.FormatInt(int64(v), 10)) + } + return out +} + +func (c *client) GetContents(ctx context.Context, hash string, indexes ...int) ([]*TorrentContent, error) { + var apiUrl string + if len(indexes) != 0 { + + apiUrl = fmt.Sprintf("%s/api/v2/torrents/files?hash=%s&indexes=%s", c.config.Address, hash, strings.Join(sliceItoa(indexes), "|")) + } else { + apiUrl = fmt.Sprintf("%s/api/v2/torrents/files?hash=%s", c.config.Address, hash) + } + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get torrent web seeds failed: " + string(result.body)) + } + + var mainData []*TorrentContent + if err := json.Unmarshal(result.body, &mainData); err != nil { + return nil, err + } + + return mainData, nil +} + +func (c *client) GetPiecesStates(ctx context.Context, hash string) ([]int, error) { + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/pieceStates?hash=%s", c.config.Address, hash) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get torrent pieces states failed: " + string(result.body)) + } + + var mainData []int + if err := json.Unmarshal(result.body, &mainData); err != nil { + return nil, err + } + + return mainData, nil +} + +func (c *client) GetPiecesHashes(ctx context.Context, hash string) ([]string, error) { + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/pieceHashes?hash=%s", c.config.Address, hash) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err 
!= nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get torrent pieces states failed: " + string(result.body)) + } + + var mainData []string + if err := json.Unmarshal(result.body, &mainData); err != nil { + return nil, err + } + + return mainData, nil +} + +func (c *client) PauseTorrents(ctx context.Context, hashes []string) error { + if len(hashes) == 0 { + return errors.New("no torrent hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/pause", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("pause torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) ResumeTorrents(ctx context.Context, hashes []string) error { + if len(hashes) == 0 { + return errors.New("no torrent hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/resume", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("resume torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) DeleteTorrents(ctx context.Context, hashes []string, deleteFile bool) error { + if len(hashes) == 0 { + return errors.New("no torrent hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("deleteFile", strconv.FormatBool(deleteFile)) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/resume", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: 
strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("delete torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) RecheckTorrents(ctx context.Context, hashes []string) error { + if len(hashes) == 0 { + return errors.New("no torrent hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/recheck", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("recheck torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) ReAnnounceTorrents(ctx context.Context, hashes []string) error { + if len(hashes) == 0 { + return errors.New("no torrent hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/reannounce", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("reannounce torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) AddNewTorrent(ctx context.Context, opt *TorrentAddOption) error { + var requestBody bytes.Buffer + var writer = multipart.NewWriter(&requestBody) + + if len(opt.URLs) == 0 && len(opt.Torrents) == 0 { + return errors.New("no torrent url or data provided") + } + + if opt.SavePath != "" { + _ = writer.WriteField("savepath", opt.SavePath) + } + if opt.Cookies != "" { + _ = writer.WriteField("cookies", opt.Cookies) + } + if opt.Category != "" { + _ = writer.WriteField("category", opt.Category) + } + if len(opt.Tags) != 0 { + _ = 
writer.WriteField("tags", strings.Join(opt.Tags, ",")) + } + if opt.SkipChecking { + _ = writer.WriteField("skip_checking", "true") + } + if opt.Paused { + _ = writer.WriteField("paused", "true") + } + if opt.RootFolder { + _ = writer.WriteField("root_folder", "true") + } + if opt.Rename != "" { + _ = writer.WriteField("rename", opt.Rename) + } + if opt.UpLimit != 0 { + _ = writer.WriteField("upLimit", strconv.Itoa(opt.UpLimit)) + } + if opt.DlLimit != 0 { + _ = writer.WriteField("dlLimit", strconv.Itoa(opt.DlLimit)) + } + if opt.RatioLimit != 0 { + _ = writer.WriteField("ratioLimit", strconv.FormatFloat(opt.RatioLimit, 'f', -1, 64)) + } + if opt.SeedingTimeLimit != 0 { + _ = writer.WriteField("seedingTimeLimit", strconv.Itoa(opt.SeedingTimeLimit)) + } + if opt.AutoTMM { + _ = writer.WriteField("autoTMM", "true") + } + if opt.SequentialDownload != "" { + _ = writer.WriteField("sequentialDownload", opt.SequentialDownload) + } + if opt.FirstLastPiecePrio != "" { + _ = writer.WriteField("firstLastPiecePrio", opt.FirstLastPiecePrio) + } + + if len(opt.URLs) != 0 { + _ = writer.WriteField("urls", strings.Join(opt.URLs, "\n")) + } else if len(opt.Torrents) != 0 { + for _, torrent := range opt.Torrents { + formFile, err := writer.CreateFormFile("torrents", torrent.Filename) + if err != nil { + return err + } + _, err = io.Copy(formFile, bytes.NewReader(torrent.Data)) + if err != nil { + return err + } + } + } + _ = writer.Close() + + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/add", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + contentType: writer.FormDataContentType(), + body: &requestBody, + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("add torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) AddTrackers(ctx context.Context, hash string, urls []string) error { + if len(urls) == 0 { + return errors.New("no torrent tracker provided") + } + 
var formData = url.Values{} + formData.Add("urls", strings.Join(urls, "%0A")) + formData.Add("hash", hash) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/addTrackers", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("add torrent trackers failed: " + string(result.body)) + } + return nil +} + +func (c *client) EditTrackers(ctx context.Context, hash, origUrl, newUrl string) error { + var formData = url.Values{} + formData.Add("origUrl", origUrl) + formData.Add("newUrl", newUrl) + formData.Add("hash", hash) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/editTracker", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("edit torrent trackers failed: " + string(result.body)) + } + return nil +} + +func (c *client) RemoveTrackers(ctx context.Context, hash string, urls []string) error { + if len(urls) == 0 { + return errors.New("no torrent tracker provided") + } + var formData = url.Values{} + formData.Add("hash", hash) + formData.Add("urls", strings.Join(urls, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/removeTrackers", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("remove torrent trackers failed: " + string(result.body)) + } + return nil +} + +func (c *client) AddPeers(ctx context.Context, hashes []string, peers []string) error { + if len(hashes) == 0 { + return errors.New("no hashes provided") + } + if len(peers) == 0 { + return errors.New("no peers provided") + } + var formData = 
url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("peers", strings.Join(peers, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/addPeers", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("addPeers torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) IncreasePriority(ctx context.Context, hashes []string) error { + if len(hashes) == 0 { + return errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/increasePrio", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("increasePrio torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) DecreasePriority(ctx context.Context, hashes []string) error { + if len(hashes) == 0 { + return errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/decreasePrio", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("decreasePrio torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) MaxPriority(ctx context.Context, hashes []string) error { + if len(hashes) == 0 { + return errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = 
fmt.Sprintf("%s/api/v2/torrents/topPrio", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("topPrio torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) MinPriority(ctx context.Context, hashes []string) error { + if len(hashes) == 0 { + return errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/bottomPrio", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("bottomPrio torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) SetFilePriority(ctx context.Context, hash string, id string, priority int) error { + var formData = url.Values{} + formData.Add("hash", hash) + formData.Add("id", id) + formData.Add("priority", strconv.Itoa(priority)) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/filePrio", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("filePrio torrents failed: " + string(result.body)) + } + return nil +} + +func (c *client) GetDownloadLimit(ctx context.Context, hashes []string) (map[string]int, error) { + if len(hashes) == 0 { + return nil, errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/downloadLimit", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, 
+ body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get torrents download limit failed: " + string(result.body)) + } + var data = make(map[string]int) + err = json.Unmarshal(result.body, &data) + return data, err +} + +func (c *client) SetDownloadLimit(ctx context.Context, hashes []string, limit int) error { + if len(hashes) == 0 { + return errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("limit", strconv.Itoa(limit)) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/setDownloadLimit", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set torrents download limit failed: " + string(result.body)) + } + return err +} + +func (c *client) SetShareLimit(ctx context.Context, hashes []string, ratioLimit float64, seedingTimeLimit, inactiveSeedingTimeLimit int) error { + if len(hashes) == 0 { + return errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("ratioLimit", strconv.FormatFloat(ratioLimit, 'f', -1, 64)) + formData.Add("seedingTimeLimit", strconv.Itoa(seedingTimeLimit)) + formData.Add("inactiveSeedingTimeLimit", strconv.Itoa(inactiveSeedingTimeLimit)) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/setShareLimits", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set torrents share limit failed: " + string(result.body)) + } + return err +} + +func (c *client) GetUploadLimit(ctx context.Context, hashes []string) (map[string]int, error) { 
+ if len(hashes) == 0 { + return nil, errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/uploadLimit", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get torrents upload limit failed: " + string(result.body)) + } + var data = make(map[string]int) + err = json.Unmarshal(result.body, &data) + return data, err +} + +func (c *client) SetUploadLimit(ctx context.Context, hashes []string, limit int) error { + if len(hashes) == 0 { + return errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("limit", strconv.Itoa(limit)) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/setUploadLimit", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set torrents upload limit failed: " + string(result.body)) + } + return err +} + +func (c *client) SetLocation(ctx context.Context, hashes []string, location string) error { + if len(hashes) == 0 { + return errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("location", location) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/setLocation", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set torrents location failed: " + string(result.body)) + } + return err +} + +func (c *client) 
SetName(ctx context.Context, hash string, name string) error { + var formData = url.Values{} + formData.Add("hash", hash) + formData.Add("name", name) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/rename", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set torrents name failed: " + string(result.body)) + } + return err +} + +func (c *client) SetCategory(ctx context.Context, hashes []string, category string) error { + if len(hashes) == 0 { + return errors.New("no hashes provided") + } + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("category", category) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/setCategory", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set torrents category failed: " + string(result.body)) + } + return err +} + +func (c *client) GetCategories(ctx context.Context) (map[string]*TorrentCategory, error) { + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/categories", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get torrents upload limit failed: " + string(result.body)) + } + var data = make(map[string]*TorrentCategory) + err = json.Unmarshal(result.body, &data) + return data, err +} + +func (c *client) AddNewCategory(ctx context.Context, category, savePath string) error { + var formData = url.Values{} + formData.Add("category", category) + formData.Add("savePath", savePath) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/createCategory", c.config.Address) + 
result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("add new category failed: " + string(result.body)) + } + return err +} + +func (c *client) EditCategory(ctx context.Context, category, savePath string) error { + var formData = url.Values{} + formData.Add("category", category) + formData.Add("savePath", savePath) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/editCategory", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("add new category failed: " + string(result.body)) + } + return err +} + +func (c *client) RemoveCategories(ctx context.Context, categories []string) error { + var formData = url.Values{} + formData.Add("categories", strings.Join(categories, "\n")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/removeCategories", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("remove categories failed: " + string(result.body)) + } + return err +} + +func (c *client) AddTags(ctx context.Context, hashes []string, tags []string) error { + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("tags", strings.Join(tags, ",")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/addTags", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("add torrent tags failed: " + string(result.body)) + } + 
return err +} + +func (c *client) RemoveTags(ctx context.Context, hashes []string, tags []string) error { + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("tags", strings.Join(tags, ",")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/removeTags", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("remove torrent tags failed: " + string(result.body)) + } + return err +} + +func (c *client) GetTags(ctx context.Context) ([]string, error) { + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/tags", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodGet, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get tags failed: " + string(result.body)) + } + var data []string + err = json.Unmarshal(result.body, &data) + return data, err +} + +func (c *client) CreateTags(ctx context.Context, tags []string) error { + var formData = url.Values{} + formData.Add("tags", strings.Join(tags, ",")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/createTags", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("create tags failed: " + string(result.body)) + } + return err +} + +func (c *client) DeleteTags(ctx context.Context, tags []string) error { + var formData = url.Values{} + formData.Add("tags", strings.Join(tags, ",")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/deleteTags", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } 
+ + if result.code != 200 { + return errors.New("delete tags failed: " + string(result.body)) + } + return err +} + +func (c *client) SetAutomaticManagement(ctx context.Context, hashes []string, enable bool) error { + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("enable", strconv.FormatBool(enable)) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/setAutoManagement", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set automatic management failed: " + string(result.body)) + } + return err +} + +func (c *client) ToggleSequentialDownload(ctx context.Context, hashes []string) error { + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/toggleSequentialDownload", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("toggle sequential download failed: " + string(result.body)) + } + return err +} + +func (c *client) SetFirstLastPiecePriority(ctx context.Context, hashes []string) error { + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/toggleFirstLastPiecePrio", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("toggle first last piece prio failed: " + string(result.body)) + } + return err +} + +func (c *client) SetForceStart(ctx context.Context, hashes []string, force bool) error { + var formData = 
url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("value", strconv.FormatBool(force)) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/setForceStart", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set force start failed: " + string(result.body)) + } + return err +} + +func (c *client) SetSuperSeeding(ctx context.Context, hashes []string, enable bool) error { + var formData = url.Values{} + formData.Add("hashes", strings.Join(hashes, "|")) + formData.Add("value", strconv.FormatBool(enable)) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/setSuperSeeding", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set super seeding failed: " + string(result.body)) + } + return err +} + +func (c *client) RenameFile(ctx context.Context, hash, oldPath, newPath string) error { + var formData = url.Values{} + formData.Add("oldPath", oldPath) + formData.Add("newPath", newPath) + formData.Add("hash", hash) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/renameFile", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("rename file failed: " + string(result.body)) + } + return nil +} + +func (c *client) RenameFolder(ctx context.Context, hash, oldPath, newPath string) error { + var formData = url.Values{} + formData.Add("oldPath", oldPath) + formData.Add("newPath", newPath) + formData.Add("hash", hash) + var apiUrl = fmt.Sprintf("%s/api/v2/torrents/renameFolder", c.config.Address) + result, 
err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(formData.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("rename folder failed: " + string(result.body)) + } + return nil +} diff --git a/pkg/qbittorrent/torrent_test.go b/pkg/qbittorrent/torrent_test.go new file mode 100644 index 0000000..d28dfc2 --- /dev/null +++ b/pkg/qbittorrent/torrent_test.go @@ -0,0 +1,299 @@ +package qbittorrent + +import ( + "context" + "encoding/json" + "os" + "testing" +) + +func TestClient_GetTorrents(t *testing.T) { + ctx := context.Background() + torrents, err := c.Torrent().GetTorrents(ctx, &TorrentOption{ + Filter: "", + Category: "movies", + Tag: "hdtime", + Sort: "", + Reverse: false, + Limit: 0, + Offset: 0, + Hashes: nil, + }) + if err != nil { + t.Fatal(err) + } + bytes, err := json.Marshal(torrents) + if err != nil { + t.Fatal(err) + } + t.Log(string(bytes)) +} + +func TestClient_GetProperties(t *testing.T) { + ctx := context.Background() + properties, err := c.Torrent().GetProperties(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc") + if err != nil { + t.Fatal(err) + } + bytes, err := json.Marshal(properties) + if err != nil { + t.Fatal(err) + } + t.Log(string(bytes)) +} + +func TestClient_GetTrackers(t *testing.T) { + ctx := context.Background() + trackers, err := c.Torrent().GetTrackers(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc") + if err != nil { + t.Fatal(err) + } + bytes, err := json.Marshal(trackers) + if err != nil { + t.Fatal(err) + } + t.Log(string(bytes)) +} + +func TestClient_GetWebSeeds(t *testing.T) { + ctx := context.Background() + webSeeds, err := c.Torrent().GetWebSeeds(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc") + if err != nil { + t.Fatal(err) + } + bytes, err := json.Marshal(webSeeds) + if err != nil { + t.Fatal(err) + } + t.Log(string(bytes)) +} + +func TestClient_GetContents(t *testing.T) { + ctx := context.Background() + contents, err := 
c.Torrent().GetContents(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc") + if err != nil { + t.Fatal(err) + } + bytes, err := json.Marshal(contents) + if err != nil { + t.Fatal(err) + } + t.Log(string(bytes)) +} + +func TestClient_GetPiecesStates(t *testing.T) { + ctx := context.Background() + states, err := c.Torrent().GetPiecesStates(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc") + if err != nil { + t.Fatal(err) + } + t.Log(states) +} + +func TestClient_GetPiecesHashes(t *testing.T) { + ctx := context.Background() + hashes, err := c.Torrent().GetPiecesHashes(ctx, "f23daefbe8d24d3dd882b44cb0b4f762bc23b4fc") + if err != nil { + t.Fatal(err) + } + t.Log(hashes) +} + +func TestClient_PauseTorrents(t *testing.T) { + ctx := context.Background() + err := c.Torrent().PauseTorrents(ctx, []string{"202382999be6a4fab395cd9c2c9d294177587904"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent paused") +} + +func TestClient_ResumeTorrents(t *testing.T) { + ctx := context.Background() + err := c.Torrent().ResumeTorrents(ctx, []string{"fd3b4bf1937c59a8fd1a240cddc07172e0b979a2"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent resumed") +} + +func TestClient_DeleteTorrents(t *testing.T) { + ctx := context.Background() + err := c.Torrent().DeleteTorrents(ctx, []string{"202382999be6a4fab395cd9c2c9d294177587904"}, true) + if err != nil { + t.Fatal(err) + } + t.Log("torrent deleted") +} + +func TestClient_RecheckTorrents(t *testing.T) { + ctx := context.Background() + err := c.Torrent().RecheckTorrents(ctx, []string{"fd3b4bf1937c59a8fd1a240cddc07172e0b979a2"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent rechecked") +} + +func TestClient_ReAnnounceTorrents(t *testing.T) { + ctx := context.Background() + err := c.Torrent().ReAnnounceTorrents(ctx, []string{"fd3b4bf1937c59a8fd1a240cddc07172e0b979a2"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent reannounceed") +} + +func TestClient_AddNewTorrent(t *testing.T) { + ctx := context.Background() + fileContent, 
err := os.ReadFile("C:\\Users\\xuthu\\Downloads\\bbbbb.torrent") + if err != nil { + t.Fatal(err) + } + err = c.Torrent().AddNewTorrent(ctx, &TorrentAddOption{ + Torrents: []*TorrentAddFileMetadata{ + { + //Filename: "ttttt.torrent", + Data: fileContent, + }, + }, + Category: "movies", + Tags: []string{"d", "e", "f"}, + SkipChecking: false, + Paused: false, + RootFolder: false, + Rename: "", + UpLimit: 0, + DlLimit: 0, + RatioLimit: 0, + SeedingTimeLimit: 0, + AutoTMM: false, + SequentialDownload: "", + FirstLastPiecePrio: "", + }) + if err != nil { + t.Fatal(err) + } + t.Log("torrent added") +} + +func TestClient_AddTrackers(t *testing.T) { + ctx := context.Background() + err := c.Torrent().AddTrackers(ctx, "ca4523a3db9c6c3a13d7d7f3a545f97b75083032", []string{"https://hddtime.org/announce"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent trackers added") +} + +func TestClient_EditTrackers(t *testing.T) { + ctx := context.Background() + err := c.Torrent().EditTrackers(ctx, "ca4523a3db9c6c3a13d7d7f3a545f97b75083032", "https://hddtime.org/announce", "https://hdctime.org/announce") + if err != nil { + t.Fatal(err) + } + t.Log("torrent trackers edited") +} + +func TestClient_RemoveTrackers(t *testing.T) { + ctx := context.Background() + err := c.Torrent().RemoveTrackers(ctx, "ca4523a3db9c6c3a13d7d7f3a545f97b75083032", []string{"https://hdctime.org/announce"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent trackers removed") +} + +func TestClient_AddPeers(t *testing.T) { + // todo no test + //c.Torrent().AddPeers([]string{"ca4523a3db9c6c3a13d7d7f3a545f97b75083032"}, []string{"10.0.0.1:38080"}) +} + +func TestClient_IncreasePriority(t *testing.T) { + ctx := context.Background() + err := c.Torrent().IncreasePriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent priority increased") +} + +func TestClient_DecreasePriority(t *testing.T) { + ctx := context.Background() + err := 
c.Torrent().DecreasePriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent priority decreased") +} + +func TestClient_MaxPriority(t *testing.T) { + ctx := context.Background() + err := c.Torrent().MaxPriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent priority maxed") +} + +func TestClient_MinPriority(t *testing.T) { + ctx := context.Background() + err := c.Torrent().MinPriority(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent priority mined") +} + +func TestClient_SetFilePriority(t *testing.T) { + // todo no test +} + +func TestClient_GetDownloadLimit(t *testing.T) { + ctx := context.Background() + downloadLimit, err := c.Torrent().GetDownloadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent download limit", downloadLimit) +} + +func TestClient_SetDownloadLimit(t *testing.T) { + ctx := context.Background() + err := c.Torrent().SetDownloadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}, 0) + if err != nil { + t.Fatal(err) + } + t.Log("torrent download limit setted") +} + +func TestClient_SetShareLimit(t *testing.T) { + ctx := context.Background() + err := c.Torrent().SetShareLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}, -2, -2, -2) + if err != nil { + t.Fatal(err) + } + t.Log("torrent share limit setted") +} + +func TestClient_GetUploadLimit(t *testing.T) { + ctx := context.Background() + limit, err := c.Torrent().GetUploadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}) + if err != nil { + t.Fatal(err) + } + t.Log("torrent upload limit", limit) +} + +func TestClient_SetUploadLimit(t *testing.T) { + ctx := context.Background() + err := c.Torrent().SetUploadLimit(ctx, []string{"916a250d32822adca39eb2b53efadfda1a15f902"}, 0) + if err != nil { + 
t.Fatal(err) + } + t.Log("torrent upload limit setted") +} + +func TestClient_SetLocation(t *testing.T) { + // todo test +} diff --git a/pkg/qbittorrent/transfer.go b/pkg/qbittorrent/transfer.go new file mode 100644 index 0000000..e6f0a51 --- /dev/null +++ b/pkg/qbittorrent/transfer.go @@ -0,0 +1,185 @@ +package qbittorrent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" +) + +type TransferStatusBar struct { + ConnectionStatus string `json:"connection_status,omitempty"` + DhtNodes int `json:"dht_nodes,omitempty"` + DlInfoData int64 `json:"dl_info_data,omitempty"` + DlInfoSpeed int `json:"dl_info_speed,omitempty"` + DlRateLimit int `json:"dl_rate_limit,omitempty"` + UpInfoData int `json:"up_info_data,omitempty"` + UpInfoSpeed int `json:"up_info_speed,omitempty"` + UpRateLimit int `json:"up_rate_limit,omitempty"` + Queueing bool `json:"queueing,omitempty"` + UseAltSpeedLimits bool `json:"use_alt_speed_limits,omitempty"` + RefreshInterval int `json:"refresh_interval,omitempty"` +} + +type Transfer interface { + // GlobalStatusBar usually see in qBittorrent status bar + GlobalStatusBar(ctx context.Context) (*TransferStatusBar, error) + // BanPeers the peer to ban, or multiple peers separated by a pipe. + // each peer is host:port + BanPeers(ctx context.Context, peers []string) error + // GetSpeedLimitsMode get alternative speed limits state + GetSpeedLimitsMode(ctx context.Context) (string, error) + // ToggleSpeedLimitsMode toggle alternative speed limits + ToggleSpeedLimitsMode(ctx context.Context) error + // GetGlobalUploadLimit get global upload limit, the response is the value of current global upload speed + // limit in bytes/second; this value will be zero if no limit is applied. 
+ GetGlobalUploadLimit(ctx context.Context) (string, error) + // SetGlobalUploadLimit set global upload limit, set in bytes/second + SetGlobalUploadLimit(ctx context.Context, limit int) error + // GetGlobalDownloadLimit get global download limit, the response is the value of current global download speed + // limit in bytes/second; this value will be zero if no limit is applied. + GetGlobalDownloadLimit(ctx context.Context) (string, error) + // SetGlobalDownloadLimit set global download limit, set in bytes/second + SetGlobalDownloadLimit(ctx context.Context, limit int) error +} + +func (c *client) GlobalStatusBar(ctx context.Context) (*TransferStatusBar, error) { + apiUrl := fmt.Sprintf("%s/api/v2/transfer/info", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return nil, err + } + + if result.code != 200 { + return nil, errors.New("get global transfer status bar failed: " + string(result.body)) + } + + var data = new(TransferStatusBar) + if err := json.Unmarshal(result.body, data); err != nil { + return nil, err + } + + return data, nil +} + +func (c *client) BanPeers(ctx context.Context, peers []string) error { + apiUrl := fmt.Sprintf("%s/api/v2/transfer/banPeers", c.config.Address) + var form = url.Values{} + form.Add("peers", strings.Join(peers, "|")) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + body: strings.NewReader(form.Encode()), + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("ban peers failed: " + string(result.body)) + } + + return nil +} + +func (c *client) GetSpeedLimitsMode(ctx context.Context) (string, error) { + apiUrl := fmt.Sprintf("%s/api/v2/transfer/speedLimitsMode", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return "", err + } + + if result.code != 200 { + return "", errors.New("get speed limits mode failed: " + string(result.body)) + } + + return 
string(result.body), nil +} + +func (c *client) ToggleSpeedLimitsMode(ctx context.Context) error { + apiUrl := fmt.Sprintf("%s/api/v2/transfer/toggleSpeedLimitsMode", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + method: http.MethodPost, + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("toggle speed limits mode failed: " + string(result.body)) + } + + return nil +} + +func (c *client) GetGlobalUploadLimit(ctx context.Context) (string, error) { + apiUrl := fmt.Sprintf("%s/api/v2/transfer/uploadLimit", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return "", err + } + + if result.code != 200 { + return "", errors.New("get global upload limit failed: " + string(result.body)) + } + + return string(result.body), nil +} + +func (c *client) SetGlobalUploadLimit(ctx context.Context, limit int) error { + apiUrl := fmt.Sprintf("%s/api/v2/transfer/setUploadLimit?limit=%d", c.config.Address, limit) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return err + } + + if result.code != 200 { + return errors.New("set global upload limit failed: " + string(result.body)) + } + + return nil +} + +func (c *client) GetGlobalDownloadLimit(ctx context.Context) (string, error) { + apiUrl := fmt.Sprintf("%s/api/v2/transfer/downloadLimit", c.config.Address) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return "", err + } + + if result.code != 200 { + return "", errors.New("get global download limit failed: " + string(result.body)) + } + + return string(result.body), nil +} + +func (c *client) SetGlobalDownloadLimit(ctx context.Context, limit int) error { + apiUrl := fmt.Sprintf("%s/api/v2/transfer/setDownloadLimit?limit=%d", c.config.Address, limit) + result, err := c.doRequest(ctx, &requestData{ + url: apiUrl, + }) + if err != nil { + return err + } + + if result.code != 200 { + return 
errors.New("set global download limit failed: " + string(result.body)) + } + + return nil +} diff --git a/src/config/default.go b/src/config/default.go index 5ace89f..36de950 100644 --- a/src/config/default.go +++ b/src/config/default.go @@ -6,6 +6,16 @@ var defaultConfig = Settings{ Port: 4444, IP: "0.0.0.0", }, + Sources: Sources{ + QBittorrent: QBittorrent{ + DataFolder: "./qbittorrent/data", + MetadataFolder: "./qbittorrent/metadata", + }, + TorrentClient: TorrentClient{ + DataFolder: "./torrent/data", + MetadataFolder: "./torrent/metadata", + }, + }, Mounts: Mounts{ HttpFs: HttpFs{ Enabled: true, @@ -27,17 +37,6 @@ var defaultConfig = Settings{ }, }, - TorrentClient: TorrentClient{ - DataFolder: "./torrent/data", - MetadataFolder: "./torrent/metadata", - DHTNodes: []string{}, - - // GlobalCacheSize: 2048, - - // AddTimeout: 60, - // ReadTimeout: 120, - }, - Log: Log{ Path: "/tmp/tstor", MaxBackups: 2, diff --git a/src/config/model.go b/src/config/model.go index 8dd744e..c41d042 100644 --- a/src/config/model.go +++ b/src/config/model.go @@ -2,16 +2,23 @@ package config // Config is the main config object type Settings struct { - WebUi WebUi `koanf:"webUi"` - TorrentClient TorrentClient `koanf:"torrent"` - Mounts Mounts `koanf:"mounts"` - Log Log `koanf:"log"` + WebUi WebUi `koanf:"webUi"` + + Sources Sources `koanf:"sources"` + + Mounts Mounts `koanf:"mounts"` + Log Log `koanf:"log"` SourceDir string `koanf:"source_dir"` OtelHttp string `koanf:"otel_http"` } +type Sources struct { + TorrentClient TorrentClient `koanf:"torrent"` + QBittorrent QBittorrent `koanf:"qbittorrent"` +} + type WebUi struct { Port int `koanf:"port"` IP string `koanf:"ip"` @@ -25,6 +32,11 @@ type Log struct { Path string `koanf:"path"` } +type QBittorrent struct { + DataFolder string `koanf:"data_folder,omitempty"` + MetadataFolder string `koanf:"metadata_folder,omitempty"` +} + type TorrentClient struct { // ReadTimeout int `koanf:"read_timeout,omitempty"` // AddTimeout int 
`koanf:"add_timeout,omitempty"` diff --git a/src/export/nfs/handler.go b/src/export/nfs/handler.go index bdc9f5b..cbb4030 100644 --- a/src/export/nfs/handler.go +++ b/src/export/nfs/handler.go @@ -7,13 +7,13 @@ import ( nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs" nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers" "git.kmsign.ru/royalcat/tstor/src/config" - "git.kmsign.ru/royalcat/tstor/src/log" + "git.kmsign.ru/royalcat/tstor/src/logwrap" "git.kmsign.ru/royalcat/tstor/src/vfs" ) func NewNFSv3Handler(fs vfs.Filesystem, config config.NFS) (nfs.Handler, error) { nfslog := slog.With("component", "nfs") - nfs.SetLogger(log.NewNFSLog(nfslog)) + nfs.SetLogger(logwrap.NewNFSLog(nfslog)) nfs.Log.SetLevel(nfs.InfoLevel) bfs := &fsWrapper{fs: fs, log: nfslog, timeout: time.Minute} diff --git a/src/export/nfs/kvhandler.go b/src/export/nfs/kvhandler.go index 678e5b5..af30fca 100644 --- a/src/export/nfs/kvhandler.go +++ b/src/export/nfs/kvhandler.go @@ -10,7 +10,7 @@ import ( "git.kmsign.ru/royalcat/tstor/pkg/go-nfs" "git.kmsign.ru/royalcat/tstor/src/config" - "git.kmsign.ru/royalcat/tstor/src/log" + "git.kmsign.ru/royalcat/tstor/src/logwrap" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" @@ -51,7 +51,7 @@ var kvhandlerMeter = otel.Meter("git.kmsign.ru/royalcat/tstor/src/export/nfs.kvh func NewKvHandler(h nfs.Handler, fs nfs.Filesystem, config config.NFS) (nfs.Handler, error) { opts := kvbadger.DefaultOptions[handle](path.Join(config.CachePath, "handlers")) opts.Codec = kv.CodecBinary[handle, *handle]{} - opts.BadgerOptions.Logger = log.BadgerLogger("nfs", "kvhandler") + opts.BadgerOptions.Logger = logwrap.BadgerLogger("nfs", "kvhandler") activeHandles, err := kvbadger.NewBagerKVBinaryKey[uuid.UUID, handle](opts) if err != nil { diff --git a/src/log/badger.go b/src/logwrap/badger.go similarity index 98% rename from src/log/badger.go rename to src/logwrap/badger.go index 51fd2d8..bc801a0 100644 --- a/src/log/badger.go +++ b/src/logwrap/badger.go @@ -1,4 
+1,4 @@ -package log +package logwrap import ( "context" diff --git a/src/log/log.go b/src/logwrap/log.go similarity index 98% rename from src/log/log.go rename to src/logwrap/log.go index 6d0bcdb..00122a1 100644 --- a/src/log/log.go +++ b/src/logwrap/log.go @@ -1,4 +1,4 @@ -package log +package logwrap const FileName = "tstor.log" diff --git a/src/log/nfs.go b/src/logwrap/nfs.go similarity index 99% rename from src/log/nfs.go rename to src/logwrap/nfs.go index fb08422..0eb97c9 100644 --- a/src/log/nfs.go +++ b/src/logwrap/nfs.go @@ -1,4 +1,4 @@ -package log +package logwrap import ( "fmt" diff --git a/src/log/torrent.go b/src/logwrap/torrent.go similarity index 96% rename from src/log/torrent.go rename to src/logwrap/torrent.go index 62c2c8d..0917c9a 100644 --- a/src/log/torrent.go +++ b/src/logwrap/torrent.go @@ -1,4 +1,4 @@ -package log +package logwrap import ( "context" diff --git a/src/logwrap/writer.go b/src/logwrap/writer.go new file mode 100644 index 0000000..45667f5 --- /dev/null +++ b/src/logwrap/writer.go @@ -0,0 +1,48 @@ +package logwrap + +import ( + "bufio" + "bytes" + "context" + "sync" + + "log/slog" +) + +type SlogWriter struct { + ctx context.Context + level slog.Level + log *slog.Logger + + mu sync.Mutex + buffer *bytes.Buffer + scanner *bufio.Scanner +} + +func NewSlogWriter(ctx context.Context, level slog.Level, log *slog.Logger) *SlogWriter { + buf := &bytes.Buffer{} + + return &SlogWriter{ + ctx: ctx, + level: level, + log: log, + buffer: buf, + scanner: bufio.NewScanner(buf), + } +} + +func (sw *SlogWriter) Write(p []byte) (n int, err error) { + sw.mu.Lock() + defer sw.mu.Unlock() + + n, err = sw.buffer.Write(p) + if err != nil { + return n, err + } + + for sw.scanner.Scan() { + sw.log.Log(sw.ctx, sw.level, sw.scanner.Text()) + } + + return n, err +} diff --git a/src/sources/qbittorrent/client.go b/src/sources/qbittorrent/client.go index 5fb2810..96a772d 100644 --- a/src/sources/qbittorrent/client.go +++ b/src/sources/qbittorrent/client.go 
@@ -6,35 +6,54 @@ import ( "slices" "time" - "github.com/xuthus5/qbittorrent-client-go/qbittorrent" + "git.kmsign.ru/royalcat/tstor/pkg/qbittorrent" ) -type client struct { +type cacheClient struct { qb qbittorrent.Client } -func wrapClient(qb qbittorrent.Client) *client { - return &client{qb: qb} +func wrapClient(qb qbittorrent.Client) *cacheClient { + return &cacheClient{qb: qb} } -func (f *client) getFileContent(ctx context.Context, hash string, contextIndex int) (*qbittorrent.TorrentContent, error) { - contents, err := f.qb.Torrent().GetContents(hash) +var errNotFound = fmt.Errorf("not found") + +func (f *cacheClient) getProperties(ctx context.Context, hash string) (*qbittorrent.TorrentProperties, error) { + info, err := f.qb.Torrent().GetProperties(ctx, hash) if err != nil { return nil, err } - contentIndex := slices.IndexFunc(contents, func(c *qbittorrent.TorrentContent) bool { - return c.Index == contextIndex + return info, nil +} + +func (f *cacheClient) listContent(ctx context.Context, hash string) ([]*qbittorrent.TorrentContent, error) { + contents, err := f.qb.Torrent().GetContents(ctx, hash) + if err != nil { + return nil, err + } + return contents, nil +} + +func (f *cacheClient) getContent(ctx context.Context, hash string, contentIndex int) (*qbittorrent.TorrentContent, error) { + contents, err := f.qb.Torrent().GetContents(ctx, hash, contentIndex) + if err != nil { + return nil, err + } + + contentI := slices.IndexFunc(contents, func(c *qbittorrent.TorrentContent) bool { + return c.Index == contentIndex }) - if contentIndex == -1 { + if contentI == -1 { return nil, fmt.Errorf("content not found") } - return contents[contentIndex], nil + return contents[contentI], nil } -func (f *client) isPieceComplete(ctx context.Context, hash string, pieceIndex int) (bool, error) { - completion, err := f.qb.Torrent().GetPiecesStates(hash) +func (f *cacheClient) isPieceComplete(ctx context.Context, hash string, pieceIndex int) (bool, error) { + completion, err := 
f.qb.Torrent().GetPiecesStates(ctx, hash) if err != nil { return false, err } @@ -46,7 +65,7 @@ func (f *client) isPieceComplete(ctx context.Context, hash string, pieceIndex in return false, nil } -func (f *client) waitPieceToComplete(ctx context.Context, hash string, pieceIndex int) error { +func (f *cacheClient) waitPieceToComplete(ctx context.Context, hash string, pieceIndex int) error { const checkingInterval = 1 * time.Second ok, err := f.isPieceComplete(ctx, hash, pieceIndex) diff --git a/src/sources/qbittorrent/daemon.go b/src/sources/qbittorrent/daemon.go index abb19ae..41fe7fa 100644 --- a/src/sources/qbittorrent/daemon.go +++ b/src/sources/qbittorrent/daemon.go @@ -1,28 +1,107 @@ package qbittorrent import ( - "bytes" "context" + "errors" + "fmt" + "io" + "log/slog" + "os" "path" + "path/filepath" + "time" + "git.kmsign.ru/royalcat/tstor/pkg/qbittorrent" + "git.kmsign.ru/royalcat/tstor/pkg/rlog" + "git.kmsign.ru/royalcat/tstor/src/config" + "git.kmsign.ru/royalcat/tstor/src/logwrap" "git.kmsign.ru/royalcat/tstor/src/vfs" "github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/types/infohash" + infohash_v2 "github.com/anacrolix/torrent/types/infohash-v2" "github.com/royalcat/ctxio" - "github.com/xuthus5/qbittorrent-client-go/qbittorrent" ) type Daemon struct { + proc *os.Process qb qbittorrent.Client - client *client + client *cacheClient dataDir string + log *rlog.Logger } -func NewDaemon(dir string) (*Daemon, error) { +const defaultConf = ` +[LegalNotice] +Accepted=true - dataDir := dir + "/data" - qb, err := qbittorrent.NewClient(&qbittorrent.Config{ - Address: "localhost:8080", +[Preferences] +WebUI\LocalHostAuth=false +WebUI\Password_PBKDF2="@ByteArray(qef5I4wZBkDG+PP6/5mQwA==:LoTmorQM/QM5RHI4+dOiu6xfAz9xak6fhR4ZGpRtJF3JNCGG081Yrtva4G71kXz//ODUuWQKTLlrZPuIDvzqUQ==)" +` + +func NewDaemon(conf config.QBittorrent) (*Daemon, error) { + ctx := context.Background() + log := rlog.Component("qbittorrent") + + binPath := conf.MetadataFolder + 
"/qbittorrent-nox" + err := downloadLatestQbitRelease(ctx, binPath) + if err != nil { + return nil, err + } + + daemonLog := log.WithComponent("process") + outLog := logwrap.NewSlogWriter(ctx, slog.LevelInfo, daemonLog.Slog()) + errLog := logwrap.NewSlogWriter(ctx, slog.LevelError, daemonLog.Slog()) + + _, err = os.Stat(conf.MetadataFolder + "/profile/qBittorrent/config/qBittorrent.conf") + if errors.Is(err, os.ErrNotExist) { + err = os.MkdirAll(conf.MetadataFolder+"/profile/qBittorrent/config", 0744) + if err != nil { + return nil, err + } + err = os.WriteFile(conf.MetadataFolder+"/profile/qBittorrent/config/qBittorrent.conf", []byte(defaultConf), 0644) + if err != nil { + return nil, err + } + } + + err = os.MkdirAll(conf.DataFolder, 0744) + if err != nil { + return nil, err + } + + const port = 25436 + + proc, err := runQBittorrent(binPath, conf.MetadataFolder+"/profile", port, outLog, errLog) + if err != nil { + return nil, err + } + + time.Sleep(time.Second) + + qb, err := qbittorrent.NewClient(ctx, &qbittorrent.Config{ + Address: fmt.Sprintf("http://localhost:%d", port), + }) + if err != nil { + return nil, err + } + + for { // wait for qbittorrent to start + _, err = qb.Application().Version(ctx) + if err == nil { + break + } + log.Warn(ctx, "waiting for qbittorrent to start", rlog.Error(err)) + time.Sleep(time.Second) + } + + dataDir, err := filepath.Abs(conf.DataFolder) + if err != nil { + return nil, err + } + + err = qb.Application().SetPreferences(ctx, &qbittorrent.Preferences{ + SavePath: dataDir, }) if err != nil { return nil, err @@ -30,37 +109,20 @@ func NewDaemon(dir string) (*Daemon, error) { return &Daemon{ qb: qb, - dataDir: dataDir, + proc: proc, + dataDir: conf.DataFolder, client: wrapClient(qb), + log: rlog.Component("qbittorrent"), }, nil } -func (fs *Daemon) torrentPath(ih infohash.T) string { - return path.Join(fs.dataDir, ih.HexString()) -} - -func (fs *Daemon) addTorrent(ctx context.Context, f vfs.File) error { - file, err := 
ctxio.ReadAll(ctx, f) +func (d *Daemon) Close(ctx context.Context) error { + err := d.proc.Signal(os.Interrupt) if err != nil { return err } - mi, err := metainfo.Load(bytes.NewBuffer(file)) - if err != nil { - return err - } - ih := mi.HashInfoBytes() - - err = fs.qb.Torrent().AddNewTorrent(&qbittorrent.TorrentAddOption{ - Torrents: []*qbittorrent.TorrentAddFileMetadata{ - { - Data: file, - }, - }, - SavePath: fs.torrentPath(ih), - // SequentialDownload: "true", - // FirstLastPiecePrio: "true", - }) + _, err = d.proc.Wait() if err != nil { return err } @@ -68,27 +130,107 @@ func (fs *Daemon) addTorrent(ctx context.Context, f vfs.File) error { return nil } -func (fs *Daemon) TorrentFS(ctx context.Context, file vfs.File) (*FS, error) { +func (d *Daemon) torrentPath(ih infohash.T) (string, error) { + return filepath.Abs(path.Join(d.dataDir, ih.HexString())) +} + +func (fs *Daemon) TorrentFS(ctx context.Context, file vfs.File) (vfs.Filesystem, error) { + log := fs.log.With(slog.String("file", file.Name())) + ih, err := readInfoHash(ctx, file) if err != nil { return nil, err } + log = log.With(slog.String("infohash", ih.HexString())) - existing, err := fs.qb.Torrent().GetTorrents(&qbittorrent.TorrentOption{ + torrentPath, err := fs.torrentPath(ih) + if err != nil { + return nil, fmt.Errorf("error getting torrent path: %w", err) + } + log = log.With(slog.String("torrentPath", torrentPath)) + + log.Debug(ctx, "creating fs for torrent") + + err = fs.syncTorrentState(ctx, file, ih, torrentPath) + if err != nil { + return nil, fmt.Errorf("error syncing torrent state: %w", err) + } + + return newTorrentFS(ctx, fs.client, file.Name(), ih.HexString(), torrentPath) +} + +func (d *Daemon) syncTorrentState(ctx context.Context, file vfs.File, ih metainfo.Hash, torrentPath string) error { + log := d.log.With(slog.String("file", file.Name()), slog.String("infohash", ih.HexString())) + + existing, err := d.qb.Torrent().GetTorrents(ctx, &qbittorrent.TorrentOption{ Hashes: 
[]string{ih.HexString()}, }) if err != nil { - return nil, err + return fmt.Errorf("error to check torrent existence: %w", err) } + log = log.With(slog.String("torrentPath", torrentPath)) + if len(existing) == 0 { - err := fs.addTorrent(ctx, file) + _, err := file.Seek(0, io.SeekStart) if err != nil { - return nil, err + return err } + data, err := ctxio.ReadAll(ctx, file) + if err != nil { + return err + } + + err = d.qb.Torrent().AddNewTorrent(ctx, &qbittorrent.TorrentAddOption{ + Torrents: []*qbittorrent.TorrentAddFileMetadata{ + { + Data: data, + }, + }, + SavePath: torrentPath, + // SequentialDownload: "true", + FirstLastPiecePrio: "true", + }) + if err != nil { + return err + } + for { + _, err := d.qb.Torrent().GetProperties(ctx, ih.HexString()) + if err == nil { + break + } + log.Error(ctx, "waiting for torrent to be added", rlog.Error(err)) + time.Sleep(time.Millisecond * 15) + } + + log.Info(ctx, "added torrent", slog.String("infohash", ih.HexString())) + + if err != nil { + d.log.Error(ctx, "error adding torrent", rlog.Error(err)) + return err + } + + return nil + } else if len(existing) == 1 { + // info := existing[0] + props, err := d.qb.Torrent().GetProperties(ctx, ih.HexString()) + if err != nil { + return err + } + + if props.SavePath != torrentPath { + log.Info(ctx, "moving torrent to correct location", slog.String("oldPath", props.SavePath)) + err = d.qb.Torrent().SetLocation(ctx, []string{ih.HexString()}, torrentPath) + if err != nil { + return err + } + } + + return nil } - return newTorrentFS(fs.client, file.Name(), ih.HexString(), fs.torrentPath(ih)) + return fmt.Errorf("multiple torrents with the same infohash") + } // TODO caching @@ -97,5 +239,15 @@ func readInfoHash(ctx context.Context, file vfs.File) (infohash.T, error) { if err != nil { return infohash.T{}, err } - return mi.HashInfoBytes(), nil + info, err := mi.UnmarshalInfo() + if err != nil { + return infohash.T{}, err + } + + if info.HasV2() { + ih := 
infohash_v2.HashBytes(mi.InfoBytes) + return *(&ih).ToShort(), nil + } + + return infohash.HashBytes(mi.InfoBytes), nil } diff --git a/src/sources/qbittorrent/fs.go b/src/sources/qbittorrent/fs.go index 30acf06..7df8f8c 100644 --- a/src/sources/qbittorrent/fs.go +++ b/src/sources/qbittorrent/fs.go @@ -2,87 +2,123 @@ package qbittorrent import ( "context" + "fmt" "io" "io/fs" "os" "path" + "strings" "time" + "git.kmsign.ru/royalcat/tstor/pkg/qbittorrent" "git.kmsign.ru/royalcat/tstor/src/vfs" ) type FS struct { - client *client + client *cacheClient name string hash string dataDir string + + content map[string]*qbittorrent.TorrentContent + files map[string]fs.FileInfo + + vfs.FilesystemPrototype } var _ vfs.Filesystem = (*FS)(nil) -func newTorrentFS(client *client, name string, hash string, dataDir string) (*FS, error) { +func newTorrentFS(ctx context.Context, client *cacheClient, name string, hash string, dataDir string) (*FS, error) { + cnts, err := client.listContent(ctx, hash) + if err != nil { + return nil, fmt.Errorf("failed to list content for hash %s: %w", hash, err) + } + + content := make(map[string]*qbittorrent.TorrentContent, len(cnts)) + files := make(map[string]fs.FileInfo, len(cnts)) + for _, cnt := range cnts { + path := vfs.AbsPath(cnt.Name) + files[path] = vfs.NewFileInfo(cnt.Name, cnt.Size) + content[path] = cnt + } + return &FS{ - client: client, - name: name, - hash: hash, + client: client, + name: name, + hash: hash, + dataDir: dataDir, + + content: content, + files: files, + + FilesystemPrototype: vfs.FilesystemPrototype(name), }, nil } -// Info implements vfs.Filesystem. -func (f *FS) Info() (fs.FileInfo, error) { - return vfs.NewDirInfo(f.name), nil -} - -// IsDir implements vfs.Filesystem. -func (f *FS) IsDir() bool { - return true -} - -// Name implements vfs.Filesystem. -func (f *FS) Name() string { - return path.Base(f.dataDir) -} - // Open implements vfs.Filesystem. 
-func (f *FS) Open(ctx context.Context, filename string) (vfs.File, error) { - panic("unimplemented") +func (f *FS) Open(ctx context.Context, name string) (vfs.File, error) { + if name == vfs.Separator { + return vfs.NewDirFile(name), nil + } + + cnt, ok := f.content[name] + if ok { + return openFile(ctx, f.client, f.dataDir, f.hash, cnt) + } + + for p := range f.content { + if strings.HasPrefix(p, name) { + return vfs.NewDirFile(name), nil + } + } + + return nil, vfs.ErrNotExist } // ReadDir implements vfs.Filesystem. -func (f *FS) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) { - panic("unimplemented") +func (fs *FS) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { + return vfs.ListDirFromInfo(fs.files, name) } // Stat implements vfs.Filesystem. -func (f *FS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) { - return vfs.NewDirInfo(f.name), nil -} - -// Type implements vfs.Filesystem. -func (f *FS) Type() fs.FileMode { - return vfs.ROMode +func (f *FS) Stat(ctx context.Context, name string) (fs.FileInfo, error) { + info, ok := f.files[name] + if !ok { + return nil, vfs.ErrNotExist + } + return info, nil } // Unlink implements vfs.Filesystem. 
func (f *FS) Unlink(ctx context.Context, filename string) error { - panic("unimplemented") + return vfs.ErrNotImplemented } -func openFile(ctx context.Context, client client, hash, filePath string) *File { - client.getFileContent(ctx, hash, 0) +func openFile(ctx context.Context, client *cacheClient, torrentDir string, hash string, content *qbittorrent.TorrentContent) (*File, error) { + props, err := client.getProperties(ctx, hash) + if err != nil { + return nil, err + } return &File{ - client: client, - hash: hash, - filePath: filePath, - } + client: client, + hash: hash, + torrentDir: torrentDir, + + filePath: content.Name, + contentIndex: content.Index, + pieceSize: props.PieceSize, + fileSize: content.Size, + + offset: 0, + }, nil } type File struct { - client client + client *cacheClient hash string - dataDir string + torrentDir string filePath string // path inside a torrent directory contentIndex int pieceSize int @@ -94,16 +130,6 @@ type File struct { var _ vfs.File = (*File)(nil) -// Close implements vfs.File. -func (f *File) Close(ctx context.Context) error { - if f.osfile != nil { - err := f.osfile.Close() - f.osfile = nil - return err - } - return nil -} - // Info implements vfs.File. func (f *File) Info() (fs.FileInfo, error) { return &fileInfo{name: path.Base(f.filePath), size: f.fileSize}, nil @@ -173,7 +199,7 @@ func (f *File) Size() int64 { // Type implements vfs.File. func (f *File) Type() fs.FileMode { - return vfs.ROMode + return fs.ModeDir } func (f *File) descriptor() (*os.File, error) { @@ -181,7 +207,7 @@ func (f *File) descriptor() (*os.File, error) { return f.osfile, nil } - osfile, err := os.Open(path.Join(f.dataDir, f.filePath)) + osfile, err := os.Open(path.Join(f.torrentDir, f.filePath)) if err != nil { return nil, err } @@ -190,6 +216,16 @@ func (f *File) descriptor() (*os.File, error) { return f.osfile, nil } +// Close implements vfs.File. 
+func (f *File) Close(ctx context.Context) error { + if f.osfile != nil { + err := f.osfile.Close() + f.osfile = nil + return err + } + return nil +} + type fileInfo struct { name string size int64 diff --git a/src/sources/qbittorrent/install.go b/src/sources/qbittorrent/install.go index 5e47a25..7fc1f38 100644 --- a/src/sources/qbittorrent/install.go +++ b/src/sources/qbittorrent/install.go @@ -20,25 +20,26 @@ import ( const ( repoOwner = "userdocs" repoName = "qbittorrent-nox-static" - binName = "qbittorrent-nox" ) -func runQBittorrent(binDir string, profileDir string, stdout, stderr io.Writer) (*os.Process, error) { - cmd := exec.Command( - path.Join(binDir, binName), - fmt.Sprintf("--profile=%s", profileDir), - ) +func runQBittorrent(binPath string, profileDir string, port int, stdout, stderr io.Writer) (*os.Process, error) { + err := os.Chmod(binPath, 0755) + if err != nil { + return nil, err + } + + cmd := exec.Command(binPath, fmt.Sprintf("--profile=%s", profileDir), fmt.Sprintf("--webui-port=%d", port)) cmd.Stdin = bytes.NewReader([]byte("y\n")) cmd.Stdout = stdout cmd.Stderr = stderr - err := cmd.Start() + err = cmd.Start() if err != nil { return nil, err } return cmd.Process, nil } -func downloadLatestRelease(ctx context.Context, binPath string) error { +func downloadLatestQbitRelease(ctx context.Context, binPath string) error { client := github.NewClient(nil) rel, _, err := client.Repositories.GetLatestRelease(ctx, repoOwner, repoName) if err != nil { diff --git a/src/sources/qbittorrent/install_test.go b/src/sources/qbittorrent/install_test.go index 62f4975..95a8f73 100644 --- a/src/sources/qbittorrent/install_test.go +++ b/src/sources/qbittorrent/install_test.go @@ -11,8 +11,8 @@ func TestDownloadQBittorent(t *testing.T) { ctx := context.Background() tempDir := t.TempDir() require := require.New(t) - err := downloadLatestRelease(ctx, tempDir) + err := downloadLatestQbitRelease(ctx, tempDir) require.NoError(err) - err = downloadLatestRelease(ctx, 
tempDir) + err = downloadLatestQbitRelease(ctx, tempDir) require.NoError(err) } diff --git a/src/sources/storage.go b/src/sources/storage.go index 6036e3f..1420c7d 100644 --- a/src/sources/storage.go +++ b/src/sources/storage.go @@ -1,14 +1,14 @@ package sources import ( - "git.kmsign.ru/royalcat/tstor/src/sources/torrent" + "git.kmsign.ru/royalcat/tstor/src/sources/qbittorrent" "git.kmsign.ru/royalcat/tstor/src/sources/ytdlp" "git.kmsign.ru/royalcat/tstor/src/vfs" ) -func NewHostedFS(sourceFS vfs.Filesystem, tsrv *torrent.Daemon, ytdlpsrv *ytdlp.Daemon) vfs.Filesystem { +func NewHostedFS(sourceFS vfs.Filesystem, tsrv *qbittorrent.Daemon, ytdlpsrv *ytdlp.Daemon) vfs.Filesystem { factories := map[string]vfs.FsFactory{ - ".torrent": tsrv.NewTorrentFs, + ".torrent": tsrv.TorrentFS, ".ts-ytdlp": ytdlpsrv.BuildFS, } diff --git a/src/sources/torrent/client.go b/src/sources/torrent/client.go index e6273d7..0071013 100644 --- a/src/sources/torrent/client.go +++ b/src/sources/torrent/client.go @@ -6,7 +6,7 @@ import ( "os" "git.kmsign.ru/royalcat/tstor/src/config" - dlog "git.kmsign.ru/royalcat/tstor/src/log" + "git.kmsign.ru/royalcat/tstor/src/logwrap" "github.com/anacrolix/dht/v2/bep44" tlog "github.com/anacrolix/log" "github.com/anacrolix/torrent" @@ -37,7 +37,7 @@ func newClientConfig(st storage.ClientImpl, fis bep44.Store, cfg *config.Torrent // } tl := tlog.NewLogger("torrent-client") - tl.SetHandlers(&dlog.Torrent{L: l}) + tl.SetHandlers(&logwrap.Torrent{L: l}) torrentCfg.Logger = tl torrentCfg.Callbacks.NewPeer = append(torrentCfg.Callbacks.NewPeer, func(p *torrent.Peer) { diff --git a/src/sources/torrent/daemon.go b/src/sources/torrent/daemon.go index a627f6a..934a322 100644 --- a/src/sources/torrent/daemon.go +++ b/src/sources/torrent/daemon.go @@ -121,13 +121,13 @@ func NewDaemon(sourceFs billy.Filesystem, conf config.TorrentClient) (*Daemon, e return nil, err } - go func() { - ctx := context.Background() - err := s.backgroudFileLoad(ctx) - if err != nil { - 
s.log.Error(ctx, "initial torrent load failed", rlog.Error(err)) - } - }() + // go func() { + // ctx := context.Background() + // err := s.backgroudFileLoad(ctx) + // if err != nil { + // s.log.Error(ctx, "initial torrent load failed", rlog.Error(err)) + // } + // }() go func() { ctx := context.Background() diff --git a/src/sources/torrent/dht_fileitem_store.go b/src/sources/torrent/dht_fileitem_store.go index ae7ea1e..8909dd1 100644 --- a/src/sources/torrent/dht_fileitem_store.go +++ b/src/sources/torrent/dht_fileitem_store.go @@ -5,7 +5,7 @@ import ( "encoding/gob" "time" - dlog "git.kmsign.ru/royalcat/tstor/src/log" + "git.kmsign.ru/royalcat/tstor/src/logwrap" "github.com/anacrolix/dht/v2/bep44" "github.com/dgraph-io/badger/v4" ) @@ -19,7 +19,7 @@ type dhtFileItemStore struct { func newDHTStore(path string, itemsTTL time.Duration) (*dhtFileItemStore, error) { opts := badger.DefaultOptions(path). - WithLogger(dlog.BadgerLogger("torrent-client", "dht-item-store")). + WithLogger(logwrap.BadgerLogger("torrent-client", "dht-item-store")). 
WithValueLogFileSize(1<<26 - 1) db, err := badger.Open(opts) diff --git a/src/sources/torrent/fs.go b/src/sources/torrent/fs.go index 6549389..4e7ffbc 100644 --- a/src/sources/torrent/fs.go +++ b/src/sources/torrent/fs.go @@ -138,26 +138,26 @@ func (fs *TorrentFS) files(ctx context.Context) (map[string]vfs.File, error) { } // TODO optional - if len(fs.filesCache) == 1 && fs.resolver.IsNestedFs(fs.Torrent.Name()) { - filepath := "/" + fs.Torrent.Name() - if file, ok := fs.filesCache[filepath]; ok { - nestedFs, err := fs.resolver.NestedFs(ctx, filepath, file) - if err != nil { - return nil, err - } - if nestedFs == nil { - goto DEFAULT_DIR // FIXME - } - fs.filesCache, err = listFilesRecursive(ctx, nestedFs, "/") - if err != nil { - return nil, err - } + // if len(fs.filesCache) == 1 && fs.resolver.IsNestedFs(fs.Torrent.Name()) { + // filepath := "/" + fs.Torrent.Name() + // if file, ok := fs.filesCache[filepath]; ok { + // nestedFs, err := fs.resolver.NestedFs(ctx, filepath, file) + // if err != nil { + // return nil, err + // } + // if nestedFs == nil { + // goto DEFAULT_DIR // FIXME + // } + // fs.filesCache, err = listFilesRecursive(ctx, nestedFs, "/") + // if err != nil { + // return nil, err + // } - return fs.filesCache, nil - } - } + // return fs.filesCache, nil + // } + // } + // DEFAULT_DIR: -DEFAULT_DIR: rootDir := "/" + fs.Torrent.Name() + "/" singleDir := true for k, _ := range fs.filesCache { @@ -315,7 +315,6 @@ func (tfs *TorrentFS) Open(ctx context.Context, filename string) (file vfs.File, return nil, err } if nestedFs != nil { - return nestedFs.Open(ctx, nestedFsPath) } diff --git a/src/sources/torrent/infobytes.go b/src/sources/torrent/infobytes.go index a2add10..ea1de67 100644 --- a/src/sources/torrent/infobytes.go +++ b/src/sources/torrent/infobytes.go @@ -6,7 +6,7 @@ import ( "fmt" "path/filepath" - dlog "git.kmsign.ru/royalcat/tstor/src/log" + "git.kmsign.ru/royalcat/tstor/src/logwrap" "github.com/anacrolix/torrent/metainfo" 
"github.com/anacrolix/torrent/types/infohash" "github.com/dgraph-io/badger/v4" @@ -21,7 +21,7 @@ type infoBytesStore struct { func newInfoBytesStore(metaDir string) (*infoBytesStore, error) { opts := badger. DefaultOptions(filepath.Join(metaDir, "infobytes")). - WithLogger(dlog.BadgerLogger("torrent-client", "infobytes")) + WithLogger(logwrap.BadgerLogger("torrent-client", "infobytes")) db, err := badger.Open(opts) if err != nil { return nil, err diff --git a/src/sources/torrent/piece_completion.go b/src/sources/torrent/piece_completion.go index 3e82edc..b6a763e 100644 --- a/src/sources/torrent/piece_completion.go +++ b/src/sources/torrent/piece_completion.go @@ -5,7 +5,7 @@ import ( "encoding/binary" "fmt" - dlog "git.kmsign.ru/royalcat/tstor/src/log" + "git.kmsign.ru/royalcat/tstor/src/logwrap" "github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/storage" "github.com/royalcat/kv" @@ -86,7 +86,7 @@ var _ storage.PieceCompletion = (*badgerPieceCompletion)(nil) func newPieceCompletion(dir string) (storage.PieceCompletion, error) { opts := kvbadger.DefaultOptions[PieceCompletionState](dir) opts.Codec = kv.CodecBinary[PieceCompletionState, *PieceCompletionState]{} - opts.BadgerOptions = opts.BadgerOptions.WithLogger(dlog.BadgerLogger("torrent-client", "piece-completion")) + opts.BadgerOptions = opts.BadgerOptions.WithLogger(logwrap.BadgerLogger("torrent-client", "piece-completion")) db, err := kvbadger.NewBagerKVBinaryKey[pieceKey, PieceCompletionState](opts) if err != nil { diff --git a/src/sources/torrent/stats.go b/src/sources/torrent/stats.go index 5444a9a..c21df4f 100644 --- a/src/sources/torrent/stats.go +++ b/src/sources/torrent/stats.go @@ -7,7 +7,7 @@ import ( "slices" "time" - "git.kmsign.ru/royalcat/tstor/src/log" + "git.kmsign.ru/royalcat/tstor/src/logwrap" "github.com/anacrolix/torrent/types/infohash" "github.com/dgraph-io/badger/v4" ) @@ -17,7 +17,7 @@ func newStatsStore(metaDir string, lifetime time.Duration) (*statsStore, error) 
badger. DefaultOptions(path.Join(metaDir, "stats")). WithNumVersionsToKeep(int(^uint(0) >> 1)). - WithLogger(log.BadgerLogger("stats")), // Infinity + WithLogger(logwrap.BadgerLogger("stats")), // Infinity ) if err != nil { return nil, err diff --git a/src/tkv/new.go b/src/tkv/new.go index 13fe01a..3f2edc1 100644 --- a/src/tkv/new.go +++ b/src/tkv/new.go @@ -3,14 +3,14 @@ package tkv import ( "path" - tlog "git.kmsign.ru/royalcat/tstor/src/log" + "git.kmsign.ru/royalcat/tstor/src/logwrap" "github.com/royalcat/kv" "github.com/royalcat/kv/kvbadger" ) func NewKV[K kv.Bytes, V any](dbdir, name string) (store kv.Store[K, V], err error) { opts := kvbadger.DefaultOptions[V](path.Join(dbdir, name)) - opts.BadgerOptions.Logger = tlog.BadgerLogger(name, "badger") + opts.BadgerOptions.Logger = logwrap.BadgerLogger(name, "badger") store, err = kvbadger.NewBadgerKVBytesKey[K, V](opts) if err != nil { return nil, err diff --git a/src/vfs/fs.go b/src/vfs/fs.go index 892a01e..b8f3805 100644 --- a/src/vfs/fs.go +++ b/src/vfs/fs.go @@ -109,3 +109,25 @@ func (fi *fileInfo) IsDir() bool { func (fi *fileInfo) Sys() interface{} { return nil } + +type FilesystemPrototype string + +// Info implements Filesystem. +func (p FilesystemPrototype) Info() (fs.FileInfo, error) { + return NewDirInfo(string(p)), nil +} + +// IsDir implements Filesystem. +func (p FilesystemPrototype) IsDir() bool { + return true +} + +// Name implements Filesystem. +func (p FilesystemPrototype) Name() string { + return string(p) +} + +// Type implements Filesystem. 
+func (p FilesystemPrototype) Type() fs.FileMode { + return fs.ModeDir +} diff --git a/src/vfs/hash.go b/src/vfs/hash.go new file mode 100644 index 0000000..3bee0c2 --- /dev/null +++ b/src/vfs/hash.go @@ -0,0 +1,86 @@ +package vfs + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io" +) + +const chunkSize int64 = 64 * 1024 + +var ErrOsHashLen = errors.New("oshash: buffer length must be a multiple of 8") + +type Hash string + +func FileHash(ctx context.Context, f File) (Hash, error) { + _, err := f.Seek(0, io.SeekStart) + if err != nil { + return "", fmt.Errorf("error seeking file: %w", err) + } + defer f.Seek(0, io.SeekStart) + fileSize := f.Size() + if fileSize <= 8 { + return "", fmt.Errorf("cannot calculate oshash where size < 8 (%d)", fileSize) + } + + fileChunkSize := chunkSize + if fileSize < fileChunkSize { + // Must be a multiple of 8. + fileChunkSize = (fileSize / 8) * 8 + } + + head := make([]byte, fileChunkSize) + tail := make([]byte, fileChunkSize) + + // read the head of the file into the start of the buffer + _, err = f.Read(ctx, head) + if err != nil { + return "", err + } + + // seek to the end of the file - the chunk size + _, err = f.Seek(-fileChunkSize, io.SeekEnd) + if err != nil { + return "", err + } + + // read the tail of the file + _, err = f.Read(ctx, tail) + if err != nil { + return "", err + } + + return oshash(fileSize, head, tail) +} + +func sumBytes(buf []byte) (uint64, error) { + if len(buf)%8 != 0 { + return 0, ErrOsHashLen + } + + sz := len(buf) / 8 + var sum uint64 + for j := 0; j < sz; j++ { + sum += binary.LittleEndian.Uint64(buf[8*j : 8*(j+1)]) + } + + return sum, nil +} + +func oshash(size int64, head []byte, tail []byte) (Hash, error) { + headSum, err := sumBytes(head) + if err != nil { + return "", fmt.Errorf("oshash head: %w", err) + } + tailSum, err := sumBytes(tail) + if err != nil { + return "", fmt.Errorf("oshash tail: %w", err) + } + + // Compute the sum of the head, tail and file size + result := 
headSum + tailSum + uint64(size) + // output as hex + return Hash(fmt.Sprintf("%016x", result)), nil +} diff --git a/src/vfs/log.go b/src/vfs/log.go index b21fb10..3f203bc 100644 --- a/src/vfs/log.go +++ b/src/vfs/log.go @@ -111,6 +111,7 @@ func (fs *LogFS) Open(ctx context.Context, filename string) (file File, err erro ctx, span := tracer.Start(ctx, "Open", fs.traceAttrs(attribute.String("filename", filename)), ) + log := fs.log.With(slog.String("filename", filename)) defer func() { if err != nil { span.RecordError(err) @@ -120,7 +121,7 @@ func (fs *LogFS) Open(ctx context.Context, filename string) (file File, err erro file, err = fs.fs.Open(ctx, filename) if isLoggableError(err) { - fs.log.Error(ctx, "Failed to open file") + log.Error(ctx, "Failed to open file", rlog.Error(err)) } file = WrapLogFile(file, filename, fs.log, fs.readTimeout, fs.tel) diff --git a/src/vfs/resolver.go b/src/vfs/resolver.go index bb5d4da..481dc92 100644 --- a/src/vfs/resolver.go +++ b/src/vfs/resolver.go @@ -92,13 +92,14 @@ func (r *ResolverFS) Open(ctx context.Context, filename string) (File, error) { } // ReadDir implements Filesystem. 
-func (r *ResolverFS) ReadDir(ctx context.Context, dir string) ([]fs.DirEntry, error) { +func (r *ResolverFS) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { + log := r.log.With(slog.String("name", name)) ctx, span := tracer.Start(ctx, "ReadDir", - r.traceAttrs(attribute.String("name", dir)), + r.traceAttrs(attribute.String("name", name)), ) defer span.End() - fsPath, nestedFs, nestedFsPath, err := r.resolver.ResolvePath(ctx, dir, r.rootFS.Open) + fsPath, nestedFs, nestedFsPath, err := r.resolver.ResolvePath(ctx, name, r.rootFS.Open) if err != nil { return nil, err } @@ -113,34 +114,22 @@ func (r *ResolverFS) ReadDir(ctx context.Context, dir string) ([]fs.DirEntry, er out := make([]fs.DirEntry, 0, len(entries)) for _, e := range entries { if r.resolver.IsNestedFs(e.Name()) { - filepath := path.Join("/", dir, e.Name()) - file, err := r.Open(ctx, filepath) + filepath := path.Join("/", name, e.Name()) + file, err := r.rootFS.Open(ctx, filepath) if err != nil { return nil, err } - // it is factory responsibility to close file then needed - - err = func() error { - factoryCtx, cancel := subTimeout(ctx) - defer cancel() - nestedfs, err := r.resolver.NestedFs(factoryCtx, filepath, file) - if err != nil { - if errors.Is(err, context.DeadlineExceeded) { - r.log.Error(ctx, "creating fs timed out", - slog.String("filename", e.Name()), - ) - return nil - } - - return err - } - out = append(out, nestedfs) - return nil - }() - if err != nil { + nestedfs, err := r.resolver.nestedFs(ctx, filepath, file) + if errors.Is(err, context.DeadlineExceeded) { return nil, err } + if err != nil { + log.Error(ctx, "error creating nested fs", rlog.Error(err)) + out = append(out, e) + continue + } + out = append(out, nestedfs) } else { out = append(out, e) } @@ -214,14 +203,14 @@ type FsFactory func(ctx context.Context, f File) (Filesystem, error) func NewResolver(factories map[string]FsFactory) *Resolver { return &Resolver{ factories: factories, - fsmap: 
map[string]Filesystem{}, + fsmap: map[Hash]Filesystem{}, } } type Resolver struct { m sync.Mutex factories map[string]FsFactory - fsmap map[string]Filesystem // filesystem cache + fsmap map[Hash]Filesystem // filesystem cache // TODO: add fsmap clean } @@ -236,26 +225,35 @@ func (r *Resolver) IsNestedFs(f string) bool { return false } -func (r *Resolver) NestedFs(ctx context.Context, fsPath string, file File) (Filesystem, error) { +func (r *Resolver) nestedFs(ctx context.Context, fsPath string, file File) (Filesystem, error) { + if file.IsDir() { + return nil, file.Close(ctx) + } + + fileHash, err := FileHash(ctx, file) + if err != nil { + return nil, fmt.Errorf("error calculating file hash: %w", err) + } + + if nestedFs, ok := r.fsmap[fileHash]; ok { + return nestedFs, file.Close(ctx) + } + for ext, nestFactory := range r.factories { if !strings.HasSuffix(fsPath, ext) { continue } - if nestedFs, ok := r.fsmap[fsPath]; ok { - return nestedFs, nil - } - nestedFs, err := nestFactory(ctx, file) if err != nil { - return nil, fmt.Errorf("error creating filesystem from file: %s with error: %w", fsPath, err) + return nil, fmt.Errorf("error calling nest factory: %s with error: %w", fsPath, err) } - r.fsmap[fsPath] = nestedFs + r.fsmap[fileHash] = nestedFs return nestedFs, nil } - return nil, nil + return nil, file.Close(ctx) } // open requeue raw open, without resolver call @@ -289,6 +287,19 @@ PARTS_LOOP: nestedFsPath = AbsPath(path.Join(parts[nestOn:]...)) + file, err := rawOpen(ctx, fsPath) + if err != nil { + return "", nil, "", fmt.Errorf("error opening filesystem file: %s with error: %w", fsPath, err) + } + fileHash, err := FileHash(ctx, file) + if err != nil { + return "", nil, "", fmt.Errorf("error calculating file hash: %w", err) + } + err = file.Close(ctx) + if err != nil { + return "", nil, "", fmt.Errorf("error closing file: %w", err) + } + // we dont need lock until now // it must be before fsmap read to exclude race condition: // read -> write @@ -296,7 
+307,7 @@ PARTS_LOOP: r.m.Lock() defer r.m.Unlock() - if nestedFs, ok := r.fsmap[fsPath]; ok { + if nestedFs, ok := r.fsmap[fileHash]; ok { span.AddEvent("fs loaded from cache", trace.WithAttributes(attribute.String("nestedFs", reflect.TypeOf(nestedFs).Name()))) return fsPath, nestedFs, nestedFsPath, nil } else { @@ -307,13 +318,13 @@ PARTS_LOOP: if err != nil { return "", nil, "", fmt.Errorf("error opening filesystem file: %s with error: %w", fsPath, err) } - // it is factory responsibility to close file then needed + // it is factory responsibility to close file handler then needed nestedFs, err := nestFactory(ctx, fsFile) if err != nil { return "", nil, "", fmt.Errorf("error creating filesystem from file: %s with error: %w", fsPath, err) } - r.fsmap[fsPath] = nestedFs + r.fsmap[fileHash] = nestedFs span.AddEvent("fs created", trace.WithAttributes(attribute.String("nestedFs", reflect.TypeOf(nestedFs).Name()))) @@ -366,3 +377,27 @@ func ListDirFromFiles[F File](m map[string]F, name string) ([]fs.DirEntry, error return out, nil } + +func ListDirFromInfo(m map[string]fs.FileInfo, name string) ([]fs.DirEntry, error) { + out := make([]fs.DirEntry, 0, len(m)) + name = AddTrailSlash(path.Clean(name)) + for p, f := range m { + if strings.HasPrefix(p, name) { + parts := strings.Split(trimRelPath(p, name), Separator) + if len(parts) == 1 { + out = append(out, NewFileInfo(parts[0], f.Size())) + } else { + out = append(out, NewDirInfo(parts[0])) + } + + } + } + slices.SortStableFunc(out, func(de1, de2 fs.DirEntry) int { + return strings.Compare(de1.Name(), de2.Name()) + }) + out = slices.CompactFunc(out, func(de1, de2 fs.DirEntry) bool { + return de1.Name() == de2.Name() + }) + + return out, nil +}