package service

import (
	"bufio"
	"context"
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"sync"

	"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
	"git.kmsign.ru/royalcat/tstor/pkg/rlog"
	"git.kmsign.ru/royalcat/tstor/src/config"
	"git.kmsign.ru/royalcat/tstor/src/host/controller"
	"git.kmsign.ru/royalcat/tstor/src/host/datastorage"
	"git.kmsign.ru/royalcat/tstor/src/host/store"
	"git.kmsign.ru/royalcat/tstor/src/host/tkv"
	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/multierr"
	"golang.org/x/exp/maps"

	"github.com/anacrolix/torrent"
	"github.com/anacrolix/torrent/bencode"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/types"
	"github.com/anacrolix/torrent/types/infohash"
	"github.com/royalcat/kv"
)

var tracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/src/service")
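
// DirAquire records the set of torrent infohashes that have been accepted
// under a single torrent name, so later torrents with the same name can be
// checked for compatibility before sharing that directory.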
type DirAquire struct {
	Name   string
	Hashes []infohash.T
}
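
// Service owns the torrent client together with its metadata stores and data
// storage, and exposes loaded torrents as virtual filesystems.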
type Service struct {
	c             *torrent.Client
	excludedFiles *store.FilesMappings
	infoBytes     *store.InfoBytes

	torrentLoaded chan struct{}

	loadMutex sync.Mutex

	// stats *Stats
	DefaultPriority types.PiecePriority
	Storage         *datastorage.DataStorage
	SourceDir       string

	dirsAquire kv.Store[string, DirAquire]

	log *slog.Logger
}
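
// NewService opens the dir-acquire store under cfg.MetadataFolder, wires the
// dependencies together and starts a background goroutine that loads every
// *.torrent file found in sourceDir. The torrentLoaded channel is closed once
// that initial load has finished, successfully or not.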
func NewService(sourceDir string, cfg config.TorrentClient, c *torrent.Client,
	storage *datastorage.DataStorage, excludedFiles *store.FilesMappings, infoBytes *store.InfoBytes,
) (*Service, error) {
	dirsAcquire, err := tkv.New[string, DirAquire](cfg.MetadataFolder, "dir-acquire")
	if err != nil {
		return nil, err
	}

	s := &Service{
		log:             slog.With("component", "torrent-service"),
		c:               c,
		DefaultPriority: types.PiecePriorityNone,
		excludedFiles:   excludedFiles,
		infoBytes:       infoBytes,
		Storage:         storage,
		SourceDir:       sourceDir,
		torrentLoaded:   make(chan struct{}),
		loadMutex:       sync.Mutex{},
		dirsAquire:      dirsAcquire,

		// stats: newStats(), // TODO persistent
	}

	go func() {
		ctx := context.Background()
		err := s.loadTorrentFiles(ctx)
		if err != nil {
			s.log.Error("initial torrent load failed", "error", err)
		}
		close(s.torrentLoaded)
	}()

	return s, nil
}
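
// Compile-time assertion that Service.NewTorrentFs satisfies vfs.FsFactory.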
var _ vfs.FsFactory = (*Service)(nil).NewTorrentFs
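
// Close shuts down the torrent client and the underlying data storage,
// combining any errors into one.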
func (s *Service) Close() error {
	err := multierr.Combine(s.c.Close()...)
	err = multierr.Append(err, s.Storage.Close())
	return err
}
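
// LoadTorrent reads torrent metadata from f and returns the matching torrent
// from the client, adding it first if it is not already known. For a new
// torrent it waits for the info to arrive (or ctx to be cancelled), caches the
// info bytes and verifies that the torrent is compatible with torrents that
// already use the same name.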
func (s *Service) LoadTorrent(ctx context.Context, f vfs.File) (*torrent.Torrent, error) {
	ctx, span := tracer.Start(ctx, "LoadTorrent")
	defer span.End()

	log := rlog.FunctionLog(s.log, "LoadTorrent")

	defer f.Close(ctx)

	stat, err := f.Info()
	if err != nil {
		return nil, fmt.Errorf("call stat failed: %w", err)
	}

	span.SetAttributes(attribute.String("filename", stat.Name()))

	mi, err := metainfo.Load(bufio.NewReader(ctxio.IoReader(ctx, f)))
	if err != nil {
		return nil, fmt.Errorf("loading torrent metadata from file %s: %w", stat.Name(), err)
	}

	t, ok := s.c.Torrent(mi.HashInfoBytes())
	if !ok {
		span.AddEvent("torrent not found, loading from file")
		log.InfoContext(ctx, "torrent not found, loading from file")

		spec, err := torrent.TorrentSpecFromMetaInfoErr(mi)
		if err != nil {
			return nil, fmt.Errorf("parse spec from metadata: %w", err)
		}
		infoBytes := spec.InfoBytes

		if !isValidInfoHashBytes(infoBytes) {
			log.WarnContext(ctx, "info loaded from spec is not valid")
			infoBytes = nil
		}

		if len(infoBytes) == 0 {
			log.InfoContext(ctx, "no info loaded from file, trying to load from cache")
			infoBytes, err = s.infoBytes.GetBytes(spec.InfoHash)
			if err != nil && err != store.ErrNotFound {
				return nil, fmt.Errorf("get info bytes from database: %w", err)
			}
		}

		t, _ = s.c.AddTorrentOpt(torrent.AddTorrentOpts{
			InfoHash:  spec.InfoHash,
			Storage:   s.Storage,
			InfoBytes: infoBytes,
			ChunkSize: spec.ChunkSize,
		})
		t.AllowDataDownload()
		t.AllowDataUpload()

		span.AddEvent("torrent added to client")

		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-t.GotInfo():
			err := s.infoBytes.Set(t.InfoHash(), t.Metainfo())
			if err != nil {
				s.log.Error("error setting info bytes for torrent", "name", t.Name(), "error", err.Error())
			}
		}
		span.AddEvent("got info")

		info := t.Info()
		if info == nil {
			return nil, fmt.Errorf("info is nil")
		}

		compatable, _, err := s.checkTorrentCompatable(ctx, spec.InfoHash, *info)
		if err != nil {
			return nil, err
		}
		if !compatable {
			return nil, fmt.Errorf(
				"torrent with name '%s' is not compatible, existing infohash: %s, new: %s",
				t.Name(), t.InfoHash().HexString(), spec.InfoHash.HexString(),
			)
		}
	}

	return t, nil
}
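
// checkTorrentCompatable reports whether a torrent with infohash ih and the
// given info can share a directory name with the torrents already known under
// that name. tryLater is set when no conclusion could be reached yet.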
func (s *Service) checkTorrentCompatable(ctx context.Context, ih infohash.T, info metainfo.Info) (compatable bool, tryLater bool, err error) {
	log := s.log.With("new-name", info.BestName(), "new-infohash", ih.String())

	name := info.BestName()

	aq, found, err := s.dirsAquire.Get(ctx, info.BestName())
	if err != nil {
		return false, false, err
	}
	if !found {
		err = s.dirsAquire.Set(ctx, name, DirAquire{
			Name:   name,
			Hashes: slices.Compact([]infohash.T{ih}),
		})
		if err != nil {
			return false, false, err
		}

		log.Debug("acquire entry was not found, created a new one")
		return true, false, nil
	}

	if slices.Contains(aq.Hashes, ih) {
		log.Debug("hash already known to be compatible")
		return true, false, nil
	}

	for _, existingTorrent := range s.c.Torrents() {
		if existingTorrent.Name() != name || existingTorrent.InfoHash() == ih {
			continue
		}

		existingInfo := existingTorrent.Info()

		existingFiles := slices.Clone(existingInfo.Files)
		newFiles := slices.Clone(info.Files)

		if !s.checkTorrentFilesCompatable(aq, existingFiles, newFiles) {
			return false, false, nil
		}

		aq.Hashes = slicesUnique(append(aq.Hashes, ih))
		err = s.dirsAquire.Set(ctx, aq.Name, aq)
		if err != nil {
			log.Warn("failed to save dir-acquire entry", "error", err.Error())
			return false, false, err
		}
	}

	if slices.Contains(aq.Hashes, ih) {
		log.Debug("hash is compatible")
		return true, false, nil
	}

	log.Debug("torrent with same name not found, try later")
	return false, true, nil
}
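
// checkTorrentFilesCompatable reports whether two file lists can coexist in
// one directory: identical lists are compatible, and a longer new list is
// rejected only if it contains a file with the same path but a different
// length than an existing one.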
func (s *Service) checkTorrentFilesCompatable(aq DirAquire, existingFiles, newFiles []metainfo.FileInfo) bool {
	log := s.log.With("name", aq.Name)

	pathCmp := func(a, b metainfo.FileInfo) int {
		return slices.Compare(a.BestPath(), b.BestPath())
	}
	slices.SortStableFunc(existingFiles, pathCmp)
	slices.SortStableFunc(newFiles, pathCmp)

	// the torrents are essentially equal
	if slices.EqualFunc(existingFiles, newFiles, func(fi1, fi2 metainfo.FileInfo) bool {
		return fi1.Length == fi2.Length && slices.Equal(fi1.BestPath(), fi2.BestPath())
	}) {
		return true
	}

	if len(newFiles) > len(existingFiles) {
		type fileInfo struct {
			Path   string
			Length int64
		}
		mapInfo := func(fi metainfo.FileInfo) fileInfo {
			return fileInfo{
				Path:   strings.Join(fi.BestPath(), "/"),
				Length: fi.Length,
			}
		}

		existingFiles := apply(existingFiles, mapInfo)
		newFiles := apply(newFiles, mapInfo)

		for _, n := range newFiles {
			if slices.Contains(existingFiles, n) {
				continue
			}

			for _, e := range existingFiles {
				if e.Path == n.Path && e.Length != n.Length {
					log.Warn("torrents not compatible: files with the same path have different lengths",
						"path", n.Path, "existing-length", e.Length, "new-length", n.Length)
					return false
				}
			}
		}
	}

	return true
}

// func (s *Service) getTorrentsByName(name string) []*torrent.Torrent {
// 	out := []*torrent.Torrent{}
// 	for _, t := range s.c.Torrents() {
// 		if t.Name() == name {
// 			out = append(out, t)
// 		}
// 	}
// 	return out
// }
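
// isValidInfoHashBytes reports whether d can be decoded as bencoded torrent
// info.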
func isValidInfoHashBytes(d []byte) bool {
	var info metainfo.Info
	err := bencode.Unmarshal(d, &info)
	return err == nil
}
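
// NewTorrentFs loads the torrent described by f and wraps it in a virtual
// filesystem named after the source file.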
func (s *Service) NewTorrentFs(ctx context.Context, f vfs.File) (vfs.Filesystem, error) {
	defer f.Close(ctx)

	info, err := f.Info()
	if err != nil {
		return nil, err
	}

	t, err := s.LoadTorrent(ctx, f)
	if err != nil {
		return nil, err
	}

	return vfs.NewTorrentFs(info.Name(), controller.NewTorrent(t, s.excludedFiles)), nil
}

func (s *Service) Stats() (*Stats, error) {
	return &Stats{}, nil
}

func (s *Service) GetStats() torrent.ConnStats {
	return s.c.ConnStats()
}
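
// loadWorkers bounds the number of torrent files loaded concurrently during
// the initial scan of SourceDir.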
const loadWorkers = 5
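
// loadTorrentFiles walks SourceDir and feeds every *.torrent file it finds to
// a pool of loader workers that add the torrents to the client.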
func (s *Service) loadTorrentFiles(ctx context.Context) error {
	ctx, span := tracer.Start(ctx, "loadTorrentFiles", trace.WithAttributes(
		attribute.Int("workers", loadWorkers),
	))
	defer span.End()

	log := rlog.FunctionLog(s.log, "loadTorrentFiles")

	loaderPaths := make(chan string)
	wg := sync.WaitGroup{}

	defer func() {
		close(loaderPaths)
		wg.Wait()
	}()

	loaderWorker := func() {
		defer wg.Done()
		for path := range loaderPaths {
			file, err := vfs.NewLazyOsFile(path)
			if err != nil {
				log.Error("error opening torrent file", "filename", path, rlog.Err(err))
				continue
			}

			// LoadTorrent closes the file itself when it is done with it.
			_, err = s.LoadTorrent(ctx, file)
			if err != nil {
				s.log.Error("failed adding torrent", "error", err)
			}
		}
	}

	// Register the workers before starting them so the deferred wg.Wait
	// cannot return early.
	wg.Add(loadWorkers)
	for range loadWorkers {
		go loaderWorker()
	}

	return filepath.Walk(s.SourceDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return fmt.Errorf("fs walk error: %w", err)
		}

		if ctx.Err() != nil {
			return ctx.Err()
		}

		if info.IsDir() {
			return nil
		}

		if strings.HasSuffix(path, ".torrent") {
			loaderPaths <- path
		}

		return nil
	})
}
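
// ListTorrents returns a controller for every torrent known to the client,
// blocking until the initial load has finished.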
func (s *Service) ListTorrents(ctx context.Context) ([]*controller.Torrent, error) {
	<-s.torrentLoaded

	out := []*controller.Torrent{}
	for _, v := range s.c.Torrents() {
		out = append(out, controller.NewTorrent(v, s.excludedFiles))
	}
	return out, nil
}
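
// GetTorrent returns a controller for the torrent with the given infohash, or
// nil if the client does not know it. It blocks until the initial load has
// finished.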
func (s *Service) GetTorrent(infohashHex string) (*controller.Torrent, error) {
	<-s.torrentLoaded

	t, ok := s.c.Torrent(infohash.FromHexString(infohashHex))
	if !ok {
		return nil, nil
	}

	return controller.NewTorrent(t, s.excludedFiles), nil
}
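
// slicesUnique returns the distinct elements of in; the order of the result is
// not defined.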
func slicesUnique[S ~[]E, E comparable](in S) S {
	m := map[E]struct{}{}
	for _, v := range in {
		m[v] = struct{}{}
	}

	return maps.Keys(m)
}
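
// apply maps every element of in through f and returns the results.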
func apply[I, O any](in []I, f func(e I) O) []O {
	out := []O{}
	for _, v := range in {
		out = append(out, f(v))
	}
	return out
}