wip

parent 2cefb9db98 → commit b97dcc8d8f
52 changed files with 7570 additions and 555 deletions
.gqlgen.yml (new file, 44 lines)

@@ -0,0 +1,44 @@
schema:
  - graphql/*.graphql
  - graphql/**/*.graphql

exec:
  filename: src/delivery/graphql/generated.go
  package: graph

model:
  filename: src/delivery/graphql/model/models_gen.go
  package: model

resolver:
  layout: follow-schema
  dir: src/delivery/graphql/resolver
  package: resolver
  filename_template: "{name}.resolvers.go"

models:
  DateTime:
    model: github.com/99designs/gqlgen/graphql.Time
  Int:
    model: github.com/99designs/gqlgen/graphql.Int64
  Torrent:
    fields:
      name:
        resolver: true
      files:
        resolver: true
      excludedFiles:
        resolver: true
      peers:
        resolver: true
    extraFields:
      T:
        type: "*git.kmsign.ru/royalcat/tstor/src/host/controller.Torrent"
  TorrentFile:
    extraFields:
      F:
        type: "*github.com/anacrolix/torrent.File"
  TorrentPeer:
    extraFields:
      F:
        type: "*github.com/anacrolix/torrent.PeerConn"
.graphqlrc.yaml (new file, 4 lines)

@@ -0,0 +1,4 @@
schema:
  - graphql/schema.graphql
  - graphql/*.graphql
  - graphql/**/*.graphql
cmd/generate-graphlq/main.go (new file, 65 lines)

@@ -0,0 +1,65 @@
// https://github.com/99designs/gqlgen/issues/2281#issuecomment-1506561381
package main

import (
	"fmt"
	"os"

	"github.com/99designs/gqlgen/api"
	"github.com/99designs/gqlgen/codegen"
	"github.com/99designs/gqlgen/codegen/config"
)

type plugin_ struct {
}

func (plugin_) Name() string {
	return "Fix Directive hook called with wrong object"
}

func (plugin_) GenerateCode(cfg *codegen.Data) error {
	for _, input := range cfg.Inputs {
		for _, field := range input.Fields {
			if field.GoFieldType == codegen.GoFieldVariable {
				directiveMap := make(map[string]int, len(field.TypeReference.Definition.Directives)+len(field.Object.Directives))
				for _, v := range field.TypeReference.Definition.Directives {
					directiveMap[v.Name]++
				}
				// for _, v := range field.Object.Directives {
				// 	directiveMap[v.Name]++
				// }

				directive := make([]*codegen.Directive, 0, len(field.Directives))
				for _, v := range field.Directives {
					if count := directiveMap[v.Name]; count > 0 {
						directiveMap[v.Name] = count - 1
						fmt.Printf("Ignore field %s{%s} directive: @%s\n", input.Name, field.Name, v.Name)
						continue
					}

					directive = append(directive, v)
				}

				field.Directives = directive
			}
		}
	}

	return nil
}

func main() {
	cfg, err := config.LoadConfigFromDefaultLocations()
	if err != nil {
		fmt.Fprintln(os.Stderr, "failed to load config", err.Error())
		os.Exit(2)
	}

	err = api.Generate(cfg,
		api.AddPlugin(&plugin_{}),
	)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(3)
	}
}
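The command above is a thin wrapper around gqlgen's api.Generate, adding a plugin that drops directive invocations gqlgen would otherwise apply twice to input fields (see the issue linked in the file header). A sketch of how regeneration could be hooked up — the go:generate placement below is an assumption, not part of this diff:

	// Hypothetical hook; config.LoadConfigFromDefaultLocations picks up .gqlgen.yml from the module root.
	//go:generate go run ./cmd/generate-graphlq

Running go generate ./... (or the command directly) would then rewrite src/delivery/graphql/generated.go and src/delivery/graphql/model/models_gen.go according to .gqlgen.yml.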
@@ -2,6 +2,7 @@ package main

import (
	"fmt"
+	"log/slog"

	"net"
	nethttp "net/http"

@@ -14,9 +15,10 @@ import (
	"git.kmsign.ru/royalcat/tstor/src/config"
	"git.kmsign.ru/royalcat/tstor/src/host"
+	"git.kmsign.ru/royalcat/tstor/src/host/filestorage"
	"git.kmsign.ru/royalcat/tstor/src/host/service"
-	"git.kmsign.ru/royalcat/tstor/src/host/storage"
-	"github.com/rs/zerolog/log"
+	"git.kmsign.ru/royalcat/tstor/src/host/store"
	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
	"github.com/urfave/cli/v2"
	wnfs "github.com/willscott/go-nfs"

@@ -54,65 +56,70 @@ func main() {
	}

	if err := app.Run(os.Args); err != nil {
-		log.Fatal().Err(err).Msg("problem starting application")
+		print("problem starting application: ", err.Error())
	}
}

func run(configPath string) error {

	conf, err := config.Load(configPath)
	if err != nil {
		return fmt.Errorf("error loading configuration: %w", err)
	}

	dlog.Load(&conf.Log)
-	log := log.Logger.With().Str("conponent", "run").Logger()
+	log := slog.Default().With("component", "run")

	// TODO make optional
	err = syscall.Setpriority(syscall.PRIO_PGRP, 0, 19)
	if err != nil {
-		log.Err(err).Msg("set priority failed")
+		log.Error("set priority failed", "error", err)
	}

	if err := os.MkdirAll(conf.TorrentClient.MetadataFolder, 0744); err != nil {
		return fmt.Errorf("error creating metadata folder: %w", err)
	}

-	fis, err := storage.NewFileItemStore(filepath.Join(conf.TorrentClient.MetadataFolder, "items"), 2*time.Hour)
+	fis, err := store.NewFileItemStore(filepath.Join(conf.TorrentClient.MetadataFolder, "items"), 2*time.Hour)
	if err != nil {
		return fmt.Errorf("error starting item store: %w", err)
	}
	defer fis.Close()

-	id, err := storage.GetOrCreatePeerID(filepath.Join(conf.TorrentClient.MetadataFolder, "ID"))
+	id, err := store.GetOrCreatePeerID(filepath.Join(conf.TorrentClient.MetadataFolder, "ID"))
	if err != nil {
		return fmt.Errorf("error creating node ID: %w", err)
	}

-	st, _, err := storage.SetupStorage(conf.TorrentClient)
+	st, _, err := filestorage.Setup(conf.TorrentClient)
	if err != nil {
		return err
	}
	defer st.Close()

-	rep, err := storage.NewExcludedFiles(conf.TorrentClient.MetadataFolder, st)
+	excludedFilesStore, err := store.NewExcludedFiles(conf.TorrentClient.MetadataFolder, st)
	if err != nil {
		return err
	}

-	c, err := storage.NewClient(st, fis, &conf.TorrentClient, id)
+	infoBytesStore, err := store.NewInfoBytes(conf.TorrentClient.MetadataFolder)
	if err != nil {
		return err
	}

+	c, err := store.NewClient(st, fis, &conf.TorrentClient, id)
	if err != nil {
		return fmt.Errorf("error starting torrent client: %w", err)
	}
	c.AddDhtNodes(conf.TorrentClient.DHTNodes)
	defer c.Close()

-	ts := service.NewService(c, rep, conf.TorrentClient.AddTimeout, conf.TorrentClient.ReadTimeout)
+	ts := service.NewService(conf.SourceDir, c, st, excludedFilesStore, infoBytesStore, conf.TorrentClient.AddTimeout, conf.TorrentClient.ReadTimeout)

	if err := os.MkdirAll(conf.SourceDir, 0744); err != nil {
		return fmt.Errorf("error creating data folder: %w", err)
	}
	sfs := host.NewTorrentStorage(conf.SourceDir, ts)
	sfs = vfs.WrapLogFS(sfs, slog.Default())

	// TODO make separate function
	// {

@@ -161,10 +168,10 @@ func run(configPath string) error {
	if conf.Mounts.WebDAV.Enabled {
		go func() {
			if err := webdav.NewWebDAVServer(sfs, conf.Mounts.WebDAV.Port, conf.Mounts.WebDAV.User, conf.Mounts.WebDAV.Pass); err != nil {
-				log.Error().Err(err).Msg("error starting webDAV")
+				log.Error("error starting webDAV", "error", err)
			}

-			log.Warn().Msg("webDAV configuration not found!")
+			log.Warn("webDAV configuration not found!")
		}()
	}
	if conf.Mounts.HttpFs.Enabled {

@@ -172,7 +179,7 @@ func run(configPath string) error {
		httpfs := httpfs.NewHTTPFS(sfs)
		err = nethttp.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", conf.Mounts.HttpFs.Port), nethttp.FileServer(httpfs))
		if err != nil {
-			log.Error().Err(err).Msg("error starting HTTPFS")
+			log.Error("error starting HTTPFS", "error", err)
		}
		// r := gin.New()

@@ -181,7 +188,7 @@ func run(configPath string) error {
		// 	c.FileFromFS(path, httpfs)
		// })

-		log.Info().Str("host", fmt.Sprintf("0.0.0.0:%d", conf.Mounts.HttpFs.Port)).Msg("starting HTTPFS")
+		log.Info("starting HTTPFS", "host", fmt.Sprintf("0.0.0.0:%d", conf.Mounts.HttpFs.Port))
		// if err := r.Run(fmt.Sprintf("0.0.0.0:%d", conf.Mounts.HttpFs.Port)); err != nil {
		// 	log.Error().Err(err).Msg("error starting HTTPFS")
		// }

@@ -190,28 +197,41 @@ func run(configPath string) error {

	if conf.Mounts.NFS.Enabled {
		go func() {
+			log := log.With("component", "NFS")
			listener, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%d", conf.Mounts.NFS.Port))
-			panicOnErr(err, "starting TCP listener")
-			log.Info().Str("host", listener.Addr().String()).Msg("starting NFS server")
+			if err != nil {
+				log.Error("failed to start TCP listener", err)
+				return
+			}
+			log.Info("starting NFS server", "host", listener.Addr().String())
			handler, err := nfs.NewNFSv3Handler(sfs)
-			panicOnErr(err, "creating NFS handler")
-			panicOnErr(wnfs.Serve(listener, handler), "serving nfs")
+			if err != nil {
+				log.Error("failed to create NFS handler", "error", err)
+				return
+			}
+
+			err = wnfs.Serve(listener, handler)
+			if err != nil {
+				log.Error("error serving nfs", "error", err)
+				return
+			}
		}()
	}

	go func() {
-		if err := webdav.NewDirServer(conf.SourceDir, 36912, conf.Mounts.WebDAV.User, conf.Mounts.WebDAV.Pass); err != nil {
-			log.Error().Err(err).Msg("error starting webDAV")
+		err := webdav.NewDirServer(conf.SourceDir, 36912, conf.Mounts.WebDAV.User, conf.Mounts.WebDAV.Pass)
+		if err != nil {
+			log.Error("error starting webDAV", "error", err)
		}

-		log.Warn().Msg("webDAV configuration not found!")
	}()

	go func() {
		logFilename := filepath.Join(conf.Log.Path, dlog.FileName)

-		err = http.New(nil, service.NewStats(), ts, logFilename, conf)
-		log.Error().Err(err).Msg("error initializing HTTP server")
+		err := http.New(nil, service.NewStats(), ts, logFilename, conf)
+		if err != nil {
+			log.Error("error initializing HTTP server", "error", err)
+		}
	}()

	sigChan := make(chan os.Signal, 1)

@@ -220,11 +240,3 @@ func run(configPath string) error {

	return nil
}

-func panicOnErr(err error, desc string) {
-	if err == nil {
-		return
-	}
-	log.Err(err).Msg(desc)
-	log.Panic()
-}
go.mod (22 changed lines)

@@ -3,6 +3,7 @@ module git.kmsign.ru/royalcat/tstor
go 1.21

require (
+	github.com/99designs/gqlgen v0.17.43
	github.com/anacrolix/dht/v2 v2.21.0
	github.com/anacrolix/log v0.14.5
	github.com/anacrolix/missinggo/v2 v2.7.3

@@ -18,26 +19,24 @@ require (
	github.com/knadh/koanf/providers/file v0.1.0
	github.com/knadh/koanf/providers/structs v0.1.0
	github.com/knadh/koanf/v2 v2.0.1
-	github.com/mattn/go-colorable v0.1.13
+	github.com/lmittmann/tint v1.0.4
	github.com/nwaples/rardecode/v2 v2.0.0-beta.2
	github.com/philippgille/gokv v0.6.0
	github.com/philippgille/gokv/badgerdb v0.6.0
	github.com/philippgille/gokv/encoding v0.6.0
-	github.com/rs/zerolog v1.31.0
	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
	github.com/stretchr/testify v1.8.4
	github.com/urfave/cli/v2 v2.27.0
+	github.com/vektah/gqlparser/v2 v2.5.11
	github.com/willscott/go-nfs v0.0.2
	golang.org/x/exp v0.0.0-20231226003508-02704c960a9b
	golang.org/x/net v0.19.0
-	google.golang.org/grpc v1.53.0
-	google.golang.org/protobuf v1.30.0
	gopkg.in/natefinch/lumberjack.v2 v2.2.1
)

require (
	github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
	github.com/RoaringBitmap/roaring v1.2.3 // indirect
+	github.com/agnivade/levenshtein v1.1.1 // indirect
	github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 // indirect
	github.com/alecthomas/atomic v0.1.0-alpha2 // indirect
	github.com/anacrolix/chansync v0.3.0 // indirect

@@ -99,7 +98,7 @@ require (
	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
	github.com/knadh/koanf/maps v0.1.1 // indirect
	github.com/leodido/go-urn v1.2.4 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mitchellh/copystructure v1.2.0 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/mitchellh/reflectwalk v1.0.2 // indirect

@@ -134,6 +133,7 @@ require (
	github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
+	github.com/sosodev/duration v1.1.0 // indirect
	github.com/tidwall/btree v1.6.0 // indirect
	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
	github.com/ugorji/go/codec v1.2.11 // indirect

@@ -146,12 +146,14 @@ require (
	go.opentelemetry.io/otel/trace v1.8.0 // indirect
	go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
	golang.org/x/arch v0.3.0 // indirect
-	golang.org/x/crypto v0.16.0 // indirect
-	golang.org/x/sync v0.3.0 // indirect
+	golang.org/x/crypto v0.17.0 // indirect
+	golang.org/x/mod v0.14.0 // indirect
+	golang.org/x/sync v0.5.0 // indirect
	golang.org/x/sys v0.16.0 // indirect
	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
-	google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
+	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/tools v0.16.0 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	modernc.org/libc v1.22.3 // indirect
	modernc.org/mathutil v1.5.0 // indirect
go.sum (60 changed lines)

@@ -19,6 +19,8 @@ crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU=
filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
github.com/99designs/gqlgen v0.17.43 h1:I4SYg6ahjowErAQcHFVKy5EcWuwJ3+Xw9z2fLpuFCPo=
github.com/99designs/gqlgen v0.17.43/go.mod h1:lO0Zjy8MkZgBdv4T1U91x09r0e0WFOdhVUutlQs1Rsc=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=

@@ -31,6 +33,8 @@ github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVO
github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 h1:byYvvbfSo3+9efR4IeReh77gVs4PnNDR3AMOE9NJ7a0=
github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0/go.mod h1:q37NoqncT41qKc048STsifIt69LfUJ8SrWWcz/yam5k=
github.com/alecthomas/assert/v2 v2.0.0-alpha3 h1:pcHeMvQ3OMstAWgaeaXIAL8uzB9xMm2zlxt+/4ml8lk=

@@ -98,9 +102,13 @@ github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 h1:QAVZ3pN/J4/Uzi
github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96/go.mod h1:Wa6n8cYIdaG35x15aH3Zy6d03f7P728QfdcDeD/IEOs=
github.com/anacrolix/utp v0.1.0 h1:FOpQOmIwYsnENnz7tAGohA+r6iXpRjrq8ssKSre2Cp4=
github.com/anacrolix/utp v0.1.0/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=

@@ -142,7 +150,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=

@@ -158,6 +165,8 @@ github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWa
github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=

@@ -229,7 +238,6 @@ github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3a
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=

@@ -358,13 +366,12 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lmittmann/tint v1.0.4 h1:LeYihpJ9hyGvE0w+K2okPTGUdVLfng1+nDNVR4vWISc=
github.com/lmittmann/tint v1.0.4/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=

@@ -500,9 +507,6 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs=
github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A=
github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=

@@ -510,6 +514,8 @@ github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=

@@ -518,6 +524,8 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
github.com/sosodev/duration v1.1.0 h1:kQcaiGbJaIsRqgQy7VGlZrVw1giWO+lDoX3MCPnpVO4=
github.com/sosodev/duration v1.1.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=

@@ -558,14 +566,12 @@ github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli/v2 v2.27.0 h1:uNs1K8JwTFL84X68j5Fjny6hfANh9nTlJ6dRtZAFAHY=
github.com/urfave/cli/v2 v2.27.0/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/vektah/gqlparser/v2 v2.5.11 h1:JJxLtXIoN7+3x6MBdtIP59TP1RANnY7pXOaDnADQSf8=
github.com/vektah/gqlparser/v2 v2.5.11/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc=
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willscott/go-nfs v0.0.2-0.20240102104133-9c599ee601d3 h1:ObGKsnV2OfjSlp0sCJwV0toiQl2eoqlFwrln1W5aBPg=
github.com/willscott/go-nfs v0.0.2-0.20240102104133-9c599ee601d3/go.mod h1:+7+CzZfrWAP2Ff9h/6MhCMrjmitC21Yxt7nF/erAHNM=
github.com/willscott/go-nfs v0.0.2 h1:BaBp1CpGDMooCT6bCgX6h6ZwgPcTMST4yToYZ9byee0=
github.com/willscott/go-nfs v0.0.2/go.mod h1:SvullWeHxr/924WQNbUaZqtluBt2vuZ61g6yAV+xj7w=
github.com/willscott/go-nfs-client v0.0.0-20200605172546-271fa9065b33 h1:Wd8wdpRzPXskyHvZLyw7Wc1fp5oCE2mhBCj7bAiibUs=
github.com/willscott/go-nfs-client v0.0.0-20200605172546-271fa9065b33/go.mod h1:cOUKSNty+RabZqKhm5yTJT5Vq/Fe83ZRWAJ5Kj8nRes=
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 h1:U0DnHRZFzoIV1oFEZczg5XyPut9yxk9jjtax/9Bxr/o=
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00/go.mod h1:Tq++Lr/FgiS3X48q5FETemXiSLGuYMQT2sPjYNPJSwA=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=

@@ -574,7 +580,6 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zema1/go-nfs-client v0.0.0-20200604081958-0cf942f0e0fe/go.mod h1:im3CVJ32XM3+E+2RhY0sa5IVJVQehUrX0oE1wX4xOwU=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=

@@ -607,8 +612,8 @@ golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=

@@ -638,6 +643,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -689,8 +696,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -732,7 +739,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

@@ -740,9 +746,6 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=

@@ -764,8 +767,8 @@ golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

@@ -828,8 +831,6 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=

@@ -838,8 +839,6 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

@@ -859,14 +858,13 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
graphql/mutation.graphql (new file, 10 lines)

@@ -0,0 +1,10 @@
type Mutation {
  validateTorrents(filter: TorrentFilter!): Boolean!
  cleanupTorrents(files: Boolean, dryRun: Boolean!): Int!
}

input TorrentFilter @oneOf {
  everything: Boolean
  infohash: String
  # pathGlob: String!
}
graphql/query.graphql (new file, 43 lines)

@@ -0,0 +1,43 @@
type Query {
  torrents(filter: TorrentsFilter, pagination: Pagination): [Torrent!]!
}

input TorrentsFilter {
  name: StringFilter
  bytesCompleted: IntFilter
  bytesMissing: IntFilter

  peersCount: IntFilter
}

input Pagination {
  offset: Int!
  limit: Int!
}

input StringFilter @oneOf {
  eq: String
  substr: String
  in: [String!]
}

input IntFilter @oneOf {
  eq: Int
  gt: Int
  lt: Int
  gte: Int
  lte: Int
  in: [Int!]
}

input DateTimeFilter @oneOf {
  eq: DateTime
  gt: DateTime
  lt: DateTime
  gte: DateTime
  lte: DateTime
}

input BooleanFilter @oneOf {
  eq: Boolean
}
graphql/schema.graphql (new file, 9 lines)

@@ -0,0 +1,9 @@
directive @oneOf on INPUT_OBJECT | FIELD_DEFINITION

scalar DateTime

type Schema {
  query: Query
  mutation: Mutation
}
graphql/types/torrent.graphql (new file, 24 lines)

@@ -0,0 +1,24 @@
type Torrent {
  name: String!
  infohash: String!
  bytesCompleted: Int!
  torrentFilePath: String!
  bytesMissing: Int!
  files: [TorrentFile!]!
  excludedFiles: [TorrentFile!]!
  peers: [TorrentPeer!]!
}

type TorrentFile {
  filename: String!
  size: Int!
  bytesCompleted: Int!
}

type TorrentPeer {
  ip: String!
  downloadRate: Float!
  discovery: String!
  port: Int!
  clientName: String!
}
src/delivery/graphql/generated.go (new file, 5741 lines)
File diff suppressed because it is too large
src/delivery/graphql/model/filter.go (new file, 21 lines)

@@ -0,0 +1,21 @@
package model

import "slices"

func (f *IntFilter) IsValid(v int64) bool {
	if f.Eq != nil {
		return v == *f.Eq
	} else if f.Gt != nil {
		return v > *f.Gt
	} else if f.Gte != nil {
		return v >= *f.Gte
	} else if f.Lt != nil {
		return v < *f.Lt
	} else if f.Lte != nil {
		return v <= *f.Lte
	} else if f.In != nil {
		return slices.Contains(f.In, v)
	}

	return true
}
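IsValid applies whichever single condition is set on the filter, matching the @oneOf contract, and treats an empty filter as a match. A minimal usage sketch with illustrative values only:

	gt := int64(1 << 20)
	f := &model.IntFilter{Gt: &gt}
	f.IsValid(2 << 20)              // true: above the threshold
	f.IsValid(512)                  // false: below the threshold
	(&model.IntFilter{}).IsValid(0) // true: no condition set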
src/delivery/graphql/model/mappers.go (new file, 24 lines)

@@ -0,0 +1,24 @@
package model

import "github.com/anacrolix/torrent"

func MapPeerSource(source torrent.PeerSource) string {
	switch source {
	case torrent.PeerSourceDirect:
		return "Direct"
	case torrent.PeerSourceUtHolepunch:
		return "Ut Holepunch"
	case torrent.PeerSourceDhtAnnouncePeer:
		return "DHT Announce"
	case torrent.PeerSourceDhtGetPeers:
		return "DHT"
	case torrent.PeerSourceIncoming:
		return "Incoming"
	case torrent.PeerSourceTracker:
		return "Tracker"
	case torrent.PeerSourcePex:
		return "PEX"
	default:
		return "Unknown"
	}
}
src/delivery/graphql/model/models_gen.go (new file, 93 lines)

@@ -0,0 +1,93 @@
// Code generated by github.com/99designs/gqlgen, DO NOT EDIT.

package model

import (
	"time"

	"git.kmsign.ru/royalcat/tstor/src/host/controller"
	"github.com/anacrolix/torrent"
)

type BooleanFilter struct {
	Eq *bool `json:"eq,omitempty"`
}

type DateTimeFilter struct {
	Eq  *time.Time `json:"eq,omitempty"`
	Gt  *time.Time `json:"gt,omitempty"`
	Lt  *time.Time `json:"lt,omitempty"`
	Gte *time.Time `json:"gte,omitempty"`
	Lte *time.Time `json:"lte,omitempty"`
}

type IntFilter struct {
	Eq  *int64  `json:"eq,omitempty"`
	Gt  *int64  `json:"gt,omitempty"`
	Lt  *int64  `json:"lt,omitempty"`
	Gte *int64  `json:"gte,omitempty"`
	Lte *int64  `json:"lte,omitempty"`
	In  []int64 `json:"in,omitempty"`
}

type Mutation struct {
}

type Pagination struct {
	Offset int64 `json:"offset"`
	Limit  int64 `json:"limit"`
}

type Query struct {
}

type Schema struct {
	Query    *Query    `json:"query,omitempty"`
	Mutation *Mutation `json:"mutation,omitempty"`
}

type StringFilter struct {
	Eq     *string  `json:"eq,omitempty"`
	Substr *string  `json:"substr,omitempty"`
	In     []string `json:"in,omitempty"`
}

type Torrent struct {
	Name            string              `json:"name"`
	Infohash        string              `json:"infohash"`
	BytesCompleted  int64               `json:"bytesCompleted"`
	TorrentFilePath string              `json:"torrentFilePath"`
	BytesMissing    int64               `json:"bytesMissing"`
	Files           []*TorrentFile      `json:"files"`
	ExcludedFiles   []*TorrentFile      `json:"excludedFiles"`
	Peers           []*TorrentPeer      `json:"peers"`
	T               *controller.Torrent `json:"-"`
}

type TorrentFile struct {
	Filename       string        `json:"filename"`
	Size           int64         `json:"size"`
	BytesCompleted int64         `json:"bytesCompleted"`
	F              *torrent.File `json:"-"`
}

type TorrentFilter struct {
	Everything *bool   `json:"everything,omitempty"`
	Infohash   *string `json:"infohash,omitempty"`
}

type TorrentPeer struct {
	IP           string            `json:"ip"`
	DownloadRate float64           `json:"downloadRate"`
	Discovery    string            `json:"discovery"`
	Port         int64             `json:"port"`
	ClientName   string            `json:"clientName"`
	F            *torrent.PeerConn `json:"-"`
}

type TorrentsFilter struct {
	Name           *StringFilter `json:"name,omitempty"`
	BytesCompleted *IntFilter    `json:"bytesCompleted,omitempty"`
	BytesMissing   *IntFilter    `json:"bytesMissing,omitempty"`
	PeersCount     *IntFilter    `json:"peersCount,omitempty"`
}
src/delivery/graphql/oneof.go (new file, 28 lines)

@@ -0,0 +1,28 @@
package graph

import (
	"context"
	"fmt"

	"github.com/99designs/gqlgen/graphql"
)

func OneOf(ctx context.Context, obj interface{}, next graphql.Resolver) (res interface{}, err error) {
	wasValue := false
	m, ok := obj.(map[string]any)
	if !ok {
		return nil, fmt.Errorf("OneOf error, unknow object type: %T", obj)
	}

	for k, v := range m {
		if v != nil {
			if !wasValue {
				wasValue = true
			} else {
				return nil, fmt.Errorf("OneOf with multiple fields: %s", k)
			}
		}
	}

	return next(ctx)
}
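OneOf backs the @oneOf directive: gqlgen hands it the raw input map, and it rejects inputs with more than one non-nil field before the wrapped resolver runs. A rough illustration of the behaviour (the field names are only examples):

	ctx := context.Background()
	next := func(ctx context.Context) (interface{}, error) { return nil, nil }

	_, err := graph.OneOf(ctx, map[string]any{"infohash": "abc"}, next)                    // err == nil: one field set
	_, err = graph.OneOf(ctx, map[string]any{"infohash": "abc", "everything": true}, next) // err != nil: two fields set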
src/delivery/graphql/resolver/mutation.resolvers.go (new file, 64 lines)

@@ -0,0 +1,64 @@
package resolver

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.43

import (
	"context"

	graph "git.kmsign.ru/royalcat/tstor/src/delivery/graphql"
	"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model"
)

// ValidateTorrents is the resolver for the validateTorrents field.
func (r *mutationResolver) ValidateTorrents(ctx context.Context, filter model.TorrentFilter) (bool, error) {
	if filter.Infohash != nil {
		t, err := r.Resolver.Service.GetTorrent(*filter.Infohash)
		if err != nil {
			return false, err
		}
		if t == nil {
			return false, nil
		}

		t.ValidateTorrent()
		return true, nil
	}

	if filter.Everything != nil && *filter.Everything {
		torrents, err := r.Resolver.Service.ListTorrents(ctx)
		if err != nil {
			return false, err
		}
		for _, v := range torrents {
			if err := v.ValidateTorrent(); err != nil {
				return false, err
			}
		}
		return true, nil
	}

	return false, nil
}

// CleanupTorrents is the resolver for the cleanupTorrents field.
func (r *mutationResolver) CleanupTorrents(ctx context.Context, files *bool, dryRun bool) (int64, error) {
	torrents, err := r.Service.ListTorrents(ctx)
	if err != nil {
		return 0, err
	}

	if files != nil && *files {
		r, err := r.Service.Storage.CleanupFiles(ctx, torrents, dryRun)
		return int64(r), err
	} else {
		r, err := r.Service.Storage.CleanupDirs(ctx, torrents, dryRun)
		return int64(r), err
	}
}

// Mutation returns graph.MutationResolver implementation.
func (r *Resolver) Mutation() graph.MutationResolver { return &mutationResolver{r} }

type mutationResolver struct{ *Resolver }
src/delivery/graphql/resolver/query.resolvers.go (new file, 75 lines)

@@ -0,0 +1,75 @@
package resolver

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.43

import (
	"context"

	graph "git.kmsign.ru/royalcat/tstor/src/delivery/graphql"
	"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model"
)

// Torrents is the resolver for the torrents field.
func (r *queryResolver) Torrents(ctx context.Context, filter *model.TorrentsFilter, pagination *model.Pagination) ([]*model.Torrent, error) {
	torrents, err := r.Service.ListTorrents(ctx)
	if err != nil {
		return nil, err
	}

	filterFuncs := []func(torrent *model.Torrent) bool{}

	if filter != nil {
		if filter.BytesCompleted != nil {
			filterFuncs = append(filterFuncs, func(torrent *model.Torrent) bool {
				return filter.BytesCompleted.IsValid(torrent.BytesCompleted)
			})
		}
		if filter.BytesMissing != nil {
			filterFuncs = append(filterFuncs, func(torrent *model.Torrent) bool {
				return filter.BytesMissing.IsValid(torrent.BytesMissing)
			})
		}
		if filter.PeersCount != nil {
			filterFuncs = append(filterFuncs, func(torrent *model.Torrent) bool {
				return filter.PeersCount.IsValid(
					int64(len(torrent.T.Torrent().PeerConns())),
				)
			})
		}

	}

	filterFunc := func(torrent *model.Torrent) bool {
		for _, f := range filterFuncs {
			if !f(torrent) {
				return false
			}
		}
		return true
	}

	tr := []*model.Torrent{}
	for _, t := range torrents {
		d := &model.Torrent{
			Infohash:       t.InfoHash(),
			Name:           t.Name(),
			BytesCompleted: t.BytesCompleted(),
			BytesMissing:   t.BytesMissing(),
			T:              t,
		}

		if !filterFunc(d) {
			continue
		}
		tr = append(tr, d)
	}

	return tr, nil
}

// Query returns graph.QueryResolver implementation.
func (r *Resolver) Query() graph.QueryResolver { return &queryResolver{r} }

type queryResolver struct{ *Resolver }
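Note that the pagination argument is accepted but not applied yet in this WIP revision. If it were, one straightforward option would be to slice the filtered result before returning — a sketch under that assumption, not code from the diff:

	if pagination != nil {
		offset := min(int(pagination.Offset), len(tr))
		end := min(offset+int(pagination.Limit), len(tr))
		tr = tr[offset:end]
	}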
src/delivery/graphql/resolver/resolver.go (new file, 11 lines)

@@ -0,0 +1,11 @@
package resolver

import "git.kmsign.ru/royalcat/tstor/src/host/service"

// This file will not be regenerated automatically.
//
// It serves as dependency injection for your app, add any dependencies you require here.

type Resolver struct {
	Service *service.Service
}
src/delivery/graphql/resolver/torrent.resolvers.go (new file, 73 lines)

@@ -0,0 +1,73 @@
package resolver

// This file will be automatically regenerated based on the schema, any resolver implementations
// will be copied through when generating and any unknown code will be moved to the end.
// Code generated by github.com/99designs/gqlgen version v0.17.43

import (
	"context"

	graph "git.kmsign.ru/royalcat/tstor/src/delivery/graphql"
	"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model"
)

// Name is the resolver for the name field.
func (r *torrentResolver) Name(ctx context.Context, obj *model.Torrent) (string, error) {
	return obj.T.Name(), nil
}

// Files is the resolver for the files field.
func (r *torrentResolver) Files(ctx context.Context, obj *model.Torrent) ([]*model.TorrentFile, error) {
	out := []*model.TorrentFile{}
	files, err := obj.T.Files()
	if err != nil {
		return nil, err
	}
	for _, f := range files {
		out = append(out, &model.TorrentFile{
			Filename:       f.DisplayPath(),
			Size:           f.Length(),
			BytesCompleted: f.BytesCompleted(),
			F:              f,
		})
	}
	return out, nil
}

// ExcludedFiles is the resolver for the excludedFiles field.
func (r *torrentResolver) ExcludedFiles(ctx context.Context, obj *model.Torrent) ([]*model.TorrentFile, error) {
	out := []*model.TorrentFile{}
	files, err := obj.T.ExcludedFiles()
	if err != nil {
		return nil, err
	}
	for _, f := range files {
		out = append(out, &model.TorrentFile{
			Filename: f.DisplayPath(),
			Size:     f.Length(),
			F:        f,
		})
	}
	return out, nil
}

// Peers is the resolver for the peers field.
func (r *torrentResolver) Peers(ctx context.Context, obj *model.Torrent) ([]*model.TorrentPeer, error) {
	peers := []*model.TorrentPeer{}
	for _, peer := range obj.T.Torrent().PeerConns() {
		peers = append(peers, &model.TorrentPeer{
			IP:           peer.RemoteAddr.String(),
			DownloadRate: peer.DownloadRate(),
			Discovery:    model.MapPeerSource(peer.Discovery),
			Port:         int64(peer.PeerListenPort),
			ClientName:   peer.PeerClientName.Load().(string),
			F:            peer,
		})
	}
	return peers, nil
}

// Torrent returns graph.TorrentResolver implementation.
func (r *Resolver) Torrent() graph.TorrentResolver { return &torrentResolver{r} }

type torrentResolver struct{ *Resolver }
src/delivery/router.go (new file, 35 lines)

@@ -0,0 +1,35 @@
package delivery

import (
	"net/http"

	graph "git.kmsign.ru/royalcat/tstor/src/delivery/graphql"
	"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/resolver"
	"git.kmsign.ru/royalcat/tstor/src/host/service"
	"github.com/99designs/gqlgen/graphql/handler"
	"github.com/99designs/gqlgen/graphql/handler/extension"
	"github.com/99designs/gqlgen/graphql/handler/lru"
	"github.com/99designs/gqlgen/graphql/handler/transport"
)

func GraphQLHandler(service *service.Service) http.Handler {
	graphqlHandler := handler.NewDefaultServer(
		graph.NewExecutableSchema(
			graph.Config{
				Resolvers: &resolver.Resolver{Service: service},
				Directives: graph.DirectiveRoot{
					OneOf: graph.OneOf,
				},
			},
		),
	)
	graphqlHandler.AddTransport(&transport.POST{})
	graphqlHandler.AddTransport(&transport.Websocket{})
	graphqlHandler.AddTransport(&transport.SSE{})
	graphqlHandler.AddTransport(&transport.UrlEncodedForm{})
	graphqlHandler.SetQueryCache(lru.New(1000))
	graphqlHandler.Use(extension.Introspection{})
	graphqlHandler.Use(extension.AutomaticPersistedQuery{Cache: lru.New(100)})

	return graphqlHandler
}
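GraphQLHandler returns a plain http.Handler, so it can be mounted on any mux. A minimal wiring sketch — the route, port and optional playground are assumptions, not part of this commit:

	package main

	import (
		"net/http"

		"git.kmsign.ru/royalcat/tstor/src/delivery"
		"git.kmsign.ru/royalcat/tstor/src/host/service"
		"github.com/99designs/gqlgen/graphql/playground"
	)

	func serveGraphQL(svc *service.Service) error {
		mux := http.NewServeMux()
		mux.Handle("/graphql", delivery.GraphQLHandler(svc))
		// Optional GraphiQL UI from gqlgen's playground package.
		mux.Handle("/", playground.Handler("tstor", "/graphql"))
		return http.ListenAndServe(":4000", mux)
	}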
@@ -3,14 +3,13 @@
package fuse

import (
+	"log/slog"
	"os"
	"path/filepath"
	"runtime"

	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
	"github.com/billziss-gh/cgofuse/fuse"
-
-	"github.com/rs/zerolog/log"
)

type Handler struct {

@@ -18,16 +17,20 @@ type Handler struct {
	path string

	host *fuse.FileSystemHost
+	log  *slog.Logger
}

func NewHandler(fuseAllowOther bool, path string) *Handler {
	return &Handler{
		fuseAllowOther: fuseAllowOther,
		path:           path,
+		log:            slog.With("component", "fuse-handler").With("path", path),
	}
}

func (s *Handler) Mount(vfs vfs.Filesystem) error {
+	log := s.log.With("function", "Mount")
+
	folder := s.path
	// On windows, the folder must don't exist
	if runtime.GOOS == "windows" {

@@ -52,18 +55,20 @@ func (s *Handler) Mount(vfs vfs.Filesystem) error {

		ok := host.Mount(s.path, config)
		if !ok {
-			log.Error().Str("path", s.path).Msg("error trying to mount filesystem")
+			log.Error("error trying to mount filesystem")
		}
	}()

	s.host = host

-	log.Info().Str("path", s.path).Msg("starting FUSE mount")
+	log.Info("starting FUSE mount", "path", s.path)

	return nil
}

func (s *Handler) Unmount() {
+	log := s.log.With("function", "Unmount")
+
	if s.host == nil {
		return
	}

@@ -71,6 +76,6 @@ func (s *Handler) Unmount() {
	ok := s.host.Unmount()
	if !ok {
		//TODO try to force unmount if possible
-		log.Error().Str("path", s.path).Msg("unmount failed")
+		log.Error("unmount failed")
	}
}
|
@ -5,26 +5,24 @@ package fuse
|
|||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"log/slog"
|
||||
"math"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"github.com/billziss-gh/cgofuse/fuse"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type fuseFS struct {
|
||||
fuse.FileSystemBase
|
||||
fh *fileHandler
|
||||
|
||||
log zerolog.Logger
|
||||
log *slog.Logger
|
||||
}
|
||||
|
||||
func newFuseFS(fs vfs.Filesystem) fuse.FileSystemInterface {
|
||||
l := log.Logger.With().Str("component", "fuse").Logger()
|
||||
l := slog.With("component", "fuse")
|
||||
return &fuseFS{
|
||||
fh: &fileHandler{fs: fs},
|
||||
log: l,
|
||||
|
@ -32,14 +30,16 @@ func newFuseFS(fs vfs.Filesystem) fuse.FileSystemInterface {
|
|||
}
|
||||
|
||||
func (fs *fuseFS) Open(path string, flags int) (errc int, fh uint64) {
|
||||
log := fs.log.With("function", "Open", "path", path, "flags", flags)
|
||||
|
||||
fh, err := fs.fh.OpenHolder(path)
|
||||
if os.IsNotExist(err) {
|
||||
fs.log.Debug().Str("path", path).Msg("file does not exists")
|
||||
log.Debug("file does not exists")
|
||||
return -fuse.ENOENT, fhNone
|
||||
|
||||
}
|
||||
if err != nil {
|
||||
fs.log.Error().Err(err).Str("path", path).Msg("error opening file")
|
||||
log.Error("error opening file", "err", err)
|
||||
return -fuse.EIO, fhNone
|
||||
}
|
||||
|
||||
|
@ -57,6 +57,7 @@ func (fs *fuseFS) Opendir(path string) (errc int, fh uint64) {
|
|||
}
|
||||
|
||||
func (fs *fuseFS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
|
||||
log := fs.log.With("function", "Getattr", "path", path, "filehandler", fh)
|
||||
if path == "/" {
|
||||
stat.Mode = fuse.S_IFDIR | 0555
|
||||
return 0
|
||||
|
@ -64,12 +65,12 @@ func (fs *fuseFS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int)
|
|||
|
||||
file, err := fs.fh.GetFile(path, fh)
|
||||
if os.IsNotExist(err) {
|
||||
fs.log.Debug().Str("path", path).Msg("file does not exists")
|
||||
log.Debug("file does not exists", "error", err)
|
||||
return -fuse.ENOENT
|
||||
|
||||
}
|
||||
if err != nil {
|
||||
fs.log.Error().Err(err).Str("path", path).Msg("error getting holder when reading file attributes")
|
||||
log.Error("error getting holder when reading file attributes", "error", err)
|
||||
return -fuse.EIO
|
||||
}
|
||||
|
||||
|
@ -84,14 +85,15 @@ func (fs *fuseFS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int)
|
|||
}
|
||||
|
||||
func (fs *fuseFS) Read(path string, dest []byte, off int64, fh uint64) int {
|
||||
log := fs.log.With("function", "Read", "path", path, "offset", off, "filehandler", fh)
|
||||
file, err := fs.fh.GetFile(path, fh)
|
||||
if os.IsNotExist(err) {
|
||||
fs.log.Error().Err(err).Str("path", path).Msg("file not found on READ operation")
|
||||
log.Error("file not found on READ operation", "path", path, "error", err)
|
||||
return -fuse.ENOENT
|
||||
|
||||
}
|
||||
if err != nil {
|
||||
fs.log.Error().Err(err).Str("path", path).Msg("error getting holder reading data from file")
|
||||
fs.log.Error("error getting holder reading data from file", "path", path, "error", err)
|
||||
return -fuse.EIO
|
||||
}
|
||||
|
||||
|
@ -104,7 +106,7 @@ func (fs *fuseFS) Read(path string, dest []byte, off int64, fh uint64) int {
|
|||
|
||||
n, err := file.ReadAt(buf, off)
|
||||
if err != nil && err != io.EOF {
|
||||
log.Error().Err(err).Str("path", path).Msg("error reading data")
|
||||
log.Error("error reading data")
|
||||
return -fuse.EIO
|
||||
}
|
||||
|
||||
|
@ -113,8 +115,9 @@ func (fs *fuseFS) Read(path string, dest []byte, off int64, fh uint64) int {
|
|||
}
|
||||
|
||||
func (fs *fuseFS) Release(path string, fh uint64) int {
|
||||
log := fs.log.With("function", "Release", "path", path, "filehandler", fh)
|
||||
if err := fs.fh.Remove(fh); err != nil {
|
||||
fs.log.Error().Err(err).Str("path", path).Msg("error getting holder when releasing file")
|
||||
log.Error("error getting holder when releasing file", "path", path, "error", err)
|
||||
return -fuse.EIO
|
||||
}
|
||||
|
||||
|
@ -129,19 +132,20 @@ func (fs *fuseFS) Readdir(path string,
|
|||
fill func(name string, stat *fuse.Stat_t, ofst int64) bool,
|
||||
ofst int64,
|
||||
fh uint64) (errc int) {
|
||||
log := fs.log.With("function", "Readdir", "path", path, "offset", ofst, "filehandler", fh)
|
||||
fill(".", nil, 0)
|
||||
fill("..", nil, 0)
|
||||
|
||||
//TODO improve this function to make use of fh index if possible
|
||||
paths, err := fs.fh.ListDir(path)
|
||||
if err != nil {
|
||||
fs.log.Error().Err(err).Str("path", path).Msg("error reading directory")
|
||||
log.Error("error reading directory", "error", err)
|
||||
return -fuse.ENOSYS
|
||||
}
|
||||
|
||||
for _, p := range paths {
|
||||
if !fill(p, nil, 0) {
|
||||
fs.log.Error().Str("path", path).Msg("error adding directory")
|
||||
log.Error("error adding directory")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,15 +1,16 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"git.kmsign.ru/royalcat/tstor/src/log"
|
||||
zlog "github.com/rs/zerolog/log"
|
||||
nfs "github.com/willscott/go-nfs"
|
||||
nfshelper "github.com/willscott/go-nfs/helpers"
|
||||
)
|
||||
|
||||
func NewNFSv3Handler(fs vfs.Filesystem) (nfs.Handler, error) {
|
||||
nfslog := zlog.Logger.With().Str("component", "nfs").Logger()
|
||||
nfslog := slog.With("component", "nfs")
|
||||
nfs.SetLogger(log.NewNFSLog(nfslog))
|
||||
nfs.Log.SetLevel(nfs.InfoLevel)
|
||||
|
||||
|
|
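The handler above is then served over a TCP listener with the go-nfs server loop; a sketch, assuming the go-nfs Serve API and a hypothetical listen address:

listener, err := net.Listen("tcp", ":2049") // hypothetical NFS port
if err != nil {
	return err
}
handler, err := NewNFSv3Handler(fs) // fs is an assumed vfs.Filesystem
if err != nil {
	return err
}
return nfs.Serve(listener, handler) // assumed go-nfs API: Serve(net.Listener, Handler) error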
|
@ -3,16 +3,16 @@ package nfs
|
|||
import (
|
||||
"errors"
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
"path/filepath"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
type billyFsWrapper struct {
|
||||
fs vfs.Filesystem
|
||||
log zerolog.Logger
|
||||
log *slog.Logger
|
||||
}
|
||||
|
||||
var _ billy.Filesystem = (*billyFsWrapper)(nil)
|
||||
|
@ -56,7 +56,7 @@ func (fs *billyFsWrapper) Open(filename string) (billy.File, error) {
|
|||
return &billyFile{
|
||||
name: filename,
|
||||
file: file,
|
||||
log: fs.log.With().Str("filename", filename).Logger(),
|
||||
log: fs.log.With("filename", filename),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -69,7 +69,7 @@ func (fs *billyFsWrapper) OpenFile(filename string, flag int, perm fs.FileMode)
|
|||
return &billyFile{
|
||||
name: filename,
|
||||
file: file,
|
||||
log: fs.log.With().Str("filename", filename).Int("flag", flag).Str("perm", perm.String()).Logger(),
|
||||
log: fs.log.With("filename", filename, "flag", flag, "perm", perm.String()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -102,8 +102,8 @@ func (*billyFsWrapper) Readlink(link string) (string, error) {
|
|||
}
|
||||
|
||||
// Remove implements billy.Filesystem.
|
||||
func (*billyFsWrapper) Remove(filename string) error {
|
||||
return billy.ErrNotSupported
|
||||
func (s *billyFsWrapper) Remove(filename string) error {
|
||||
return s.fs.Unlink(filename)
|
||||
}
|
||||
|
||||
// Rename implements billy.Filesystem.
|
||||
|
@ -138,7 +138,7 @@ func (fs *billyFsWrapper) TempFile(dir string, prefix string) (billy.File, error
|
|||
type billyFile struct {
|
||||
name string
|
||||
file vfs.File
|
||||
log zerolog.Logger
|
||||
log *slog.Logger
|
||||
}
|
||||
|
||||
var _ billy.File = (*billyFile)(nil)
|
||||
|
@ -188,13 +188,13 @@ func (*billyFile) Unlock() error {
|
|||
return nil // TODO
|
||||
}
|
||||
|
||||
func billyErr(err error, log zerolog.Logger) error {
|
||||
func billyErr(err error, log *slog.Logger) error {
|
||||
if errors.Is(err, vfs.ErrNotImplemented) {
|
||||
return billy.ErrNotSupported
|
||||
}
|
||||
if errors.Is(err, vfs.ErrNotExist) {
|
||||
if err, ok := asErr[*fs.PathError](err); ok {
|
||||
log.Error().Err(err.Err).Str("op", err.Op).Str("path", err.Path).Msg("file not found")
|
||||
log.Error("file not found", "op", err.Op, "path", err.Path, "error", err.Err)
|
||||
}
|
||||
return fs.ErrNotExist
|
||||
}
|
||||
|
|
|
@ -1,22 +1,22 @@
|
|||
package webdav
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"net/http"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"github.com/rs/zerolog/log"
|
||||
"golang.org/x/net/webdav"
|
||||
)
|
||||
|
||||
func newHandler(fs vfs.Filesystem) *webdav.Handler {
|
||||
l := log.Logger.With().Str("component", "webDAV").Logger()
|
||||
log := slog.With("component", "webDAV")
|
||||
return &webdav.Handler{
|
||||
Prefix: "/",
|
||||
FileSystem: newFS(fs),
|
||||
LockSystem: webdav.NewMemLS(),
|
||||
Logger: func(req *http.Request, err error) {
|
||||
if err != nil {
|
||||
l.Error().Err(err).Str("path", req.RequestURI).Msg("webDAV error")
|
||||
log.Error("webDAV error", "path", req.RequestURI, "error", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
|
|
@ -2,10 +2,10 @@ package webdav
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"github.com/rs/zerolog/log"
|
||||
"golang.org/x/net/webdav"
|
||||
)
|
||||
|
||||
|
@ -33,21 +33,21 @@ func NewWebDAVServer(fs vfs.Filesystem, port int, user, pass string) error {
|
|||
Handler: serveMux,
|
||||
}
|
||||
|
||||
log.Info().Str("host", httpServer.Addr).Msg("starting webDAV server")
|
||||
slog.With("host", httpServer.Addr).Info("starting webDAV server")
|
||||
|
||||
return httpServer.ListenAndServe()
|
||||
}
|
||||
|
||||
func NewDirServer(dir string, port int, user, pass string) error {
|
||||
|
||||
l := log.Logger.With().Str("component", "webDAV").Logger()
|
||||
log := slog.With("component", "webDAV")
|
||||
srv := &webdav.Handler{
|
||||
Prefix: "/",
|
||||
FileSystem: webdav.Dir(dir),
|
||||
LockSystem: webdav.NewMemLS(),
|
||||
Logger: func(req *http.Request, err error) {
|
||||
if err != nil {
|
||||
l.Error().Err(err).Str("path", req.RequestURI).Msg("webDAV error")
|
||||
log.Error("webDAV error", "path", req.RequestURI)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
@ -72,7 +72,7 @@ func NewDirServer(dir string, port int, user, pass string) error {
|
|||
Handler: serveMux,
|
||||
}
|
||||
|
||||
log.Info().Str("host", httpServer.Addr).Msg("starting webDAV server")
|
||||
log.Info("starting webDAV server", "host", httpServer.Addr)
|
||||
|
||||
return httpServer.ListenAndServe()
|
||||
}
|
||||
|
|
107
src/host/controller/torrent.go
Normal file
|
@ -0,0 +1,107 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/store"
|
||||
"github.com/anacrolix/torrent"
|
||||
)
|
||||
|
||||
type Torrent struct {
|
||||
torrentFilePath string
|
||||
t *torrent.Torrent
|
||||
rep *store.ExlcudedFiles
|
||||
}
|
||||
|
||||
func NewTorrent(t *torrent.Torrent, rep *store.ExlcudedFiles) *Torrent {
|
||||
return &Torrent{t: t, rep: rep}
|
||||
}
|
||||
|
||||
func (s *Torrent) TorrentFilePath() string {
|
||||
return s.torrentFilePath
|
||||
}
|
||||
|
||||
func (s *Torrent) Torrent() *torrent.Torrent {
|
||||
return s.t
|
||||
}
|
||||
|
||||
func (s *Torrent) Name() string {
|
||||
<-s.t.GotInfo()
|
||||
return s.t.Name()
|
||||
}
|
||||
|
||||
func (s *Torrent) InfoHash() string {
|
||||
<-s.t.GotInfo()
|
||||
return s.t.InfoHash().HexString()
|
||||
}
|
||||
|
||||
func (s *Torrent) BytesCompleted() int64 {
|
||||
<-s.t.GotInfo()
|
||||
return s.t.BytesCompleted()
|
||||
}
|
||||
|
||||
func (s *Torrent) BytesMissing() int64 {
|
||||
<-s.t.GotInfo()
|
||||
return s.t.BytesMissing()
|
||||
}
|
||||
|
||||
func (s *Torrent) Files() ([]*torrent.File, error) {
|
||||
excludedFiles, err := s.rep.ExcludedFiles(s.t.InfoHash())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
<-s.t.GotInfo()
|
||||
files := s.t.Files()
|
||||
files = slices.DeleteFunc(files, func(file *torrent.File) bool {
|
||||
p := file.Path()
|
||||
|
||||
if strings.Contains(p, "/.pad/") {
|
||||
return false
|
||||
}
|
||||
|
||||
if !slices.Contains(excludedFiles, p) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (s *Torrent) ExcludedFiles() ([]*torrent.File, error) {
|
||||
excludedFiles, err := s.rep.ExcludedFiles(s.t.InfoHash())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
<-s.t.GotInfo()
|
||||
files := s.t.Files()
|
||||
files = slices.DeleteFunc(files, func(file *torrent.File) bool {
|
||||
p := file.Path()
|
||||
|
||||
if strings.Contains(p, "/.pad/") {
|
||||
return false
|
||||
}
|
||||
|
||||
if slices.Contains(excludedFiles, p) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (s *Torrent) ExcludeFile(f *torrent.File) error {
|
||||
return s.rep.ExcludeFile(f)
|
||||
}
|
||||
|
||||
func (s *Torrent) ValidateTorrent() error {
|
||||
<-s.t.GotInfo()
|
||||
s.t.VerifyData()
|
||||
return nil
|
||||
}
|
|
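A sketch of how the controller wrapper above can be used to hide files from the virtual filesystem; the controller value and the filter are assumptions:

func excludeSubtitles(ctl *controller.Torrent) error {
	files, err := ctl.Files() // already filtered by the excluded-files store
	if err != nil {
		return err
	}
	for _, f := range files {
		if strings.HasSuffix(f.Path(), ".srt") { // hypothetical filter
			if err := ctl.ExcludeFile(f); err != nil {
				return err
			}
		}
	}
	return nil
}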
@ -1,4 +1,4 @@
|
|||
package storage
|
||||
package filestorage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@ -6,15 +6,16 @@ import (
|
|||
"path/filepath"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/config"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/store"
|
||||
"github.com/anacrolix/torrent/storage"
|
||||
)
|
||||
|
||||
func SetupStorage(cfg config.TorrentClient) (storage.ClientImplCloser, storage.PieceCompletion, error) {
|
||||
func Setup(cfg config.TorrentClient) (*FileStorage, storage.PieceCompletion, error) {
|
||||
pcp := filepath.Join(cfg.MetadataFolder, "piece-completion")
|
||||
if err := os.MkdirAll(pcp, 0744); err != nil {
|
||||
return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
|
||||
}
|
||||
pc, err := NewBadgerPieceCompletion(pcp)
|
||||
pc, err := store.NewBadgerPieceCompletion(pcp)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error creating servers piece completion: %w", err)
|
||||
}
|
186
src/host/filestorage/storage_files.go
Normal file
|
@ -0,0 +1,186 @@
|
|||
package filestorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/controller"
|
||||
"github.com/anacrolix/torrent"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/anacrolix/torrent/storage"
|
||||
)
|
||||
|
||||
type FileStorageDeleter interface {
|
||||
storage.ClientImplCloser
|
||||
DeleteFile(file *torrent.File) error
|
||||
}
|
||||
|
||||
// NewFileStorage creates a new ClientImplCloser that stores files using the OS native filesystem.
|
||||
func NewFileStorage(baseDir string, pc storage.PieceCompletion) *FileStorage {
|
||||
return &FileStorage{
|
||||
baseDir: baseDir,
|
||||
ClientImplCloser: storage.NewFileOpts(storage.NewFileClientOpts{
|
||||
ClientBaseDir: baseDir,
|
||||
PieceCompletion: pc,
|
||||
TorrentDirMaker: torrentDir,
|
||||
FilePathMaker: filePath,
|
||||
}),
|
||||
pieceCompletion: pc,
|
||||
log: slog.With("component", "torrent-client"),
|
||||
}
|
||||
}
|
||||
|
||||
// File-based storage for torrents, that isn't yet bound to a particular torrent.
|
||||
type FileStorage struct {
|
||||
baseDir string
|
||||
storage.ClientImplCloser
|
||||
pieceCompletion storage.PieceCompletion
|
||||
log *slog.Logger
|
||||
}
|
||||
|
||||
func (me *FileStorage) Close() error {
|
||||
return me.pieceCompletion.Close()
|
||||
}
|
||||
|
||||
func torrentDir(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
|
||||
return filepath.Join(baseDir, info.Name)
|
||||
}
|
||||
|
||||
func filePath(opts storage.FilePathMakerOpts) string {
|
||||
return filepath.Join(opts.File.Path...)
|
||||
}
|
||||
|
||||
func (fs *FileStorage) filePath(info *metainfo.Info, infoHash metainfo.Hash, fileInfo *metainfo.FileInfo) string {
|
||||
return filepath.Join(torrentDir(fs.baseDir, info, infoHash), filePath(storage.FilePathMakerOpts{
|
||||
Info: info,
|
||||
File: fileInfo,
|
||||
}))
|
||||
}
|
||||
|
||||
func (fs *FileStorage) DeleteFile(file *torrent.File) error {
|
||||
info := file.Torrent().Info()
|
||||
infoHash := file.Torrent().InfoHash()
|
||||
torrentDir := torrentDir(fs.baseDir, info, infoHash)
|
||||
fileInfo := file.FileInfo()
|
||||
relFilePath := filePath(storage.FilePathMakerOpts{
|
||||
Info: info,
|
||||
File: &fileInfo,
|
||||
})
|
||||
filePath := path.Join(torrentDir, relFilePath)
|
||||
for i := file.BeginPieceIndex(); i < file.EndPieceIndex(); i++ {
|
||||
pk := metainfo.PieceKey{InfoHash: infoHash, Index: i}
|
||||
err := fs.pieceCompletion.Set(pk, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return os.Remove(filePath)
|
||||
}
|
||||
|
||||
func (fs *FileStorage) CleanupDirs(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error) {
|
||||
log := fs.log.With("function", "CleanupDirs", "expectedTorrents", len(expected), "dryRun", dryRun)
|
||||
|
||||
expectedEntries := []string{}
|
||||
for _, e := range expected {
|
||||
expectedEntries = append(expectedEntries, e.Torrent().Name())
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(fs.baseDir)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
toDelete := []string{}
|
||||
for _, v := range entries {
|
||||
if !slices.Contains(expectedEntries, v.Name()) {
|
||||
toDelete = append(toDelete, v.Name())
|
||||
}
|
||||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return 0, ctx.Err()
|
||||
}
|
||||
|
||||
log.Info("deleting trash data", "dirsCount", len(toDelete))
|
||||
if !dryRun {
|
||||
for i, name := range toDelete {
|
||||
p := path.Join(fs.baseDir, name)
|
||||
log.Warn("deleting trash data", "path", p)
|
||||
err := os.RemoveAll(p)
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return len(toDelete), nil
|
||||
}
|
||||
|
||||
func (fs *FileStorage) CleanupFiles(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error) {
|
||||
log := fs.log.With("function", "CleanupFiles", "expectedTorrents", len(expected), "dryRun", dryRun)
|
||||
|
||||
expectedEntries := []string{}
|
||||
{
|
||||
for _, e := range expected {
|
||||
files, err := e.Files()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
expectedEntries = append(expectedEntries, fs.filePath(e.Torrent().Info(), e.Torrent().InfoHash(), ptr(f.FileInfo())))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
entries := []string{}
|
||||
err := filepath.Walk(fs.baseDir,
|
||||
func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
entries = append(entries, path)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
toDelete := []string{}
|
||||
for _, v := range entries {
|
||||
if !slices.Contains(expectedEntries, v) {
|
||||
toDelete = append(toDelete, v)
|
||||
}
|
||||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return len(toDelete), ctx.Err()
|
||||
}
|
||||
|
||||
log.Info("deleting trash data", "filesCount", len(toDelete))
|
||||
if !dryRun {
|
||||
for i, p := range toDelete {
|
||||
fs.log.Warn("deleting trash data", "path", p)
|
||||
err := os.Remove(p)
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(toDelete), nil
|
||||
}
|
||||
|
||||
func ptr[D any](v D) *D {
|
||||
return &v
|
||||
}
|
|
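The two cleanup passes above are intended to be run with dryRun=true first so nothing is removed until the result is reviewed; a usage sketch (the torrent source, context and logging are assumptions):

ctx := context.Background()
expected, err := svc.ListTorrents(ctx) // assumed source of still-wanted torrents
if err != nil {
	return err
}
if n, err := stor.CleanupDirs(ctx, expected, true); err == nil {
	slog.Info("orphaned torrent dirs", "count", n) // dry run: only reports the count
}
if _, err := stor.CleanupFiles(ctx, expected, false); err != nil {
	return err
}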
@ -4,99 +4,153 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/storage"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/controller"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/filestorage"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/store"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
|
||||
"github.com/anacrolix/torrent"
|
||||
"github.com/anacrolix/torrent/bencode"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/anacrolix/torrent/types"
|
||||
"github.com/anacrolix/torrent/types/infohash"
|
||||
)
|
||||
|
||||
type Service struct {
|
||||
c *torrent.Client
|
||||
rep storage.ExlcudedFiles
|
||||
c *torrent.Client
|
||||
excludedFiles *store.ExlcudedFiles
|
||||
infoBytes *store.InfoBytes
|
||||
|
||||
torrentLoaded chan struct{}
|
||||
|
||||
// stats *Stats
|
||||
DefaultPriority types.PiecePriority
|
||||
Storage *filestorage.FileStorage
|
||||
SourceDir string
|
||||
|
||||
log *slog.Logger
|
||||
addTimeout, readTimeout int
|
||||
}
|
||||
|
||||
func NewService(c *torrent.Client, rep storage.ExlcudedFiles, addTimeout, readTimeout int) *Service {
|
||||
l := slog.With("component", "torrent-service")
|
||||
return &Service{
|
||||
log: l,
|
||||
func NewService(sourceDir string, c *torrent.Client, storage *filestorage.FileStorage, excludedFiles *store.ExlcudedFiles, infoBytes *store.InfoBytes, addTimeout, readTimeout int) *Service {
|
||||
s := &Service{
|
||||
log: slog.With("component", "torrent-service"),
|
||||
c: c,
|
||||
DefaultPriority: types.PiecePriorityNone,
|
||||
rep: rep,
|
||||
excludedFiles: excludedFiles,
|
||||
infoBytes: infoBytes,
|
||||
Storage: storage,
|
||||
SourceDir: sourceDir,
|
||||
torrentLoaded: make(chan struct{}),
|
||||
// stats: newStats(), // TODO persistent
|
||||
addTimeout: addTimeout,
|
||||
readTimeout: readTimeout,
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := s.loadTorrentFiles(context.Background())
|
||||
if err != nil {
|
||||
s.log.Error("initial torrent load failed", "error", err)
|
||||
}
|
||||
close(s.torrentLoaded)
|
||||
}()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
var _ vfs.FsFactory = (*Service)(nil).NewTorrentFs
|
||||
|
||||
func (s *Service) NewTorrent(f vfs.File) (*torrent.Torrent, error) {
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), time.Second*time.Duration(s.addTimeout))
|
||||
defer cancel()
|
||||
func (s *Service) AddTorrent(ctx context.Context, f vfs.File) (*torrent.Torrent, error) {
|
||||
defer f.Close()
|
||||
|
||||
stat, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("call stat failed: %w", err)
|
||||
}
|
||||
|
||||
mi, err := metainfo.Load(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("loading torrent metadata from file %s, error: %w", stat.Name(), err)
|
||||
}
|
||||
|
||||
t, ok := s.c.Torrent(mi.HashInfoBytes())
|
||||
if !ok {
|
||||
t, err = s.c.AddTorrent(mi)
|
||||
spec, err := torrent.TorrentSpecFromMetaInfoErr(mi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("parse spec from metadata: %w", err)
|
||||
}
|
||||
infoBytes := spec.InfoBytes
|
||||
|
||||
if !isValidInfoHashBytes(infoBytes) {
|
||||
infoBytes = nil
|
||||
}
|
||||
|
||||
if len(infoBytes) == 0 {
|
||||
infoBytes, err = s.infoBytes.GetBytes(spec.InfoHash)
|
||||
if err != nil && err != store.ErrNotFound {
|
||||
return nil, fmt.Errorf("get info bytes from database: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var info metainfo.Info
|
||||
err = bencode.Unmarshal(infoBytes, &info)
|
||||
if err != nil {
|
||||
infoBytes = nil
|
||||
} else {
|
||||
for _, t := range s.c.Torrents() {
|
||||
if t.Name() == info.BestName() {
|
||||
return nil, fmt.Errorf("torrent with name '%s' already exists", t.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t, _ = s.c.AddTorrentOpt(torrent.AddTorrentOpts{
|
||||
InfoHash: spec.InfoHash,
|
||||
Storage: s.Storage,
|
||||
InfoBytes: infoBytes,
|
||||
ChunkSize: spec.ChunkSize,
|
||||
})
|
||||
t.AllowDataDownload()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, fmt.Errorf("creating torrent fs timed out")
|
||||
return nil, fmt.Errorf("creating torrent timed out")
|
||||
case <-t.GotInfo():
|
||||
err := s.infoBytes.Set(t.InfoHash(), t.Metainfo())
|
||||
if err != nil {
|
||||
s.log.Error("error setting info bytes for torrent %s: %s", t.Name(), err.Error())
|
||||
}
|
||||
for _, f := range t.Files() {
|
||||
f.SetPriority(s.DefaultPriority)
|
||||
}
|
||||
|
||||
}
|
||||
for _, f := range t.Files() {
|
||||
f.SetPriority(s.DefaultPriority)
|
||||
}
|
||||
t.AllowDataDownload()
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func isValidInfoHashBytes(d []byte) bool {
|
||||
var info metainfo.Info
|
||||
err := bencode.Unmarshal(d, &info)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (s *Service) NewTorrentFs(f vfs.File) (vfs.Filesystem, error) {
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), time.Second*time.Duration(s.addTimeout))
|
||||
defer cancel()
|
||||
defer f.Close()
|
||||
|
||||
mi, err := metainfo.Load(f)
|
||||
t, err := s.AddTorrent(ctx, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t, ok := s.c.Torrent(mi.HashInfoBytes())
|
||||
if !ok {
|
||||
t, err = s.c.AddTorrent(mi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, fmt.Errorf("creating torrent fs timed out")
|
||||
case <-t.GotInfo():
|
||||
}
|
||||
for _, f := range t.Files() {
|
||||
f.SetPriority(s.DefaultPriority)
|
||||
}
|
||||
t.AllowDataDownload()
|
||||
}
|
||||
|
||||
return vfs.NewTorrentFs(t, s.rep, s.readTimeout), nil
|
||||
return vfs.NewTorrentFs(controller.NewTorrent(t, s.excludedFiles), s.readTimeout), nil
|
||||
}
|
||||
|
||||
func (s *Service) Stats() (*Stats, error) {
|
||||
|
@ -106,3 +160,52 @@ func (s *Service) Stats() (*Stats, error) {
|
|||
func (s *Service) GetStats() torrent.ConnStats {
|
||||
return s.c.ConnStats()
|
||||
}
|
||||
|
||||
func (s *Service) loadTorrentFiles(ctx context.Context) error {
|
||||
return filepath.Walk(s.SourceDir, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("fs walk error: %w", err)
|
||||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if strings.HasSuffix(path, ".torrent") {
|
||||
file := vfs.NewLazyOsFile(path)
|
||||
defer file.Close()
|
||||
|
||||
_, err = s.AddTorrent(ctx, file)
|
||||
if err != nil {
|
||||
s.log.Error("failed adding torrent", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Service) ListTorrents(ctx context.Context) ([]*controller.Torrent, error) {
|
||||
<-s.torrentLoaded
|
||||
|
||||
out := []*controller.Torrent{}
|
||||
for _, v := range s.c.Torrents() {
|
||||
out = append(out, controller.NewTorrent(v, s.excludedFiles))
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (s *Service) GetTorrent(infohashHex string) (*controller.Torrent, error) {
|
||||
<-s.torrentLoaded
|
||||
|
||||
t, ok := s.c.Torrent(infohash.FromHexString(infohashHex))
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return controller.NewTorrent(t, s.excludedFiles), nil
|
||||
}
|
||||
|
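AddTorrent now takes its deadline from the caller's context instead of creating one internally; a caller sketch (timeout, path and service value are assumptions):

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) // hypothetical timeout
defer cancel()

f := vfs.NewLazyOsFile("/srv/torrents/example.torrent") // hypothetical path; AddTorrent closes the file
t, err := svc.AddTorrent(ctx, f)
if err != nil {
	return err
}
slog.Info("torrent registered", "name", t.Name(), "infohash", t.InfoHash().HexString())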
|
|
@ -1,36 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/anacrolix/dht/v2"
|
||||
"github.com/anacrolix/dht/v2/bep44"
|
||||
tlog "github.com/anacrolix/log"
|
||||
"github.com/anacrolix/torrent"
|
||||
"github.com/anacrolix/torrent/storage"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/config"
|
||||
dlog "git.kmsign.ru/royalcat/tstor/src/log"
|
||||
)
|
||||
|
||||
func NewClient(st storage.ClientImpl, fis bep44.Store, cfg *config.TorrentClient, id [20]byte) (*torrent.Client, error) {
|
||||
// TODO download and upload limits
|
||||
torrentCfg := torrent.NewDefaultClientConfig()
|
||||
torrentCfg.PeerID = string(id[:])
|
||||
torrentCfg.DefaultStorage = st
|
||||
|
||||
l := log.Logger.With().Str("component", "torrent-client").Logger()
|
||||
|
||||
tl := tlog.NewLogger()
|
||||
tl.SetHandlers(&dlog.Torrent{L: l})
|
||||
torrentCfg.Logger = tl
|
||||
|
||||
torrentCfg.ConfigureAnacrolixDhtServer = func(cfg *dht.ServerConfig) {
|
||||
cfg.Store = fis
|
||||
cfg.Exp = 2 * time.Hour
|
||||
cfg.NoSecurity = false
|
||||
}
|
||||
|
||||
return torrent.NewClient(torrentCfg)
|
||||
}
|
|
@ -1,105 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
|
||||
"github.com/anacrolix/torrent"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/anacrolix/torrent/storage"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type FileStorageDeleter interface {
|
||||
storage.ClientImplCloser
|
||||
DeleteFile(file *torrent.File) error
|
||||
Cleanup(expected []*torrent.Torrent) error
|
||||
}
|
||||
|
||||
// NewFileStorage creates a new ClientImplCloser that stores files using the OS native filesystem.
|
||||
func NewFileStorage(baseDir string, pc storage.PieceCompletion) FileStorageDeleter {
|
||||
|
||||
return &FileStorage{
|
||||
baseDir: baseDir,
|
||||
ClientImplCloser: storage.NewFileOpts(storage.NewFileClientOpts{
|
||||
ClientBaseDir: baseDir,
|
||||
PieceCompletion: pc,
|
||||
TorrentDirMaker: torrentDir,
|
||||
FilePathMaker: func(opts storage.FilePathMakerOpts) string {
|
||||
return filePath(opts.File)
|
||||
},
|
||||
}),
|
||||
pieceCompletion: pc,
|
||||
log: log.Logger.With().Str("component", "torrent-client").Logger(),
|
||||
}
|
||||
}
|
||||
|
||||
// File-based storage for torrents, that isn't yet bound to a particular torrent.
|
||||
type FileStorage struct {
|
||||
baseDir string
|
||||
storage.ClientImplCloser
|
||||
pieceCompletion storage.PieceCompletion
|
||||
log zerolog.Logger
|
||||
}
|
||||
|
||||
func (me *FileStorage) Close() error {
|
||||
return me.pieceCompletion.Close()
|
||||
}
|
||||
|
||||
func torrentDir(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
|
||||
return filepath.Join(baseDir, info.Name)
|
||||
}
|
||||
|
||||
func filePath(file *metainfo.FileInfo) string {
|
||||
return filepath.Join(file.Path...)
|
||||
}
|
||||
|
||||
func (fs *FileStorage) DeleteFile(file *torrent.File) error {
|
||||
info := file.Torrent().Info()
|
||||
infoHash := file.Torrent().InfoHash()
|
||||
torrentDir := torrentDir(fs.baseDir, info, infoHash)
|
||||
fileInfo := file.FileInfo()
|
||||
relFilePath := filePath(&fileInfo)
|
||||
filePath := path.Join(torrentDir, relFilePath)
|
||||
for i := file.BeginPieceIndex(); i < file.EndPieceIndex(); i++ {
|
||||
pk := metainfo.PieceKey{InfoHash: infoHash, Index: i}
|
||||
err := fs.pieceCompletion.Set(pk, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return os.Remove(filePath)
|
||||
}
|
||||
|
||||
func (fs *FileStorage) Cleanup(expected []*torrent.Torrent) error {
|
||||
expectedEntries := []string{}
|
||||
for _, e := range expected {
|
||||
expectedEntries = append(expectedEntries, e.Name())
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(fs.baseDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
toDelete := []string{}
|
||||
for _, v := range entries {
|
||||
if !slices.Contains(expectedEntries, v.Name()) {
|
||||
toDelete = append(toDelete, v.Name())
|
||||
}
|
||||
}
|
||||
|
||||
fs.log.Info().Int("count", len(toDelete)).Msg("start deleting trash data")
|
||||
for _, name := range toDelete {
|
||||
p := path.Join(fs.baseDir, name)
|
||||
fs.log.Info().Str("path", p).Msg("deleting trash data")
|
||||
err := os.RemoveAll(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
47
src/host/store/client.go
Normal file
|
@ -0,0 +1,47 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
|
||||
"github.com/anacrolix/dht/v2/bep44"
|
||||
tlog "github.com/anacrolix/log"
|
||||
"github.com/anacrolix/torrent"
|
||||
"github.com/anacrolix/torrent/storage"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/config"
|
||||
dlog "git.kmsign.ru/royalcat/tstor/src/log"
|
||||
)
|
||||
|
||||
// MOVE
|
||||
func NewClient(st storage.ClientImpl, fis bep44.Store, cfg *config.TorrentClient, id [20]byte) (*torrent.Client, error) {
|
||||
l := slog.With("component", "torrent-client")
|
||||
|
||||
// TODO download and upload limits
|
||||
torrentCfg := torrent.NewDefaultClientConfig()
|
||||
torrentCfg.PeerID = string(id[:])
|
||||
torrentCfg.DefaultStorage = st
|
||||
// torrentCfg.AlwaysWantConns = true
|
||||
// torrentCfg.DisableAggressiveUpload = true
|
||||
// torrentCfg.Seed = true
|
||||
// torrentCfg.DownloadRateLimiter = rate.NewLimiter(rate.Inf, 0)
|
||||
// torrentCfg
|
||||
|
||||
tl := tlog.NewLogger()
|
||||
tl.SetHandlers(&dlog.Torrent{L: l})
|
||||
torrentCfg.Logger = tl
|
||||
torrentCfg.Callbacks.NewPeer = append(torrentCfg.Callbacks.NewPeer, func(p *torrent.Peer) {
|
||||
l.Debug("new peer", "ip", p.RemoteAddr.String())
|
||||
})
|
||||
|
||||
torrentCfg.Callbacks.PeerClosed = append(torrentCfg.Callbacks.PeerClosed, func(p *torrent.Peer) {
|
||||
l.Debug("peer closed", "ip", p.RemoteAddr.String())
|
||||
})
|
||||
|
||||
// torrentCfg.ConfigureAnacrolixDhtServer = func(cfg *dht.ServerConfig) {
|
||||
// cfg.Store = fis
|
||||
// cfg.Exp = 2 * time.Hour
|
||||
// cfg.NoSecurity = false
|
||||
// }
|
||||
|
||||
return torrent.NewClient(torrentCfg)
|
||||
}
|
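Roughly how the storage, item-store and client constructors above compose; a wiring sketch with simplified error handling (paths and the peer ID are assumptions):

stor, pc, err := filestorage.Setup(cfg.TorrentClient)
if err != nil {
	return err
}
_ = pc // piece completion is already wired into stor by Setup

fis, err := store.NewFileItemStore(filepath.Join(cfg.TorrentClient.MetadataFolder, "items"), 2*time.Hour) // hypothetical path
if err != nil {
	return err
}

client, err := store.NewClient(stor, fis, &cfg.TorrentClient, peerID) // peerID is an assumed [20]byte
if err != nil {
	return err
}
_ = client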
|
@ -1,4 +1,4 @@
|
|||
package storage
|
||||
package store
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
@ -7,18 +7,12 @@ import (
|
|||
|
||||
"github.com/anacrolix/torrent"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
atstorage "github.com/anacrolix/torrent/storage"
|
||||
"github.com/philippgille/gokv"
|
||||
"github.com/philippgille/gokv/badgerdb"
|
||||
"github.com/philippgille/gokv/encoding"
|
||||
)
|
||||
|
||||
type ExlcudedFiles interface {
|
||||
ExcludeFile(file *torrent.File) error
|
||||
ExcludedFiles(hash metainfo.Hash) ([]string, error)
|
||||
}
|
||||
|
||||
func NewExcludedFiles(metaDir string, storage atstorage.ClientImplCloser) (ExlcudedFiles, error) {
|
||||
func NewExcludedFiles(metaDir string, storage TorrentFileDeleter) (*ExlcudedFiles, error) {
|
||||
excludedFilesStore, err := badgerdb.NewStore(badgerdb.Options{
|
||||
Dir: filepath.Join(metaDir, "excluded-files"),
|
||||
Codec: encoding.JSON,
|
||||
|
@ -28,7 +22,7 @@ func NewExcludedFiles(metaDir string, storage atstorage.ClientImplCloser) (Exlcu
|
|||
return nil, err
|
||||
}
|
||||
|
||||
r := &torrentRepositoryImpl{
|
||||
r := &ExlcudedFiles{
|
||||
excludedFiles: excludedFilesStore,
|
||||
storage: storage,
|
||||
}
|
||||
|
@ -36,15 +30,19 @@ func NewExcludedFiles(metaDir string, storage atstorage.ClientImplCloser) (Exlcu
|
|||
return r, nil
|
||||
}
|
||||
|
||||
type torrentRepositoryImpl struct {
|
||||
type ExlcudedFiles struct {
|
||||
m sync.RWMutex
|
||||
excludedFiles gokv.Store
|
||||
storage atstorage.ClientImplCloser
|
||||
storage TorrentFileDeleter
|
||||
}
|
||||
|
||||
var ErrNotFound = errors.New("not found")
|
||||
|
||||
func (r *torrentRepositoryImpl) ExcludeFile(file *torrent.File) error {
|
||||
type TorrentFileDeleter interface {
|
||||
DeleteFile(file *torrent.File) error
|
||||
}
|
||||
|
||||
func (r *ExlcudedFiles) ExcludeFile(file *torrent.File) error {
|
||||
r.m.Lock()
|
||||
defer r.m.Unlock()
|
||||
|
||||
|
@ -59,17 +57,15 @@ func (r *torrentRepositoryImpl) ExcludeFile(file *torrent.File) error {
|
|||
}
|
||||
excludedFiles = unique(append(excludedFiles, file.Path()))
|
||||
|
||||
if storage, ok := r.storage.(FileStorageDeleter); ok {
|
||||
err = storage.DeleteFile(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = r.storage.DeleteFile(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return r.excludedFiles.Set(hash.AsString(), excludedFiles)
|
||||
}
|
||||
|
||||
func (r *torrentRepositoryImpl) ExcludedFiles(hash metainfo.Hash) ([]string, error) {
|
||||
func (r *ExlcudedFiles) ExcludedFiles(hash metainfo.Hash) ([]string, error) {
|
||||
r.m.Lock()
|
||||
defer r.m.Unlock()
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
package storage
|
||||
package store
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
79
src/host/store/info.go
Normal file
|
@ -0,0 +1,79 @@
|
|||
package store
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"path/filepath"
|
||||
|
||||
dlog "git.kmsign.ru/royalcat/tstor/src/log"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/anacrolix/torrent/types/infohash"
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
)
|
||||
|
||||
type InfoBytes struct {
|
||||
db *badger.DB
|
||||
}
|
||||
|
||||
func NewInfoBytes(metaDir string) (*InfoBytes, error) {
|
||||
l := slog.With("component", "badger", "db", "info-bytes")
|
||||
|
||||
opts := badger.
|
||||
DefaultOptions(filepath.Join(metaDir, "infobytes")).
|
||||
WithLogger(&dlog.Badger{L: l})
|
||||
db, err := badger.Open(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &InfoBytes{db}, nil
|
||||
}
|
||||
|
||||
func (k *InfoBytes) GetBytes(ih infohash.T) ([]byte, error) {
|
||||
var data []byte
|
||||
err := k.db.View(func(tx *badger.Txn) error {
|
||||
item, err := tx.Get(ih.Bytes())
|
||||
if err != nil {
|
||||
if err == badger.ErrKeyNotFound {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
return fmt.Errorf("error getting value: %w", err)
|
||||
}
|
||||
|
||||
data, err = item.ValueCopy(data)
|
||||
return err
|
||||
})
|
||||
return data, err
|
||||
}
|
||||
|
||||
func (k *InfoBytes) Get(ih infohash.T) (*metainfo.MetaInfo, error) {
|
||||
data, err := k.GetBytes(ih)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return metainfo.Load(bytes.NewReader(data))
|
||||
}
|
||||
|
||||
func (me *InfoBytes) SetBytes(ih infohash.T, bytes []byte) error {
|
||||
return me.db.Update(func(txn *badger.Txn) error {
|
||||
return txn.Set(ih.Bytes(), bytes)
|
||||
})
|
||||
}
|
||||
|
||||
func (me *InfoBytes) Set(ih infohash.T, info metainfo.MetaInfo) error {
|
||||
return me.db.Update(func(txn *badger.Txn) error {
|
||||
return txn.Set(ih.Bytes(), info.InfoBytes)
|
||||
})
|
||||
}
|
||||
|
||||
func (k *InfoBytes) Delete(ih infohash.T) error {
|
||||
return k.db.Update(func(txn *badger.Txn) error {
|
||||
return txn.Delete(ih.Bytes())
|
||||
})
|
||||
}
|
||||
|
||||
func (me *InfoBytes) Close() error {
|
||||
return me.db.Close()
|
||||
}
|
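The store above persists raw bencoded info dictionaries keyed by infohash so metadata survives restarts; a round-trip sketch (the metadata directory and the torrent value are assumptions):

ib, err := store.NewInfoBytes("/var/lib/tstor/metadata") // hypothetical directory
if err != nil {
	return err
}
defer ib.Close()

if err := ib.Set(t.InfoHash(), t.Metainfo()); err != nil { // t is an assumed *torrent.Torrent
	return err
}
data, err := ib.GetBytes(t.InfoHash())
if err == store.ErrNotFound {
	// nothing cached for this infohash yet
}
_ = data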
|
@ -1,4 +1,4 @@
|
|||
package storage
|
||||
package store
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
@ -9,7 +9,6 @@ import (
|
|||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/anacrolix/torrent/storage"
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type PieceCompletionState byte
|
||||
|
@ -34,7 +33,7 @@ type badgerPieceCompletion struct {
|
|||
var _ storage.PieceCompletion = (*badgerPieceCompletion)(nil)
|
||||
|
||||
func NewBadgerPieceCompletion(dir string) (storage.PieceCompletion, error) {
|
||||
l := log.Logger.With().Str("component", "badger").Str("db", "piece-completion").Logger()
|
||||
l := slog.With("component", "badger", "db", "piece-completion")
|
||||
|
||||
opts := badger.
|
||||
DefaultOptions(dir).
|
|
@ -1,14 +1,14 @@
|
|||
package storage
|
||||
package store
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
dlog "git.kmsign.ru/royalcat/tstor/src/log"
|
||||
"github.com/anacrolix/dht/v2/bep44"
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
var _ bep44.Store = &FileItemStore{}
|
||||
|
@ -19,7 +19,7 @@ type FileItemStore struct {
|
|||
}
|
||||
|
||||
func NewFileItemStore(path string, itemsTTL time.Duration) (*FileItemStore, error) {
|
||||
l := log.Logger.With().Str("component", "item-store").Logger()
|
||||
l := slog.With("component", "item-store")
|
||||
|
||||
opts := badger.DefaultOptions(path).
|
||||
WithLogger(&dlog.Badger{L: l}).
|
|
@ -8,7 +8,6 @@ import (
|
|||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/iio"
|
||||
"github.com/bodgit/sevenzip"
|
||||
|
@ -17,34 +16,79 @@ import (
|
|||
|
||||
var ArchiveFactories = map[string]FsFactory{
|
||||
".zip": func(f File) (Filesystem, error) {
|
||||
return NewArchive(f, f.Size(), ZipLoader), nil
|
||||
stat, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewArchive(stat.Name(), f, stat.Size(), ZipLoader), nil
|
||||
},
|
||||
".rar": func(f File) (Filesystem, error) {
|
||||
return NewArchive(f, f.Size(), RarLoader), nil
|
||||
stat, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewArchive(stat.Name(), f, stat.Size(), RarLoader), nil
|
||||
},
|
||||
".7z": func(f File) (Filesystem, error) {
|
||||
return NewArchive(f, f.Size(), SevenZipLoader), nil
|
||||
stat, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewArchive(stat.Name(), f, stat.Size(), SevenZipLoader), nil
|
||||
},
|
||||
}
|
||||
|
||||
type ArchiveLoader func(r iio.Reader, size int64) (map[string]*archiveFile, error)
|
||||
type archiveLoader func(r iio.Reader, size int64) (map[string]*archiveFile, error)
|
||||
|
||||
var _ Filesystem = &archive{}
|
||||
|
||||
type archive struct {
|
||||
name string
|
||||
|
||||
r iio.Reader
|
||||
|
||||
size int64
|
||||
|
||||
files func() (map[string]*archiveFile, error)
|
||||
files func() (map[string]File, error)
|
||||
}
|
||||
|
||||
func NewArchive(r iio.Reader, size int64, loader ArchiveLoader) *archive {
|
||||
func NewArchive(name string, r iio.Reader, size int64, loader archiveLoader) *archive {
|
||||
return &archive{
|
||||
name: name,
|
||||
r: r,
|
||||
size: size,
|
||||
files: sync.OnceValues(func() (map[string]*archiveFile, error) {
|
||||
return loader(r, size)
|
||||
files: OnceValueWOErr(func() (map[string]File, error) {
|
||||
zipFiles, err := loader(r, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO make optional
|
||||
singleDir := true
|
||||
for k := range zipFiles {
|
||||
if !strings.HasPrefix(k, "/"+name+"/") {
|
||||
singleDir = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
files := make(map[string]File, len(zipFiles))
|
||||
for k, v := range zipFiles {
|
||||
// TODO make optional
|
||||
if strings.Contains(k, "/__MACOSX/") {
|
||||
continue
|
||||
}
|
||||
|
||||
if singleDir {
|
||||
k, _ = strings.CutPrefix(k, "/"+name)
|
||||
}
|
||||
|
||||
files[k] = v
|
||||
}
|
||||
|
||||
// FIXME
|
||||
files["/.forcegallery"] = NewMemoryFile(".forcegallery", []byte{})
|
||||
|
||||
return files, nil
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
@ -80,7 +124,7 @@ func (afs *archive) Stat(filename string) (fs.FileInfo, error) {
|
|||
}
|
||||
|
||||
if file, ok := files[filename]; ok {
|
||||
return newFileInfo(path.Base(filename), file.Size()), nil
|
||||
return file.Stat()
|
||||
}
|
||||
|
||||
for p, _ := range files {
|
||||
|
@ -90,7 +134,6 @@ func (afs *archive) Stat(filename string) (fs.FileInfo, error) {
|
|||
}
|
||||
|
||||
return nil, ErrNotExist
|
||||
|
||||
}
|
||||
|
||||
var _ File = &archiveFile{}
|
||||
|
@ -162,7 +205,7 @@ func (d *archiveFile) ReadAt(p []byte, off int64) (n int, err error) {
|
|||
return d.reader.ReadAt(p, off)
|
||||
}
|
||||
|
||||
var _ ArchiveLoader = ZipLoader
|
||||
var _ archiveLoader = ZipLoader
|
||||
|
||||
func ZipLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {
|
||||
zr, err := zip.NewReader(reader, size)
|
||||
|
@ -171,14 +214,14 @@ func ZipLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {
|
|||
}
|
||||
|
||||
out := make(map[string]*archiveFile)
|
||||
for _, f := range zr.File {
|
||||
f := f
|
||||
if f.FileInfo().IsDir() {
|
||||
for i := range zr.File {
|
||||
zipFile := zr.File[i]
|
||||
if zipFile.FileInfo().IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
rf := func() (iio.Reader, error) {
|
||||
zr, err := f.Open()
|
||||
zr, err := zipFile.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -186,16 +229,13 @@ func ZipLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {
|
|||
return iio.NewDiskTeeReader(zr)
|
||||
}
|
||||
|
||||
n := filepath.Join(string(os.PathSeparator), f.Name)
|
||||
af := NewArchiveFile(f.Name, rf, f.FileInfo().Size())
|
||||
|
||||
out[n] = af
|
||||
out[AbsPath(zipFile.Name)] = NewArchiveFile(zipFile.Name, rf, zipFile.FileInfo().Size())
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
var _ ArchiveLoader = SevenZipLoader
|
||||
var _ archiveLoader = SevenZipLoader
|
||||
|
||||
func SevenZipLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {
|
||||
r, err := sevenzip.NewReader(reader, size)
|
||||
|
@ -228,7 +268,7 @@ func SevenZipLoader(reader iio.Reader, size int64) (map[string]*archiveFile, err
|
|||
return out, nil
|
||||
}
|
||||
|
||||
var _ ArchiveLoader = RarLoader
|
||||
var _ archiveLoader = RarLoader
|
||||
|
||||
func RarLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {
|
||||
r, err := rardecode.NewReader(iio.NewSeekerWrapper(reader, size))
|
||||
|
|
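With the change above, an archive whose entries all live under a single top-level directory named after the archive file is collapsed to the root of the nested filesystem; an opening sketch (the source filesystem and path are assumptions):

f, err := hostFS.Open("/downloads/album.zip") // hostFS is an assumed vfs.Filesystem
if err != nil {
	return err
}
stat, err := f.Stat()
if err != nil {
	return err
}
afs := NewArchive(stat.Name(), f, stat.Size(), ZipLoader)
// Entries stored under "/album.zip/" are exposed directly at "/".
entries, err := afs.ReadDir("/")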
|
@ -18,7 +18,8 @@ func TestZipFilesystem(t *testing.T) {
|
|||
|
||||
zReader, size := createTestZip(require)
|
||||
|
||||
zfs := NewArchive(zReader, size, ZipLoader)
|
||||
// TODO add single dir collapse test
|
||||
zfs := NewArchive("test", zReader, size, ZipLoader)
|
||||
|
||||
files, err := zfs.ReadDir("/path/to/test/file")
|
||||
require.NoError(err)
|
||||
|
|
117
src/host/vfs/log.go
Normal file
|
@ -0,0 +1,117 @@
|
|||
package vfs
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
type LogFS struct {
|
||||
fs Filesystem
|
||||
log *slog.Logger
|
||||
}
|
||||
|
||||
var _ Filesystem = (*LogFS)(nil)
|
||||
|
||||
func WrapLogFS(fs Filesystem, log *slog.Logger) *LogFS {
|
||||
return &LogFS{
|
||||
fs: fs,
|
||||
log: log.With("component", "fs"),
|
||||
}
|
||||
}
|
||||
|
||||
// Open implements Filesystem.
|
||||
func (fs *LogFS) Open(filename string) (File, error) {
|
||||
file, err := fs.fs.Open(filename)
|
||||
if err != nil {
|
||||
fs.log.With("filename", filename).Error("Failed to open file")
|
||||
}
|
||||
file = WrapLogFile(file, filename, fs.log)
|
||||
return file, err
|
||||
}
|
||||
|
||||
// ReadDir implements Filesystem.
|
||||
func (fs *LogFS) ReadDir(path string) ([]fs.DirEntry, error) {
|
||||
file, err := fs.fs.ReadDir(path)
|
||||
if err != nil {
|
||||
fs.log.Error("Failed to read dir", "path", path, "error", err)
|
||||
}
|
||||
return file, err
|
||||
}
|
||||
|
||||
// Stat implements Filesystem.
|
||||
func (fs *LogFS) Stat(filename string) (fs.FileInfo, error) {
|
||||
file, err := fs.fs.Stat(filename)
|
||||
if err != nil {
|
||||
fs.log.Error("Failed to stat", "filename", filename, "error", err)
|
||||
}
|
||||
return file, err
|
||||
}
|
||||
|
||||
// Unlink implements Filesystem.
|
||||
func (fs *LogFS) Unlink(filename string) error {
|
||||
err := fs.fs.Unlink(filename)
|
||||
if err != nil {
|
||||
fs.log.Error("Failed to stat", "filename", filename, "error", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type LogFile struct {
|
||||
f File
|
||||
log *slog.Logger
|
||||
}
|
||||
|
||||
var _ File = (*LogFile)(nil)
|
||||
|
||||
func WrapLogFile(f File, filename string, log *slog.Logger) *LogFile {
|
||||
return &LogFile{
|
||||
f: f,
|
||||
log: log.With("filename", filename),
|
||||
}
|
||||
}
|
||||
|
||||
// Close implements File.
|
||||
func (f *LogFile) Close() error {
|
||||
err := f.f.Close()
|
||||
if err != nil {
|
||||
f.log.Error("Failed to close", "error", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// IsDir implements File.
|
||||
func (f *LogFile) IsDir() bool {
|
||||
return f.f.IsDir()
|
||||
}
|
||||
|
||||
// Read implements File.
|
||||
func (f *LogFile) Read(p []byte) (n int, err error) {
|
||||
n, err = f.f.Read(p)
|
||||
if err != nil {
|
||||
f.log.Error("Failed to read", "error", err)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// ReadAt implements File.
|
||||
func (f *LogFile) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
n, err = f.f.ReadAt(p, off)
|
||||
if err != nil {
|
||||
f.log.Error("Failed to read", "offset", off, "error", err)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Size implements File.
|
||||
func (f *LogFile) Size() int64 {
|
||||
return f.f.Size()
|
||||
}
|
||||
|
||||
// Stat implements File.
|
||||
func (f *LogFile) Stat() (fs.FileInfo, error) {
|
||||
info, err := f.f.Stat()
|
||||
if err != nil {
|
||||
f.log.Error("Failed to read", "error", err)
|
||||
}
|
||||
return info, err
|
||||
}
|
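WrapLogFS is a transparent decorator that logs failures and passes results through unchanged; a sketch of wrapping another filesystem (directory and logger are assumptions):

base := NewOsFs("/srv/data")              // hypothetical host directory
logged := WrapLogFS(base, slog.Default())
f, err := logged.Open("/movies/file.mkv") // failures are logged with the filename and still returned
_ = f
_ = err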
|
@ -31,17 +31,12 @@ func (fs *OsFS) Open(filename string) (File, error) {
|
|||
return NewDir(filename), nil
|
||||
}
|
||||
|
||||
osfile, err := os.Open(path.Join(fs.hostDir, filename))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewOsFile(osfile), nil
|
||||
return NewLazyOsFile(path.Join(fs.hostDir, filename)), nil
|
||||
}
|
||||
|
||||
// ReadDir implements Filesystem.
|
||||
func (o *OsFS) ReadDir(dir string) ([]fs.DirEntry, error) {
|
||||
dir = path.Join(o.hostDir, dir)
|
||||
return os.ReadDir(dir)
|
||||
return os.ReadDir(path.Join(o.hostDir, dir))
|
||||
}
|
||||
|
||||
func NewOsFs(osDir string) *OsFS {
|
||||
|
@ -163,6 +158,7 @@ func (f *LazyOsFile) ReadAt(p []byte, off int64) (n int, err error) {
|
|||
|
||||
func (f *LazyOsFile) Stat() (fs.FileInfo, error) {
|
||||
f.m.Lock()
|
||||
defer f.m.Unlock()
|
||||
if f.info == nil {
|
||||
if f.file == nil {
|
||||
info, err := os.Stat(f.path)
|
||||
|
@ -178,7 +174,6 @@ func (f *LazyOsFile) Stat() (fs.FileInfo, error) {
|
|||
f.info = info
|
||||
}
|
||||
}
|
||||
f.m.Unlock()
|
||||
return f.info, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -113,7 +113,29 @@ func (r *resolver) isNestedFs(f string) bool {
|
|||
return true
|
||||
}
|
||||
}
|
||||
return true
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *resolver) nestedFs(fsPath string, file File) (Filesystem, error) {
|
||||
for ext, nestFactory := range r.factories {
|
||||
if !strings.HasSuffix(fsPath, ext) {
|
||||
continue
|
||||
}
|
||||
|
||||
if nestedFs, ok := r.fsmap[fsPath]; ok {
|
||||
return nestedFs, nil
|
||||
}
|
||||
|
||||
nestedFs, err := nestFactory(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating filesystem from file: %s with error: %w", fsPath, err)
|
||||
}
|
||||
r.fsmap[fsPath] = nestedFs
|
||||
|
||||
return nestedFs, nil
|
||||
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// open requeue raw open, without resolver call
|
||||
|
@ -173,7 +195,7 @@ var ErrNotExist = fs.ErrNotExist
|
|||
|
||||
func getFile[F File](m map[string]F, name string) (File, error) {
|
||||
if name == Separator {
|
||||
return &dir{}, nil
|
||||
return NewDir(name), nil
|
||||
}
|
||||
|
||||
f, ok := m[name]
|
||||
|
@ -183,7 +205,7 @@ func getFile[F File](m map[string]F, name string) (File, error) {
|
|||
|
||||
for p := range m {
|
||||
if strings.HasPrefix(p, name) {
|
||||
return &dir{}, nil
|
||||
return NewDir(name), nil
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/storage"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/controller"
|
||||
"git.kmsign.ru/royalcat/tstor/src/iio"
|
||||
"github.com/anacrolix/missinggo/v2"
|
||||
"github.com/anacrolix/torrent"
|
||||
|
@ -20,81 +20,115 @@ import (
|
|||
var _ Filesystem = &TorrentFs{}
|
||||
|
||||
type TorrentFs struct {
|
||||
mu sync.Mutex
|
||||
t *torrent.Torrent
|
||||
rep storage.ExlcudedFiles
|
||||
mu sync.Mutex
|
||||
c *controller.Torrent
|
||||
|
||||
readTimeout int
|
||||
|
||||
//cache
|
||||
filesCache map[string]*torrentFile
|
||||
filesCache map[string]File
|
||||
|
||||
resolver *resolver
|
||||
}
|
||||
|
||||
func NewTorrentFs(t *torrent.Torrent, rep storage.ExlcudedFiles, readTimeout int) *TorrentFs {
|
||||
func NewTorrentFs(c *controller.Torrent, readTimeout int) *TorrentFs {
|
||||
return &TorrentFs{
|
||||
t: t,
|
||||
rep: rep,
|
||||
c: c,
|
||||
readTimeout: readTimeout,
|
||||
resolver: newResolver(ArchiveFactories),
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *TorrentFs) files() (map[string]*torrentFile, error) {
|
||||
if fs.filesCache == nil {
|
||||
fs.mu.Lock()
|
||||
<-fs.t.GotInfo()
|
||||
files := fs.t.Files()
|
||||
func (fs *TorrentFs) files() (map[string]File, error) {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
|
||||
excludedFiles, err := fs.rep.ExcludedFiles(fs.t.InfoHash())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if fs.filesCache != nil {
|
||||
return fs.filesCache, nil
|
||||
}
|
||||
|
||||
files, err := fs.c.Files()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs.filesCache = make(map[string]File)
|
||||
for _, file := range files {
|
||||
p := AbsPath(file.Path())
|
||||
|
||||
fs.filesCache[p] = &torrentFile{
|
||||
name: path.Base(p),
|
||||
timeout: fs.readTimeout,
|
||||
file: file,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO optional
|
||||
if len(fs.filesCache) == 1 && fs.resolver.isNestedFs(fs.c.Name()) {
|
||||
filepath := "/" + fs.c.Name()
|
||||
if file, ok := fs.filesCache[filepath]; ok {
|
||||
nestedFs, err := fs.resolver.nestedFs(filepath, file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if nestedFs == nil {
|
||||
goto DEFAULT_DIR // FIXME
|
||||
}
|
||||
fs.filesCache, err = listFilesRecursive(nestedFs, "/")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fs.filesCache, nil
|
||||
}
|
||||
|
||||
fs.filesCache = make(map[string]*torrentFile)
|
||||
for _, file := range files {
|
||||
p := file.Path()
|
||||
}
|
||||
|
||||
if slices.Contains(excludedFiles, p) {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.Contains(p, "/.pad/") {
|
||||
continue
|
||||
}
|
||||
|
||||
p = AbsPath(file.Path())
|
||||
|
||||
fs.filesCache[p] = &torrentFile{
|
||||
name: path.Base(p),
|
||||
timeout: fs.readTimeout,
|
||||
file: file,
|
||||
}
|
||||
DEFAULT_DIR:
|
||||
rootDir := "/" + fs.c.Name() + "/"
|
||||
singleDir := true
|
||||
for k, _ := range fs.filesCache {
|
||||
if !strings.HasPrefix(k, rootDir) {
|
||||
singleDir = false
|
||||
}
|
||||
|
||||
rootDir := "/" + fs.t.Name() + "/"
|
||||
singleDir := true
|
||||
for k, _ := range fs.filesCache {
|
||||
if !strings.HasPrefix(k, rootDir) {
|
||||
singleDir = false
|
||||
}
|
||||
}
|
||||
if singleDir {
|
||||
for k, f := range fs.filesCache {
|
||||
delete(fs.filesCache, k)
|
||||
k, _ = strings.CutPrefix(k, rootDir)
|
||||
k = AbsPath(k)
|
||||
fs.filesCache[k] = f
|
||||
}
|
||||
if singleDir {
|
||||
for k, f := range fs.filesCache {
|
||||
delete(fs.filesCache, k)
|
||||
k, _ = strings.CutPrefix(k, rootDir)
|
||||
k = AbsPath(k)
|
||||
fs.filesCache[k] = f
|
||||
}
|
||||
}
|
||||
|
||||
fs.mu.Unlock()
|
||||
}
|
||||
|
||||
return fs.filesCache, nil
|
||||
}
|
||||
|
||||
func listFilesRecursive(vfs Filesystem, start string) (map[string]File, error) {
|
||||
out := make(map[string]File, 0)
|
||||
entries, err := vfs.ReadDir(start)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
filename := path.Join(start, entry.Name())
|
||||
if entry.IsDir() {
|
||||
rec, err := listFilesRecursive(vfs, filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
maps.Copy(out, rec)
|
||||
} else {
|
||||
file, err := vfs.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out[filename] = file
|
||||
}
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (fs *TorrentFs) rawOpen(path string) (File, error) {
|
||||
files, err := fs.files()
|
||||
if err != nil {
|
||||
|
@ -113,12 +147,7 @@ func (fs *TorrentFs) rawStat(filename string) (fs.FileInfo, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if file.IsDir() {
|
||||
return newDirInfo(path.Base(filename)), nil
|
||||
} else {
|
||||
return newFileInfo(path.Base(filename), file.Size()), nil
|
||||
}
|
||||
|
||||
return file.Stat()
|
||||
}
|
||||
|
||||
// Stat implements Filesystem.
|
||||
|
@ -184,7 +213,12 @@ func (fs *TorrentFs) Unlink(name string) error {
|
|||
file := files[name]
|
||||
delete(fs.filesCache, name)
|
||||
|
||||
return fs.rep.ExcludeFile(file.file)
|
||||
tfile, ok := file.(*torrentFile)
|
||||
if !ok {
|
||||
return ErrNotImplemented
|
||||
}
|
||||
|
||||
return fs.c.ExcludeFile(tfile.file)
|
||||
}
|
||||
|
||||
type reader interface {
|
||||
|
|
|
@ -1,6 +1,9 @@
|
|||
package vfs
|
||||
|
||||
import "strings"
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func trimRelPath(p, t string) string {
|
||||
return strings.Trim(strings.TrimPrefix(p, t), "/")
|
||||
|
@ -23,3 +26,28 @@ func AddTrailSlash(p string) string {
|
|||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// OnceValueWOErr returns a function that invokes f only once on success and caches the value
|
||||
// returned by f. If f returns an error, the next call invokes f again.
|
||||
//
|
||||
// The returned function may be called concurrently.
|
||||
func OnceValueWOErr[T any](f func() (T, error)) func() (T, error) {
|
||||
var (
|
||||
mu sync.Mutex
|
||||
isExecuted bool
|
||||
r1 T
|
||||
err error
|
||||
)
|
||||
|
||||
return func() (T, error) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
if isExecuted && err == nil {
|
||||
return r1, nil
|
||||
}
|
||||
|
||||
r1, err = f()
|
||||
return r1, err
|
||||
}
|
||||
}
|
||||
|
|
|
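A usage sketch for OnceValueWOErr: loadManifest, the file name, and the os import are made up for illustration. The point is that a successful result is cached while an error leaves the wrapper retryable.

// loadManifest is a hypothetical expensive loader; OnceValueWOErr makes the
// first successful result stick while errors stay retryable.
var loadManifest = OnceValueWOErr(func() ([]byte, error) {
	return os.ReadFile("manifest.json") // illustrative path, not from this repo
})

func handle() error {
	data, err := loadManifest() // only the first successful call hits the disk
	if err != nil {
		return err
	}
	_ = data
	return nil
}
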
@@ -2,19 +2,22 @@ package http

import (
	"fmt"
	"log/slog"
	"net/http"

	"git.kmsign.ru/royalcat/tstor"
	"git.kmsign.ru/royalcat/tstor/src/config"
	"git.kmsign.ru/royalcat/tstor/src/delivery"
	"git.kmsign.ru/royalcat/tstor/src/host/service"
	"github.com/anacrolix/missinggo/v2/filecache"
	"github.com/gin-contrib/pprof"
	"github.com/gin-gonic/gin"
	"github.com/rs/zerolog/log"
	"github.com/shurcooL/httpfs/html/vfstemplate"
)

func New(fc *filecache.Cache, ss *service.Stats, s *service.Service, logPath string, cfg *config.Config) error {
	log := slog.With()

	gin.SetMode(gin.ReleaseMode)
	r := gin.New()
	r.Use(gin.Recovery())

@@ -37,6 +40,7 @@ func New(fc *filecache.Cache, ss *service.Stats, s *service.Service, logPath str
	// r.GET("/routes", routesHandler(ss))
	r.GET("/logs", logsHandler)
	r.GET("/servers", serversFoldersHandler())
	r.Any("/graphql", gin.WrapH(delivery.GraphQLHandler(s)))

	api := r.Group("/api")
	{

@@ -50,7 +54,7 @@ func New(fc *filecache.Cache, ss *service.Stats, s *service.Service, logPath str

	}

	log.Info().Str("host", fmt.Sprintf("%s:%d", cfg.WebUi.IP, cfg.WebUi.Port)).Msg("starting webserver")
	log.Info("starting webserver", "host", fmt.Sprintf("%s:%d", cfg.WebUi.IP, cfg.WebUi.Port))

	if err := r.Run(fmt.Sprintf("%s:%d", cfg.WebUi.IP, cfg.WebUi.Port)); err != nil {
		return fmt.Errorf("error initializing server: %w", err)

@@ -60,7 +64,7 @@ func New(fc *filecache.Cache, ss *service.Stats, s *service.Service, logPath str
}

func Logger() gin.HandlerFunc {
	l := log.Logger.With().Str("component", "http").Logger()
	l := slog.With("component", "http")
	return func(c *gin.Context) {
		path := c.Request.URL.Path
		raw := c.Request.URL.RawQuery

@@ -76,11 +80,11 @@ func Logger() gin.HandlerFunc {
		s := c.Writer.Status()
		switch {
		case s >= 400 && s < 500:
			l.Warn().Str("path", path).Int("status", s).Msg(msg)
			l.Warn(msg, "path", path, "status", s)
		case s >= 500:
			l.Error().Str("path", path).Int("status", s).Msg(msg)
			l.Error(msg, "path", path, "status", s)
		default:
			l.Debug().Str("path", path).Int("status", s).Msg(msg)
			l.Debug(msg, "path", path, "status", s)
		}
	}
}

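The diff only shows the edges of Logger(); for orientation, a typical slog-based gin access-log middleware has roughly this shape. accessLog and the time import are assumptions; the elided middle of the real function may differ.

func accessLog(l *slog.Logger) gin.HandlerFunc {
	return func(c *gin.Context) {
		start := time.Now()
		path := c.Request.URL.Path

		c.Next() // run the rest of the handler chain first

		l.Info("request",
			"path", path,
			"status", c.Writer.Status(),
			"duration", time.Since(start),
		)
	}
}
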
@@ -1,30 +1,35 @@
package log

import (
	"fmt"
	"log/slog"
	"strings"

	"github.com/dgraph-io/badger/v4"
	"github.com/rs/zerolog"
)

var _ badger.Logger = (*Badger)(nil)

type Badger struct {
	L zerolog.Logger
	L *slog.Logger
}

func fmtBadgerLog(m string, f ...any) string {
	return fmt.Sprintf(strings.ReplaceAll(m, "\n", ""), f...)
}

func (l *Badger) Errorf(m string, f ...interface{}) {
	l.L.Error().Msgf(strings.ReplaceAll(m, "\n", ""), f...)
	l.L.Error(fmtBadgerLog(m, f...))
}

func (l *Badger) Warningf(m string, f ...interface{}) {
	l.L.Warn().Msgf(strings.ReplaceAll(m, "\n", ""), f...)
	l.L.Warn(fmtBadgerLog(m, f...))
}

func (l *Badger) Infof(m string, f ...interface{}) {
	l.L.Info().Msgf(strings.ReplaceAll(m, "\n", ""), f...)
	l.L.Info(fmtBadgerLog(m, f...))
}

func (l *Badger) Debugf(m string, f ...interface{}) {
	l.L.Debug().Msgf(strings.ReplaceAll(m, "\n", ""), f...)
	l.L.Debug(fmtBadgerLog(m, f...))
}

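For reference, a sketch of how a badger.Logger adapter like this is usually wired in, written as if from inside the same log package; the database path is illustrative and not taken from this commit.

opts := badger.DefaultOptions("/tmp/badger").
	WithLogger(&Badger{L: slog.Default()})

db, err := badger.Open(opts)
if err != nil {
	// handle the error however the caller prefers
	panic(err)
}
defer db.Close()
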
@@ -1,50 +1,41 @@
package log

import (
	"io"
	"log/slog"
	"os"
	"path/filepath"
	"time"

	"git.kmsign.ru/royalcat/tstor/src/config"
	"github.com/mattn/go-colorable"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"gopkg.in/natefinch/lumberjack.v2"
	"github.com/lmittmann/tint"
)

const FileName = "tstor.log"

func Load(config *config.Log) {
	var writers []io.Writer

	// fix console colors on windows
	cso := colorable.NewColorableStdout()

	writers = append(writers, zerolog.ConsoleWriter{Out: cso})
	writers = append(writers, newRollingFile(config))
	mw := io.MultiWriter(writers...)

	log.Logger = log.Output(mw)
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

	l := zerolog.InfoLevel
	level := slog.LevelInfo
	if config.Debug {
		l = zerolog.DebugLevel
		level = slog.LevelDebug
	}

	zerolog.SetGlobalLevel(l)
	slog.SetDefault(slog.New(
		tint.NewHandler(os.Stdout, &tint.Options{
			Level:      level,
			TimeFormat: time.Kitchen,
			// NoColor: !isatty.IsTerminal(os.Stdout.Fd()),
		}),
	))
}

func newRollingFile(config *config.Log) io.Writer {
	if err := os.MkdirAll(config.Path, 0744); err != nil {
		log.Error().Err(err).Str("path", config.Path).Msg("can't create log directory")
		return nil
	}
// func newRollingFile(config *config.Log) io.Writer {
// 	if err := os.MkdirAll(config.Path, 0744); err != nil {
// 		log.Error().Err(err).Str("path", config.Path).Msg("can't create log directory")
// 		return nil
// 	}

	return &lumberjack.Logger{
		Filename:   filepath.Join(config.Path, FileName),
		MaxBackups: config.MaxBackups, // files
		MaxSize:    config.MaxSize,    // megabytes
		MaxAge:     config.MaxAge,     // days
	}
}
// 	return &lumberjack.Logger{
// 		Filename:   filepath.Join(config.Path, FileName),
// 		MaxBackups: config.MaxBackups, // files
// 		MaxSize:    config.MaxSize,    // megabytes
// 		MaxAge:     config.MaxAge,     // days
// 	}
// }

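newRollingFile is commented out above, so file logging disappears with this change. One possible way to keep it under slog, sketched here as an assumption rather than something the commit does, is to hand tint a MultiWriter inside Load and disable colors so ANSI escapes do not end up in the file:

// Sketch: keep a rolling log file alongside the console output.
fileWriter := &lumberjack.Logger{
	Filename:   filepath.Join(config.Path, FileName),
	MaxBackups: config.MaxBackups, // files
	MaxSize:    config.MaxSize,    // megabytes
	MaxAge:     config.MaxAge,     // days
}

slog.SetDefault(slog.New(
	tint.NewHandler(io.MultiWriter(os.Stdout, fileWriter), &tint.Options{
		Level:      level,
		TimeFormat: time.Kitchen,
		NoColor:    true, // ANSI colors would otherwise end up in the file
	}),
))

The trade-off is that NoColor also strips colors from the console; keeping colored console output would need two separate handlers and a fan-out wrapper.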
169
src/log/nfs.go
@@ -2,172 +2,179 @@ package log

import (
	"fmt"
	"log"
	"log/slog"

	"github.com/rs/zerolog"
	nfs "github.com/willscott/go-nfs"
)

var _ nfs.Logger = (*NFSLog)(nil)

type NFSLog struct {
	r zerolog.Logger
	l zerolog.Logger
	// r *slog.Logger
	l *slog.Logger
}

func NewNFSLog(r zerolog.Logger) nfs.Logger {
func NewNFSLog(r *slog.Logger) nfs.Logger {
	return &NFSLog{
		r: r,
		l: r.Level(zerolog.DebugLevel),
		// r: r,
		// l: r.Level(zerolog.DebugLevel),
		l: r,
	}
}

// Debug implements nfs.Logger.
func (l *NFSLog) Debug(args ...interface{}) {
	l.l.Debug().Msg(fmt.Sprint(args...))
	l.l.Debug(fmt.Sprint(args...))
}

// Debugf implements nfs.Logger.
func (l *NFSLog) Debugf(format string, args ...interface{}) {
	l.l.Debug().Msgf(format, args...)
	l.l.Debug(fmt.Sprintf(format, args...))
}

// Error implements nfs.Logger.
func (l *NFSLog) Error(args ...interface{}) {
	l.l.Error().Msg(fmt.Sprint(args...))
	l.l.Error(fmt.Sprint(args...))
}

// Errorf implements nfs.Logger.
func (l *NFSLog) Errorf(format string, args ...interface{}) {
	l.l.Error().Msgf(format, args...)
	l.l.Error(fmt.Sprintf(format, args...))
}

// Fatal implements nfs.Logger.
func (l *NFSLog) Fatal(args ...interface{}) {
	l.l.Fatal().Msg(fmt.Sprint(args...))
	l.l.Error(fmt.Sprint(args...))
	log.Fatal(args...)
}

// Fatalf implements nfs.Logger.
func (l *NFSLog) Fatalf(format string, args ...interface{}) {
	l.l.Fatal().Msgf(format, args...)
	l.l.Error(fmt.Sprintf(format, args...))
	log.Fatalf(format, args...)
}

// Info implements nfs.Logger.
func (l *NFSLog) Info(args ...interface{}) {
	l.l.Info().Msg(fmt.Sprint(args...))
	l.l.Info(fmt.Sprint(args...))
}

// Infof implements nfs.Logger.
func (l *NFSLog) Infof(format string, args ...interface{}) {
	l.l.Info().Msgf(format, args...)
	l.l.Info(fmt.Sprintf(format, args...))
}

// Panic implements nfs.Logger.
func (l *NFSLog) Panic(args ...interface{}) {
	l.l.Panic().Msg(fmt.Sprint(args...))
	l.l.Error(fmt.Sprint(args...))
	panic(args)
}

// Panicf implements nfs.Logger.
func (l *NFSLog) Panicf(format string, args ...interface{}) {
	l.l.Panic().Msgf(format, args...)
	l.l.Error(fmt.Sprintf(format, args...))
	panic(args)
}

// Print implements nfs.Logger.
func (l *NFSLog) Print(args ...interface{}) {
	l.l.Print(args...)
	l.l.Info(fmt.Sprint(args...))
}

// Printf implements nfs.Logger.
func (l *NFSLog) Printf(format string, args ...interface{}) {
	l.l.Printf(format, args...)
	l.l.Info(fmt.Sprintf(format, args...))
}

// Trace implements nfs.Logger.
func (l *NFSLog) Trace(args ...interface{}) {
	l.l.Trace().Msg(fmt.Sprint(args...))
	l.l.Debug(fmt.Sprint(args...))
}

// Tracef implements nfs.Logger.
func (l *NFSLog) Tracef(format string, args ...interface{}) {
	l.l.Trace().Msgf(format, args...)
	l.l.Debug(fmt.Sprintf(format, args...))
}

// Warn implements nfs.Logger.
func (l *NFSLog) Warn(args ...interface{}) {
	l.l.Warn().Msg(fmt.Sprint(args...))
	l.l.Warn(fmt.Sprint(args...))
}

// Warnf implements nfs.Logger.
func (l *NFSLog) Warnf(format string, args ...interface{}) {
	l.l.Warn().Msgf(format, args...)
	l.l.Warn(fmt.Sprintf(format, args...))
}

// GetLevel implements nfs.Logger.
func (l *NFSLog) GetLevel() nfs.LogLevel {
	zl := l.l.GetLevel()
	switch zl {
	case zerolog.PanicLevel, zerolog.Disabled:
		return nfs.PanicLevel
	case zerolog.FatalLevel:
		return nfs.FatalLevel
	case zerolog.ErrorLevel:
		return nfs.ErrorLevel
	case zerolog.WarnLevel:
		return nfs.WarnLevel
	case zerolog.InfoLevel:
		return nfs.InfoLevel
	case zerolog.DebugLevel:
		return nfs.DebugLevel
	case zerolog.TraceLevel:
		return nfs.TraceLevel
	}
	return nfs.DebugLevel
	// zl := l.l.Handler()
	// switch zl {
	// case zerolog.PanicLevel, zerolog.Disabled:
	// 	return nfs.PanicLevel
	// case zerolog.FatalLevel:
	// 	return nfs.FatalLevel
	// case zerolog.ErrorLevel:
	// 	return nfs.ErrorLevel
	// case zerolog.WarnLevel:
	// 	return nfs.WarnLevel
	// case zerolog.InfoLevel:
	// 	return nfs.InfoLevel
	// case zerolog.DebugLevel:
	// 	return nfs.DebugLevel
	// case zerolog.TraceLevel:
	// 	return nfs.TraceLevel
	// }
	return nfs.TraceLevel
}

// ParseLevel implements nfs.Logger.
func (l *NFSLog) ParseLevel(level string) (nfs.LogLevel, error) {
	switch level {
	case "panic":
		return nfs.PanicLevel, nil
	case "fatal":
		return nfs.FatalLevel, nil
	case "error":
		return nfs.ErrorLevel, nil
	case "warn":
		return nfs.WarnLevel, nil
	case "info":
		return nfs.InfoLevel, nil
	case "debug":
		return nfs.DebugLevel, nil
	case "trace":
		return nfs.TraceLevel, nil
	}
	var ll nfs.LogLevel
	return ll, fmt.Errorf("invalid log level %q", level)
	// switch level {
	// case "panic":
	// 	return nfs.PanicLevel, nil
	// case "fatal":
	// 	return nfs.FatalLevel, nil
	// case "error":
	// 	return nfs.ErrorLevel, nil
	// case "warn":
	// 	return nfs.WarnLevel, nil
	// case "info":
	// 	return nfs.InfoLevel, nil
	// case "debug":
	// 	return nfs.DebugLevel, nil
	// case "trace":
	// 	return nfs.TraceLevel, nil
	// }
	// var ll nfs.LogLevel
	// return ll, fmt.Errorf("invalid log level %q", level)
	return nfs.TraceLevel, fmt.Errorf("level change not supported")
}

// SetLevel implements nfs.Logger.
func (l *NFSLog) SetLevel(level nfs.LogLevel) {
	switch level {
	case nfs.PanicLevel:
		l.l = l.r.Level(zerolog.PanicLevel)
		return
	case nfs.FatalLevel:
		l.l = l.r.Level(zerolog.FatalLevel)
		return
	case nfs.ErrorLevel:
		l.l = l.r.Level(zerolog.ErrorLevel)
		return
	case nfs.WarnLevel:
		l.l = l.r.Level(zerolog.WarnLevel)
		return
	case nfs.InfoLevel:
		l.l = l.r.Level(zerolog.InfoLevel)
		return
	case nfs.DebugLevel:
		l.l = l.r.Level(zerolog.DebugLevel)
		return
	case nfs.TraceLevel:
		l.l = l.r.Level(zerolog.TraceLevel)
		return
	}
	// switch level {
	// case nfs.PanicLevel:
	// 	l.l = l.r.Level(zerolog.PanicLevel)
	// 	return
	// case nfs.FatalLevel:
	// 	l.l = l.r.Level(zerolog.FatalLevel)
	// 	return
	// case nfs.ErrorLevel:
	// 	l.l = l.r.Level(zerolog.ErrorLevel)
	// 	return
	// case nfs.WarnLevel:
	// 	l.l = l.r.Level(zerolog.WarnLevel)
	// 	return
	// case nfs.InfoLevel:
	// 	l.l = l.r.Level(zerolog.InfoLevel)
	// 	return
	// case nfs.DebugLevel:
	// 	l.l = l.r.Level(zerolog.DebugLevel)
	// 	return
	// case nfs.TraceLevel:
	// 	l.l = l.r.Level(zerolog.TraceLevel)
	// 	return
	// }
}

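SetLevel and ParseLevel above effectively become no-ops because a plain *slog.Logger has no mutable level. A possible alternative, sketched as an assumption rather than something this commit does, is to route the handler's level through a slog.LevelVar so it can still be changed at runtime:

var nfsLevel = new(slog.LevelVar) // defaults to slog.LevelInfo

// Build the logger once; later level changes go through nfsLevel.Set.
nfsLogger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
	Level: nfsLevel,
}))

// Something equivalent to SetLevel(nfs.DebugLevel) would then be:
nfsLevel.Set(slog.LevelDebug)

_ = nfsLogger
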
@@ -1,32 +1,34 @@
package log

import (
	"context"
	"log/slog"

	"github.com/anacrolix/log"
	"github.com/rs/zerolog"
)

var _ log.Handler = &Torrent{}

type Torrent struct {
	L zerolog.Logger
	L *slog.Logger
}

func (l *Torrent) Handle(r log.Record) {
	e := l.L.Info()
	lvl := slog.LevelInfo
	switch r.Level {
	case log.Debug:
		e = l.L.Debug()
		lvl = slog.LevelInfo
	case log.Info:
		e = l.L.Debug().Str("error-type", "info")
		lvl = slog.LevelInfo
	case log.Warning:
		e = l.L.Warn()
		lvl = slog.LevelWarn
	case log.Error:
		e = l.L.Warn().Str("error-type", "error")
		lvl = slog.LevelError
	case log.Critical:
		e = l.L.Warn().Str("error-type", "critical")
		lvl = slog.LevelError
	}

	// TODO set log values somehow

	e.Msgf(r.Text())
	l.L.Log(context.Background(), lvl, r.Msg.String())
}

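The switch in Handle doubles as a level-mapping table. Pulled out into a helper, assuming the anacrolix log.Level type used by Record above (a refactoring sketch, not part of this commit), the mapping reads:

// anacrolixToSlogLevel mirrors the switch above: anacrolix/log levels are
// collapsed onto the three slog levels used by the rest of the codebase,
// with Debug deliberately folded into Info as in Handle.
func anacrolixToSlogLevel(lvl log.Level) slog.Level {
	switch lvl {
	case log.Debug, log.Info:
		return slog.LevelInfo
	case log.Warning:
		return slog.LevelWarn
	case log.Error, log.Critical:
		return slog.LevelError
	default:
		return slog.LevelInfo
	}
}
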
@@ -1,3 +0,0 @@
package proto

//go:generate protoc --go_out=. --go_opt=paths=source_relative --go_opt=Mtstor.proto=git.kmsign.ru/royalcat/tstor/src/proto --go-grpc_out=. --go-grpc_opt=paths=source_relative --go-grpc_opt=Mtstor.proto=git.kmsign.ru/royalcat/tstor/src/proto --proto_path=../../proto tstor.proto
9
tools.go
Normal file
@@ -0,0 +1,9 @@
//go:build tools
// +build tools

//go:generate go run github.com/99designs/gqlgen
package tstor

import (
	_ "github.com/99designs/gqlgen"
)