storage rework
Some checks failed
docker / build-docker (linux/amd64) (push) Failing after 18s
docker / build-docker (linux/386) (push) Successful in 1m57s
docker / build-docker (linux/arm64) (push) Successful in 7m22s
docker / build-docker (linux/arm/v7) (push) Successful in 7m53s
docker / build-docker (linux/arm64/v8) (push) Failing after 3h2m18s
This commit is contained in:
parent
06153d61c9
commit
e9df8925d1
49 changed files with 1825 additions and 1303 deletions
@@ -100,7 +100,10 @@ func run(configPath string) error {
    if err != nil {
        return err
    }
    ytdlpsrv := ytdlp.NewService("./ytdlp")
    ytdlpsrv, err := ytdlp.NewService("./ytdlp")
    if err != nil {
        return err
    }

    sfs := sources.NewHostedFS(
        vfs.NewCtxBillyFs("/", ctxbilly.WrapFileSystem(sourceFs)),
@@ -157,7 +160,7 @@ func run(configPath string) error {
        return
    }
    log.Info(ctx, "starting NFS server", slog.String("address", listener.Addr().String()))
    handler, err := nfs.NewNFSv3Handler(sfs)
    handler, err := nfs.NewNFSv3Handler(sfs, conf.Mounts.NFS)
    if err != nil {
        log.Error(ctx, "failed to create NFS handler", rlog.Error(err))
        return
27 go.mod
@@ -9,7 +9,7 @@ require (
    github.com/anacrolix/dht/v2 v2.21.1
    github.com/anacrolix/log v0.15.2
    github.com/anacrolix/missinggo/v2 v2.7.3
    github.com/anacrolix/torrent v1.55.0
    github.com/anacrolix/torrent v1.56.0
    github.com/billziss-gh/cgofuse v1.5.0
    github.com/bodgit/sevenzip v1.5.1
    github.com/cyphar/filepath-securejoin v0.2.5
@@ -21,7 +21,6 @@ require (
    github.com/google/uuid v1.6.0
    github.com/grafana/otel-profiling-go v0.5.1
    github.com/grafana/pyroscope-go v1.1.1
    github.com/hashicorp/go-multierror v1.1.1
    github.com/hashicorp/golang-lru/v2 v2.0.7
    github.com/iceber/iouring-go v0.0.0-20230403020409-002cfd2e2a90
    github.com/knadh/koanf/parsers/yaml v0.1.0
@@ -35,8 +34,9 @@ require (
    github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93
    github.com/ravilushqa/otelgqlgen v0.15.0
    github.com/royalcat/ctxio v0.0.0-20240602060200-590d464c39be
    github.com/royalcat/ctxprogress v0.0.0-20240511091748-6d9b327537c3
    github.com/royalcat/kv v0.0.0-20240327213417-8cf5696b2389
    github.com/royalcat/ctxprogress v0.0.0-20240614113930-3cc5bb935bff
    github.com/royalcat/kv v0.0.0-20240612224509-6aa0da315950
    github.com/royalcat/kv/kvbadger v0.0.0-20240612224509-6aa0da315950
    github.com/rs/zerolog v1.32.0
    github.com/samber/slog-multi v1.0.2
    github.com/samber/slog-zerolog v1.0.0
@@ -52,7 +52,7 @@ require (
    go.opentelemetry.io/otel/sdk v1.27.0
    go.opentelemetry.io/otel/sdk/metric v1.27.0
    go.opentelemetry.io/otel/trace v1.27.0
    golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f
    golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
    golang.org/x/net v0.25.0
    golang.org/x/sync v0.7.0
    golang.org/x/sys v0.20.0
@@ -65,7 +65,7 @@ require (
    github.com/alecthomas/atomic v0.1.0-alpha2 // indirect
    github.com/anacrolix/chansync v0.4.0 // indirect
    github.com/anacrolix/envpprof v1.3.0 // indirect
    github.com/anacrolix/generics v0.0.1 // indirect
    github.com/anacrolix/generics v0.0.2-0.20240227122613-f95486179cab // indirect
    github.com/anacrolix/go-libutp v1.3.1 // indirect
    github.com/anacrolix/missinggo v1.3.0 // indirect
    github.com/anacrolix/missinggo/perf v1.0.0 // indirect
@@ -90,7 +90,7 @@ require (
    github.com/cloudwego/base64x v0.1.4 // indirect
    github.com/cloudwego/iasm v0.2.0 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/dgraph-io/ristretto v0.1.1 // indirect
    github.com/edsrzf/mmap-go v1.1.0 // indirect
    github.com/fatih/structs v1.1.0 // indirect
@@ -98,7 +98,7 @@ require (
    github.com/gabriel-vasile/mimetype v1.4.3 // indirect
    github.com/gin-contrib/sse v0.1.0 // indirect
    github.com/go-llsqlite/adapter v0.1.0 // indirect
    github.com/go-llsqlite/crawshaw v0.5.1 // indirect
    github.com/go-llsqlite/crawshaw v0.5.2-0.20240425034140-f30eb7704568 // indirect
    github.com/go-logr/logr v1.4.1 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-playground/locales v0.14.1 // indirect
@@ -118,6 +118,7 @@ require (
    github.com/grafana/pyroscope-go/godeltaprof v0.1.7 // indirect
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
    github.com/hashicorp/errwrap v1.1.0 // indirect
    github.com/hashicorp/go-multierror v1.1.1 // indirect
    github.com/huandu/xstrings v1.4.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/klauspost/compress v1.17.8 // indirect
@@ -127,12 +128,16 @@ require (
    github.com/leodido/go-urn v1.4.0 // indirect
    github.com/mattn/go-colorable v0.1.13 // indirect
    github.com/mattn/go-isatty v0.0.20 // indirect
    github.com/minio/sha256-simd v1.0.0 // indirect
    github.com/mitchellh/copystructure v1.2.0 // indirect
    github.com/mitchellh/mapstructure v1.5.0 // indirect
    github.com/mitchellh/reflectwalk v1.0.2 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/mr-tron/base58 v1.2.0 // indirect
    github.com/mschoch/smat v0.2.0 // indirect
    github.com/multiformats/go-multihash v0.2.3 // indirect
    github.com/multiformats/go-varint v0.0.6 // indirect
    github.com/ncruces/go-strftime v0.1.9 // indirect
    github.com/pelletier/go-toml/v2 v2.2.2 // indirect
    github.com/pierrec/lz4/v4 v4.1.21 // indirect
@@ -153,7 +158,7 @@ require (
    github.com/pion/turn/v2 v2.1.6 // indirect
    github.com/pion/webrtc/v3 v3.2.40 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
    github.com/polydawn/go-timeless-api v0.0.0-20220821201550-b93919e12c56 // indirect
    github.com/polydawn/refmt v0.89.0 // indirect
    github.com/polydawn/rio v0.0.0-20220823181337-7c31ad9831a4 // indirect
@@ -163,6 +168,7 @@ require (
    github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
    github.com/samber/lo v1.39.0 // indirect
    github.com/sosodev/duration v1.3.0 // indirect
    github.com/spaolacci/murmur3 v1.1.0 // indirect
    github.com/tidwall/btree v1.7.0 // indirect
    github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
    github.com/ugorji/go/codec v1.2.12 // indirect
@@ -182,12 +188,13 @@ require (
    golang.org/x/mod v0.17.0 // indirect
    golang.org/x/text v0.15.0 // indirect
    golang.org/x/time v0.5.0 // indirect
    golang.org/x/tools v0.20.0 // indirect
    golang.org/x/tools v0.21.0 // indirect
    google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect
    google.golang.org/grpc v1.64.0 // indirect
    google.golang.org/protobuf v1.34.1 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    lukechampine.com/blake3 v1.1.6 // indirect
    modernc.org/libc v1.50.5 // indirect
    modernc.org/mathutil v1.6.0 // indirect
    modernc.org/memory v1.8.0 // indirect
48 go.sum
@@ -60,8 +60,8 @@ github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAK
github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk=
github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0=
github.com/anacrolix/generics v0.0.0-20230113004304-d6428d516633/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8=
github.com/anacrolix/generics v0.0.1 h1:4WVhK6iLb3UAAAQP6I3uYlMOHcp9FqJC9j4n81Wv9Ks=
github.com/anacrolix/generics v0.0.1/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8=
github.com/anacrolix/generics v0.0.2-0.20240227122613-f95486179cab h1:MvuAC/UJtcohN6xWc8zYXSZfllh1LVNepQ0R3BCX5I4=
github.com/anacrolix/generics v0.0.2-0.20240227122613-f95486179cab/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8=
github.com/anacrolix/go-libutp v1.3.1 h1:idJzreNLl+hNjGC3ZnUOjujEaryeOGgkwHLqSGoige0=
github.com/anacrolix/go-libutp v1.3.1/go.mod h1:heF41EC8kN0qCLMokLBVkB8NXiLwx3t8R8810MTNI5o=
github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
@@ -99,8 +99,8 @@ github.com/anacrolix/sync v0.5.1/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DC
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8=
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
github.com/anacrolix/torrent v1.56.0 h1:g/sM0K/BaWUv4Htu2bblLBhIxGdFZ1MUCoD7lcvemlo=
github.com/anacrolix/torrent v1.56.0/go.mod h1:5DMHbeIM1TuC5wTQ99XieKKLiYZYz6iB2lyZpKZEr6w=
github.com/anacrolix/upnp v0.1.4 h1:+2t2KA6QOhm/49zeNyeVwDu1ZYS9dB9wfxyVvh/wk7U=
github.com/anacrolix/upnp v0.1.4/go.mod h1:Qyhbqo69gwNWvEk1xNTXsS5j7hMHef9hdr984+9fIic=
github.com/anacrolix/utp v0.2.0 h1:65Cdmr6q9WSw2KsM+rtJFu7rqDzLl2bdysf4KlNPcFI=
@@ -164,8 +164,9 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs=
github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak=
github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
@@ -218,8 +219,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-llsqlite/adapter v0.1.0 h1:wGSQNsu/rtYeu/lqZNZQMjwUdEF3OW66xTLvsFwJQUw=
github.com/go-llsqlite/adapter v0.1.0/go.mod h1:DADrR88ONKPPeSGjFp5iEN55Arx3fi2qXZeKCYDpbmU=
github.com/go-llsqlite/crawshaw v0.5.1 h1:dIYQG2qHrGjWXVXvl00JxIHBuwD+h8VXgNubLiMoPNU=
github.com/go-llsqlite/crawshaw v0.5.1/go.mod h1:/YJdV7uBQaYDE0fwe4z3wwJIZBJxdYzd38ICggWqtaE=
github.com/go-llsqlite/crawshaw v0.5.2-0.20240425034140-f30eb7704568 h1:3EpZo8LxIzF4q3BT+vttQQlRfA6uTtTb/cxVisWa5HM=
github.com/go-llsqlite/crawshaw v0.5.2-0.20240425034140-f30eb7704568/go.mod h1:/YJdV7uBQaYDE0fwe4z3wwJIZBJxdYzd38ICggWqtaE=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -361,6 +362,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -403,6 +405,8 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -416,9 +420,15 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY=
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
@@ -487,8 +497,9 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/polydawn/go-timeless-api v0.0.0-20201121022836-7399661094a6/go.mod h1:z2fMUifgtqrZiNLgzF4ZR8pX+YFLCmAp1jJTSTvyDMM=
github.com/polydawn/go-timeless-api v0.0.0-20220821201550-b93919e12c56 h1:LQ103HjiN76aqIxnQNgdZ+7NveuKd45+Q+TYGJVVsyw=
github.com/polydawn/go-timeless-api v0.0.0-20220821201550-b93919e12c56/go.mod h1:OAK6p/pJUakz6jQ+HlSw16gVMnuohxqJFGoypUYyr4w=
@@ -532,8 +543,12 @@ github.com/royalcat/ctxio v0.0.0-20240602060200-590d464c39be h1:Ui+Imq1Vk26rfpkL
github.com/royalcat/ctxio v0.0.0-20240602060200-590d464c39be/go.mod h1:NFNp3OsEMUPYj5LZUFDiyDt+2E6gR/g8JLd0k+y8XWI=
github.com/royalcat/ctxprogress v0.0.0-20240511091748-6d9b327537c3 h1:1Ow/NUAWFZLghFcdNuyHt5Avb+bEI11qG8ELr9/XmQQ=
github.com/royalcat/ctxprogress v0.0.0-20240511091748-6d9b327537c3/go.mod h1:RcUpbosy/m3bJ3JsVO18MXEbrKRHOHkmYBXigDGekaA=
github.com/royalcat/kv v0.0.0-20240327213417-8cf5696b2389 h1:7XbHzr1TOaxs5Y/i9GtTEOOSTzfQ4ESYqF38DVfPkFY=
github.com/royalcat/kv v0.0.0-20240327213417-8cf5696b2389/go.mod h1:Ff0Z/r1H3ojacpEe8SashMKJx6YCIhWrYtpdV8Y/k3A=
github.com/royalcat/ctxprogress v0.0.0-20240614113930-3cc5bb935bff h1:KlZaOEZYhCzyNYIp0LcE7MNR2Ar0PJS3eJU6A5mMTpk=
github.com/royalcat/ctxprogress v0.0.0-20240614113930-3cc5bb935bff/go.mod h1:RcUpbosy/m3bJ3JsVO18MXEbrKRHOHkmYBXigDGekaA=
github.com/royalcat/kv v0.0.0-20240612224509-6aa0da315950 h1:zHYwRhGWVkGQnjmStcnxTQ95Mtk5DL6w1PmdIn63EpI=
github.com/royalcat/kv v0.0.0-20240612224509-6aa0da315950/go.mod h1:UB/VwpTut8c3IXLJFvYWFxAAZymk9eBuJRMJmpSpwYU=
github.com/royalcat/kv/kvbadger v0.0.0-20240612224509-6aa0da315950 h1:rKG2P4TNLgA4/Jl7LPayifjcw4txVGVSPkpHVhn3wnw=
github.com/royalcat/kv/kvbadger v0.0.0-20240612224509-6aa0da315950/go.mod h1:JxgA1VGwbqu+WqdmjmjT0v6KeWoWlN6Y5lesjmphExM=
github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 h1:18kd+8ZUlt/ARXhljq+14TwAoKa61q6dX8jtwOf6DH8=
github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
@@ -569,8 +584,9 @@ github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hg
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/sosodev/duration v1.3.0 h1:g3E6mto+hFdA2uZXeNDYff8LYeg7v5D4YKP/Ng/NUkE=
github.com/sosodev/duration v1.3.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -690,8 +706,8 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY=
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -871,8 +887,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -957,6 +973,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
lukechampine.com/blake3 v1.1.6 h1:H3cROdztr7RCfoaTpGZFQsrqvweFLrqS73j7L7cmR5c=
lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
modernc.org/cc/v4 v4.21.0 h1:D/gLKtcztomvWbsbvBKo3leKQv+86f+DdqEZBBXhnag=
modernc.org/cc/v4 v4.21.0/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
modernc.org/ccgo/v4 v4.17.3 h1:t2CQci84jnxKw3GGnHvjGKjiNZeZqyQx/023spkk4hU=
14 pkg/cowutils/cowutils.go Normal file
@@ -0,0 +1,14 @@
package cowutils

import (
    "errors"
)

// ErrNotSupported is returned when copy-on-write operations are not
// supported on the current operating system.
var (
    ErrNotSupported = errors.New("cow is not supported on this OS")
    ErrFailed       = errors.New("cow is not supported on this OS or file")
    ErrTooSmall     = errors.New("file is smaller than the filesystem block size")
)
88 pkg/cowutils/dedupe.go Normal file
@@ -0,0 +1,88 @@
package cowutils

import (
    "context"
    "os"

    "golang.org/x/sys/unix"
)

func DedupeFiles(ctx context.Context, paths []string) (deduped uint64, err error) {
    srcF, err := os.Open(paths[0])
    if err != nil {
        return deduped, err
    }
    defer srcF.Close()
    srcStat, err := srcF.Stat()
    if err != nil {
        return deduped, err
    }

    srcFd := int(srcF.Fd())
    srcSize := srcStat.Size()

    fsStat := unix.Statfs_t{}
    err = unix.Fstatfs(srcFd, &fsStat)
    if err != nil {
        return deduped, err
    }

    if int64(fsStat.Bsize) > srcSize { // on btrfs this means the file resides in metadata and cannot be deduplicated
        return deduped, nil
    }

    // round the dedupe length down to a multiple of the filesystem block size
    blockSize := uint64((srcSize / int64(fsStat.Bsize)) * int64(fsStat.Bsize))

    fdr := unix.FileDedupeRange{
        Src_offset: 0,
        Src_length: blockSize,
        Info:       []unix.FileDedupeRangeInfo{},
    }

    for _, dst := range paths[1:] {
        if ctx.Err() != nil {
            return deduped, ctx.Err()
        }

        destF, err := os.OpenFile(dst, os.O_RDWR, os.ModePerm)
        if err != nil {
            return deduped, err
        }

        // defer in a loop is intentional: the files must stay open until the
        // end of the function and, most importantly, this keeps the GC from
        // closing the descriptors while the dedupe is in progress
        defer destF.Close()

        fdr.Info = append(fdr.Info, unix.FileDedupeRangeInfo{
            Dest_fd:     int64(destF.Fd()),
            Dest_offset: 0,
        })
    }

    if len(fdr.Info) == 0 {
        return deduped, nil
    }

    if ctx.Err() != nil {
        return deduped, ctx.Err()
    }

    fdr.Src_offset = 0
    for i := range fdr.Info {
        fdr.Info[i].Dest_offset = 0
    }

    err = unix.IoctlFileDedupeRange(srcFd, &fdr)
    if err != nil {
        return deduped, err
    }

    for i := range fdr.Info {
        deduped += fdr.Info[i].Bytes_deduped

        fdr.Info[i].Status = 0
        fdr.Info[i].Bytes_deduped = 0
    }

    return deduped, nil
}
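For context, a minimal usage sketch of the new helper (hypothetical paths, not part of the commit; FIDEDUPERANGE requires a filesystem with dedupe support, such as btrfs or XFS):

    // hypothetical example: paths[0] is the source, the rest are dedupe candidates
    deduped, err := cowutils.DedupeFiles(ctx, []string{
        "/data/copy-a.bin", // assumed to have identical content
        "/data/copy-b.bin",
    })
    if err != nil {
        return err
    }
    log.Printf("deduplicated %d bytes", deduped)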
54 pkg/cowutils/reflink.go Normal file
@@ -0,0 +1,54 @@
package cowutils

import (
    "context"
    "fmt"
    "io"
    "io/fs"
    "os"
)

// Reflink performs the reflink operation on the passed files, replacing
// dst's contents with src. If fallback is true and reflink fails,
// copy_file_range will be used first, and if that fails too io.Copy will
// be used to copy the data.
func Reflink(ctx context.Context, dst, src *os.File, fallback bool) error {
    err := reflink(dst, src)
    if (err != nil) && fallback {
        // reflink failed but we can fall back; first we need to know the file's size
        var st fs.FileInfo
        st, err = src.Stat()
        if err != nil {
            // couldn't stat the source, this can't be helped
            return fmt.Errorf("failed to stat source: %w", err)
        }
        _, err = copyFileRange(dst, src, 0, 0, st.Size())
        if err != nil {
            // copyFileRange failed too, switch to a plain io copy
            reader := io.NewSectionReader(src, 0, st.Size())
            writer := &sectionWriter{w: dst}
            _ = dst.Truncate(0) // assuming any error in truncate will result in a copy error
            _, err = io.Copy(writer, reader)
        }
    }
    return err
}

// ReflinkRange performs a range reflink operation on the passed files, replacing
// part of dst's contents with data from src. If fallback is true and reflink
// fails, copy_file_range will be used first, and if that fails too io.CopyN
// will be used to copy the data.
func ReflinkRange(ctx context.Context, dst, src *os.File, dstOffset, srcOffset, n int64, fallback bool) error {
    err := reflinkRange(dst, src, dstOffset, srcOffset, n)
    if (err != nil) && fallback {
        _, err = copyFileRange(dst, src, dstOffset, srcOffset, n)
    }

    if (err != nil) && fallback {
        // copy the range manually, seeking both src & dst
        reader := io.NewSectionReader(src, srcOffset, n)
        writer := &sectionWriter{w: dst, base: dstOffset}
        _, err = io.CopyN(writer, reader, n)
    }
    return err
}
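A hedged usage sketch of the fallback-enabled entry point (hypothetical paths, not part of the commit; on filesystems without reflink support the final io.Copy path still yields a full copy):

    // hypothetical example, not part of the commit
    src, err := os.Open("/data/src.bin")
    if err != nil {
        return err
    }
    defer src.Close()
    dst, err := os.Create("/data/dst.bin")
    if err != nil {
        return err
    }
    defer dst.Close()
    // try a CoW clone first; fall back to copy_file_range, then io.Copy
    return cowutils.Reflink(ctx, dst, src, true)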
53 pkg/cowutils/reflink_unix.go Normal file
@@ -0,0 +1,53 @@
//go:build unix

package cowutils

import (
    "errors"
    "os"

    "golang.org/x/sys/unix"
)

// reflink performs the actual reflink action without worrying about fallback
func reflink(dst, src *os.File) error {
    srcFd := int(src.Fd())
    dstFd := int(dst.Fd())

    err := unix.IoctlFileClone(dstFd, srcFd)

    if err != nil && errors.Is(err, unix.ENOTSUP) {
        return ErrNotSupported
    }

    return err
}

func reflinkRange(dst, src *os.File, dstOffset, srcOffset, n int64) error {
    srcFd := int(src.Fd())
    dstFd := int(dst.Fd())

    req := &unix.FileCloneRange{
        Src_fd:      int64(srcFd),
        Src_offset:  uint64(srcOffset),
        Src_length:  uint64(n),
        Dest_offset: uint64(dstOffset),
    }

    err := unix.IoctlFileCloneRange(dstFd, req)
    if err != nil && errors.Is(err, unix.ENOTSUP) {
        return ErrNotSupported
    }

    return err
}

func copyFileRange(dst, src *os.File, dstOffset, srcOffset, n int64) (int64, error) {
    srcFd := int(src.Fd())
    dstFd := int(dst.Fd())

    resN, err := unix.CopyFileRange(srcFd, &srcOffset, dstFd, &dstOffset, int(n), 0)

    return int64(resN), err
}
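One caveat with the plain copyFileRange helper above: copy_file_range(2) may copy fewer bytes than requested, so a caller that needs the whole range has to loop. A sketch of such a wrapper (hypothetical helper, not part of the commit):

    // copyFileRangeAll is a hypothetical helper that retries until n bytes are
    // copied or an error occurs; unix.CopyFileRange advances srcOffset and
    // dstOffset itself through the pointers.
    func copyFileRangeAll(dst, src *os.File, dstOffset, srcOffset, n int64) (int64, error) {
        var total int64
        for total < n {
            copied, err := unix.CopyFileRange(int(src.Fd()), &srcOffset, int(dst.Fd()), &dstOffset, int(n-total), 0)
            if err != nil {
                return total, err
            }
            if copied == 0 { // reached EOF on src
                break
            }
            total += int64(copied)
        }
        return total, nil
    }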
39 pkg/cowutils/writer.go Normal file
@@ -0,0 +1,39 @@
package cowutils

import (
    "errors"
    "io"
)

// sectionWriter is a helper used when we need to fall back to copying data manually
type sectionWriter struct {
    w    io.WriterAt // target file
    base int64       // base position in file
    off  int64       // current relative offset
}

// Write writes & updates the offset
func (s *sectionWriter) Write(p []byte) (int, error) {
    n, err := s.w.WriteAt(p, s.base+s.off)
    s.off += int64(n)
    return n, err
}

func (s *sectionWriter) Seek(offset int64, whence int) (int64, error) {
    switch whence {
    case io.SeekStart:
        // nothing needed
    case io.SeekCurrent:
        offset += s.off
    case io.SeekEnd:
        // we don't support io.SeekEnd
        fallthrough
    default:
        return s.off, errors.New("Seek: invalid whence")
    }
    if offset < 0 {
        return s.off, errors.New("Seek: invalid offset")
    }
    s.off = offset
    return offset, nil
}
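sectionWriter adapts an io.WriterAt into a plain io.Writer anchored at a base offset, which is what lets the reflink fallback paths drive it with io.Copy and io.CopyN. An in-package sketch (hypothetical, for illustration only):

    // copy all of src into dst starting at byte offset 4096
    w := &sectionWriter{w: dst, base: 4096}
    if _, err := io.Copy(w, src); err != nil {
        return err
    }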
@@ -14,6 +14,7 @@ import (
    "github.com/willscott/go-nfs-client/nfs/rpc"
    "github.com/willscott/go-nfs-client/nfs/xdr"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/codes"
)

var (
@@ -144,6 +145,8 @@ func (c *conn) handle(ctx context.Context, w *response) error {
            return err
        }
    }

    span.SetStatus(codes.Ok, "")
    return nil
}
24 pkg/kvsingle/single.go Normal file
@@ -0,0 +1,24 @@
package kvsingle

import (
    "context"

    "github.com/royalcat/kv"
)

type Value[K, V any] struct {
    Key K
    db  kv.Store[K, V]
}

func New[K, V any](db kv.Store[K, V], key K) *Value[K, V] {
    return &Value[K, V]{Key: key, db: db}
}

func (s *Value[K, V]) Get(ctx context.Context) (V, bool, error) {
    return s.db.Get(ctx, s.Key)
}

func (s *Value[K, V]) Set(ctx context.Context, value V) error {
    return s.db.Set(ctx, s.Key, value)
}
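kvsingle pins a single key of a kv.Store behind a typed value. A hedged usage sketch (assuming `store` is an existing kv.Store[string, int64]; the key name is hypothetical):

    // hypothetical example, not part of the commit
    lastRun := kvsingle.New(store, "last-run-unix")

    if err := lastRun.Set(ctx, time.Now().Unix()); err != nil {
        return err
    }

    v, found, err := lastRun.Get(ctx)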
@@ -53,7 +53,7 @@ func (m *traceSrtore[K, V]) Range(ctx context.Context, iter kv.Iter[K, V]) error
    defer span.End()

    count := 0
    iterCount := func(k K, v V) bool {
    iterCount := func(k K, v V) error {
        count++
        return iter(k, v)
    }
@@ -69,7 +69,7 @@ func (m *traceSrtore[K, V]) RangeWithPrefix(ctx context.Context, k K, iter kv.It
    defer span.End()

    count := 0
    iterCount := func(k K, v V) bool {
    iterCount := func(k K, v V) error {
        count++
        return iter(k, v)
    }
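Judging by the old signature, the bumped royalcat/kv module changes kv.Iter callbacks from returning a bool (continue flag) to returning an error, so iteration can abort with a reason; that is why the tracing wrappers above now count inside an error-returning closure. A sketch of the new shape (assuming `store` is a kv.Store[string, int64]; hypothetical, not part of the commit):

    err := store.Range(ctx, func(k string, v int64) error {
        if v < 0 {
            return fmt.Errorf("unexpected negative value at key %q", k) // aborts the iteration
        }
        return nil // continue iterating
    })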
@@ -91,6 +91,12 @@ func (l *Logger) With(attrs ...slog.Attr) *Logger {
    }
}

// Slog returns a new slog logger with the same attributes as the original logger.
// TODO: currently not logging the function name
func (l *Logger) Slog() *slog.Logger {
    return slog.New(l.handler)
}

const endpointKey = "endpoint"

func (l *Logger) WithEndpoint(name string) *Logger {
@@ -126,9 +132,9 @@ func errValue(err error) slog.Value {
    return slog.GroupValue(groupValues...)
}

func Component(name string) *Logger {
func Component(name ...string) *Logger {
    return &Logger{
        handler:   handler,
        component: []string{name},
        component: name,
    }
}
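Since Component is now variadic, nested component names can be set in one call. A hedged sketch (assuming this is the project's rlog package; names hypothetical):

    // before the change: rlog.Component("nfs")
    // after the change, nested components can be expressed directly:
    log := rlog.Component("nfs", "handler")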
38 pkg/slicesutils/intersections.go Normal file
@@ -0,0 +1,38 @@
package slicesutils

// Intersection returns the values present in every one of the given slices.
// It assumes each input slice contains no duplicate values.
func Intersection[T comparable](slices ...[]T) []T {
    counts := map[T]int{}
    result := []T{}

    for _, slice := range slices {
        for _, val := range slice {
            counts[val]++
        }
    }

    for val, count := range counts {
        if count == len(slices) {
            result = append(result, val)
        }
    }

    return result
}

// IntersectionFunc returns the elements of s1 that match an element of s2
// according to cmp.
func IntersectionFunc[T any](s1 []T, s2 []T, cmp func(T, T) bool) []T {
    set := make([]T, 0)

    for _, a := range s1 {
        for _, b := range s2 {
            if cmp(a, b) {
                set = append(set, a)
            }
        }
    }

    return set
}
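A quick sketch of both helpers (hypothetical values, not part of the commit; strings.EqualFold is from the standard library):

    common := slicesutils.Intersection(
        []int{1, 2, 3},
        []int{2, 3, 4},
        []int{3, 2, 5},
    ) // -> {2, 3}, in map-iteration (i.e. unspecified) order

    caseInsensitive := slicesutils.IntersectionFunc(
        []string{"Alpha", "beta"},
        []string{"ALPHA", "gamma"},
        strings.EqualFold,
    ) // -> {"Alpha"}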
@@ -1,12 +1,10 @@
package ytdlp

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "os/exec"
    "path"
    "strings"

    "github.com/royalcat/ctxprogress"
@@ -63,29 +61,27 @@ const rawProgressTemplate = `download:

var progressTemplate = strings.NewReplacer("\n", "", "\t", "", " ", "").Replace(rawProgressTemplate)

func (c *Client) Download(ctx context.Context, url string, dir string) error {
func (c *Client) Download(ctx context.Context, url string, w io.Writer) error {
    args := []string{
        "--no-simulate", "-j",
        "--progress", "--newline", "--progress-template", progressTemplate,
        "-o", path.Join(dir, "%(title)s.%(ext)s"),
        "-o", "-",
        url,
    }

    group, ctx := errgroup.WithContext(ctx)

    w, lines, err := lineReader(group)
    stderr, lines, err := lineReader(group)
    if err != nil {
        return err
    }
    var stderr bytes.Buffer
    cmd := exec.CommandContext(ctx, c.binary, args...)

    cmd.Stdout = w
    cmd.Stderr = &stderr
    cmd.Stderr = stderr

    group.Go(func() error {
        err := cmd.Run()
        defer w.Close()
        stderr.Close()
        if err != nil {
            return err
        }
@@ -105,361 +101,5 @@ func (c *Client) Download(ctx context.Context, url string, dir string) error {
        }
    }

    err = group.Wait()
    if err != nil {
        if _, ok := err.(*exec.ExitError); ok {
            return fmt.Errorf("yt-dlp error: %s", stderr.Bytes())
        }
        return err
    }

    return nil
    return group.Wait()
}

// // Used to deser the yt-dlp -J output
// type DownloadInfo struct {
// URL string `json:"url"`
// Title string `json:"title"`
// Thumbnail string `json:"thumbnail"`
// Resolution string `json:"resolution"`
// Size int32 `json:"filesize_approx"`
// VCodec string `json:"vcodec"`
// ACodec string `json:"acodec"`
// Extension string `json:"ext"`
// OriginalURL string `json:"original_url"`
// CreatedAt time.Time `json:"created_at"`
// }

// // Process descriptor
// type Process struct {
// Id string
// Url string
// Params []string
// OutputDir string
// Info DownloadInfo
// Progress DownloadProgress
// proc *os.Process
// Logger *slog.Logger
// }

// func NewProcess(dir string) (*Process, error) {

// }

// // Starts spawns/forks a new yt-dlp process and parse its stdout.
// // The process is spawned to outputting a custom progress text that
// // Resembles a JSON Object in order to Unmarshal it later.
// // This approach is anyhow not perfect: quotes are not escaped properly.
// // Each process is not identified by its PID but by a UUIDv4
// func (p *Process) Start() {
// // escape bash variable escaping and command piping, you'll never know
// // what they might come with...
// p.Params = slices.DeleteFunc(p.Params, func(e string) bool {
// match, _ := regexp.MatchString(`(\$\{)|(\&\&)`, e)
// return match
// })

// p.Params = slices.DeleteFunc(p.Params, func(e string) bool {
// return e == ""
// })

// if p.Output.Path != "" {
// out.Path = p.Output.Path
// }

// if p.Output.Filename != "" {
// out.Filename = p.Output.Filename
// }

// buildFilename(&p.Output)

// go p.GetFileName(&out)

// params := []string{
// strings.Split(p.Url, "?list")[0], //no playlist
// "--newline",
// "--no-colors",
// "--no-playlist",
// "--progress-template",
// strings.NewReplacer("\n", "", "\t", "", " ", "").Replace(template),
// }

// // if user asked to manually override the output path...
// if !(slices.Contains(params, "-P") || slices.Contains(params, "--paths")) {
// params = append(params, "-o")
// params = append(params, fmt.Sprintf("%s/%s", out.Path, out.Filename))
// }

// params = append(params, p.Params...)

// // ----------------- main block ----------------- //
// cmd := exec.Command(config.Instance().DownloaderPath, params...)
// cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

// r, err := cmd.StdoutPipe()
// if err != nil {
// p.Logger.Error(
// "failed to connect to stdout",
// slog.String("err", err.Error()),
// )
// panic(err)
// }

// err = cmd.Start()
// if err != nil {
// p.Logger.Error(
// "failed to start yt-dlp process",
// slog.String("err", err.Error()),
// )
// panic(err)
// }

// p.proc = cmd.Process

// // --------------- progress block --------------- //
// var (
// sourceChan = make(chan []byte)
// doneChan = make(chan struct{})
// )

// // spawn a goroutine that does the dirty job of parsing the stdout
// // filling the channel with as many stdout line as yt-dlp produces (producer)
// go func() {
// scan := bufio.NewScanner(r)

// defer func() {
// r.Close()
// p.Complete()
// doneChan <- struct{}{}
// close(sourceChan)
// close(doneChan)
// }()

// for scan.Scan() {
// sourceChan <- scan.Bytes()
// }
// }()

// // Slows down the unmarshal operation to every 500ms
// go func() {
// rx.Sample(time.Millisecond*500, sourceChan, doneChan, func(event []byte) {
// var progress ProgressTemplate

// if err := json.Unmarshal(event, &progress); err != nil {
// return
// }

// p.Progress = DownloadProgress{
// Status: StatusDownloading,
// Percentage: progress.Percentage,
// Speed: progress.Speed,
// ETA: progress.Eta,
// }

// p.Logger.Info("progress",
// slog.String("id", p.getShortId()),
// slog.String("url", p.Url),
// slog.String("percentage", progress.Percentage),
// )
// })
// }()

// // ------------- end progress block ------------- //
// cmd.Wait()
// }

// // Keep process in the memoryDB but marks it as complete
// // Convention: All completed processes has progress -1
// // and speed 0 bps.
// func (p *Process) Complete() {
// p.Progress = DownloadProgress{
// Status: StatusCompleted,
// Percentage: "-1",
// Speed: 0,
// ETA: 0,
// }

// p.Logger.Info("finished",
// slog.String("id", p.getShortId()),
// slog.String("url", p.Url),
// )
// }

// // Kill a process and remove it from the memory
// func (p *Process) Kill() error {
// // yt-dlp uses multiple child process the parent process
// // has been spawned with setPgid = true. To properly kill
// // all subprocesses a SIGTERM need to be sent to the correct
// // process group
// if p.proc != nil {
// pgid, err := syscall.Getpgid(p.proc.Pid)
// if err != nil {
// return err
// }
// err = syscall.Kill(-pgid, syscall.SIGTERM)

// p.Logger.Info("killed process", slog.String("id", p.Id))
// return err
// }

// return nil
// }

// // Returns the available format for this URL
// func (p *Process) GetFormatsSync() (DownloadFormats, error) {
// cmd := exec.Command(config.Instance().DownloaderPath, p.Url, "-J")

// stdout, err := cmd.Output()
// if err != nil {
// p.Logger.Error(
// "failed to retrieve metadata",
// slog.String("err", err.Error()),
// )
// return DownloadFormats{}, err
// }

// info := DownloadFormats{URL: p.Url}
// best := Format{}

// var (
// wg sync.WaitGroup
// decodingError error
// )

// wg.Add(2)

// log.Println(
// cli.BgRed, "Metadata", cli.Reset,
// cli.BgBlue, "Formats", cli.Reset,
// p.Url,
// )

// p.Logger.Info(
// "retrieving metadata",
// slog.String("caller", "getFormats"),
// slog.String("url", p.Url),
// )

// go func() {
// decodingError = json.Unmarshal(stdout, &info)
// wg.Done()
// }()

// go func() {
// decodingError = json.Unmarshal(stdout, &best)
// wg.Done()
// }()

// wg.Wait()

// if decodingError != nil {
// return DownloadFormats{}, err
// }

// info.Best = best

// return info, nil
// }

// func (p *Process) GetFileName(o *DownloadOutput) error {
// cmd := exec.Command(
// config.Instance().DownloaderPath,
// "--print", "filename",
// "-o", fmt.Sprintf("%s/%s", o.Path, o.Filename),
// p.Url,
// )
// cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

// out, err := cmd.Output()
// if err != nil {
// return err
// }

// p.Output.SavedFilePath = strings.Trim(string(out), "\n")
// return nil
// }

// func (p *Process) SetPending() {
// // Since video's title isn't available yet, fill in with the URL.
// p.Info = DownloadInfo{
// URL: p.Url,
// Title: p.Url,
// CreatedAt: time.Now(),
// }
// p.Progress.Status = StatusPending
// }

// func (p *Process) SetMetadata() error {
// cmd := exec.Command(config.Instance().DownloaderPath, p.Url, "-J")
// cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

// stdout, err := cmd.StdoutPipe()
// if err != nil {
// p.Logger.Error("failed to connect to stdout",
// slog.String("id", p.getShortId()),
// slog.String("url", p.Url),
// slog.String("err", err.Error()),
// )
// return err
// }

// stderr, err := cmd.StderrPipe()
// if err != nil {
// p.Logger.Error("failed to connect to stderr",
// slog.String("id", p.getShortId()),
// slog.String("url", p.Url),
// slog.String("err", err.Error()),
// )
// return err
// }

// info := DownloadInfo{
// URL: p.Url,
// CreatedAt: time.Now(),
// }

// if err := cmd.Start(); err != nil {
// return err
// }

// var bufferedStderr bytes.Buffer

// go func() {
// io.Copy(&bufferedStderr, stderr)
// }()

// p.Logger.Info("retrieving metadata",
// slog.String("id", p.getShortId()),
// slog.String("url", p.Url),
// )

// if err := json.NewDecoder(stdout).Decode(&info); err != nil {
// return err
// }

// p.Info = info
// p.Progress.Status = StatusPending

// if err := cmd.Wait(); err != nil {
// return errors.New(bufferedStderr.String())
// }

// return nil
// }

// func (p *Process) getShortId() string {
// return strings.Split(p.Id, "-")[0]
// }

// func buildFilename(o *DownloadOutput) {
// if o.Filename != "" && strings.Contains(o.Filename, ".%(ext)s") {
// o.Filename += ".%(ext)s"
// }

// o.Filename = strings.Replace(
// o.Filename,
// ".%(ext)s.%(ext)s",
// ".%(ext)s",
// 1,
// )
// }
@@ -3,6 +3,7 @@ package ytdlp_test

import (
    "context"
    "fmt"
    "io"
    "testing"

    "git.kmsign.ru/royalcat/tstor/pkg/ytdlp"
@@ -10,7 +11,7 @@ import (
    "github.com/stretchr/testify/require"
)

func TestYtDlp(t *testing.T) {
func TestDownload(t *testing.T) {
    require := require.New(t)

    ctx := context.Background()
@@ -21,6 +22,6 @@ func TestYtDlp(t *testing.T) {
        cur, total := p.Progress()
        fmt.Printf("%d/%d\n", cur, total)
    })
    err = c.Download(ctx, "https://www.youtube.com/watch?v=dQw4w9WgXcQ", "rickroll3")
    err = c.Download(ctx, "https://www.youtube.com/watch?v=dQw4w9WgXcQ", io.Discard)
    require.NoError(err)
}
33 pkg/ytdlp/info.go Normal file
@@ -0,0 +1,33 @@
package ytdlp

import (
    "bytes"
    "context"
    "encoding/json"
    "os/exec"
)

func (c *Client) Info(ctx context.Context, url string) (*Info, error) {
    args := []string{
        "-q", "-J", url,
    }

    cmd := exec.CommandContext(ctx, c.binary, args...)
    var stdout bytes.Buffer
    var stderr bytes.Buffer
    cmd.Stdout = &stdout
    cmd.Stderr = &stderr

    err := cmd.Run()
    if err != nil {
        return nil, err
    }

    var info Info
    err = json.Unmarshal(stdout.Bytes(), &info)
    if err != nil {
        return nil, err
    }

    return &info, nil
}
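A hedged sketch of calling the new metadata helper (assuming `c` is an initialized *ytdlp.Client, as in the package's tests; hypothetical, not part of the commit):

    info, err := c.Info(ctx, "https://www.youtube.com/watch?v=dQw4w9WgXcQ")
    if err != nil {
        return err
    }
    fmt.Println(info.Title, info.Channel, info.WebpageURL)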
|
@ -1,31 +1,223 @@
|
|||
package ytdlp
|
||||
|
||||
type Info struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Availability string `json:"availability"`
|
||||
ChannelFollowerCount *int64 `json:"channel_follower_count"`
|
||||
Description string `json:"description"`
|
||||
Tags []string `json:"tags"`
|
||||
Thumbnails []Thumbnail `json:"thumbnails"`
|
||||
ModifiedDate *string `json:"modified_date,omitempty"`
|
||||
ViewCount int64 `json:"view_count"`
|
||||
PlaylistCount *int64 `json:"playlist_count,omitempty"`
|
||||
Channel string `json:"channel"`
|
||||
ChannelID string `json:"channel_id"`
|
||||
UploaderID string `json:"uploader_id"`
|
||||
Uploader string `json:"uploader"`
|
||||
ChannelURL string `json:"channel_url"`
|
||||
UploaderURL string `json:"uploader_url"`
|
||||
Type string `json:"_type"`
|
||||
Entries []Entry `json:"entries,omitempty"`
|
||||
ExtractorKey string `json:"extractor_key"`
|
||||
Extractor string `json:"extractor"`
|
||||
WebpageURL string `json:"webpage_url"`
|
||||
OriginalURL string `json:"original_url"`
|
||||
WebpageURLBasename string `json:"webpage_url_basename"`
|
||||
WebpageURLDomain string `json:"webpage_url_domain"`
|
||||
ReleaseYear interface{} `json:"release_year"`
|
||||
Epoch int64 `json:"epoch"`
|
||||
FilesToMove *FilesToMove `json:"__files_to_move,omitempty"`
|
||||
Version Version `json:"_version"`
|
||||
Formats []Format `json:"formats,omitempty"`
|
||||
Thumbnail *string `json:"thumbnail,omitempty"`
|
||||
Duration *int64 `json:"duration,omitempty"`
|
||||
AverageRating interface{} `json:"average_rating"`
|
||||
AgeLimit *int64 `json:"age_limit,omitempty"`
|
||||
Categories []string `json:"categories,omitempty"`
|
||||
PlayableInEmbed *bool `json:"playable_in_embed,omitempty"`
|
||||
LiveStatus *string `json:"live_status,omitempty"`
|
||||
ReleaseTimestamp interface{} `json:"release_timestamp"`
|
||||
FormatSortFields []string `json:"_format_sort_fields,omitempty"`
|
||||
AutomaticCaptions map[string][]AutomaticCaption `json:"automatic_captions,omitempty"`
|
||||
Subtitles *FilesToMove `json:"subtitles,omitempty"`
|
||||
CommentCount *int64 `json:"comment_count,omitempty"`
|
||||
Chapters interface{} `json:"chapters"`
|
||||
Heatmap []Heatmap `json:"heatmap,omitempty"`
|
||||
LikeCount *int64 `json:"like_count,omitempty"`
|
||||
ChannelIsVerified *bool `json:"channel_is_verified,omitempty"`
|
||||
UploadDate *string `json:"upload_date,omitempty"`
|
||||
Timestamp *int64 `json:"timestamp,omitempty"`
|
||||
Playlist interface{} `json:"playlist"`
|
||||
PlaylistIndex interface{} `json:"playlist_index"`
|
||||
DisplayID *string `json:"display_id,omitempty"`
|
||||
Fulltitle *string `json:"fulltitle,omitempty"`
|
||||
DurationString *string `json:"duration_string,omitempty"`
|
||||
IsLive *bool `json:"is_live,omitempty"`
|
||||
WasLive *bool `json:"was_live,omitempty"`
|
||||
RequestedSubtitles interface{} `json:"requested_subtitles"`
|
||||
HasDRM interface{} `json:"_has_drm"`
|
||||
RequestedDownloads []RequestedDownload `json:"requested_downloads,omitempty"`
|
||||
RequestedFormats []Format `json:"requested_formats,omitempty"`
|
||||
Format *string `json:"format,omitempty"`
|
||||
FormatID *string `json:"format_id,omitempty"`
|
||||
EXT *MediaEXT `json:"ext,omitempty"`
|
||||
Protocol *string `json:"protocol,omitempty"`
|
||||
Language *Language `json:"language,omitempty"`
|
||||
FormatNote *string `json:"format_note,omitempty"`
|
||||
FilesizeApprox *int64 `json:"filesize_approx,omitempty"`
|
||||
Tbr *float64 `json:"tbr,omitempty"`
|
||||
Width *int64 `json:"width,omitempty"`
|
||||
Height *int64 `json:"height,omitempty"`
|
||||
Resolution *Resolution `json:"resolution,omitempty"`
|
||||
FPS *int64 `json:"fps,omitempty"`
|
||||
DynamicRange *DynamicRange `json:"dynamic_range,omitempty"`
|
||||
Vcodec *string `json:"vcodec,omitempty"`
|
||||
Vbr *float64 `json:"vbr,omitempty"`
|
||||
StretchedRatio interface{} `json:"stretched_ratio"`
|
||||
AspectRatio *float64 `json:"aspect_ratio,omitempty"`
|
||||
Acodec *Acodec `json:"acodec,omitempty"`
|
||||
ABR *float64 `json:"abr,omitempty"`
|
||||
ASR *int64 `json:"asr,omitempty"`
|
||||
AudioChannels *int64 `json:"audio_channels,omitempty"`
|
||||
}
|
||||
|
||||
type AutomaticCaption struct {
|
||||
EXT AutomaticCaptionEXT `json:"ext"`
|
||||
URL string `json:"url"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type Entry struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Formats []Format `json:"formats"`
|
||||
Thumbnails []Thumbnail `json:"thumbnails"`
|
||||
Thumbnail string `json:"thumbnail"`
|
||||
Description string `json:"description"`
|
||||
ChannelID string `json:"channel_id"`
|
||||
ChannelURL string `json:"channel_url"`
|
||||
Duration int64 `json:"duration"`
|
||||
ViewCount int64 `json:"view_count"`
|
||||
AverageRating interface{} `json:"average_rating"`
|
||||
AgeLimit int64 `json:"age_limit"`
|
||||
WebpageURL string `json:"webpage_url"`
|
||||
Categories []string `json:"categories"`
|
||||
Tags []string `json:"tags"`
|
||||
	PlayableInEmbed      bool                          `json:"playable_in_embed"`
	LiveStatus           string                        `json:"live_status"`
	ReleaseTimestamp     interface{}                   `json:"release_timestamp"`
	FormatSortFields     []string                      `json:"_format_sort_fields"`
	AutomaticCaptions    map[string][]AutomaticCaption `json:"automatic_captions"`
	Subtitles            FilesToMove                   `json:"subtitles"`
	CommentCount         int64                         `json:"comment_count"`
	Chapters             interface{}                   `json:"chapters"`
	Heatmap              interface{}                   `json:"heatmap"`
	LikeCount            int64                         `json:"like_count"`
	Channel              string                        `json:"channel"`
	ChannelFollowerCount int64                         `json:"channel_follower_count"`
	Uploader             string                        `json:"uploader"`
	UploaderID           string                        `json:"uploader_id"`
	UploaderURL          string                        `json:"uploader_url"`
	UploadDate           string                        `json:"upload_date"`
	Timestamp            int64                         `json:"timestamp"`
	Availability         string                        `json:"availability"`
	OriginalURL          string                        `json:"original_url"`
	WebpageURLBasename   string                        `json:"webpage_url_basename"`
	WebpageURLDomain     string                        `json:"webpage_url_domain"`
	Extractor            string                        `json:"extractor"`
	ExtractorKey         string                        `json:"extractor_key"`
	PlaylistCount        int64                         `json:"playlist_count"`
	Playlist             string                        `json:"playlist"`
	PlaylistID           string                        `json:"playlist_id"`
	PlaylistTitle        string                        `json:"playlist_title"`
	PlaylistUploader     string                        `json:"playlist_uploader"`
	PlaylistUploaderID   string                        `json:"playlist_uploader_id"`
	NEntries             int64                         `json:"n_entries"`
	PlaylistIndex        int64                         `json:"playlist_index"`
	LastPlaylistIndex    int64                         `json:"__last_playlist_index"`
	PlaylistAutonumber   int64                         `json:"playlist_autonumber"`
	DisplayID            string                        `json:"display_id"`
	Fulltitle            string                        `json:"fulltitle"`
	DurationString       string                        `json:"duration_string"`
	ReleaseYear          interface{}                   `json:"release_year"`
	IsLive               bool                          `json:"is_live"`
	WasLive              bool                          `json:"was_live"`
	RequestedSubtitles   interface{}                   `json:"requested_subtitles"`
	HasDRM               interface{}                   `json:"_has_drm"`
	Epoch                int64                         `json:"epoch"`
	RequestedDownloads   []RequestedDownload           `json:"requested_downloads"`
	RequestedFormats     []Format                      `json:"requested_formats"`
	Format               string                        `json:"format"`
	FormatID             string                        `json:"format_id"`
	EXT                  string                        `json:"ext"`
	Protocol             string                        `json:"protocol"`
	Language             *Language                     `json:"language"`
	FormatNote           string                        `json:"format_note"`
	FilesizeApprox       int64                         `json:"filesize_approx"`
	Tbr                  float64                       `json:"tbr"`
	Width                int64                         `json:"width"`
	Height               int64                         `json:"height"`
	Resolution           Resolution                    `json:"resolution"`
	FPS                  int64                         `json:"fps"`
	DynamicRange         DynamicRange                  `json:"dynamic_range"`
	Vcodec               string                        `json:"vcodec"`
	Vbr                  float64                       `json:"vbr"`
	StretchedRatio       interface{}                   `json:"stretched_ratio"`
	AspectRatio          float64                       `json:"aspect_ratio"`
	Acodec               Acodec                        `json:"acodec"`
	ABR                  float64                       `json:"abr"`
	ASR                  int64                         `json:"asr"`
	AudioChannels        int64                         `json:"audio_channels"`
}

type Format struct {
-	URL            string       `json:"url"`
-	FormatID       string       `json:"format_id"`
-	Height         int64        `json:"height"`
-	EXT            EXT          `json:"ext"`
-	Protocol       Protocol     `json:"protocol"`
-	Resolution     string       `json:"resolution"`
-	DynamicRange   DynamicRange `json:"dynamic_range"`
-	AspectRatio    *float64     `json:"aspect_ratio"`
-	FilesizeApprox any          `json:"filesize_approx"`
-	HTTPHeaders    HTTPHeaders  `json:"http_headers"`
-	VideoEXT       EXT          `json:"video_ext"`
-	AudioEXT       AudioEXT     `json:"audio_ext"`
-	Vbr            any          `json:"vbr"`
-	ABR            any          `json:"abr"`
-	Tbr            *float64     `json:"tbr"`
-	Format         string       `json:"format"`
-	FormatIndex    any          `json:"format_index"`
-	ManifestURL    *string      `json:"manifest_url,omitempty"`
-	FPS            *float64     `json:"fps,omitempty"`
-	Preference     any          `json:"preference"`
-	Quality        any          `json:"quality"`
-	HasDRM         *bool        `json:"has_drm,omitempty"`
-	Width          *int64       `json:"width,omitempty"`
-	Vcodec         *string      `json:"vcodec,omitempty"`
-	Acodec         *string      `json:"acodec,omitempty"`
+	FormatID           string             `json:"format_id"`
+	FormatNote         *FormatNote        `json:"format_note,omitempty"`
+	EXT                MediaEXT           `json:"ext"`
+	Protocol           Protocol           `json:"protocol"`
+	Acodec             *Acodec            `json:"acodec,omitempty"`
+	Vcodec             string             `json:"vcodec"`
+	URL                string             `json:"url"`
+	Width              *int64             `json:"width"`
+	Height             *int64             `json:"height"`
+	FPS                *float64           `json:"fps"`
+	Rows               *int64             `json:"rows,omitempty"`
+	Columns            *int64             `json:"columns,omitempty"`
+	Fragments          []Fragment         `json:"fragments,omitempty"`
+	Resolution         Resolution         `json:"resolution"`
+	AspectRatio        *float64           `json:"aspect_ratio"`
+	FilesizeApprox     *int64             `json:"filesize_approx"`
+	HTTPHeaders        HTTPHeaders        `json:"http_headers"`
+	AudioEXT           MediaEXT           `json:"audio_ext"`
+	VideoEXT           MediaEXT           `json:"video_ext"`
+	Vbr                *float64           `json:"vbr"`
+	ABR                *float64           `json:"abr"`
+	Tbr                *float64           `json:"tbr"`
+	Format             string             `json:"format"`
+	FormatIndex        interface{}        `json:"format_index"`
+	ManifestURL        *string            `json:"manifest_url,omitempty"`
+	Language           *Language          `json:"language"`
+	Preference         interface{}        `json:"preference"`
+	Quality            *int64             `json:"quality,omitempty"`
+	HasDRM             *bool              `json:"has_drm,omitempty"`
+	SourcePreference   *int64             `json:"source_preference,omitempty"`
+	ASR                *int64             `json:"asr"`
+	Filesize           *int64             `json:"filesize"`
+	AudioChannels      *int64             `json:"audio_channels"`
+	LanguagePreference *int64             `json:"language_preference,omitempty"`
+	DynamicRange       *DynamicRange      `json:"dynamic_range"`
+	Container          *Container         `json:"container,omitempty"`
+	DownloaderOptions  *DownloaderOptions `json:"downloader_options,omitempty"`
}

type DownloaderOptions struct {
	HTTPChunkSize int64 `json:"http_chunk_size"`
}

type Fragment struct {
	URL      string  `json:"url"`
	Duration float64 `json:"duration"`
}

type HTTPHeaders struct {
@@ -35,25 +227,76 @@ type HTTPHeaders struct {
	SECFetchMode SECFetchMode `json:"Sec-Fetch-Mode"`
}

-type Subtitles struct {
+type RequestedDownload struct {
	RequestedFormats     []Format     `json:"requested_formats"`
	Format               string       `json:"format"`
	FormatID             string       `json:"format_id"`
	EXT                  string       `json:"ext"`
	Protocol             string       `json:"protocol"`
	FormatNote           string       `json:"format_note"`
	FilesizeApprox       int64        `json:"filesize_approx"`
	Tbr                  float64      `json:"tbr"`
	Width                int64        `json:"width"`
	Height               int64        `json:"height"`
	Resolution           Resolution   `json:"resolution"`
	FPS                  int64        `json:"fps"`
	DynamicRange         DynamicRange `json:"dynamic_range"`
	Vcodec               string       `json:"vcodec"`
	Vbr                  float64      `json:"vbr"`
	AspectRatio          float64      `json:"aspect_ratio"`
	Acodec               Acodec       `json:"acodec"`
	ABR                  float64      `json:"abr"`
	ASR                  int64        `json:"asr"`
	AudioChannels        int64        `json:"audio_channels"`
	FilenameOld          string       `json:"_filename"`
	Filename             string       `json:"filename"`
	WriteDownloadArchive bool         `json:"__write_download_archive"`
	Language             *Language    `json:"language,omitempty"`
}

type FilesToMove struct {
}

type Thumbnail struct {
-	URL string `json:"url"`
-	ID  string `json:"id"`
+	URL        string  `json:"url"`
+	Preference *int64  `json:"preference,omitempty"`
+	ID         string  `json:"id"`
+	Height     *int64  `json:"height,omitempty"`
+	Width      *int64  `json:"width,omitempty"`
+	Resolution *string `json:"resolution,omitempty"`
}

type Heatmap struct {
	StartTime float64 `json:"start_time"`
	EndTime   float64 `json:"end_time"`
	Value     float64 `json:"value"`
}

type Version struct {
-	Version        string `json:"version"`
-	CurrentGitHead string `json:"current_git_head"`
-	ReleaseGitHead string `json:"release_git_head"`
-	Repository     string `json:"repository"`
+	Version        string      `json:"version"`
+	CurrentGitHead interface{} `json:"current_git_head"`
+	ReleaseGitHead string      `json:"release_git_head"`
+	Repository     string      `json:"repository"`
}

-type AudioEXT string
+type Acodec string

const (
-	None       AudioEXT = "none"
+	AcodecNone Acodec = "none"
+	Mp4A402    Acodec = "mp4a.40.2"
+	Mp4A405    Acodec = "mp4a.40.5"
+	Opus       Acodec = "opus"
)

type AutomaticCaptionEXT string

const (
	Json3 AutomaticCaptionEXT = "json3"
	Srv1  AutomaticCaptionEXT = "srv1"
	Srv2  AutomaticCaptionEXT = "srv2"
	Srv3  AutomaticCaptionEXT = "srv3"
	Ttml  AutomaticCaptionEXT = "ttml"
	Vtt   AutomaticCaptionEXT = "vtt"
)

type DynamicRange string

@@ -63,10 +306,38 @@ const (
	HDR DynamicRange = "HDR"
)

-type EXT string
+type MediaEXT string

const (
-	Mp4      EXT = "mp4"
+	EXTNone  MediaEXT = "none"
+	EXTMhtml MediaEXT = "mhtml"
+	M4A      MediaEXT = "m4a"
+	Mp4      MediaEXT = "mp4"
+	Webm     MediaEXT = "webm"
)

type Container string

const (
	M4ADash  Container = "m4a_dash"
	Mp4Dash  Container = "mp4_dash"
	WebmDash Container = "webm_dash"
)

type FormatNote string

const (
	Default    FormatNote = "Default"
	Low        FormatNote = "low"
	Medium     FormatNote = "medium"
	Premium    FormatNote = "Premium"
	Storyboard FormatNote = "storyboard"
	The1080P   FormatNote = "1080p"
	The144P    FormatNote = "144p"
	The240P    FormatNote = "240p"
	The360P    FormatNote = "360p"
	The480P    FormatNote = "480p"
	The720P    FormatNote = "720p"
)

type Accept string

@@ -87,9 +358,32 @@ const (
	Navigate SECFetchMode = "navigate"
)

type Language string

const (
	En Language = "en"
)

type Protocol string

const (
-	HTTPS      Protocol = "https"
-	M3U8Native Protocol = "m3u8_native"
+	HTTPS         Protocol = "https"
+	M3U8Native    Protocol = "m3u8_native"
+	ProtocolMhtml Protocol = "mhtml"
)

type Resolution string

const (
	AudioOnly    Resolution = "audio only"
	The1280X720  Resolution = "1280x720"
	The160X90    Resolution = "160x90"
	The1920X1080 Resolution = "1920x1080"
	The256X144   Resolution = "256x144"
	The320X180   Resolution = "320x180"
	The426X240   Resolution = "426x240"
	The48X27     Resolution = "48x27"
	The640X360   Resolution = "640x360"
	The80X45     Resolution = "80x45"
	The854X480   Resolution = "854x480"
)
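These types mirror yt-dlp's single-video JSON dump. A minimal sketch of how they are meant to be filled, assuming yt-dlp is on PATH and that Entry is the struct whose fields are defined above (this is not the project's actual loader):

package ytdlp

import (
	"context"
	"encoding/json"
	"os/exec"
)

// dumpEntry shells out to yt-dlp and decodes its single-video JSON dump
// (`yt-dlp -J`) into the Entry struct above. Sketch only: no flag tuning,
// no stderr capture, no cookies handling.
func dumpEntry(ctx context.Context, url string) (*Entry, error) {
	out, err := exec.CommandContext(ctx, "yt-dlp", "-J", url).Output()
	if err != nil {
		return nil, err
	}
	var e Entry
	if err := json.Unmarshal(out, &e); err != nil {
		return nil, err
	}
	return &e, nil
}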
@@ -14,91 +14,26 @@ import (
	"golang.org/x/sync/errgroup"
)

-type PlaylistEntry struct {
-	ID                 string       `json:"id"`
-	Uploader           string       `json:"uploader"`
-	UploaderID         string       `json:"uploader_id"`
-	UploadDate         string       `json:"upload_date"`
-	Title              string       `json:"title"`
-	Thumbnail          string       `json:"thumbnail"`
-	Duration           int64        `json:"duration"`
-	LikeCount          int64        `json:"like_count"`
-	DislikeCount       int64        `json:"dislike_count"`
-	CommentCount       int64        `json:"comment_count"`
-	Formats            []Format     `json:"formats"`
-	AgeLimit           int64        `json:"age_limit"`
-	Tags               []string     `json:"tags"`
-	Categories         []string     `json:"categories"`
-	Cast               []any        `json:"cast"`
-	Subtitles          Subtitles    `json:"subtitles"`
-	Thumbnails         []Thumbnail  `json:"thumbnails"`
-	Timestamp          int64        `json:"timestamp"`
-	ViewCount          int64        `json:"view_count"`
-	WebpageURL         string       `json:"webpage_url"`
-	OriginalURL        string       `json:"original_url"`
-	WebpageURLBasename string       `json:"webpage_url_basename"`
-	WebpageURLDomain   string       `json:"webpage_url_domain"`
-	Extractor          string       `json:"extractor"`
-	ExtractorKey       string       `json:"extractor_key"`
-	PlaylistCount      int64        `json:"playlist_count"`
-	Playlist           string       `json:"playlist"`
-	PlaylistID         string       `json:"playlist_id"`
-	PlaylistTitle      string       `json:"playlist_title"`
-	PlaylistUploader   string       `json:"playlist_uploader"`
-	PlaylistUploaderID string       `json:"playlist_uploader_id"`
-	NEntries           int64        `json:"n_entries"`
-	PlaylistIndex      int64        `json:"playlist_index"`
-	PlaylistAutonumber int64        `json:"playlist_autonumber"`
-	DisplayID          string       `json:"display_id"`
-	Fulltitle          string       `json:"fulltitle"`
-	DurationString     string       `json:"duration_string"`
-	ReleaseYear        int          `json:"release_year"`
-	Epoch              int64        `json:"epoch"`
-	FormatID           string       `json:"format_id"`
-	URL                string       `json:"url"`
-	ManifestURL        string       `json:"manifest_url"`
-	Tbr                float64      `json:"tbr"`
-	EXT                EXT          `json:"ext"`
-	FPS                float64      `json:"fps"`
-	Protocol           Protocol     `json:"protocol"`
-	VideoHasDRM        bool         `json:"has_drm"`
-	Width              int64        `json:"width"`
-	Height             int64        `json:"height"`
-	Vcodec             string       `json:"vcodec"`
-	Acodec             string       `json:"acodec"`
-	DynamicRange       DynamicRange `json:"dynamic_range"`
-	Resolution         string       `json:"resolution"`
-	AspectRatio        float64      `json:"aspect_ratio"`
-	HTTPHeaders        HTTPHeaders  `json:"http_headers"`
-	VideoEXT           EXT          `json:"video_ext"`
-	AudioEXT           AudioEXT     `json:"audio_ext"`
-	Format             string       `json:"format"`
-	Filename           string       `json:"_filename"`
-	VideoFilename      string       `json:"filename"`
-	Type               string       `json:"_type"`
-	Version            Version      `json:"_version"`
-}

// Progress implements ctxprogress.Progress.
-func (p PlaylistEntry) Progress() (current int, total int) {
+func (p Entry) Progress() (current int, total int) {
	return int(p.PlaylistIndex), int(p.PlaylistCount)
}

-func (p PlaylistEntry) Url() string {
-	if p.URL != "" {
-		return p.URL
-	}
-	if p.WebpageURL != "" {
-		return p.WebpageURL
-	}
-	if p.OriginalURL != "" {
-		return p.OriginalURL
-	}
+// func (p PlaylistEntry) Url() string {
+// 	if p.URL != "" {
+// 		return p.URL
+// 	}
+// 	if p.WebpageURL != "" {
+// 		return p.WebpageURL
+// 	}
+// 	if p.OriginalURL != "" {
+// 		return p.OriginalURL
+// 	}

-	return ""
-}
+// 	return ""
+// }

-func (yt *Client) Playlist(ctx context.Context, url string) ([]PlaylistEntry, error) {
+func (yt *Client) Playlist(ctx context.Context, url string) ([]Entry, error) {
	group, ctx := errgroup.WithContext(ctx)
	w, lines, err := lineReader(group)
	if err != nil {

@@ -117,9 +52,9 @@ func (yt *Client) Playlist(ctx context.Context, url string) ([]Entry, error) {
		return w.Close()
	})

-	playlists := []PlaylistEntry{}
+	playlists := []Entry{}
	for line := range lines {
-		entry := PlaylistEntry{}
+		entry := Entry{}
		err = json.Unmarshal([]byte(line), &entry)
		if err != nil {
			return nil, err

@@ -163,7 +98,7 @@ func lineReader(group *errgroup.Group) (io.WriteCloser, <-chan string, error) {
	return w, lines, nil
}

-var _ ctxprogress.Progress = (*PlaylistEntry)(nil)
+var _ ctxprogress.Progress = (*Entry)(nil)

var _ ctxprogress.Progress = (*DownloadProgress)(nil)
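Playlist above consumes lineReader, which turns yt-dlp's line-delimited JSON into a channel. A self-contained sketch of that shape (an assumed implementation; the real one lives outside this hunk):

package main

import (
	"bufio"
	"fmt"
	"io"

	"golang.org/x/sync/errgroup"
)

// lineReader returns a writer and a channel of the lines written to it.
// The scanner goroutine runs in the errgroup and closes the channel on EOF.
func lineReader(group *errgroup.Group) (io.WriteCloser, <-chan string, error) {
	r, w := io.Pipe()
	lines := make(chan string)
	group.Go(func() error {
		defer close(lines)
		sc := bufio.NewScanner(r)
		sc.Buffer(make([]byte, 0, 64*1024), 16*1024*1024) // playlist JSON lines can be huge
		for sc.Scan() {
			lines <- sc.Text()
		}
		return sc.Err()
	})
	return w, lines, nil
}

func main() {
	var group errgroup.Group
	w, lines, _ := lineReader(&group)
	go func() {
		fmt.Fprintln(w, `{"id":"1"}`)
		w.Close()
	}()
	for l := range lines {
		fmt.Println(l)
	}
	_ = group.Wait()
}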
@@ -30,7 +30,7 @@ func (r *mutationResolver) ValidateTorrents(ctx context.Context, filter model.To
		return false, nil
	}

-	t.ValidateTorrent()
+	t.ValidateTorrent(ctx)
	return true, nil
}

@@ -40,7 +40,7 @@ func (r *mutationResolver) ValidateTorrents(ctx context.Context, filter model.To
		return false, err
	}
	for _, v := range torrents {
-		if err := v.ValidateTorrent(); err != nil {
+		if err := v.ValidateTorrent(ctx); err != nil {
			return false, err
		}
	}
@@ -11,7 +11,7 @@ import (
// It serves as dependency injection for your app, add any dependencies you require here.

type Resolver struct {
-	Service  *torrent.Service
+	Service  *torrent.Daemon
	VFS      vfs.Filesystem
	SourceFS billy.Filesystem
}
@@ -15,7 +15,7 @@ import (
	"github.com/labstack/echo/v4/middleware"
)

-func New(fc *filecache.Cache, s *torrent.Service, vfs vfs.Filesystem, logPath string, cfg *config.Settings) error {
+func New(fc *filecache.Cache, s *torrent.Daemon, vfs vfs.Filesystem, logPath string, cfg *config.Settings) error {
	log := slog.With()

	r := echo.New()
@@ -18,7 +18,7 @@ import (
	"github.com/ravilushqa/otelgqlgen"
)

-func GraphQLHandler(service *torrent.Service, vfs vfs.Filesystem) http.Handler {
+func GraphQLHandler(service *torrent.Daemon, vfs vfs.Filesystem) http.Handler {
	graphqlHandler := handler.NewDefaultServer(
		graph.NewExecutableSchema(
			graph.Config{
@@ -6,11 +6,12 @@ import (

	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
	nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
+	"git.kmsign.ru/royalcat/tstor/src/config"
	"git.kmsign.ru/royalcat/tstor/src/log"
	"git.kmsign.ru/royalcat/tstor/src/vfs"
)

-func NewNFSv3Handler(fs vfs.Filesystem) (nfs.Handler, error) {
+func NewNFSv3Handler(fs vfs.Filesystem, config config.NFS) (nfs.Handler, error) {
	nfslog := slog.With("component", "nfs")
	nfs.SetLogger(log.NewNFSLog(nfslog))
	nfs.Log.SetLevel(nfs.InfoLevel)

@@ -18,7 +19,7 @@ func NewNFSv3Handler(fs vfs.Filesystem, config config.NFS) (nfs.Handler, error) {
	bfs := &fsWrapper{fs: fs, log: nfslog, timeout: time.Minute}
	handler := nfshelper.NewNullAuthHandler(bfs)

-	cacheHelper, err := NewKvHandler(handler, bfs)
+	cacheHelper, err := NewKvHandler(handler, bfs, config)
	if err != nil {
		return nil, err
	}
@@ -6,6 +6,7 @@ import (
	"path"
	"strings"
	"sync"
+	"time"

	"git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
	"git.kmsign.ru/royalcat/tstor/src/config"

@@ -14,6 +15,7 @@ import (

	"github.com/google/uuid"
	"github.com/royalcat/kv"
+	"github.com/royalcat/kv/kvbadger"
)

type handle []string

@@ -45,17 +47,20 @@ func bytesToPath(path []string) string {
var kvhandlerMeter = otel.Meter("git.kmsign.ru/royalcat/tstor/src/export/nfs.kvhandler")

// NewKvHandler provides a basic to/from-file handle cache that can be tuned with a smaller cache of active directory listings.
-func NewKvHandler(h nfs.Handler, fs nfs.Filesystem) (nfs.Handler, error) {
-	activeHandles, err := kv.NewBadgerKVMarhsler[uuid.UUID, handle](path.Join(config.Config.Mounts.NFS.CachePath, "handlers"))
+func NewKvHandler(h nfs.Handler, fs nfs.Filesystem, config config.NFS) (nfs.Handler, error) {
+	opts := kvbadger.DefaultOptions(path.Join(config.CachePath, "handlers"))
+	opts.DefaultTTL = time.Hour
+
+	activeHandles, err := kvbadger.NewBagerKVBinaryKey[uuid.UUID, handle](opts)
	if err != nil {
		return nil, err
	}

	reverseCache := map[string]uuid.UUID{}

-	activeHandles.Range(context.Background(), func(k uuid.UUID, v handle) bool {
+	activeHandles.Range(context.Background(), func(k uuid.UUID, v handle) error {
		reverseCache[v.String()] = k
-		return true
+		return nil
	})

	c := &CachingHandler{
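The kvbadger calls above are the new store setup used across this commit. A runnable sketch using only the calls visible in this diff; the exact signatures of Set and Range are assumptions inferred from how they are called here:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/royalcat/kv/kvbadger"
)

func main() {
	ctx := context.Background()

	// Options and constructor mirror the calls used in this commit;
	// entries expire after an hour, matching the NFS handle cache above.
	opts := kvbadger.DefaultOptions("/tmp/handlers-example") // hypothetical path
	opts.DefaultTTL = time.Hour

	store, err := kvbadger.NewBadgerKVBytes[string, string](opts)
	if err != nil {
		panic(err)
	}

	if err := store.Set(ctx, "a", "1"); err != nil {
		panic(err)
	}

	// Range callbacks now return error instead of bool; nil continues iteration.
	err = store.Range(ctx, func(k, v string) error {
		fmt.Println(k, v)
		return nil
	})
	if err != nil {
		panic(err)
	}
}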
@@ -6,7 +6,7 @@ import (
	"git.kmsign.ru/royalcat/tstor/src/vfs"
)

-func NewHostedFS(sourceFS vfs.Filesystem, tsrv *torrent.Service, ytdlpsrv *ytdlp.Service) vfs.Filesystem {
+func NewHostedFS(sourceFS vfs.Filesystem, tsrv *torrent.Daemon, ytdlpsrv *ytdlp.Daemon) vfs.Filesystem {
	factories := map[string]vfs.FsFactory{
		".torrent":  tsrv.NewTorrentFs,
		".ts-ytdlp": ytdlpsrv.BuildFS,
@@ -14,7 +14,6 @@ import (
-	dlog "git.kmsign.ru/royalcat/tstor/src/log"
)

// MOVE
func newClient(st storage.ClientImpl, fis bep44.Store, cfg *config.TorrentClient, id [20]byte) (*torrent.Client, error) {
	l := slog.With("component", "torrent-client")
@@ -2,9 +2,11 @@ package torrent

import (
+	"context"
+	"log/slog"
	"slices"
	"strings"

	"git.kmsign.ru/royalcat/tstor/pkg/rlog"
	"github.com/anacrolix/torrent"
)

@@ -12,10 +14,15 @@ type Controller struct {
	torrentFilePath string
	t               *torrent.Torrent
	rep             *filesMappingsStore
+	log             *rlog.Logger
}

func newController(t *torrent.Torrent, rep *filesMappingsStore) *Controller {
-	return &Controller{t: t, rep: rep}
+	return &Controller{
+		t:   t,
+		rep: rep,
+		log: rlog.Component("torrent/controller").With(slog.String("infohash", t.InfoHash().HexString())),
+	}
}

func (s *Controller) TorrentFilePath() string {

@@ -107,8 +114,41 @@ func (s *Controller) isFileComplete(startIndex int, endIndex int) bool {
	return true
}

-func (s *Controller) ValidateTorrent() error {
-	<-s.t.GotInfo()
-	s.t.VerifyData()
+func (s *Controller) ValidateTorrent(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-s.t.GotInfo():
+	}
+
+	for i := 0; i < s.t.NumPieces(); i++ {
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+
+		s.t.Piece(i).VerifyData()
+	}

	return nil
}

+func (c *Controller) initializeTorrentPriories(ctx context.Context) error {
+	log := c.log.WithComponent("initializeTorrentPriories")
+
+	// files, err := c.Files(ctx)
+	// if err != nil {
+	// 	return err
+	// }
+
+	// for _, file := range files {
+	// 	if file == nil {
+	// 		continue
+	// 	}
+
+	// 	file.SetPriority(torrent.PiecePriorityNormal)
+	// }
+
+	log.Info(ctx, "torrent initialization complete", slog.String("infohash", c.InfoHash()), slog.String("torrent_name", c.Name()))
+
+	return nil
+}
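ValidateTorrent is now cancellable between pieces rather than verifying the whole torrent in one blocking call. A hypothetical caller bounding a full verification:

import (
	"context"
	"time"
)

// validateWithTimeout bounds a full piece verification; hypothetical helper,
// ctl is the *Controller defined above.
func validateWithTimeout(ctl *Controller, d time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	return ctl.ValidateTorrent(ctx)
}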
@@ -8,11 +8,11 @@ import (
	"log/slog"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"sync"
	"time"

+	"git.kmsign.ru/royalcat/tstor/pkg/ctxbilly"
	"git.kmsign.ru/royalcat/tstor/pkg/rlog"
	"git.kmsign.ru/royalcat/tstor/src/config"
	"git.kmsign.ru/royalcat/tstor/src/vfs"

@@ -38,7 +38,7 @@ type DirAquire struct {
	Hashes []infohash.T
}

-type Service struct {
+type Daemon struct {
	client        *torrent.Client
	excludedFiles *filesMappingsStore
	infoBytes     *infoBytesStore

@@ -54,8 +54,8 @@ type Daemon struct {
	log *rlog.Logger
}

-func NewService(sourceFs billy.Filesystem, conf config.TorrentClient) (*Service, error) {
-	s := &Service{
+func NewService(sourceFs billy.Filesystem, conf config.TorrentClient) (*Daemon, error) {
+	s := &Daemon{
		log:           rlog.Component("torrent-service"),
		sourceFs:      sourceFs,
		torrentLoaded: make(chan struct{}),

@@ -115,9 +115,9 @@ func NewService(sourceFs billy.Filesystem, conf config.TorrentClient) (*Daemon, error) {
	return s, nil
}

-var _ vfs.FsFactory = (*Service)(nil).NewTorrentFs
+var _ vfs.FsFactory = (*Daemon)(nil).NewTorrentFs

-func (s *Service) Close(ctx context.Context) error {
+func (s *Daemon) Close(ctx context.Context) error {
	return errors.Join(append(
		s.client.Close(),
		s.Storage.Close(),

@@ -128,7 +128,7 @@ func (s *Daemon) Close(ctx context.Context) error {
	)...)
}

-func (s *Service) LoadTorrent(ctx context.Context, f vfs.File) (*Controller, error) {
+func (s *Daemon) LoadTorrent(ctx context.Context, f vfs.File) (*Controller, error) {
	ctx, span := tracer.Start(ctx, "LoadTorrent")
	defer span.End()
	log := s.log

@@ -197,138 +197,31 @@ func (s *Daemon) LoadTorrent(ctx context.Context, f vfs.File) (*Controller, error) {
		}
		span.AddEvent("got info")

-		info := t.Info()
-		if info == nil {
-			return nil, fmt.Errorf("info is nil")
-		}
+		// info := t.Info()
+		// if info == nil {
+		// 	return nil, fmt.Errorf("info is nil")
+		// }

-		compatable, _, err := s.checkTorrentCompatable(ctx, spec.InfoHash, *info)
-		if err != nil {
-			return nil, err
-		}
-		if !compatable {
-			return nil, fmt.Errorf(
-				"torrent with name '%s' not compatable existing infohash: %s, new: %s",
-				t.Name(), t.InfoHash().HexString(), spec.InfoHash.HexString(),
-			)
-		}
+		// compatable, _, err := s.checkTorrentCompatable(ctx, spec.InfoHash, *info)
+		// if err != nil {
+		// 	return nil, err
+		// }
+		// if !compatable {
+		// 	return nil, fmt.Errorf(
+		// 		"torrent with name '%s' not compatable existing infohash: %s, new: %s",
+		// 		t.Name(), t.InfoHash().HexString(), spec.InfoHash.HexString(),
+		// 	)
+		// }
	}

-	return newController(t, s.excludedFiles), nil
-}
+	ctl := newController(t, s.excludedFiles)

-func (s *Service) checkTorrentCompatable(ctx context.Context, ih infohash.T, info metainfo.Info) (compatable bool, tryLater bool, err error) {
-	log := s.log.With(
-		slog.String("new-name", info.BestName()),
-		slog.String("new-infohash", ih.String()),
-	)
-
-	name := info.BestName()
-
-	aq, found, err := s.dirsAquire.Get(ctx, info.BestName())
+	err = ctl.initializeTorrentPriories(ctx)
	if err != nil {
-		return false, false, err
-	}
-	if !found {
-		err = s.dirsAquire.Set(ctx, name, DirAquire{
-			Name:   name,
-			Hashes: slices.Compact([]infohash.T{ih}),
-		})
-		if err != nil {
-			return false, false, err
-		}
-
-		log.Debug(ctx, "acquiring was not found, so created")
-		return true, false, nil
+		log.Error(ctx, "error initializing torrent priorities", rlog.Error(err))
	}

-	if slices.Contains(aq.Hashes, ih) {
-		log.Debug(ctx, "hash already know to be compatable")
-		return true, false, nil
-	}
-
-	for _, existingTorrent := range s.client.Torrents() {
-		if existingTorrent.Name() != name || existingTorrent.InfoHash() == ih {
-			continue
-		}
-
-		existingInfo := existingTorrent.Info()
-
-		existingFiles := slices.Clone(existingInfo.Files)
-		newFiles := slices.Clone(info.Files)
-
-		if !s.checkTorrentFilesCompatable(ctx, aq, existingFiles, newFiles) {
-			return false, false, nil
-		}
-
-		aq.Hashes = slicesUnique(append(aq.Hashes, ih))
-		err = s.dirsAquire.Set(ctx, aq.Name, aq)
-		if err != nil {
-			log.Warn(ctx, "torrent not compatible")
-			return false, false, err
-		}
-
-	}
-
-	if slices.Contains(aq.Hashes, ih) {
-		log.Debug(ctx, "hash is compatable")
-		return true, false, nil
-	}
-
-	log.Debug(ctx, "torrent with same name not found, try later")
-	return false, true, nil
-}
-
-func (s *Service) checkTorrentFilesCompatable(ctx context.Context, aq DirAquire, existingFiles, newFiles []metainfo.FileInfo) bool {
-	log := s.log.With(slog.String("name", aq.Name))
-
-	pathCmp := func(a, b metainfo.FileInfo) int {
-		return slices.Compare(a.BestPath(), b.BestPath())
-	}
-	slices.SortStableFunc(existingFiles, pathCmp)
-	slices.SortStableFunc(newFiles, pathCmp)
-
-	// torrents basically equals
-	if slices.EqualFunc(existingFiles, newFiles, func(fi1, fi2 metainfo.FileInfo) bool {
-		return fi1.Length == fi2.Length && slices.Equal(fi1.BestPath(), fi1.BestPath())
-	}) {
-		return true
-	}
-
-	if len(newFiles) > len(existingFiles) {
-		type fileInfo struct {
-			Path   string
-			Length int64
-		}
-		mapInfo := func(fi metainfo.FileInfo) fileInfo {
-			return fileInfo{
-				Path:   strings.Join(fi.BestPath(), "/"),
-				Length: fi.Length,
-			}
-		}
-
-		existingFiles := apply(existingFiles, mapInfo)
-		newFiles := apply(newFiles, mapInfo)
-
-		for _, n := range newFiles {
-			if slices.Contains(existingFiles, n) {
-				continue
-			}
-
-			for _, e := range existingFiles {
-				if e.Path == n.Path && e.Length != n.Length {
-					log.Warn(ctx, "torrents not compatible, has files with different length",
-						slog.String("path", n.Path),
-						slog.Int64("existing-length", e.Length),
-						slog.Int64("new-length", e.Length),
-					)
-					return false
-				}
-			}
-		}
-	}
-
-	return true
+	return ctl, nil
}

func isValidInfoHashBytes(d []byte) bool {

@@ -337,17 +230,17 @@ func isValidInfoHashBytes(d []byte) bool {
	return err == nil
}

-func (s *Service) Stats() (*Stats, error) {
+func (s *Daemon) Stats() (*Stats, error) {
	return &Stats{}, nil
}

-func (s *Service) GetStats() torrent.ConnStats {
+func (s *Daemon) GetStats() torrent.ConnStats {
	return s.client.ConnStats()
}

const loadWorkers = 5

-func (s *Service) loadTorrentFiles(ctx context.Context) error {
+func (s *Daemon) loadTorrentFiles(ctx context.Context) error {
	ctx, span := tracer.Start(ctx, "loadTorrentFiles", trace.WithAttributes(
		attribute.Int("workers", loadWorkers),
	))

@@ -363,16 +256,23 @@ func (s *Daemon) loadTorrentFiles(ctx context.Context) error {
	}()

	loaderWorker := func() {
-		wg.Add(1)
		for path := range loaderPaths {
-			file, err := vfs.NewLazyOsFile(path)
+			info, err := s.sourceFs.Stat(path)
+			if err != nil {
+				log.Error(ctx, "error stat torrent file", slog.String("filename", path), rlog.Error(err))
+				continue
+			}
+
+			file, err := s.sourceFs.Open(path)
			if err != nil {
				log.Error(ctx, "error opening torrent file", slog.String("filename", path), rlog.Error(err))
				continue
			}
-			defer file.Close(ctx)
+			defer file.Close()

-			_, err = s.LoadTorrent(ctx, file)
+			vfile := vfs.NewCtxBillyFile(info, ctxbilly.WrapFile(file))
+
+			_, err = s.LoadTorrent(ctx, vfile)
			if err != nil {
				log.Error(ctx, "failed adding torrent", rlog.Error(err))
			}

@@ -380,11 +280,12 @@ func (s *Daemon) loadTorrentFiles(ctx context.Context) error {
		wg.Done()
	}

+	wg.Add(loadWorkers)
	for range loadWorkers {
		go loaderWorker()
	}

-	return util.Walk(s.sourceFs, "/", func(path string, info os.FileInfo, err error) error {
+	return util.Walk(s.sourceFs, "", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return fmt.Errorf("fs walk error: %w", err)
		}

@@ -405,7 +306,7 @@ func (s *Daemon) loadTorrentFiles(ctx context.Context) error {
	})
}

-func (s *Service) ListTorrents(ctx context.Context) ([]*Controller, error) {
+func (s *Daemon) ListTorrents(ctx context.Context) ([]*Controller, error) {
	<-s.torrentLoaded

	out := []*Controller{}

@@ -415,7 +316,7 @@ func (s *Daemon) ListTorrents(ctx context.Context) ([]*Controller, error) {
	return out, nil
}

-func (s *Service) GetTorrent(infohashHex string) (*Controller, error) {
+func (s *Daemon) GetTorrent(infohashHex string) (*Controller, error) {
	<-s.torrentLoaded

	t, ok := s.client.Torrent(infohash.FromHexString(infohashHex))
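loadTorrentFiles fans paths out to loadWorkers goroutines draining a single channel, joined by a WaitGroup. The pattern in isolation (a generic sketch, not the daemon code):

package main

import (
	"fmt"
	"sync"
)

// process runs a bounded worker pool over a paths channel: N workers drain
// one channel, the WaitGroup joins them once the channel is closed.
func process(paths <-chan string, workers int, handle func(string)) {
	var wg sync.WaitGroup
	wg.Add(workers)
	for range workers {
		go func() {
			defer wg.Done()
			for p := range paths {
				handle(p)
			}
		}()
	}
	wg.Wait()
}

func main() {
	paths := make(chan string)
	go func() {
		for _, p := range []string{"a.torrent", "b.torrent"} {
			paths <- p
		}
		close(paths)
	}()
	process(paths, 5, func(p string) { fmt.Println("loading", p) })
}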
src/sources/torrent/dup_cache.go (new file, 92 lines)

@@ -0,0 +1,92 @@
package torrent

import (
	"path"
	"slices"
	"sync"

	"git.kmsign.ru/royalcat/tstor/pkg/slicesutils"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/types/infohash"
)

type dupInfo struct {
	infohash infohash.T
	fileinfo metainfo.FileInfo
}

type dupIndex struct {
	mu         sync.RWMutex
	torrents   map[infohash.T][]metainfo.FileInfo
	sha1       map[string][]dupInfo   // bittorrent v1
	piecesRoot map[[32]byte][]dupInfo // bittorrent v2
}

func newDupIndex() *dupIndex {
	return &dupIndex{
		torrents:   map[infohash.T][]metainfo.FileInfo{},
		sha1:       map[string][]dupInfo{},
		piecesRoot: map[[32]byte][]dupInfo{},
	}
}

func (c *dupIndex) AddFile(fileinfo metainfo.FileInfo, ih infohash.T) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.torrents[ih] = append(c.torrents[ih], fileinfo)

	if fileinfo.Sha1 != "" {
		c.sha1[fileinfo.Sha1] = append(c.sha1[fileinfo.Sha1], dupInfo{fileinfo: fileinfo, infohash: ih})
	}

	if fileinfo.PiecesRoot.Ok {
		c.piecesRoot[fileinfo.PiecesRoot.Value] = append(c.piecesRoot[fileinfo.PiecesRoot.Value], dupInfo{fileinfo: fileinfo, infohash: ih})
	}
}

func (c *dupIndex) DuplicateFiles(fileinfo metainfo.FileInfo, ih infohash.T) []dupInfo {
	c.mu.RLock()
	defer c.mu.RUnlock()

	if fileinfo.Sha1 != "" {
		if dups, ok := c.sha1[fileinfo.Sha1]; ok {
			return slices.Clone(dups)
		}
	}

	if fileinfo.PiecesRoot.Ok {
		if dups, ok := c.piecesRoot[fileinfo.PiecesRoot.Value]; ok {
			return slices.Clone(dups)
		}
	}

	return []dupInfo{}
}

func (c *dupIndex) Includes(ih infohash.T, files []metainfo.FileInfo) []dupInfo {
	c.mu.RLock()
	defer c.mu.RUnlock()

	out := []dupInfo{}

	for ih, v := range c.torrents {
		intersection := slicesutils.IntersectionFunc(files, v, func(a, b metainfo.FileInfo) bool {
			mostly := path.Join(a.BestPath()...) == path.Join(b.BestPath()...) && a.Length == b.Length
			if a.Sha1 != "" && b.Sha1 != "" {
				return mostly && a.Sha1 == b.Sha1
			}
			if a.PiecesRoot.Ok && b.PiecesRoot.Ok {
				return mostly && a.PiecesRoot.Value == b.PiecesRoot.Value
			}

			return mostly
		})

		for _, v := range intersection {
			out = append(out, dupInfo{infohash: ih, fileinfo: v})
		}
	}

	return out // was `return []dupInfo{}`, which discarded every collected match
}
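A hypothetical in-package use of the index above; the hash and path values are made up for illustration:

package torrent

import (
	"fmt"

	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/types/infohash"
)

// exampleDupLookup sketches how dupIndex is meant to be fed and queried.
func exampleDupLookup() {
	idx := newDupIndex()

	fi := metainfo.FileInfo{
		Length: 1 << 20,
		Path:   []string{"video", "a.mkv"},
		Sha1:   "da39a3ee5e6b4b0d3255bfef95601890afd80709", // hypothetical per-file v1 hash
	}
	ih := infohash.FromHexString("0000000000000000000000000000000000000001")

	idx.AddFile(fi, ih)

	// Lookup prefers the v1 sha1 index, then the v2 pieces-root index.
	for _, dup := range idx.DuplicateFiles(fi, ih) {
		fmt.Println("duplicate in torrent", dup.infohash.HexString())
	}
}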
@@ -7,10 +7,12 @@ import (
	"github.com/anacrolix/torrent"
	"github.com/anacrolix/torrent/types/infohash"
	"github.com/royalcat/kv"
+	"github.com/royalcat/kv/kvbadger"
)

func newFileMappingsStore(metaDir string, storage TorrentFileDeleter) (*filesMappingsStore, error) {
-	str, err := kv.NewBadgerKVBytes[string, string](filepath.Join(metaDir, "file-mappings"))
+	opts := kvbadger.DefaultOptions(filepath.Join(metaDir, "file-mappings"))
+	str, err := kvbadger.NewBadgerKVBytes[string, string](opts)
	if err != nil {
		return nil, err
	}

@@ -46,9 +48,9 @@ func (r *filesMappingsStore) ExcludeFile(ctx context.Context, file *torrent.File

func (r *filesMappingsStore) FileMappings(ctx context.Context, ih infohash.T) (map[string]string, error) {
	out := map[string]string{}
-	err := r.mappings.RangeWithPrefix(ctx, ih.HexString(), func(k, v string) bool {
+	err := r.mappings.RangeWithPrefix(ctx, ih.HexString(), func(k, v string) error {
		out[k] = v
-		return true
+		return nil
	})
	return out, err
}
@@ -34,7 +34,7 @@ type TorrentFS struct {

var _ vfs.Filesystem = (*TorrentFS)(nil)

-func (s *Service) NewTorrentFs(ctx context.Context, f vfs.File) (vfs.Filesystem, error) {
+func (s *Daemon) NewTorrentFs(ctx context.Context, f vfs.File) (vfs.Filesystem, error) {
	defer f.Close(ctx)

	info, err := f.Info()

@@ -240,6 +240,17 @@ func (fs *TorrentFS) traceAttrs(add ...attribute.KeyValue) trace.SpanStartOption
	}, add...)...)
}

+func (tfs *TorrentFS) readContext(ctx context.Context) (context.Context, context.CancelFunc) {
+	lastReadTimeout := tfs.lastAccessTimeout.Load()
+	if lastReadTimeout != nil && time.Since(*lastReadTimeout) < secondaryTimeout { // make short timeout for already failed files
+		trace.SpanFromContext(ctx).SetAttributes(attribute.Bool("short_timeout", true))
+
+		return context.WithTimeout(ctx, time.Millisecond)
+	}
+
+	return ctx, func() {}
+}

// Stat implements Filesystem.
func (tfs *TorrentFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
	ctx, span := tracer.Start(ctx, "Stat",

@@ -251,25 +262,21 @@ func (tfs *TorrentFS) Stat(ctx context.Context, filename string) (fs.FileInfo, e
		return tfs, nil
	}

+	var err error
+	ctx, cancel := tfs.readContext(ctx)
+	defer func() {
+		cancel()
+		if err == context.DeadlineExceeded {
+			now := time.Now()
+			tfs.lastAccessTimeout.Store(&now)
+		}
+	}()

	fsPath, nestedFs, nestedFsPath, err := tfs.resolver.ResolvePath(ctx, filename, tfs.rawOpen)
	if err != nil {
		return nil, err
	}
	if nestedFs != nil {
-		lastReadTimeout := tfs.lastAccessTimeout.Load()
-		if lastReadTimeout != nil && time.Since(*lastReadTimeout) < secondaryTimeout { // make short timeout for already failed files
-			span.SetAttributes(attribute.Bool("short_timeout", true))
-			var cancel context.CancelFunc
-			ctx, cancel = context.WithTimeout(ctx, time.Millisecond)
-			defer cancel()
-		}
-		defer func() {
-			if err == context.DeadlineExceeded {
-				now := time.Now()
-				tfs.lastAccessTimeout.Store(&now)
-			}
-		}()

		return nestedFs.Stat(ctx, nestedFsPath)
	}

@@ -286,24 +293,20 @@ func (tfs *TorrentFS) Open(ctx context.Context, filename string) (file vfs.File,
		return vfs.NewDirFile(tfs.name), nil
	}

+	ctx, cancel := tfs.readContext(ctx)
+	defer func() {
+		cancel()
+		if err == context.DeadlineExceeded {
+			now := time.Now()
+			tfs.lastAccessTimeout.Store(&now)
+		}
+	}()

	fsPath, nestedFs, nestedFsPath, err := tfs.resolver.ResolvePath(ctx, filename, tfs.rawOpen)
	if err != nil {
		return nil, err
	}
	if nestedFs != nil {
-		lastReadTimeout := tfs.lastAccessTimeout.Load()
-		if lastReadTimeout != nil && time.Since(*lastReadTimeout) < secondaryTimeout { // make short timeout for already failed files
-			span.SetAttributes(attribute.Bool("short_timeout", true))
-			var cancel context.CancelFunc
-			ctx, cancel = context.WithTimeout(ctx, time.Millisecond)
-			defer cancel()
-		}
-		defer func() {
-			if err == context.DeadlineExceeded {
-				now := time.Now()
-				tfs.lastAccessTimeout.Store(&now)
-			}
-		}()

		return nestedFs.Open(ctx, nestedFsPath)
	}

@@ -317,25 +320,21 @@ func (tfs *TorrentFS) ReadDir(ctx context.Context, name string) ([]fs.DirEntry,
	)
	defer span.End()

+	var err error
+	ctx, cancel := tfs.readContext(ctx)
+	defer func() {
+		cancel()
+		if err == context.DeadlineExceeded {
+			now := time.Now()
+			tfs.lastAccessTimeout.Store(&now)
+		}
+	}()

	fsPath, nestedFs, nestedFsPath, err := tfs.resolver.ResolvePath(ctx, name, tfs.rawOpen)
	if err != nil {
		return nil, err
	}
	if nestedFs != nil {
-		lastReadTimeout := tfs.lastAccessTimeout.Load()
-		if lastReadTimeout != nil && time.Since(*lastReadTimeout) < secondaryTimeout { // make short timeout for already failed files
-			span.SetAttributes(attribute.Bool("short_timeout", true))
-			var cancel context.CancelFunc
-			ctx, cancel = context.WithTimeout(ctx, time.Millisecond)
-			defer cancel()
-		}
-		defer func() {
-			if err == context.DeadlineExceeded {
-				now := time.Now()
-				tfs.lastAccessTimeout.Store(&now)
-			}
-		}()

		return nestedFs.ReadDir(ctx, nestedFsPath)
	}
	files, err := tfs.files(ctx)

@@ -394,12 +393,12 @@ type torrentFile struct {
const secondaryTimeout = time.Hour * 24

func openTorrentFile(ctx context.Context, name string, file *torrent.File) (*torrentFile, error) {
-	// select {
-	// case <-file.Torrent().GotInfo():
-	// 	break
-	// case <-ctx.Done():
-	// 	return nil, ctx.Err()
-	// }
+	select {
+	case <-file.Torrent().GotInfo():
+		break
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}

	r := file.NewReader()
	r.SetReadahead(1024 * 1024 * 16) // TODO configurable

@@ -448,6 +447,16 @@ func (rw *torrentFile) Close(ctx context.Context) error {
	return rw.tr.Close()
}

+func (tf *torrentFile) readTimeout(ctx context.Context) (context.Context, context.CancelFunc) {
+	lastReadTimeout := tf.lastReadTimeout.Load()
+	if lastReadTimeout != nil && time.Since(*lastReadTimeout) < secondaryTimeout { // make short timeout for already failed files
+		trace.SpanFromContext(ctx).SetAttributes(attribute.Bool("short_timeout", true))
+		return context.WithTimeout(ctx, time.Millisecond)
+	}
+
+	return ctx, func() {}
+}

// Read implements ctxio.Reader.
func (tf *torrentFile) Read(ctx context.Context, p []byte) (n int, err error) {
	ctx, span := tracer.Start(ctx, "Read",

@@ -461,13 +470,8 @@ func (tf *torrentFile) Read(ctx context.Context, p []byte) (n int, err error) {
	tf.mu.RLock()
	defer tf.mu.RUnlock()

-	lastReadTimeout := tf.lastReadTimeout.Load()
-	if lastReadTimeout != nil && time.Since(*lastReadTimeout) < secondaryTimeout { // make short timeout for already failed files
-		span.SetAttributes(attribute.Bool("short_timeout", true))
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, time.Millisecond)
-		defer cancel()
-	}
+	ctx, cancel := tf.readTimeout(ctx)
+	defer cancel()
	defer func() {
		if err == context.DeadlineExceeded {
			now := time.Now()

@@ -490,13 +494,8 @@ func (tf *torrentFile) ReadAt(ctx context.Context, p []byte, off int64) (n int,
	tf.mu.RLock()
	defer tf.mu.RUnlock()

-	lastReadTimeout := tf.lastReadTimeout.Load()
-	if lastReadTimeout != nil && time.Since(*lastReadTimeout) < secondaryTimeout { // make short timeout for already failed files
-		span.SetAttributes(attribute.Bool("short_timeout", true))
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, time.Millisecond)
-		defer cancel()
-	}
+	ctx, cancel := tf.readTimeout(ctx)
+	defer cancel()
	defer func() {
		if err == context.DeadlineExceeded {
			now := time.Now()
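readContext and readTimeout implement a small negative cache: after one deadline expiry, subsequent reads fail almost immediately for secondaryTimeout instead of stalling through the full deadline again. The same pattern as a standalone sketch:

package main

import (
	"context"
	"sync/atomic"
	"time"
)

// failFast hands out a nearly pre-expired context if the previous attempt
// timed out recently, so known-stalled operations fail immediately instead
// of burning the full deadline every time.
type failFast struct {
	last    atomic.Pointer[time.Time]
	penalty time.Duration // how long to keep failing fast after a timeout
}

func (f *failFast) ctx(parent context.Context) (context.Context, context.CancelFunc) {
	if t := f.last.Load(); t != nil && time.Since(*t) < f.penalty {
		return context.WithTimeout(parent, time.Millisecond)
	}
	return parent, func() {}
}

// observe records a deadline expiry so the next ctx() call fails fast.
func (f *failFast) observe(err error) {
	if err == context.DeadlineExceeded {
		now := time.Now()
		f.last.Store(&now)
	}
}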
@@ -5,12 +5,13 @@ import (

	"git.kmsign.ru/royalcat/tstor/pkg/kvtrace"
	"github.com/royalcat/kv"
+	"github.com/royalcat/kv/kvbadger"
	"go.opentelemetry.io/otel/attribute"
)

func NewKV[K kv.Bytes, V any](dbdir, name string) (store kv.Store[K, V], err error) {
-	dir := path.Join(dbdir, name)
-	store, err = kv.NewBadgerKV[K, V](dir)
+	opts := kvbadger.DefaultOptions(path.Join(dbdir, name))
+	store, err = kvbadger.NewBadgerKVBytesKey[K, V](opts)
	if err != nil {
		return nil, err
	}
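A hypothetical caller of the NewKV helper above; the store name and value type are illustrative, and Set's signature is assumed from how stores are used elsewhere in this commit:

import "context"

// openCounts opens one Badger-backed, trace-wrapped store per logical name
// under dbdir and writes a sample entry.
func openCounts(ctx context.Context, dbdir string) error {
	store, err := NewKV[string, int64](dbdir, "piece-counts") // hypothetical name
	if err != nil {
		return err
	}
	return store.Set(ctx, "total", 42)
}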
@@ -1,168 +0,0 @@
package torrent

import (
	"context"
	"fmt"
	"io"
	"os"
	"path"

	atorrent "github.com/anacrolix/torrent"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/storage"
	"github.com/anacrolix/torrent/types/infohash"
	"github.com/hashicorp/go-multierror"
)

// NOT USED
type PieceStorage struct {
	basePath   string
	completion storage.PieceCompletion
}

func NewPieceStorage(path string, completion storage.PieceCompletion) *PieceStorage {
	return &PieceStorage{
		basePath:   path,
		completion: completion,
	}
}

// OpenTorrent implements FileStorageDeleter.
func (p *PieceStorage) OpenTorrent(info *metainfo.Info, infoHash infohash.T) (storage.TorrentImpl, error) {
	torrentPath := path.Join(p.basePath, infoHash.HexString())
	descriptors := []*os.File{}

	return storage.TorrentImpl{
		Piece: func(piece metainfo.Piece) storage.PieceImpl {
			hash := piece.Hash().HexString()
			piecePrefixDir := path.Join(torrentPath, hash[:2])
			err := os.MkdirAll(piecePrefixDir, os.ModePerm|os.ModeDir)
			if err != nil {
				return &errPiece{err: err}
			}
			piecePath := path.Join(torrentPath, hash[:2], hash)
			file, err := os.OpenFile(piecePath, os.O_CREATE|os.O_RDWR, os.ModePerm)
			if err != nil {
				return &errPiece{err: err}
			}
			pk := metainfo.PieceKey{
				InfoHash: infoHash,
				Index:    piece.Index(),
			}
			return newPieceFile(pk, file, p.completion)

			// file, err os.OpenFile(piecePath)
		},
		Flush: func() error {
			var res error
			for _, f := range descriptors {
				if err := f.Sync(); err != nil {
					res = multierror.Append(res, err)
				}
			}
			return res
		},
		Close: func() error {
			var res error
			for _, f := range descriptors {
				if err := f.Close(); err != nil {
					res = multierror.Append(res, err)
				}
			}
			return res
		},
	}, nil
}

// Close implements FileStorageDeleter.
func (p *PieceStorage) Close() error {
	return nil
}

// DeleteFile implements FileStorageDeleter.
func (p *PieceStorage) DeleteFile(file *atorrent.File) error {
	return fmt.Errorf("not implemented")
}

// CleanupDirs implements DataStorage.
func (p *PieceStorage) CleanupDirs(ctx context.Context, expected []*Controller, dryRun bool) (int, error) {
	return 0, nil // TODO
}

// CleanupFiles implements DataStorage.
func (p *PieceStorage) CleanupFiles(ctx context.Context, expected []*Controller, dryRun bool) (int, error) {
	return 0, nil // TODO
}

func newPieceFile(pk metainfo.PieceKey, file *os.File, completion storage.PieceCompletionGetSetter) *piece {
	return &piece{
		pk:         pk,
		File:       file,
		completion: completion,
	}
}

type piece struct {
	*os.File
	pk         metainfo.PieceKey
	completion storage.PieceCompletionGetSetter
}

// Completion implements storage.PieceImpl.
func (p *piece) Completion() storage.Completion {
	compl, err := p.completion.Get(p.pk)
	if err != nil {
		return storage.Completion{Complete: false, Ok: false, Err: err}
	}
	return compl
}

// MarkComplete implements storage.PieceImpl.
func (p *piece) MarkComplete() error {
	return p.completion.Set(p.pk, true)
}

// MarkNotComplete implements storage.PieceImpl.
func (p *piece) MarkNotComplete() error {
	return p.completion.Set(p.pk, false)
}

var _ storage.PieceImpl = (*piece)(nil)
var _ io.WriterTo = (*piece)(nil)

type errPiece struct {
	err error
}

// WriteTo implements io.WriterTo.
func (p *errPiece) WriteTo(io.Writer) (int64, error) {
	return 0, p.err
}

// ReadAt implements storage.PieceImpl.
func (p *errPiece) ReadAt([]byte, int64) (int, error) {
	return 0, p.err
}

// WriteAt implements storage.PieceImpl.
func (p *errPiece) WriteAt([]byte, int64) (int, error) {
	return 0, p.err
}

// Completion implements storage.PieceImpl.
func (p *errPiece) Completion() storage.Completion {
	return storage.Completion{Complete: false, Ok: false, Err: p.err}
}

// MarkComplete implements storage.PieceImpl.
func (p *errPiece) MarkComplete() error {
	return p.err
}

// MarkNotComplete implements storage.PieceImpl.
func (p *errPiece) MarkNotComplete() error {
	return p.err
}

var _ storage.PieceImpl = (*errPiece)(nil)
var _ io.WriterTo = (*errPiece)(nil)
@@ -15,7 +15,7 @@ type DownloadTask struct {
	File string
}

-func (s *Service) Download(ctx context.Context, task *DownloadTask) error {
+func (s *Daemon) Download(ctx context.Context, task *DownloadTask) error {
	t, ok := s.client.Torrent(task.InfoHash)
	if !ok {
		return fmt.Errorf("torrent with IH %s not found", task.InfoHash.HexString())

@@ -101,7 +101,7 @@ type TorrentProgress struct {
	Total int64
}

-func (s *Service) DownloadProgress(ctx context.Context) (<-chan TorrentProgress, error) {
+func (s *Daemon) DownloadProgress(ctx context.Context) (<-chan TorrentProgress, error) {
	torrents, err := s.ListTorrents(ctx)
	if err != nil {
		return nil, err
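A hypothetical consumer of DownloadProgress; only the channel contract is taken from this diff:

import (
	"context"
	"fmt"
)

// watchProgress drains progress updates until the channel closes or the
// context is cancelled.
func watchProgress(ctx context.Context, s *Daemon) error {
	progress, err := s.DownloadProgress(ctx)
	if err != nil {
		return err
	}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case p, ok := <-progress:
			if !ok {
				return nil
			}
			fmt.Printf("progress: %+v\n", p)
		}
	}
}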
@ -2,9 +2,7 @@ package torrent
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"io"
|
||||
"errors"
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
"os"
|
||||
|
@ -12,72 +10,62 @@ import (
|
|||
"path/filepath"
|
||||
"slices"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/rlog"
|
||||
"github.com/anacrolix/torrent"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/anacrolix/torrent/storage"
|
||||
"github.com/dustin/go-humanize"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// NewFileStorage creates a new ClientImplCloser that stores files using the OS native filesystem.
|
||||
func NewFileStorage(baseDir string, pc storage.PieceCompletion) *fileStorage {
|
||||
return &fileStorage{
|
||||
ClientImplCloser: storage.NewFileOpts(storage.NewFileClientOpts{
|
||||
client: storage.NewFileOpts(storage.NewFileClientOpts{
|
||||
ClientBaseDir: baseDir,
|
||||
PieceCompletion: pc,
|
||||
TorrentDirMaker: torrentDir,
|
||||
FilePathMaker: filePath,
|
||||
TorrentDirMaker: func(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
|
||||
return torrentDir(baseDir, infoHash)
|
||||
},
|
||||
FilePathMaker: func(opts storage.FilePathMakerOpts) string {
|
||||
return filePath(*opts.File)
|
||||
},
|
||||
}),
|
||||
baseDir: baseDir,
|
||||
pieceCompletion: pc,
|
||||
log: slog.With("component", "torrent-client"),
|
||||
dupIndex: newDupIndex(),
|
||||
log: rlog.Component("daemon", "torrent"),
|
||||
}
|
||||
}
|
||||
|
||||
// File-based storage for torrents, that isn't yet bound to a particular torrent.
|
||||
type fileStorage struct {
|
||||
baseDir string
|
||||
storage.ClientImplCloser
|
||||
baseDir string
|
||||
client storage.ClientImplCloser
|
||||
pieceCompletion storage.PieceCompletion
|
||||
log *slog.Logger
|
||||
dupIndex *dupIndex
|
||||
log *rlog.Logger
|
||||
}
|
||||
|
||||
var _ storage.ClientImplCloser = (*fileStorage)(nil)
|
||||
|
||||
func (me *fileStorage) Close() error {
|
||||
return me.pieceCompletion.Close()
|
||||
return errors.Join(
|
||||
me.client.Close(),
|
||||
me.pieceCompletion.Close(),
|
||||
)
|
||||
}
|
||||
|
||||
func torrentDir(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
|
||||
dirName := info.Name
|
||||
if dirName == "" {
|
||||
dirName = infoHash.HexString()
|
||||
}
|
||||
|
||||
return filepath.Join(baseDir, dirName)
|
||||
}
|
||||
|
||||
func filePath(opts storage.FilePathMakerOpts) string {
|
||||
return filepath.Join(opts.File.Path...)
|
||||
}
|
||||
|
||||
func (fs *fileStorage) filePath(info *metainfo.Info, infoHash metainfo.Hash, fileInfo *metainfo.FileInfo) string {
|
||||
return filepath.Join(torrentDir(fs.baseDir, info, infoHash), filePath(storage.FilePathMakerOpts{
|
||||
Info: info,
|
||||
File: fileInfo,
|
||||
}))
|
||||
func (fs *fileStorage) fullFilePath(infoHash metainfo.Hash, fileInfo metainfo.FileInfo) string {
|
||||
return filepath.Join(
|
||||
torrentDir(fs.baseDir, infoHash),
|
||||
filePath(fileInfo),
|
||||
)
|
||||
}
|
||||
|
||||
func (fs *fileStorage) DeleteFile(file *torrent.File) error {
|
||||
info := file.Torrent().Info()
|
||||
infoHash := file.Torrent().InfoHash()
|
||||
torrentDir := torrentDir(fs.baseDir, info, infoHash)
|
||||
torrentDir := torrentDir(fs.baseDir, infoHash)
|
||||
fileInfo := file.FileInfo()
|
||||
relFilePath := filePath(storage.FilePathMakerOpts{
|
||||
Info: info,
|
||||
File: &fileInfo,
|
||||
})
|
||||
relFilePath := filePath(fileInfo)
|
||||
filePath := path.Join(torrentDir, relFilePath)
|
||||
for i := file.BeginPieceIndex(); i < file.EndPieceIndex(); i++ {
|
||||
pk := metainfo.PieceKey{InfoHash: infoHash, Index: i}
|
||||
|
@ -90,11 +78,11 @@ func (fs *fileStorage) DeleteFile(file *torrent.File) error {
|
|||
}
|
||||
|
||||
func (fs *fileStorage) CleanupDirs(ctx context.Context, expected []*Controller, dryRun bool) ([]string, error) {
|
||||
log := fs.log.With("function", "CleanupDirs", "expectedTorrents", len(expected), "dryRun", dryRun)
|
||||
log := fs.log.With(slog.Int("expectedTorrents", len(expected)), slog.Bool("dryRun", dryRun))
|
||||
|
||||
expectedEntries := []string{}
|
||||
for _, e := range expected {
|
||||
expectedEntries = append(expectedEntries, e.Torrent().Name())
|
||||
expectedEntries = append(expectedEntries, e.Torrent().InfoHash().HexString())
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(fs.baseDir)
|
||||
|
@ -113,11 +101,11 @@ func (fs *fileStorage) CleanupDirs(ctx context.Context, expected []*Controller,
|
|||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
log.Info("deleting trash data", "dirsCount", len(toDelete))
|
||||
log.Info(ctx, "deleting trash data", slog.Int("dirsCount", len(toDelete)))
|
||||
if !dryRun {
|
||||
for i, name := range toDelete {
|
||||
p := path.Join(fs.baseDir, name)
|
||||
log.Warn("deleting trash data", "path", p)
|
||||
log.Warn(ctx, "deleting trash data", slog.String("path", p))
|
||||
err := os.RemoveAll(p)
|
||||
if err != nil {
|
||||
return toDelete[:i], err
|
||||
|
@ -129,7 +117,7 @@ func (fs *fileStorage) CleanupDirs(ctx context.Context, expected []*Controller,
|
|||
}
|
||||
|
||||
func (s *fileStorage) CleanupFiles(ctx context.Context, expected []*Controller, dryRun bool) ([]string, error) {
|
||||
log := s.log.With("function", "CleanupFiles", "expectedTorrents", len(expected), "dryRun", dryRun)
|
||||
log := s.log.With(slog.Int("expectedTorrents", len(expected)), slog.Bool("dryRun", dryRun))
|
||||
|
||||
expectedEntries := []string{}
|
||||
{
|
||||
|
@ -140,7 +128,7 @@ func (s *fileStorage) CleanupFiles(ctx context.Context, expected []*Controller,
|
|||
}
|
||||
|
||||
for _, f := range files {
|
||||
expectedEntries = append(expectedEntries, s.filePath(e.Torrent().Info(), e.Torrent().InfoHash(), ptr(f.FileInfo())))
|
||||
expectedEntries = append(expectedEntries, s.fullFilePath(e.Torrent().InfoHash(), f.FileInfo()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -176,10 +164,10 @@ func (s *fileStorage) CleanupFiles(ctx context.Context, expected []*Controller,
|
|||
return toDelete, ctx.Err()
|
||||
}
|
||||
|
||||
log.Info("deleting trash data", "filesCount", len(toDelete))
|
||||
log.Info(ctx, "deleting trash data", slog.Int("filesCount", len(toDelete)))
|
||||
if !dryRun {
|
||||
for i, p := range toDelete {
|
||||
s.log.Warn("deleting trash data", "path", p)
|
||||
s.log.Warn(ctx, "deleting trash data", slog.String("path", p))
|
||||
err := os.Remove(p)
|
||||
if err != nil {
|
||||
return toDelete[i:], err
|
||||
|
@ -206,212 +194,3 @@ func (s *fileStorage) iterFiles(ctx context.Context, iter func(ctx context.Conte
|
|||
return iter(ctx, path, info)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *fileStorage) Dedupe(ctx context.Context) (uint64, error) {
|
||||
ctx, span := tracer.Start(ctx, fmt.Sprintf("Dedupe"))
|
||||
defer span.End()
|
||||
|
||||
log := s.log
|
||||
|
||||
sizeMap := map[int64][]string{}
|
||||
err := s.iterFiles(ctx, func(ctx context.Context, path string, info fs.FileInfo) error {
|
||||
size := info.Size()
|
||||
sizeMap[size] = append(sizeMap[size], path)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
maps.DeleteFunc(sizeMap, func(k int64, v []string) bool {
|
||||
return len(v) <= 1
|
||||
})
|
||||
|
||||
span.AddEvent("collected files with same size", trace.WithAttributes(
|
||||
attribute.Int("count", len(sizeMap)),
|
||||
))
|
||||
|
||||
var deduped uint64 = 0
|
||||
|
||||
i := 0
|
||||
for _, paths := range sizeMap {
|
||||
if i%100 == 0 {
|
||||
log.Info("deduping in progress", "current", i, "total", len(sizeMap))
|
||||
}
|
||||
i++
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return deduped, ctx.Err()
|
||||
}
|
||||
|
||||
slices.Sort(paths)
|
||||
paths = slices.Compact(paths)
|
||||
if len(paths) <= 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
paths, err = applyErr(paths, filepath.Abs)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
dedupedGroup, err := s.dedupeFiles(ctx, paths)
|
||||
if err != nil {
|
||||
log.Error("Error applying dedupe", "files", paths, "error", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
if dedupedGroup > 0 {
|
||||
deduped += dedupedGroup
|
||||
log.Info("deduped file group",
|
||||
slog.String("files", fmt.Sprint(paths)),
|
||||
slog.String("deduped", humanize.Bytes(dedupedGroup)),
|
||||
slog.String("deduped_total", humanize.Bytes(deduped)),
|
||||
)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
func applyErr[E, O any](in []E, apply func(E) (O, error)) ([]O, error) {
|
||||
out := make([]O, 0, len(in))
|
||||
for _, p := range in {
|
||||
o, err := apply(p)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
out = append(out, o)
|
||||
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// const blockSize uint64 = 4096
|
||||
|
||||
func (s *fileStorage) dedupeFiles(ctx context.Context, paths []string) (deduped uint64, err error) {
|
||||
ctx, span := tracer.Start(ctx, fmt.Sprintf("dedupeFiles"), trace.WithAttributes(
|
||||
attribute.StringSlice("files", paths),
|
||||
))
|
||||
defer func() {
|
||||
span.SetAttributes(attribute.Int64("deduped", int64(deduped)))
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}()
|
||||
|
||||
log := s.log
|
||||
|
||||
srcF, err := os.Open(paths[0])
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
defer srcF.Close()
|
||||
srcStat, err := srcF.Stat()
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
srcFd := int(srcF.Fd())
|
||||
srcSize := srcStat.Size()
|
||||
|
||||
fsStat := unix.Statfs_t{}
|
||||
err = unix.Fstatfs(srcFd, &fsStat)
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
srcHash, err := filehash(srcF)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
if int64(fsStat.Bsize) > srcSize { // on btrfs this means the file resides inline in metadata and cannot be deduplicated
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
blockSize := uint64((srcSize / int64(fsStat.Bsize)) * int64(fsStat.Bsize)) // round the dedupe length down to a block boundary
|
||||
|
||||
span.SetAttributes(attribute.Int64("blocksize", int64(blockSize)))
|
||||
|
||||
rng := unix.FileDedupeRange{
|
||||
Src_offset: 0,
|
||||
Src_length: blockSize,
|
||||
Info: []unix.FileDedupeRangeInfo{},
|
||||
}
|
||||
|
||||
for _, dst := range paths[1:] {
|
||||
if ctx.Err() != nil {
|
||||
return deduped, ctx.Err()
|
||||
}
|
||||
|
||||
destF, err := os.OpenFile(dst, os.O_RDWR, os.ModePerm)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
defer destF.Close()
|
||||
|
||||
dstHash, err := filehash(destF)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
if srcHash != dstHash {
|
||||
destF.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
rng.Info = append(rng.Info, unix.FileDedupeRangeInfo{
|
||||
Dest_fd: int64(destF.Fd()),
|
||||
Dest_offset: 0,
|
||||
})
|
||||
}
|
||||
|
||||
if len(rng.Info) == 0 {
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
log.Info("found same files, deduping", "files", paths, "size", humanize.Bytes(uint64(srcStat.Size())))
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return deduped, ctx.Err()
|
||||
}
|
||||
|
||||
rng.Src_offset = 0
|
||||
for i := range rng.Info {
|
||||
rng.Info[i].Dest_offset = 0
|
||||
}
|
||||
|
||||
err = unix.IoctlFileDedupeRange(srcFd, &rng)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
for i := range rng.Info {
|
||||
deduped += rng.Info[i].Bytes_deduped
|
||||
|
||||
rng.Info[i].Status = 0
|
||||
rng.Info[i].Bytes_deduped = 0
|
||||
}
|
||||
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
const compareBlockSize = 1024 * 128
|
||||
|
||||
func filehash(r io.Reader) ([20]byte, error) {
|
||||
buf := make([]byte, compareBlockSize)
|
||||
_, err := io.ReadFull(r, buf) // a bare Read may return fewer bytes than requested; ReadFull hashes a stable prefix
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
return [20]byte{}, err
|
||||
}
|
||||
|
||||
return sha1.Sum(buf), nil
|
||||
}
|
||||
|
||||
func ptr[D any](v D) *D {
|
||||
return &v
|
||||
}
|
||||
|
|
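The Src_length handed to FIDEDUPERANGE must cover whole filesystem blocks, so the length is the file size rounded down to a block boundary; a tiny standalone sketch of that arithmetic (numbers are illustrative):

```go
package main

import "fmt"

func main() {
	const bsize int64 = 4096          // typical filesystem block size
	const srcSize int64 = 1<<20 + 513 // a file slightly over 1 MiB
	// round down to a block boundary; the 513-byte tail is left alone
	aligned := (srcSize / bsize) * bsize
	fmt.Println(aligned) // 1048576
}
```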
229
src/sources/torrent/storage_dedupe.go
Normal file
|
@@ -0,0 +1,229 @@
|
|||
package torrent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/rlog"
|
||||
"github.com/dustin/go-humanize"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func (s *fileStorage) Dedupe(ctx context.Context) (uint64, error) {
|
||||
ctx, span := tracer.Start(ctx, "Dedupe")
|
||||
defer span.End()
|
||||
|
||||
log := s.log
|
||||
|
||||
sizeMap := map[int64][]string{}
|
||||
err := s.iterFiles(ctx, func(ctx context.Context, path string, info fs.FileInfo) error {
|
||||
size := info.Size()
|
||||
sizeMap[size] = append(sizeMap[size], path)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
maps.DeleteFunc(sizeMap, func(k int64, v []string) bool {
|
||||
return len(v) <= 1
|
||||
})
|
||||
|
||||
span.AddEvent("collected files with same size", trace.WithAttributes(
|
||||
attribute.Int("count", len(sizeMap)),
|
||||
))
|
||||
|
||||
var deduped uint64 = 0
|
||||
|
||||
i := 0
|
||||
for _, paths := range sizeMap {
|
||||
if i%100 == 0 {
|
||||
log.Info(ctx, "deduping in progress", slog.Int("current", i), slog.Int("total", len(sizeMap)))
|
||||
}
|
||||
i++
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return deduped, ctx.Err()
|
||||
}
|
||||
|
||||
slices.Sort(paths)
|
||||
paths = slices.Compact(paths)
|
||||
if len(paths) <= 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
paths, err = applyErr(paths, filepath.Abs)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
dedupedGroup, err := s.dedupeFiles(ctx, paths)
|
||||
if err != nil {
|
||||
log.Error(ctx, "Error applying dedupe", slog.Any("files", paths), rlog.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
if dedupedGroup > 0 {
|
||||
deduped += dedupedGroup
|
||||
log.Info(ctx, "deduped file group",
|
||||
slog.String("files", fmt.Sprint(paths)),
|
||||
slog.String("deduped", humanize.Bytes(dedupedGroup)),
|
||||
slog.String("deduped_total", humanize.Bytes(deduped)),
|
||||
)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
func applyErr[E, O any](in []E, apply func(E) (O, error)) ([]O, error) {
|
||||
out := make([]O, 0, len(in))
|
||||
for _, p := range in {
|
||||
o, err := apply(p)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
out = append(out, o)
|
||||
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// const blockSize uint64 = 4096
|
||||
|
||||
func (s *fileStorage) dedupeFiles(ctx context.Context, paths []string) (deduped uint64, err error) {
|
||||
ctx, span := tracer.Start(ctx, "dedupeFiles", trace.WithAttributes(
|
||||
attribute.StringSlice("files", paths),
|
||||
))
|
||||
defer func() {
|
||||
span.SetAttributes(attribute.Int64("deduped", int64(deduped)))
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}()
|
||||
|
||||
log := s.log
|
||||
|
||||
srcF, err := os.Open(paths[0])
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
defer srcF.Close()
|
||||
srcStat, err := srcF.Stat()
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
srcFd := int(srcF.Fd())
|
||||
srcSize := srcStat.Size()
|
||||
|
||||
fsStat := unix.Statfs_t{}
|
||||
err = unix.Fstatfs(srcFd, &fsStat)
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
srcHash, err := filehash(srcF)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
if int64(fsStat.Bsize) > srcSize { // on btrfs this means the file resides inline in metadata and cannot be deduplicated
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
blockSize := uint64((srcSize / int64(fsStat.Bsize)) * int64(fsStat.Bsize)) // round the dedupe length down to a block boundary
|
||||
|
||||
span.SetAttributes(attribute.Int64("blocksize", int64(blockSize)))
|
||||
|
||||
rng := unix.FileDedupeRange{
|
||||
Src_offset: 0,
|
||||
Src_length: blockSize,
|
||||
Info: []unix.FileDedupeRangeInfo{},
|
||||
}
|
||||
|
||||
for _, dst := range paths[1:] {
|
||||
if ctx.Err() != nil {
|
||||
return deduped, ctx.Err()
|
||||
}
|
||||
|
||||
destF, err := os.OpenFile(dst, os.O_RDWR, os.ModePerm)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
defer destF.Close()
|
||||
|
||||
dstHash, err := filehash(destF)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
if srcHash != dstHash {
|
||||
destF.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
rng.Info = append(rng.Info, unix.FileDedupeRangeInfo{
|
||||
Dest_fd: int64(destF.Fd()),
|
||||
Dest_offset: 0,
|
||||
})
|
||||
}
|
||||
|
||||
if len(rng.Info) == 0 {
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
log.Info(ctx, "found same files, deduping", slog.Any("files", paths), slog.String("size", humanize.Bytes(uint64(srcStat.Size()))))
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return deduped, ctx.Err()
|
||||
}
|
||||
|
||||
rng.Src_offset = 0
|
||||
for i := range rng.Info {
|
||||
rng.Info[i].Dest_offset = 0
|
||||
}
|
||||
|
||||
err = unix.IoctlFileDedupeRange(srcFd, &rng)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
for i := range rng.Info {
|
||||
deduped += rng.Info[i].Bytes_deduped
|
||||
|
||||
rng.Info[i].Status = 0
|
||||
rng.Info[i].Bytes_deduped = 0
|
||||
}
|
||||
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
const compareBlockSize = 1024 * 128
|
||||
|
||||
func filehash(r io.Reader) ([20]byte, error) {
|
||||
buf := make([]byte, compareBlockSize)
|
||||
_, err := io.ReadFull(r, buf) // a bare Read may return fewer bytes than requested; ReadFull hashes a stable prefix
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
return [20]byte{}, err
|
||||
}
|
||||
|
||||
return sha1.Sum(buf), nil
|
||||
}
|
||||
|
||||
func ptr[D any](v D) *D {
|
||||
return &v
|
||||
}
|
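For reference, the same kernel interface can be driven standalone; a minimal sketch assuming a dedupe-capable filesystem (btrfs/XFS) and two identical pre-existing files, a.bin and b.bin (both names hypothetical):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	src, err := os.Open("a.bin") // hypothetical: two identical files on the same mount
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.OpenFile("b.bin", os.O_RDWR, 0) // dedupe targets must be opened writable
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	st, err := src.Stat()
	if err != nil {
		log.Fatal(err)
	}

	rng := unix.FileDedupeRange{
		Src_offset: 0,
		Src_length: uint64(st.Size()), // the kernel may clamp an unaligned tail
		Info: []unix.FileDedupeRangeInfo{
			{Dest_fd: int64(dst.Fd()), Dest_offset: 0},
		},
	}
	// The kernel byte-compares both ranges before sharing extents, so a
	// false-positive hash match upstream cannot corrupt data.
	if err := unix.IoctlFileDedupeRange(int(src.Fd()), &rng); err != nil {
		log.Fatal(err)
	}
	// Status 0 (FILE_DEDUPE_RANGE_SAME) means the extents are now shared.
	fmt.Printf("status=%d bytes_deduped=%d\n", rng.Info[0].Status, rng.Info[0].Bytes_deduped)
}
```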
199
src/sources/torrent/storage_open.go
Normal file
|
@@ -0,0 +1,199 @@
|
|||
package torrent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/cowutils"
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/rlog"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/anacrolix/torrent/storage"
|
||||
"github.com/anacrolix/torrent/types/infohash"
|
||||
)
|
||||
|
||||
// OpenTorrent implements storage.ClientImplCloser.
|
||||
func (me *fileStorage) OpenTorrent(info *metainfo.Info, infoHash infohash.T) (storage.TorrentImpl, error) {
|
||||
ctx := context.Background()
|
||||
log := me.log
|
||||
|
||||
dir := torrentDir(me.baseDir, infoHash)
|
||||
legacyDir := filepath.Join(me.baseDir, info.Name)
|
||||
|
||||
log = log.With(slog.String("legacy_dir", legacyDir), slog.String("dir", dir))
|
||||
if _, err := os.Stat(legacyDir); err == nil {
|
||||
log.Warn(ctx, "legacy torrent dir found, renaming", slog.String("dir", dir))
|
||||
err = os.Rename(legacyDir, dir)
|
||||
if err != nil {
|
||||
return storage.TorrentImpl{}, fmt.Errorf("error renaming legacy torrent dir: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Stat(dir); errors.Is(err, fs.ErrNotExist) {
|
||||
log.Info(ctx, "new torrent, trying copy files from existing")
|
||||
dups := me.dupIndex.Includes(infoHash, info.Files)
|
||||
|
||||
for _, dup := range dups {
|
||||
err := me.copyDup(ctx, infoHash, dup)
|
||||
if err != nil {
|
||||
log.Error(ctx, "error copying file", slog.String("file", dup.fileinfo.DisplayPath(info)), rlog.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return me.client.OpenTorrent(info, infoHash)
|
||||
}
|
||||
|
||||
func (me *fileStorage) copyDup(ctx context.Context, infoHash infohash.T, dup dupInfo) error {
|
||||
log := me.log.With(slog.String("infohash", infoHash.HexString()), slog.String("dup_infohash", dup.infohash.HexString()))
|
||||
|
||||
srcPath := me.fullFilePath(dup.infohash, dup.fileinfo)
|
||||
src, err := os.Open(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer src.Close()
|
||||
|
||||
dstPath := me.fullFilePath(infoHash, dup.fileinfo)
|
||||
dst, err := os.OpenFile(dstPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer dst.Close()
|
||||
|
||||
log.Info(ctx, "copying duplicate file", slog.String("src", srcPath), slog.String("dst", dstPath))
|
||||
|
||||
err = cowutils.Reflink(ctx, dst, src, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error copying file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func torrentDir(baseDir string, infoHash metainfo.Hash) string {
|
||||
return filepath.Join(baseDir, infoHash.HexString())
|
||||
}
|
||||
|
||||
func filePath(file metainfo.FileInfo) string {
|
||||
return filepath.Join(file.BestPath()...)
|
||||
}
|
||||
|
||||
func (s *Daemon) checkTorrentCompatable(ctx context.Context, ih infohash.T, info metainfo.Info) (compatable bool, tryLater bool, err error) {
|
||||
log := s.log.With(
|
||||
slog.String("new-name", info.BestName()),
|
||||
slog.String("new-infohash", ih.String()),
|
||||
)
|
||||
|
||||
name := info.BestName()
|
||||
|
||||
aq, found, err := s.dirsAquire.Get(ctx, info.BestName())
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
if !found {
|
||||
err = s.dirsAquire.Set(ctx, name, DirAquire{
|
||||
Name: name,
|
||||
Hashes: slices.Compact([]infohash.T{ih}),
|
||||
})
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
log.Debug(ctx, "acquiring was not found, so created")
|
||||
return true, false, nil
|
||||
}
|
||||
|
||||
if slices.Contains(aq.Hashes, ih) {
|
||||
log.Debug(ctx, "hash already know to be compatable")
|
||||
return true, false, nil
|
||||
}
|
||||
|
||||
for _, existingTorrent := range s.client.Torrents() {
|
||||
if existingTorrent.Name() != name || existingTorrent.InfoHash() == ih {
|
||||
continue
|
||||
}
|
||||
|
||||
existingInfo := existingTorrent.Info()
|
||||
|
||||
existingFiles := slices.Clone(existingInfo.Files)
|
||||
newFiles := slices.Clone(info.Files)
|
||||
|
||||
if !s.checkTorrentFilesCompatable(ctx, aq, existingFiles, newFiles) {
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
aq.Hashes = slicesUnique(append(aq.Hashes, ih))
|
||||
err = s.dirsAquire.Set(ctx, aq.Name, aq)
|
||||
if err != nil {
|
||||
log.Warn(ctx, "torrent not compatible")
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if slices.Contains(aq.Hashes, ih) {
|
||||
log.Debug(ctx, "hash is compatable")
|
||||
return true, false, nil
|
||||
}
|
||||
|
||||
log.Debug(ctx, "torrent with same name not found, try later")
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
func (s *Daemon) checkTorrentFilesCompatable(ctx context.Context, aq DirAquire, existingFiles, newFiles []metainfo.FileInfo) bool {
|
||||
log := s.log.With(slog.String("name", aq.Name))
|
||||
|
||||
pathCmp := func(a, b metainfo.FileInfo) int {
|
||||
return slices.Compare(a.BestPath(), b.BestPath())
|
||||
}
|
||||
slices.SortStableFunc(existingFiles, pathCmp)
|
||||
slices.SortStableFunc(newFiles, pathCmp)
|
||||
|
||||
// the torrents are essentially identical
|
||||
if slices.EqualFunc(existingFiles, newFiles, func(fi1, fi2 metainfo.FileInfo) bool {
|
||||
return fi1.Length == fi2.Length && slices.Equal(fi1.BestPath(), fi2.BestPath())
|
||||
}) {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(newFiles) > len(existingFiles) {
|
||||
type fileInfo struct {
|
||||
Path string
|
||||
Length int64
|
||||
}
|
||||
mapInfo := func(fi metainfo.FileInfo) fileInfo {
|
||||
return fileInfo{
|
||||
Path: strings.Join(fi.BestPath(), "/"),
|
||||
Length: fi.Length,
|
||||
}
|
||||
}
|
||||
|
||||
existingFiles := apply(existingFiles, mapInfo)
|
||||
newFiles := apply(newFiles, mapInfo)
|
||||
|
||||
for _, n := range newFiles {
|
||||
if slices.Contains(existingFiles, n) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, e := range existingFiles {
|
||||
if e.Path == n.Path && e.Length != n.Length {
|
||||
log.Warn(ctx, "torrents not compatible, has files with different length",
|
||||
slog.String("path", n.Path),
|
||||
slog.Int64("existing-length", e.Length),
|
||||
slog.Int64("new-length", e.Length),
|
||||
)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
|
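copyDup defers the heavy lifting to cowutils.Reflink; on Linux the underlying primitive is the FICLONE ioctl. A minimal standalone sketch (file names hypothetical; requires a copy-on-write filesystem such as btrfs or XFS):

```go
package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	src, err := os.Open("existing.dat") // hypothetical source file
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.Create("clone.dat")
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// FICLONE points dst at src's extents: a constant-time copy that uses
	// no extra data blocks until either side is modified.
	if err := unix.IoctlFileClone(int(dst.Fd()), int(src.Fd())); err != nil {
		log.Fatal(err) // EOPNOTSUPP / EXDEV on filesystems without reflink
	}
}
```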
@@ -2,93 +2,109 @@ package ytdlp
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
"os"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxbilly"
|
||||
"git.kmsign.ru/royalcat/tstor/src/vfs"
|
||||
"github.com/go-git/go-billy/v5/osfs"
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/kvsingle"
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/rlog"
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ytdlp"
|
||||
"git.kmsign.ru/royalcat/tstor/src/tasks"
|
||||
"github.com/royalcat/ctxio"
|
||||
"github.com/royalcat/ctxprogress"
|
||||
)
|
||||
|
||||
func NewService(dataDir string) *Service {
|
||||
s := &Service{
|
||||
dataDir: dataDir,
|
||||
sources: make(map[string]ytdlpSource, 0),
|
||||
type Controller struct {
|
||||
datafs ctxbilly.Filesystem
|
||||
source Source
|
||||
client *ytdlp.Client
|
||||
cachedinfo *kvsingle.Value[string, ytdlp.Info]
|
||||
}
|
||||
|
||||
func newYtdlpController(datafs ctxbilly.Filesystem, source Source, client *ytdlp.Client) *Controller {
|
||||
return &Controller{
|
||||
datafs: datafs,
|
||||
source: source,
|
||||
client: client,
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
ctx := context.Background()
|
||||
ctx = ctxprogress.New(ctx)
|
||||
ctxprogress.AddCallback(ctx, func(p ctxprogress.Progress) {
|
||||
cur, total := p.Progress()
|
||||
fmt.Printf("updating sources: %d/%d\n", cur, total)
|
||||
})
|
||||
|
||||
err := s.Update(ctx)
|
||||
if err != nil {
|
||||
fmt.Println("failed to update sources:", err)
|
||||
}
|
||||
time.Sleep(time.Minute)
|
||||
}
|
||||
}()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
type Service struct {
|
||||
mu sync.Mutex
|
||||
|
||||
dataDir string
|
||||
sources map[string]ytdlpSource
|
||||
func (c *Controller) Source() Source {
|
||||
return c.source
|
||||
}
|
||||
|
||||
func (c *Service) addSource(s ytdlpSource) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
const sizeApprox = 1024 * 1024 * 1024 // 1 GiB tolerance when comparing on-disk size against FilesizeApprox
|
||||
|
||||
c.sources[s.Name()] = s
|
||||
}
|
||||
|
||||
func (c *Service) sourceDir(s ytdlpSource) string {
|
||||
return path.Join(c.dataDir, s.Name())
|
||||
}
|
||||
|
||||
func (c *Service) Update(ctx context.Context) error {
|
||||
for name, s := range c.sources {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
func (c *Controller) Update(ctx context.Context, updater tasks.Updater) error {
|
||||
log := updater.Logger()
|
||||
ctx = ctxprogress.New(ctx)
|
||||
ctxprogress.Set(ctx, ctxprogress.RangeProgress{Current: 0, Total: 10})
|
||||
plst, err := c.client.Playlist(ctx, c.source.Url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch playlist: %w", err)
|
||||
}
|
||||
ctxprogress.Set(ctx, ctxprogress.RangeProgress{Current: 1, Total: 10})
|
||||
ctxprogress.Range(ctx, plst, func(ctx context.Context, _ int, e ytdlp.Entry) bool {
|
||||
if e.OriginalURL == "" {
|
||||
log.Error("no URL in entry", rlog.Error(err))
|
||||
return true
|
||||
}
|
||||
|
||||
dir := c.sourceDir(s)
|
||||
err := s.Download(ctx, nil, dir)
|
||||
info, err := c.Info(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch source %s: %w", name, err)
|
||||
log.Error("error getting info", rlog.Error(err))
|
||||
return true
|
||||
}
|
||||
|
||||
dwl := info.RequestedDownloads[0]
|
||||
|
||||
fileinfo, err := c.datafs.Stat(ctx, dwl.Filename)
|
||||
if err != nil && !os.IsNotExist(err) { // a missing file just means nothing is downloaded yet
|
||||
log.Error("error getting file info", rlog.Error(err))
|
||||
return true
|
||||
}
|
||||
|
||||
if err == nil && fileinfo.Size()+sizeApprox > dwl.FilesizeApprox && fileinfo.Size()-sizeApprox < dwl.FilesizeApprox {
|
||||
log.Debug("file already downloaded", "filename", dwl.Filename)
|
||||
return true
|
||||
}
|
||||
|
||||
file, err := c.datafs.OpenFile(ctx, dwl.Filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
log.Error("error opening destination file", rlog.Error(err))
|
||||
return true
|
||||
}
|
||||
|
||||
err = c.client.Download(ctx, info.OriginalURL, ctxio.IoWriter(ctx, file))
|
||||
if closeErr := file.Close(); closeErr != nil {
|
||||
log.Error("error closing destination file", rlog.Error(closeErr))
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("error downloading", rlog.Error(err))
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
ctxprogress.Set(ctx, ctxprogress.RangeProgress{Current: 10, Total: 10})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Service) BuildFS(ctx context.Context, f vfs.File) (vfs.Filesystem, error) {
|
||||
data, err := ctxio.ReadAll(ctx, f)
|
||||
func (c *Controller) Info(ctx context.Context) (ytdlp.Info, error) {
|
||||
info, found, err := c.cachedinfo.Get(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read source file: %w", err)
|
||||
return info, err
|
||||
}
|
||||
if found {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
var s ytdlpSource
|
||||
err = json.Unmarshal(data, &s)
|
||||
info, err = c.client.Info(ctx, c.source.Url) // cache miss: fetch fresh info via the client (method name assumed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return info, err
|
||||
}
|
||||
|
||||
c.addSource(s)
|
||||
|
||||
downloadFS := ctxbilly.WrapFileSystem(osfs.New(c.sourceDir(s)))
|
||||
|
||||
return newSourceFS(path.Base(f.Name()), downloadFS, c, s), nil
|
||||
err = c.cachedinfo.Set(ctx, info)
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (c *Controller) Downloaded() error {
|
||||
return nil
|
||||
}
|
||||
|
|
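Update reports progress through royalcat/ctxprogress. Piecing together only the calls that appear in this diff, a producer/consumer pair looks roughly like this (the types of the callback values are assumed from usage):

```go
package main

import (
	"context"
	"fmt"

	"github.com/royalcat/ctxprogress"
)

func main() {
	ctx := ctxprogress.New(context.Background())

	// Observers register a callback; Progress() yields current/total.
	ctxprogress.AddCallback(ctx, func(p ctxprogress.Progress) {
		cur, total := p.Progress()
		fmt.Printf("progress: %v/%v\n", cur, total)
	})

	items := []string{"a", "b", "c"}
	// Range reports per-element progress and stops early when the body returns false.
	ctxprogress.Range(ctx, items, func(ctx context.Context, i int, s string) bool {
		fmt.Println("processing", s)
		return true
	})

	ctxprogress.Set(ctx, ctxprogress.RangeProgress{Current: 1, Total: 1})
}
```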
71
src/sources/ytdlp/daemon.go
Normal file
|
@@ -0,0 +1,71 @@
|
|||
package ytdlp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxbilly"
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ytdlp"
|
||||
"git.kmsign.ru/royalcat/tstor/src/vfs"
|
||||
"github.com/go-git/go-billy/v5/osfs"
|
||||
"github.com/royalcat/ctxio"
|
||||
)
|
||||
|
||||
func NewService(dataDir string) (*Daemon, error) {
|
||||
client, err := ytdlp.New()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &Daemon{
|
||||
mu: sync.Mutex{},
|
||||
client: client,
|
||||
dataDir: dataDir,
|
||||
controllers: make(map[string]*Controller, 0),
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
type Daemon struct {
|
||||
mu sync.Mutex
|
||||
|
||||
dataDir string
|
||||
client *ytdlp.Client
|
||||
controllers map[string]*Controller
|
||||
}
|
||||
|
||||
func (c *Daemon) addSource(s Source) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
ctl := newYtdlpController(ctxbilly.WrapFileSystem(osfs.New(c.sourceDir(s))), s, c.client)
|
||||
|
||||
c.controllers[s.Name()] = ctl
|
||||
}
|
||||
|
||||
func (c *Daemon) sourceDir(s Source) string {
|
||||
return path.Join(c.dataDir, s.Name())
|
||||
}
|
||||
|
||||
func (c *Daemon) BuildFS(ctx context.Context, f vfs.File) (vfs.Filesystem, error) {
|
||||
data, err := ctxio.ReadAll(ctx, f)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read source file: %w", err)
|
||||
}
|
||||
|
||||
var s Source
|
||||
err = json.Unmarshal(data, &s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.addSource(s)
|
||||
|
||||
downloadFS := ctxbilly.WrapFileSystem(osfs.New(c.sourceDir(s)))
|
||||
|
||||
return newSourceFS(path.Base(f.Name()), downloadFS, c, s), nil
|
||||
}
|
|
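BuildFS unmarshals the mounted source file directly into Source, so a source file is a one-field JSON document. A quick sketch of what it contains (the URL is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Source mirrors the struct defined in src/sources/ytdlp: one URL per file.
type Source struct {
	Url string `json:"url"`
}

func main() {
	b, _ := json.Marshal(Source{Url: "https://www.youtube.com/@SomeChannel"}) // illustrative URL
	fmt.Println(string(b)) // {"url":"https://www.youtube.com/@SomeChannel"}
}
```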
@@ -9,8 +9,8 @@ import (
|
|||
)
|
||||
|
||||
type SourceFS struct {
|
||||
service *Service
|
||||
source ytdlpSource
|
||||
service *Daemon
|
||||
source Source
|
||||
|
||||
fs ctxbilly.Filesystem
|
||||
|
||||
|
@@ -19,7 +19,7 @@ type SourceFS struct {
|
|||
|
||||
var _ vfs.Filesystem = (*SourceFS)(nil)
|
||||
|
||||
func newSourceFS(name string, fs ctxbilly.Filesystem, service *Service, source ytdlpSource) *SourceFS {
|
||||
func newSourceFS(name string, fs ctxbilly.Filesystem, service *Daemon, source Source) *SourceFS {
|
||||
return &SourceFS{
|
||||
fs: fs,
|
||||
service: service,
|
||||
|
|
|
@@ -1,7 +0,0 @@
|
|||
package ytdlp
|
||||
|
||||
import "io"
|
||||
|
||||
type TaskUpdater interface {
|
||||
Output() io.Writer
|
||||
}
|
37
src/sources/ytdlp/tasks.go
Normal file
|
@@ -0,0 +1,37 @@
|
|||
package ytdlp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/tasks"
|
||||
)
|
||||
|
||||
const executorName = "ytdlp"
|
||||
|
||||
type DownloadTask struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
var _ tasks.Task = (*DownloadTask)(nil)
|
||||
|
||||
// Executor implements tasks.Task.
|
||||
func (d *DownloadTask) Executor() string {
|
||||
return executorName
|
||||
}
|
||||
|
||||
var _ tasks.TaskExecutor = (*Daemon)(nil)
|
||||
|
||||
// ExecutorName implements tasks.TaskExecutor.
|
||||
func (c *Daemon) ExecutorName() string {
|
||||
return executorName
|
||||
}
|
||||
|
||||
func (c *Daemon) RunTask(ctx context.Context, upd tasks.Updater, task tasks.Task) error {
|
||||
switch t := task.(type) {
|
||||
case *DownloadTask:
|
||||
ctl, ok := c.controllers[t.Name]
|
||||
if !ok {
|
||||
return fmt.Errorf("no controller for task: %s", t.Name)
|
||||
}
|
||||
return ctl.Update(ctx, upd)
|
||||
default:
|
||||
return fmt.Errorf("unknown task type: %T", task)
|
||||
}
|
||||
}
|
|
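RunTask dispatches on the task's concrete type, while the Task.Executor()/TaskExecutor.ExecutorName() pair associates tasks with executors by name. A hypothetical dispatcher (not part of this commit) that wires the two together:

```go
package tasks

import (
	"context"
	"fmt"
)

// Dispatcher routes each Task to the TaskExecutor registered under the
// matching executor name.
type Dispatcher struct {
	executors map[string]TaskExecutor
}

func NewDispatcher(execs ...TaskExecutor) *Dispatcher {
	d := &Dispatcher{executors: map[string]TaskExecutor{}}
	for _, e := range execs {
		d.executors[e.ExecutorName()] = e
	}
	return d
}

func (d *Dispatcher) Run(ctx context.Context, upd Updater, t Task) error {
	e, ok := d.executors[t.Executor()]
	if !ok {
		return fmt.Errorf("no executor registered for %q", t.Executor())
	}
	return e.RunTask(ctx, upd, t)
}
```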
@@ -1,44 +1,29 @@
|
|||
package ytdlp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ytdlp"
|
||||
"github.com/royalcat/ctxprogress"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type ytdlpSource struct {
|
||||
type Source struct {
|
||||
Url string `json:"url"`
|
||||
}
|
||||
|
||||
var hasher = sha1.New()
|
||||
|
||||
func (s *ytdlpSource) Name() string {
|
||||
return base64.URLEncoding.EncodeToString(hasher.Sum([]byte(s.Url)))
|
||||
var prefixCutset = [...]string{
|
||||
"https://", "http://", "www.",
|
||||
}
|
||||
|
||||
func (s *ytdlpSource) Download(ctx context.Context, task TaskUpdater, dir string) error {
|
||||
client, err := ytdlp.New()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ctxprogress.New(ctx)
|
||||
ctxprogress.Set(ctx, ctxprogress.RangeProgress{Current: 0, Total: 2})
|
||||
plst, err := client.Playlist(ctx, s.Url)
|
||||
ctxprogress.Set(ctx, ctxprogress.RangeProgress{Current: 1, Total: 2})
|
||||
ctxprogress.Range(ctx, plst, func(ctx context.Context, _ int, e ytdlp.PlaylistEntry) bool {
|
||||
err = client.Download(ctx, e.Url(), dir)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
ctxprogress.Set(ctx, ctxprogress.RangeProgress{Current: 2, Total: 2})
|
||||
if err != nil {
|
||||
return err
|
||||
func urlHash(url string) string {
|
||||
for _, v := range prefixCutset {
|
||||
url = strings.TrimPrefix(url, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
sum := sha1.Sum([]byte(url)) // digest of the normalized URL
|
||||
return base64.URLEncoding.EncodeToString(sum[:])
|
||||
}
|
||||
|
||||
func (s *Source) Name() string {
|
||||
return urlHash(s.Url)
|
||||
}
|
||||
|
|
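To make the naming scheme concrete: after prefix trimming, different spellings of the same URL collapse to one name. A standalone sketch mirroring urlHash above (the URL is illustrative):

```go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"strings"
)

var prefixCutset = [...]string{"https://", "http://", "www."}

// same shape as Source.Name() above: trim scheme and www, then hash
func urlHash(url string) string {
	for _, v := range prefixCutset {
		url = strings.TrimPrefix(url, v)
	}
	sum := sha1.Sum([]byte(url))
	return base64.URLEncoding.EncodeToString(sum[:])
}

func main() {
	// both spellings normalize to "youtube.com/@SomeChannel" and share one name
	fmt.Println(urlHash("https://www.youtube.com/@SomeChannel"))
	fmt.Println(urlHash("www.youtube.com/@SomeChannel"))
}
```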
8
src/tasks/executor.go
Normal file
|
@@ -0,0 +1,8 @@
|
|||
package tasks
|
||||
|
||||
import "context"
|
||||
|
||||
type TaskExecutor interface {
|
||||
ExecutorName() string
|
||||
RunTask(ctx context.Context, upd Updater, task Task) error
|
||||
}
|
5
src/tasks/task.go
Normal file
|
@@ -0,0 +1,5 @@
|
|||
package tasks
|
||||
|
||||
type Task interface {
|
||||
Executor() string
|
||||
}
|
8
src/tasks/updater.go
Normal file
|
@@ -0,0 +1,8 @@
|
|||
package tasks
|
||||
|
||||
import "log/slog"
|
||||
|
||||
type Updater interface {
|
||||
Logger() *slog.Logger
|
||||
SetProgress(current, total int64)
|
||||
}
|
|
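Any type satisfying Updater can back task execution; a minimal slog-backed sketch (illustrative, not part of this commit):

```go
package tasks

import "log/slog"

// slogUpdater is a minimal Updater that just logs progress.
type slogUpdater struct {
	log *slog.Logger
}

func NewSlogUpdater(log *slog.Logger) Updater {
	return &slogUpdater{log: log}
}

func (u *slogUpdater) Logger() *slog.Logger { return u.log }

func (u *slogUpdater) SetProgress(current, total int64) {
	u.log.Info("progress", slog.Int64("current", current), slog.Int64("total", total))
}
```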
@@ -140,34 +140,34 @@ func Setup(ctx context.Context, endpoint string) (*Client, error) {
|
|||
// recreate telemetry logger
|
||||
client.log = rlog.Component("telemetry")
|
||||
|
||||
// runtime.SetMutexProfileFraction(5)
|
||||
// runtime.SetBlockProfileRate(5)
|
||||
// _, err = pyroscope.Start(pyroscope.Config{
|
||||
// ApplicationName: appName,
|
||||
// // replace this with the address of pyroscope server
|
||||
// ServerAddress: "https://pyroscope.kmsign.ru",
|
||||
// // you can disable logging by setting this to nil
|
||||
// Logger: &pyroscopeLogger{
|
||||
// log: client.log.WithComponent("pyroscope"),
|
||||
// },
|
||||
// ProfileTypes: []pyroscope.ProfileType{
|
||||
// // these profile types are enabled by default:
|
||||
// pyroscope.ProfileCPU,
|
||||
// pyroscope.ProfileAllocObjects,
|
||||
// pyroscope.ProfileAllocSpace,
|
||||
// pyroscope.ProfileInuseObjects,
|
||||
// pyroscope.ProfileInuseSpace,
|
||||
// // these profile types are optional:
|
||||
// // pyroscope.ProfileGoroutines,
|
||||
// // pyroscope.ProfileMutexCount,
|
||||
// // pyroscope.ProfileMutexDuration,
|
||||
// // pyroscope.ProfileBlockCount,
|
||||
// // pyroscope.ProfileBlockDuration,
|
||||
// },
|
||||
// })
|
||||
// if err != nil {
|
||||
// return client, nil
|
||||
// }
|
||||
runtime.SetMutexProfileFraction(5)
|
||||
runtime.SetBlockProfileRate(5)
|
||||
_, err = pyroscope.Start(pyroscope.Config{
|
||||
ApplicationName: appName,
|
||||
// replace this with the address of pyroscope server
|
||||
ServerAddress: "https://pyroscope.kmsign.ru",
|
||||
// you can disable logging by setting this to nil
|
||||
Logger: &pyroscopeLogger{
|
||||
log: client.log.WithComponent("pyroscope"),
|
||||
},
|
||||
ProfileTypes: []pyroscope.ProfileType{
|
||||
// these profile types are enabled by default:
|
||||
pyroscope.ProfileCPU,
|
||||
pyroscope.ProfileAllocObjects,
|
||||
pyroscope.ProfileAllocSpace,
|
||||
pyroscope.ProfileInuseObjects,
|
||||
pyroscope.ProfileInuseSpace,
|
||||
// these profile types are optional:
|
||||
// pyroscope.ProfileGoroutines,
|
||||
// pyroscope.ProfileMutexCount,
|
||||
// pyroscope.ProfileMutexDuration,
|
||||
// pyroscope.ProfileBlockCount,
|
||||
// pyroscope.ProfileBlockDuration,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return client, nil
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
|
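The two runtime knobs enabled above control sampling density: SetMutexProfileFraction(5) records roughly one in five mutex contention events, and SetBlockProfileRate(5) samples about one blocking event per 5 nanoseconds of blocked time (effectively all of them). They are standard library calls and work independently of pyroscope:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Sample ~1/5 of mutex contention events; 0 disables, 1 records everything.
	prev := runtime.SetMutexProfileFraction(5)
	fmt.Println("previous fraction:", prev)

	// Sample about one blocking event per 5ns of blocked time.
	runtime.SetBlockProfileRate(5)
}
```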
|
@@ -259,7 +259,7 @@ func (r *Resolver) NestedFs(ctx context.Context, fsPath string, file File) (File
|
|||
|
||||
// rawOpen performs the raw open, without going through the resolver
|
||||
func (r *Resolver) ResolvePath(ctx context.Context, name string, rawOpen openFile) (fsPath string, nestedFs Filesystem, nestedFsPath string, err error) {
|
||||
ctx, span := tracer.Start(ctx, "resolvePath")
|
||||
ctx, span := tracer.Start(ctx, "ResolvePath")
|
||||
defer span.End()
|
||||
|
||||
name = path.Clean(name)
|
||||
|
|
|
@@ -2,7 +2,9 @@ package vfs
|
|||
|
||||
import (
|
||||
"context"
|
||||
"io/fs"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
@@ -69,3 +71,71 @@ func subTimeout(ctx context.Context) (context.Context, context.CancelFunc) {
|
|||
|
||||
return ctx, func() {}
|
||||
}
|
||||
|
||||
func Walk(ctx context.Context, vfs Filesystem, root string, walkFn filepath.WalkFunc) error {
|
||||
info, err := vfs.Stat(ctx, root)
|
||||
if err != nil {
|
||||
err = walkFn(root, nil, err)
|
||||
} else {
|
||||
err = walk(ctx, vfs, root, info, walkFn)
|
||||
}
|
||||
|
||||
if err == filepath.SkipDir {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// walk recursively descends path, calling walkFn
|
||||
// adapted from https://golang.org/src/path/filepath/path.go
|
||||
func walk(ctx context.Context, vfs Filesystem, path string, info fs.FileInfo, walkFn filepath.WalkFunc) error {
|
||||
if !info.IsDir() {
|
||||
return walkFn(path, info, nil)
|
||||
}
|
||||
|
||||
names, err := readdirnames(ctx, vfs, path)
|
||||
err1 := walkFn(path, info, err)
|
||||
// If err != nil, walk can't walk into this directory.
|
||||
// err1 != nil means walkFn wants walk to skip this directory or stop walking.
|
||||
// Therefore, if one of err and err1 isn't nil, walk will return.
|
||||
if err != nil || err1 != nil {
|
||||
// The caller's behavior is controlled by the return value, which is decided
|
||||
// by walkFn. walkFn may ignore err and return nil.
|
||||
// If walkFn returns SkipDir, it will be handled by the caller.
|
||||
// So walk should return whatever walkFn returns.
|
||||
return err1
|
||||
}
|
||||
|
||||
for _, name := range names {
|
||||
filename := filepath.Join(path, name)
|
||||
fileInfo, err := vfs.Stat(ctx, filename)
|
||||
if err != nil {
|
||||
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = walk(ctx, vfs, filename, fileInfo, walkFn)
|
||||
if err != nil {
|
||||
if !fileInfo.IsDir() || err != filepath.SkipDir {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readdirnames(ctx context.Context, vfs Filesystem, dir string) ([]string, error) {
|
||||
files, err := vfs.ReadDir(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var names []string
|
||||
for _, file := range files {
|
||||
names = append(names, file.Name())
|
||||
}
|
||||
|
||||
return names, nil
|
||||
}
|
||||
|
|
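Walk mirrors filepath.Walk but threads a context through every Stat and ReadDir; given any vfs.Filesystem value, usage looks like this (a sketch; the surrounding package and fsys value are assumed):

```go
package vfsdemo

import (
	"context"
	"fmt"
	"io/fs"

	"git.kmsign.ru/royalcat/tstor/src/vfs"
)

// printTree lists every entry under root for any vfs.Filesystem value.
func printTree(ctx context.Context, fsys vfs.Filesystem, root string) error {
	return vfs.Walk(ctx, fsys, root, func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			return err // Stat failed for this entry
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
}
```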