update
This commit is contained in:
parent
7b1863109c
commit
ef751771d2
107 changed files with 9435 additions and 850 deletions
|
@ -19,7 +19,7 @@ linters:
|
|||
- asciicheck
|
||||
- bidichk
|
||||
- bodyclose
|
||||
# - containedctx
|
||||
- containedctx
|
||||
- durationcheck
|
||||
- errcheck
|
||||
- nakedret
|
||||
|
|
|
@ -3,7 +3,6 @@ package main
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"net"
|
||||
nethttp "net/http"
|
||||
|
@ -14,6 +13,8 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
wnfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/rlog"
|
||||
"git.kmsign.ru/royalcat/tstor/src/config"
|
||||
"git.kmsign.ru/royalcat/tstor/src/delivery"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host"
|
||||
|
@ -23,7 +24,6 @@ import (
|
|||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"git.kmsign.ru/royalcat/tstor/src/telemetry"
|
||||
"github.com/urfave/cli/v2"
|
||||
wnfs "github.com/willscott/go-nfs"
|
||||
|
||||
_ "git.kmsign.ru/royalcat/tstor/pkg/rlog"
|
||||
"git.kmsign.ru/royalcat/tstor/src/export/fuse"
|
||||
|
@ -79,7 +79,7 @@ func run(configPath string) error {
|
|||
defer client.Shutdown(ctx)
|
||||
}
|
||||
|
||||
log := slog.Default().With("component", "run")
|
||||
log := rlog.ComponentLog("run")
|
||||
|
||||
// TODO make optional
|
||||
err = syscall.Setpriority(syscall.PRIO_PGRP, 0, 19)
|
||||
|
@ -137,7 +137,7 @@ func run(configPath string) error {
|
|||
return fmt.Errorf("error creating data folder: %w", err)
|
||||
}
|
||||
sfs := host.NewTorrentStorage(conf.SourceDir, ts)
|
||||
sfs = vfs.WrapLogFS(sfs, slog.Default())
|
||||
sfs = vfs.WrapLogFS(sfs)
|
||||
|
||||
// TODO make separate function
|
||||
// {
|
||||
|
|
17
go.mod
17
go.mod
|
@ -14,6 +14,7 @@ require (
|
|||
github.com/bodgit/sevenzip v1.4.5
|
||||
github.com/dgraph-io/badger/v4 v4.2.0
|
||||
github.com/dgraph-io/ristretto v0.1.1
|
||||
github.com/dustin/go-humanize v1.0.0
|
||||
github.com/gin-contrib/pprof v1.4.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/go-git/go-billy/v5 v5.5.0
|
||||
|
@ -27,8 +28,9 @@ require (
|
|||
github.com/knadh/koanf/providers/structs v0.1.0
|
||||
github.com/knadh/koanf/v2 v2.0.1
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.2
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93
|
||||
github.com/ravilushqa/otelgqlgen v0.15.0
|
||||
github.com/royalcat/kv v0.0.0-20240318203654-181645f85b10
|
||||
github.com/royalcat/kv v0.0.0-20240327213417-8cf5696b2389
|
||||
github.com/rs/zerolog v1.32.0
|
||||
github.com/samber/slog-multi v1.0.2
|
||||
github.com/samber/slog-zerolog v1.0.0
|
||||
|
@ -36,7 +38,8 @@ require (
|
|||
github.com/stretchr/testify v1.8.4
|
||||
github.com/urfave/cli/v2 v2.27.0
|
||||
github.com/vektah/gqlparser/v2 v2.5.11
|
||||
github.com/willscott/go-nfs v0.0.2
|
||||
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00
|
||||
github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e
|
||||
go.opentelemetry.io/otel v1.24.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.46.0
|
||||
|
@ -46,6 +49,7 @@ require (
|
|||
go.uber.org/multierr v1.11.0
|
||||
golang.org/x/exp v0.0.0-20231226003508-02704c960a9b
|
||||
golang.org/x/net v0.19.0
|
||||
golang.org/x/sys v0.17.0
|
||||
)
|
||||
|
||||
require (
|
||||
|
@ -79,8 +83,8 @@ require (
|
|||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.1.0 // indirect
|
||||
github.com/fatih/structs v1.1.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
|
@ -141,11 +145,13 @@ require (
|
|||
github.com/pion/webrtc/v3 v3.1.42 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/polydawn/go-timeless-api v0.0.0-20220821201550-b93919e12c56 // indirect
|
||||
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
|
||||
github.com/polydawn/rio v0.0.0-20220823181337-7c31ad9831a4 // indirect
|
||||
github.com/prometheus/client_golang v1.18.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.0 // indirect
|
||||
github.com/prometheus/common v0.45.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
|
@ -156,7 +162,7 @@ require (
|
|||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/ulikunitz/xz v0.5.11 // indirect
|
||||
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 // indirect
|
||||
github.com/warpfork/go-errcat v0.0.0-20180917083543-335044ffc86e // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
go.etcd.io/bbolt v1.3.6 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
|
@ -169,7 +175,6 @@ require (
|
|||
golang.org/x/crypto v0.17.0 // indirect
|
||||
golang.org/x/mod v0.14.0 // indirect
|
||||
golang.org/x/sync v0.5.0 // indirect
|
||||
golang.org/x/sys v0.17.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
golang.org/x/tools v0.16.0 // indirect
|
||||
|
|
37
go.sum
37
go.sum
|
@ -159,6 +159,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
|
|||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -207,6 +209,7 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod
|
|||
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
|
||||
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
|
||||
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
|
@ -310,6 +313,7 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m
|
|||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
|
@ -342,6 +346,7 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
|
|||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
|
@ -405,6 +410,7 @@ github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE
|
|||
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
|
||||
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk=
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
|
@ -418,6 +424,8 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
|
|||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
|
||||
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
|
||||
|
@ -476,6 +484,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
|||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/polydawn/go-timeless-api v0.0.0-20201121022836-7399661094a6/go.mod h1:z2fMUifgtqrZiNLgzF4ZR8pX+YFLCmAp1jJTSTvyDMM=
|
||||
github.com/polydawn/go-timeless-api v0.0.0-20220821201550-b93919e12c56 h1:LQ103HjiN76aqIxnQNgdZ+7NveuKd45+Q+TYGJVVsyw=
|
||||
github.com/polydawn/go-timeless-api v0.0.0-20220821201550-b93919e12c56/go.mod h1:OAK6p/pJUakz6jQ+HlSw16gVMnuohxqJFGoypUYyr4w=
|
||||
github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
|
||||
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls=
|
||||
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
|
||||
github.com/polydawn/rio v0.0.0-20201122020833-6192319df581/go.mod h1:mwZtAu36D3fSNzVLN1we6PFdRU4VeE+RXLTZiOiQlJ0=
|
||||
github.com/polydawn/rio v0.0.0-20220823181337-7c31ad9831a4 h1:SNhgcsCNGEqz7Tp46YHEvcjF1s5x+ZGWcVzFoghkuMA=
|
||||
github.com/polydawn/rio v0.0.0-20220823181337-7c31ad9831a4/go.mod h1:fZ8OGW5CVjZHyQeNs8QH3X3tUxrPcx1jxHSl2z6Xv00=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
|
@ -514,8 +531,8 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE
|
|||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/royalcat/kv v0.0.0-20240318203654-181645f85b10 h1:8vwpCzvVqzNzkYRH9kA3GV5fkWs+8s0jdxtGvswL/MU=
|
||||
github.com/royalcat/kv v0.0.0-20240318203654-181645f85b10/go.mod h1:Ff0Z/r1H3ojacpEe8SashMKJx6YCIhWrYtpdV8Y/k3A=
|
||||
github.com/royalcat/kv v0.0.0-20240327213417-8cf5696b2389 h1:7XbHzr1TOaxs5Y/i9GtTEOOSTzfQ4ESYqF38DVfPkFY=
|
||||
github.com/royalcat/kv v0.0.0-20240327213417-8cf5696b2389/go.mod h1:Ff0Z/r1H3ojacpEe8SashMKJx6YCIhWrYtpdV8Y/k3A=
|
||||
github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs=
|
||||
github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
|
@ -541,8 +558,13 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
|
|||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
|
||||
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
|
||||
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
|
||||
github.com/sosodev/duration v1.2.0 h1:pqK/FLSjsAADWY74SyWDCjOcd5l7H8GSnnOGEB9A1Us=
|
||||
github.com/sosodev/duration v1.2.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
|
||||
|
@ -582,12 +604,16 @@ github.com/urfave/cli/v2 v2.27.0 h1:uNs1K8JwTFL84X68j5Fjny6hfANh9nTlJ6dRtZAFAHY=
|
|||
github.com/urfave/cli/v2 v2.27.0/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/vektah/gqlparser/v2 v2.5.11 h1:JJxLtXIoN7+3x6MBdtIP59TP1RANnY7pXOaDnADQSf8=
|
||||
github.com/vektah/gqlparser/v2 v2.5.11/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc=
|
||||
github.com/warpfork/go-errcat v0.0.0-20180917083543-335044ffc86e h1:FIB2fi7XJGHIdf5rWNsfFQqatIKxutT45G+wNuMQNgs=
|
||||
github.com/warpfork/go-errcat v0.0.0-20180917083543-335044ffc86e/go.mod h1:/qe02xr3jvTUz8u/PV0FHGpP8t96OQNP7U9BJMwMLEw=
|
||||
github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w=
|
||||
github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
||||
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/willscott/go-nfs v0.0.2 h1:BaBp1CpGDMooCT6bCgX6h6ZwgPcTMST4yToYZ9byee0=
|
||||
github.com/willscott/go-nfs v0.0.2/go.mod h1:SvullWeHxr/924WQNbUaZqtluBt2vuZ61g6yAV+xj7w=
|
||||
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 h1:U0DnHRZFzoIV1oFEZczg5XyPut9yxk9jjtax/9Bxr/o=
|
||||
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00/go.mod h1:Tq++Lr/FgiS3X48q5FETemXiSLGuYMQT2sPjYNPJSwA=
|
||||
github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e h1:1eHCP4w7tMmpfFBdrd5ff+vYU9THtrtA1yM9f0TLlJw=
|
||||
github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e/go.mod h1:59vHBW4EpjiL5oiqgCrBp1Tc9JXRzKCNMEOaGmNfSHo=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
|
@ -753,6 +779,7 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -807,6 +834,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
|||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
|
@ -899,6 +927,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
|
|||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
|
|
|
@ -1,20 +1,25 @@
|
|||
type Mutation {
|
||||
validateTorrents(filter: TorrentFilter!): Boolean!
|
||||
cleanupTorrents(files: Boolean, dryRun: Boolean!): Int!
|
||||
downloadTorrent(infohash: String!, file: String): DownloadTorrentResponse
|
||||
validateTorrents(filter: TorrentFilter!): Boolean!
|
||||
cleanupTorrents(files: Boolean, dryRun: Boolean!): CleanupResponse!
|
||||
downloadTorrent(infohash: String!, file: String): DownloadTorrentResponse
|
||||
dedupeStorage: Int!
|
||||
}
|
||||
|
||||
|
||||
input TorrentFilter @oneOf {
|
||||
everything: Boolean
|
||||
infohash: String
|
||||
# pathGlob: String!
|
||||
everything: Boolean
|
||||
infohash: String
|
||||
# pathGlob: String!
|
||||
}
|
||||
|
||||
type DownloadTorrentResponse {
|
||||
task: Task
|
||||
task: Task
|
||||
}
|
||||
|
||||
type CleanupResponse {
|
||||
count: Int!
|
||||
list: [String!]!
|
||||
}
|
||||
|
||||
type Task {
|
||||
id: ID!
|
||||
}
|
||||
id: ID!
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
type Query {
|
||||
torrents(filter: TorrentsFilter, pagination: Pagination): [Torrent!]!
|
||||
fsListDir(path: String!): [DirEntry!]!
|
||||
fsListDir(path: String!): ListDirResponse!
|
||||
}
|
||||
|
||||
input TorrentsFilter {
|
||||
|
@ -11,6 +11,11 @@ input TorrentsFilter {
|
|||
peersCount: IntFilter
|
||||
}
|
||||
|
||||
type ListDirResponse {
|
||||
root: DirEntry!
|
||||
entries: [DirEntry!]!
|
||||
}
|
||||
|
||||
input Pagination {
|
||||
offset: Int!
|
||||
limit: Int!
|
||||
|
|
27
pkg/ctxbilly/change.go
Normal file
27
pkg/ctxbilly/change.go
Normal file
|
@ -0,0 +1,27 @@
|
|||
package ctxbilly
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Change abstract the FileInfo change related operations in a storage-agnostic
|
||||
// interface as an extension to the Basic interface
|
||||
type Change interface {
|
||||
// Chmod changes the mode of the named file to mode. If the file is a
|
||||
// symbolic link, it changes the mode of the link's target.
|
||||
Chmod(ctx context.Context, name string, mode os.FileMode) error
|
||||
// Lchown changes the numeric uid and gid of the named file. If the file is
|
||||
// a symbolic link, it changes the uid and gid of the link itself.
|
||||
Lchown(ctx context.Context, name string, uid, gid int) error
|
||||
// Chown changes the numeric uid and gid of the named file. If the file is a
|
||||
// symbolic link, it changes the uid and gid of the link's target.
|
||||
Chown(ctx context.Context, name string, uid, gid int) error
|
||||
// Chtimes changes the access and modification times of the named file,
|
||||
// similar to the Unix utime() or utimes() functions.
|
||||
//
|
||||
// The underlying filesystem may truncate or round the values to a less
|
||||
// precise time unit.
|
||||
Chtimes(ctx context.Context, name string, atime time.Time, mtime time.Time) error
|
||||
}
|
92
pkg/ctxbilly/fs.go
Normal file
92
pkg/ctxbilly/fs.go
Normal file
|
@ -0,0 +1,92 @@
|
|||
package ctxbilly
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
|
||||
)
|
||||
|
||||
type Filesystem interface {
|
||||
// Create creates the named file with mode 0666 (before umask), truncating
|
||||
// it if it already exists. If successful, methods on the returned File can
|
||||
// be used for I/O; the associated file descriptor has mode O_RDWR.
|
||||
Create(ctx context.Context, filename string) (File, error)
|
||||
// Open opens the named file for reading. If successful, methods on the
|
||||
// returned file can be used for reading; the associated file descriptor has
|
||||
// mode O_RDONLY.
|
||||
Open(ctx context.Context, filename string) (File, error)
|
||||
// OpenFile is the generalized open call; most users will use Open or Create
|
||||
// instead. It opens the named file with specified flag (O_RDONLY etc.) and
|
||||
// perm, (0666 etc.) if applicable. If successful, methods on the returned
|
||||
// File can be used for I/O.
|
||||
OpenFile(ctx context.Context, filename string, flag int, perm os.FileMode) (File, error)
|
||||
// Stat returns a FileInfo describing the named file.
|
||||
Stat(ctx context.Context, filename string) (os.FileInfo, error)
|
||||
// Rename renames (moves) oldpath to newpath. If newpath already exists and
|
||||
// is not a directory, Rename replaces it. OS-specific restrictions may
|
||||
// apply when oldpath and newpath are in different directories.
|
||||
Rename(ctx context.Context, oldpath, newpath string) error
|
||||
// Remove removes the named file or directory.
|
||||
Remove(ctx context.Context, filename string) error
|
||||
// Join joins any number of path elements into a single path, adding a
|
||||
// Separator if necessary. Join calls filepath.Clean on the result; in
|
||||
// particular, all empty strings are ignored. On Windows, the result is a
|
||||
// UNC path if and only if the first path element is a UNC path.
|
||||
Join(elem ...string) string
|
||||
|
||||
// TempFile creates a new temporary file in the directory dir with a name
|
||||
// beginning with prefix, opens the file for reading and writing, and
|
||||
// returns the resulting *os.File. If dir is the empty string, TempFile
|
||||
// uses the default directory for temporary files (see os.TempDir).
|
||||
// Multiple programs calling TempFile simultaneously will not choose the
|
||||
// same file. The caller can use f.Name() to find the pathname of the file.
|
||||
// It is the caller's responsibility to remove the file when no longer
|
||||
// needed.
|
||||
TempFile(ctx context.Context, dir, prefix string) (File, error)
|
||||
|
||||
// ReadDir reads the directory named by d(irname and returns a list of
|
||||
// directory entries sorted by filename.
|
||||
ReadDir(ctx context.Context, path string) ([]os.FileInfo, error)
|
||||
// MkdirAll creates a directory named path, along with any necessary
|
||||
// parents, and returns nil, or else returns an error. The permission bits
|
||||
// perm are used for all directories that MkdirAll creates. If path is/
|
||||
// already a directory, MkdirAll does nothing and returns nil.
|
||||
MkdirAll(ctx context.Context, filename string, perm os.FileMode) error
|
||||
|
||||
// Lstat returns a FileInfo describing the named file. If the file is a
|
||||
// symbolic link, the returned FileInfo describes the symbolic link. Lstat
|
||||
// makes no attempt to follow the link.
|
||||
Lstat(ctx context.Context, filename string) (os.FileInfo, error)
|
||||
// Symlink creates a symbolic-link from link to target. target may be an
|
||||
// absolute or relative path, and need not refer to an existing node.
|
||||
// Parent directories of link are created as necessary.
|
||||
Symlink(ctx context.Context, target, link string) error
|
||||
// Readlink returns the target path of link.
|
||||
Readlink(ctx context.Context, link string) (string, error)
|
||||
|
||||
// // Chroot returns a new filesystem from the same type where the new root is
|
||||
// // the given path. Files outside of the designated directory tree cannot be
|
||||
// // accessed.
|
||||
// Chroot(path string) (Filesystem, error)
|
||||
// // Root returns the root path of the filesystem.
|
||||
// Root() string
|
||||
}
|
||||
|
||||
type File interface {
|
||||
// Name returns the name of the file as presented to Open.
|
||||
Name() string
|
||||
ctxio.Writer
|
||||
ctxio.Reader
|
||||
ctxio.ReaderAt
|
||||
io.Seeker
|
||||
ctxio.Closer
|
||||
// Lock locks the file like e.g. flock. It protects against access from
|
||||
// other processes.
|
||||
Lock() error
|
||||
// Unlock unlocks the file.
|
||||
Unlock() error
|
||||
// Truncate the file.
|
||||
Truncate(ctx context.Context, size int64) error
|
||||
}
|
166
pkg/ctxbilly/mem.go
Normal file
166
pkg/ctxbilly/mem.go
Normal file
|
@ -0,0 +1,166 @@
|
|||
package ctxbilly
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/fs"
|
||||
|
||||
"github.com/go-git/go-billy/v5"
|
||||
)
|
||||
|
||||
func WrapFileSystem(bf billy.Filesystem) Filesystem {
|
||||
return &wrapFS{
|
||||
Filesystem: bf,
|
||||
}
|
||||
}
|
||||
|
||||
type wrapFS struct {
|
||||
billy.Filesystem
|
||||
}
|
||||
|
||||
var _ Filesystem = (*wrapFS)(nil)
|
||||
|
||||
// Create implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).Create of MemFS.Filesystem.
|
||||
func (m *wrapFS) Create(ctx context.Context, filename string) (File, error) {
|
||||
bf, err := m.Filesystem.Create(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &wrapFile{bf}, nil
|
||||
}
|
||||
|
||||
// Lstat implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).Lstat of MemFS.Filesystem.
|
||||
func (m *wrapFS) Lstat(ctx context.Context, filename string) (fs.FileInfo, error) {
|
||||
return m.Filesystem.Lstat(filename)
|
||||
}
|
||||
|
||||
// MkdirAll implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).MkdirAll of MemFS.Filesystem.
|
||||
func (m *wrapFS) MkdirAll(ctx context.Context, filename string, perm fs.FileMode) error {
|
||||
return m.Filesystem.MkdirAll(filename, perm)
|
||||
}
|
||||
|
||||
// Open implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).Open of MemFS.Filesystem.
|
||||
func (m *wrapFS) Open(ctx context.Context, filename string) (File, error) {
|
||||
bf, err := m.Filesystem.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return WrapFile(bf), nil
|
||||
}
|
||||
|
||||
// OpenFile implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).OpenFile of MemFS.Filesystem.
|
||||
func (m *wrapFS) OpenFile(ctx context.Context, filename string, flag int, perm fs.FileMode) (File, error) {
|
||||
bf, err := m.Filesystem.OpenFile(filename, flag, perm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return WrapFile(bf), nil
|
||||
}
|
||||
|
||||
// ReadDir implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).ReadDir of MemFS.Filesystem.
|
||||
func (m *wrapFS) ReadDir(ctx context.Context, path string) ([]fs.FileInfo, error) {
|
||||
return m.Filesystem.ReadDir(path)
|
||||
}
|
||||
|
||||
// Readlink implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).Readlink of MemFS.Filesystem.
|
||||
func (m *wrapFS) Readlink(ctx context.Context, link string) (string, error) {
|
||||
return m.Filesystem.Readlink(link)
|
||||
}
|
||||
|
||||
// Remove implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).Remove of MemFS.Filesystem.
|
||||
func (m *wrapFS) Remove(ctx context.Context, filename string) error {
|
||||
return m.Filesystem.Remove(filename)
|
||||
}
|
||||
|
||||
// Rename implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).Rename of MemFS.Filesystem.
|
||||
func (m *wrapFS) Rename(ctx context.Context, oldpath string, newpath string) error {
|
||||
return m.Filesystem.Rename(oldpath, newpath)
|
||||
}
|
||||
|
||||
// Stat implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).Stat of MemFS.Filesystem.
|
||||
func (m *wrapFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
|
||||
return m.Filesystem.Stat(filename)
|
||||
}
|
||||
|
||||
// Symlink implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).Symlink of MemFS.Filesystem.
|
||||
func (m *wrapFS) Symlink(ctx context.Context, target string, link string) error {
|
||||
return m.Filesystem.Symlink(target, link)
|
||||
}
|
||||
|
||||
// TempFile implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).TempFile of MemFS.Filesystem.
|
||||
func (m *wrapFS) TempFile(ctx context.Context, dir string, prefix string) (File, error) {
|
||||
file, err := m.Filesystem.TempFile(dir, prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return WrapFile(file), nil
|
||||
}
|
||||
|
||||
func WrapFile(bf billy.File) File {
|
||||
return &wrapFile{File: bf}
|
||||
}
|
||||
|
||||
type wrapFile struct {
|
||||
billy.File
|
||||
}
|
||||
|
||||
var _ File = (*wrapFile)(nil)
|
||||
|
||||
// Close implements File.
|
||||
// Subtle: this method shadows the method (File).Close of MemFile.File.
|
||||
func (m *wrapFile) Close(ctx context.Context) error {
|
||||
return m.File.Close()
|
||||
}
|
||||
|
||||
// Lock implements File.
|
||||
// Subtle: this method shadows the method (File).Lock of MemFile.File.
|
||||
func (m *wrapFile) Lock() error {
|
||||
return m.File.Lock()
|
||||
}
|
||||
|
||||
// Name implements File.
|
||||
// Subtle: this method shadows the method (File).Name of MemFile.File.
|
||||
func (m *wrapFile) Name() string {
|
||||
return m.File.Name()
|
||||
}
|
||||
|
||||
// Truncate implements File.
|
||||
// Subtle: this method shadows the method (File).Truncate of memFile.File.
|
||||
func (m *wrapFile) Truncate(ctx context.Context, size int64) error {
|
||||
return m.File.Truncate(size)
|
||||
}
|
||||
|
||||
// Read implements File.
|
||||
// Subtle: this method shadows the method (File).Read of MemFile.File.
|
||||
func (m *wrapFile) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
return m.File.Read(p)
|
||||
}
|
||||
|
||||
// ReadAt implements File.
|
||||
// Subtle: this method shadows the method (File).ReadAt of MemFile.File.
|
||||
func (m *wrapFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
return m.File.ReadAt(p, off)
|
||||
}
|
||||
|
||||
// Unlock implements File.
|
||||
// Subtle: this method shadows the method (File).Unlock of MemFile.File.
|
||||
func (m *wrapFile) Unlock() error {
|
||||
return m.File.Unlock()
|
||||
}
|
||||
|
||||
// Write implements File.
|
||||
// Subtle: this method shadows the method (File).Write of MemFile.File.
|
||||
func (m *wrapFile) Write(ctx context.Context, p []byte) (n int, err error) {
|
||||
return m.File.Write(p)
|
||||
}
|
63
pkg/ctxio/cachereader.go
Normal file
63
pkg/ctxio/cachereader.go
Normal file
|
@ -0,0 +1,63 @@
|
|||
package ctxio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type CacheReader struct {
|
||||
m sync.Mutex
|
||||
|
||||
fo int64
|
||||
fr *FileBuffer
|
||||
to int64
|
||||
tr Reader
|
||||
}
|
||||
|
||||
var _ FileReader = (*CacheReader)(nil)
|
||||
|
||||
func NewCacheReader(r Reader) (FileReader, error) {
|
||||
fr := NewFileBuffer(nil)
|
||||
tr := TeeReader(r, fr)
|
||||
return &CacheReader{fr: fr, tr: tr}, nil
|
||||
}
|
||||
|
||||
func (dtr *CacheReader) ReadAt(ctx context.Context, p []byte, off int64) (int, error) {
|
||||
dtr.m.Lock()
|
||||
defer dtr.m.Unlock()
|
||||
tb := off + int64(len(p))
|
||||
|
||||
if tb > dtr.fo {
|
||||
w, err := CopyN(ctx, Discard, dtr.tr, tb-dtr.fo)
|
||||
dtr.to += w
|
||||
if err != nil && err != io.EOF {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
n, err := dtr.fr.ReadAt(ctx, p, off)
|
||||
dtr.fo += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (dtr *CacheReader) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
dtr.m.Lock()
|
||||
defer dtr.m.Unlock()
|
||||
// use directly tee reader here
|
||||
n, err = dtr.tr.Read(ctx, p)
|
||||
dtr.to += int64(n)
|
||||
return
|
||||
}
|
||||
|
||||
func (dtr *CacheReader) Close(ctx context.Context) error {
|
||||
frcloser := dtr.fr.Close(ctx)
|
||||
|
||||
var closeerr error
|
||||
if rc, ok := dtr.tr.(ReadCloser); ok {
|
||||
closeerr = rc.Close(ctx)
|
||||
}
|
||||
|
||||
return errors.Join(frcloser, closeerr)
|
||||
}
|
89
pkg/ctxio/copy.go
Normal file
89
pkg/ctxio/copy.go
Normal file
|
@ -0,0 +1,89 @@
|
|||
package ctxio
|
||||
|
||||
// // CopyN copies n bytes (or until an error) from src to dst.
|
||||
// // It returns the number of bytes copied and the earliest
|
||||
// // error encountered while copying.
|
||||
// // On return, written == n if and only if err == nil.
|
||||
// //
|
||||
// // If dst implements [ReaderFrom], the copy is implemented using it.
|
||||
// func CopyN(ctx context.Context, dst Writer, src Reader, n int64) (written int64, err error) {
|
||||
// written, err = Copy(ctx, dst, LimitReader(src, n))
|
||||
// if written == n {
|
||||
// return n, nil
|
||||
// }
|
||||
// if written < n && err == nil {
|
||||
// // src stopped early; must have been EOF.
|
||||
// err = io.EOF
|
||||
// }
|
||||
|
||||
// return
|
||||
// }
|
||||
|
||||
// // Copy copies from src to dst until either EOF is reached
|
||||
// // on src or an error occurs. It returns the number of bytes
|
||||
// // copied and the first error encountered while copying, if any.
|
||||
// //
|
||||
// // A successful Copy returns err == nil, not err == EOF.
|
||||
// // Because Copy is defined to read from src until EOF, it does
|
||||
// // not treat an EOF from Read as an error to be reported.
|
||||
// //
|
||||
// // If src implements [WriterTo],
|
||||
// // the copy is implemented by calling src.WriteTo(dst).
|
||||
// // Otherwise, if dst implements [ReaderFrom],
|
||||
// // the copy is implemented by calling dst.ReadFrom(src).
|
||||
// func Copy(ctx context.Context, dst Writer, src Reader) (written int64, err error) {
|
||||
// return copyBuffer(ctx, dst, src, nil)
|
||||
// }
|
||||
|
||||
// // copyBuffer is the actual implementation of Copy and CopyBuffer.
|
||||
// // if buf is nil, one is allocated.
|
||||
// func copyBuffer(ctx context.Context, dst Writer, src Reader, buf []byte) (written int64, err error) {
|
||||
// // If the reader has a WriteTo method, use it to do the copy.
|
||||
// // Avoids an allocation and a copy.
|
||||
// if wt, ok := src.(WriterTo); ok {
|
||||
// return wt.WriteTo(dst)
|
||||
// }
|
||||
// // Similarly, if the writer has a ReadFrom method, use it to do the copy.
|
||||
// if rt, ok := dst.(ReaderFrom); ok {
|
||||
// return rt.ReadFrom(src)
|
||||
// }
|
||||
// if buf == nil {
|
||||
// size := 32 * 1024
|
||||
// if l, ok := src.(*LimitedReader); ok && int64(size) > l.N {
|
||||
// if l.N < 1 {
|
||||
// size = 1
|
||||
// } else {
|
||||
// size = int(l.N)
|
||||
// }
|
||||
// }
|
||||
// buf = make([]byte, size)
|
||||
// }
|
||||
// for {
|
||||
// nr, er := src.Read(ctx, buf)
|
||||
// if nr > 0 {
|
||||
// nw, ew := dst.Write(ctx, buf[0:nr])
|
||||
// if nw < 0 || nr < nw {
|
||||
// nw = 0
|
||||
// if ew == nil {
|
||||
// ew = errInvalidWrite
|
||||
// }
|
||||
// }
|
||||
// written += int64(nw)
|
||||
// if ew != nil {
|
||||
// err = ew
|
||||
// break
|
||||
// }
|
||||
// if nr != nw {
|
||||
// err = io.ErrShortWrite
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// if er != nil {
|
||||
// if er != io.EOF {
|
||||
// err = er
|
||||
// }
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// return written, err
|
||||
// }
|
180
pkg/ctxio/filebuffer.go
Normal file
180
pkg/ctxio/filebuffer.go
Normal file
|
@ -0,0 +1,180 @@
|
|||
package ctxio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// FileBuffer implements interfaces implemented by files.
|
||||
// The main purpose of this type is to have an in memory replacement for a
|
||||
// file.
|
||||
type FileBuffer struct {
|
||||
// buff is the backing buffer
|
||||
buff *bytes.Buffer
|
||||
// index indicates where in the buffer we are at
|
||||
index int64
|
||||
isClosed bool
|
||||
}
|
||||
|
||||
var _ FileReader = (*FileBuffer)(nil)
|
||||
var _ Writer = (*FileBuffer)(nil)
|
||||
|
||||
// NewFileBuffer returns a new populated Buffer
|
||||
func NewFileBuffer(b []byte) *FileBuffer {
|
||||
return &FileBuffer{buff: bytes.NewBuffer(b)}
|
||||
}
|
||||
|
||||
// NewFileBufferFromReader is a convenience method that returns a new populated Buffer
|
||||
// whose contents are sourced from a supplied reader by loading it entirely
|
||||
// into memory.
|
||||
func NewFileBufferFromReader(ctx context.Context, reader Reader) (*FileBuffer, error) {
|
||||
data, err := ReadAll(ctx, reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewFileBuffer(data), nil
|
||||
}
|
||||
|
||||
// NewFileBufferFromReader is a convenience method that returns a new populated Buffer
|
||||
// whose contents are sourced from a supplied reader by loading it entirely
|
||||
// into memory.
|
||||
func NewFileBufferFromIoReader(reader io.Reader) (*FileBuffer, error) {
|
||||
data, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewFileBuffer(data), nil
|
||||
}
|
||||
|
||||
// Bytes returns the bytes available until the end of the buffer.
|
||||
func (f *FileBuffer) Bytes() []byte {
|
||||
if f.isClosed || f.index >= int64(f.buff.Len()) {
|
||||
return []byte{}
|
||||
}
|
||||
return f.buff.Bytes()[f.index:]
|
||||
}
|
||||
|
||||
// String implements the Stringer interface
|
||||
func (f *FileBuffer) String() string {
|
||||
return string(f.buff.Bytes()[f.index:])
|
||||
}
|
||||
|
||||
// Read implements io.Reader https://golang.org/pkg/io/#Reader
|
||||
// Read reads up to len(p) bytes into p. It returns the number of bytes read (0 <= n <= len(p))
|
||||
// and any error encountered. Even if Read returns n < len(p), it may use all of p as scratch
|
||||
// space during the call. If some data is available but not len(p) bytes, Read conventionally
|
||||
// returns what is available instead of waiting for more.
|
||||
|
||||
// When Read encounters an error or end-of-file condition after successfully reading n > 0 bytes,
|
||||
// it returns the number of bytes read. It may return the (non-nil) error from the same call or
|
||||
// return the error (and n == 0) from a subsequent call. An instance of this general case is
|
||||
// that a Reader returning a non-zero number of bytes at the end of the input stream may return
|
||||
// either err == EOF or err == nil. The next Read should return 0, EOF.
|
||||
func (f *FileBuffer) Read(ctx context.Context, b []byte) (n int, err error) {
|
||||
if f.isClosed {
|
||||
return 0, os.ErrClosed
|
||||
}
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if f.index >= int64(f.buff.Len()) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n, err = bytes.NewBuffer(f.buff.Bytes()[f.index:]).Read(b)
|
||||
f.index += int64(n)
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// ReadAt implements io.ReaderAt https://golang.org/pkg/io/#ReaderAt
|
||||
// ReadAt reads len(p) bytes into p starting at offset off in the underlying input source.
|
||||
// It returns the number of bytes read (0 <= n <= len(p)) and any error encountered.
|
||||
//
|
||||
// When ReadAt returns n < len(p), it returns a non-nil error explaining why more bytes were not returned.
|
||||
// In this respect, ReadAt is stricter than Read.
|
||||
//
|
||||
// Even if ReadAt returns n < len(p), it may use all of p as scratch space during the call.
|
||||
// If some data is available but not len(p) bytes, ReadAt blocks until either all the data is available or an error occurs.
|
||||
// In this respect ReadAt is different from Read.
|
||||
//
|
||||
// If the n = len(p) bytes returned by ReadAt are at the end of the input source,
|
||||
// ReadAt may return either err == EOF or err == nil.
|
||||
//
|
||||
// If ReadAt is reading from an input source with a seek offset,
|
||||
// ReadAt should not affect nor be affected by the underlying seek offset.
|
||||
// Clients of ReadAt can execute parallel ReadAt calls on the same input source.
|
||||
func (f *FileBuffer) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
if f.isClosed {
|
||||
return 0, os.ErrClosed
|
||||
}
|
||||
if off < 0 {
|
||||
return 0, errors.New("filebuffer.ReadAt: negative offset")
|
||||
}
|
||||
reqLen := len(p)
|
||||
buffLen := int64(f.buff.Len())
|
||||
if off >= buffLen {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
n = copy(p, f.buff.Bytes()[off:])
|
||||
if n < reqLen {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Write implements io.Writer https://golang.org/pkg/io/#Writer
|
||||
// by appending the passed bytes to the buffer unless the buffer is closed or index negative.
|
||||
func (f *FileBuffer) Write(ctx context.Context, p []byte) (n int, err error) {
|
||||
if f.isClosed {
|
||||
return 0, os.ErrClosed
|
||||
}
|
||||
if f.index < 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
// we might have rewinded, let's reset the buffer before appending to it
|
||||
idx := int(f.index)
|
||||
buffLen := f.buff.Len()
|
||||
if idx != buffLen && idx <= buffLen {
|
||||
f.buff = bytes.NewBuffer(f.Bytes()[:f.index])
|
||||
}
|
||||
n, err = f.buff.Write(p)
|
||||
|
||||
f.index += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Seek implements io.Seeker https://golang.org/pkg/io/#Seeker
|
||||
func (f *FileBuffer) Seek(offset int64, whence int) (idx int64, err error) {
|
||||
if f.isClosed {
|
||||
return 0, os.ErrClosed
|
||||
}
|
||||
|
||||
var abs int64
|
||||
switch whence {
|
||||
case 0:
|
||||
abs = offset
|
||||
case 1:
|
||||
abs = int64(f.index) + offset
|
||||
case 2:
|
||||
abs = int64(f.buff.Len()) + offset
|
||||
default:
|
||||
return 0, errors.New("filebuffer.Seek: invalid whence")
|
||||
}
|
||||
if abs < 0 {
|
||||
return 0, errors.New("filebuffer.Seek: negative position")
|
||||
}
|
||||
f.index = abs
|
||||
return abs, nil
|
||||
}
|
||||
|
||||
// Close implements io.Closer https://golang.org/pkg/io/#Closer
|
||||
// It closes the buffer, rendering it unusable for I/O. It returns an error, if any.
|
||||
func (f *FileBuffer) Close(ctx context.Context) error {
|
||||
f.isClosed = true
|
||||
f.buff = nil
|
||||
return nil
|
||||
}
|
663
pkg/ctxio/io.go
Normal file
663
pkg/ctxio/io.go
Normal file
|
@ -0,0 +1,663 @@
|
|||
package ctxio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Seek whence values.
|
||||
const (
|
||||
SeekStart = 0 // seek relative to the origin of the file
|
||||
SeekCurrent = 1 // seek relative to the current offset
|
||||
SeekEnd = 2 // seek relative to the end
|
||||
)
|
||||
|
||||
// ErrShortWrite means that a write accepted fewer bytes than requested
|
||||
// but failed to return an explicit error.
|
||||
var ErrShortWrite = io.ErrShortWrite
|
||||
|
||||
// errInvalidWrite means that a write returned an impossible count.
|
||||
var errInvalidWrite = errors.New("invalid write result")
|
||||
|
||||
// ErrShortBuffer means that a read required a longer buffer than was provided.
|
||||
var ErrShortBuffer = io.ErrShortBuffer
|
||||
|
||||
// EOF is the error returned by Read when no more input is available.
|
||||
// (Read must return EOF itself, not an error wrapping EOF,
|
||||
// because callers will test for EOF using ==.)
|
||||
// Functions should return EOF only to signal a graceful end of input.
|
||||
// If the EOF occurs unexpectedly in a structured data stream,
|
||||
// the appropriate error is either [ErrUnexpectedEOF] or some other error
|
||||
// giving more detail.
|
||||
var EOF = io.EOF
|
||||
|
||||
// ErrUnexpectedEOF means that EOF was encountered in the
|
||||
// middle of reading a fixed-size block or data structure.
|
||||
var ErrUnexpectedEOF = io.ErrUnexpectedEOF
|
||||
|
||||
// ErrNoProgress is returned by some clients of a [Reader] when
|
||||
// many calls to Read have failed to return any data or error,
|
||||
// usually the sign of a broken [Reader] implementation.
|
||||
var ErrNoProgress = io.ErrNoProgress
|
||||
|
||||
// Reader is the interface that wraps the basic Read method.
|
||||
//
|
||||
// Read reads up to len(p) bytes into p. It returns the number of bytes
|
||||
// read (0 <= n <= len(p)) and any error encountered. Even if Read
|
||||
// returns n < len(p), it may use all of p as scratch space during the call.
|
||||
// If some data is available but not len(p) bytes, Read conventionally
|
||||
// returns what is available instead of waiting for more.
|
||||
//
|
||||
// When Read encounters an error or end-of-file condition after
|
||||
// successfully reading n > 0 bytes, it returns the number of
|
||||
// bytes read. It may return the (non-nil) error from the same call
|
||||
// or return the error (and n == 0) from a subsequent call.
|
||||
// An instance of this general case is that a Reader returning
|
||||
// a non-zero number of bytes at the end of the input stream may
|
||||
// return either err == EOF or err == nil. The next Read should
|
||||
// return 0, EOF.
|
||||
//
|
||||
// Callers should always process the n > 0 bytes returned before
|
||||
// considering the error err. Doing so correctly handles I/O errors
|
||||
// that happen after reading some bytes and also both of the
|
||||
// allowed EOF behaviors.
|
||||
//
|
||||
// If len(p) == 0, Read should always return n == 0. It may return a
|
||||
// non-nil error if some error condition is known, such as EOF.
|
||||
//
|
||||
// Implementations of Read are discouraged from returning a
|
||||
// zero byte count with a nil error, except when len(p) == 0.
|
||||
// Callers should treat a return of 0 and nil as indicating that
|
||||
// nothing happened; in particular it does not indicate EOF.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
type Reader interface {
|
||||
Read(ctx context.Context, p []byte) (n int, err error)
|
||||
}
|
||||
|
||||
// Writer is the interface that wraps the basic Write method.
|
||||
//
|
||||
// Write writes len(p) bytes from p to the underlying data stream.
|
||||
// It returns the number of bytes written from p (0 <= n <= len(p))
|
||||
// and any error encountered that caused the write to stop early.
|
||||
// Write must return a non-nil error if it returns n < len(p).
|
||||
// Write must not modify the slice data, even temporarily.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
type Writer interface {
|
||||
Write(ctx context.Context, p []byte) (n int, err error)
|
||||
}
|
||||
|
||||
// Closer is the interface that wraps the basic Close method.
|
||||
//
|
||||
// The behavior of Close after the first call is undefined.
|
||||
// Specific implementations may document their own behavior.
|
||||
type Closer interface {
|
||||
Close(ctx context.Context) error
|
||||
}
|
||||
|
||||
// Seeker is the interface that wraps the basic Seek method.
|
||||
//
|
||||
// Seek sets the offset for the next Read or Write to offset,
|
||||
// interpreted according to whence:
|
||||
// [SeekStart] means relative to the start of the file,
|
||||
// [SeekCurrent] means relative to the current offset, and
|
||||
// [SeekEnd] means relative to the end
|
||||
// (for example, offset = -2 specifies the penultimate byte of the file).
|
||||
// Seek returns the new offset relative to the start of the
|
||||
// file or an error, if any.
|
||||
//
|
||||
// Seeking to an offset before the start of the file is an error.
|
||||
// Seeking to any positive offset may be allowed, but if the new offset exceeds
|
||||
// the size of the underlying object the behavior of subsequent I/O operations
|
||||
// is implementation-dependent.
|
||||
type Seeker interface {
|
||||
Seek(offset int64, whence int) (int64, error)
|
||||
}
|
||||
|
||||
// ReadWriter is the interface that groups the basic Read and Write methods.
|
||||
type ReadWriter interface {
|
||||
Reader
|
||||
Writer
|
||||
}
|
||||
|
||||
// ReadCloser is the interface that groups the basic Read and Close methods.
|
||||
type ReadCloser interface {
|
||||
Reader
|
||||
Closer
|
||||
}
|
||||
|
||||
// WriteCloser is the interface that groups the basic Write and Close methods.
|
||||
type WriteCloser interface {
|
||||
Writer
|
||||
Closer
|
||||
}
|
||||
|
||||
// ReadWriteCloser is the interface that groups the basic Read, Write and Close methods.
|
||||
type ReadWriteCloser interface {
|
||||
Reader
|
||||
Writer
|
||||
Closer
|
||||
}
|
||||
|
||||
// ReadSeeker is the interface that groups the basic Read and Seek methods.
|
||||
type ReadSeeker interface {
|
||||
Reader
|
||||
Seeker
|
||||
}
|
||||
|
||||
// ReadSeekCloser is the interface that groups the basic Read, Seek and Close
|
||||
// methods.
|
||||
type ReadSeekCloser interface {
|
||||
Reader
|
||||
Seeker
|
||||
Closer
|
||||
}
|
||||
|
||||
// WriteSeeker is the interface that groups the basic Write and Seek methods.
|
||||
type WriteSeeker interface {
|
||||
Writer
|
||||
Seeker
|
||||
}
|
||||
|
||||
// ReadWriteSeeker is the interface that groups the basic Read, Write and Seek methods.
|
||||
type ReadWriteSeeker interface {
|
||||
Reader
|
||||
Writer
|
||||
Seeker
|
||||
}
|
||||
|
||||
// ReaderFrom is the interface that wraps the ReadFrom method.
|
||||
//
|
||||
// ReadFrom reads data from r until EOF or error.
|
||||
// The return value n is the number of bytes read.
|
||||
// Any error except EOF encountered during the read is also returned.
|
||||
//
|
||||
// The [Copy] function uses [ReaderFrom] if available.
|
||||
type ReaderFrom interface {
|
||||
ReadFrom(ctx context.Context, r Reader) (n int64, err error)
|
||||
}
|
||||
|
||||
// WriterTo is the interface that wraps the WriteTo method.
|
||||
//
|
||||
// WriteTo writes data to w until there's no more data to write or
|
||||
// when an error occurs. The return value n is the number of bytes
|
||||
// written. Any error encountered during the write is also returned.
|
||||
//
|
||||
// The Copy function uses WriterTo if available.
|
||||
type WriterTo interface {
|
||||
WriteTo(ctx context.Context, w Writer) (n int64, err error)
|
||||
}
|
||||
|
||||
// ReaderAt is the interface that wraps the basic ReadAt method.
|
||||
//
|
||||
// ReadAt reads len(p) bytes into p starting at offset off in the
|
||||
// underlying input source. It returns the number of bytes
|
||||
// read (0 <= n <= len(p)) and any error encountered.
|
||||
//
|
||||
// When ReadAt returns n < len(p), it returns a non-nil error
|
||||
// explaining why more bytes were not returned. In this respect,
|
||||
// ReadAt is stricter than Read.
|
||||
//
|
||||
// Even if ReadAt returns n < len(p), it may use all of p as scratch
|
||||
// space during the call. If some data is available but not len(p) bytes,
|
||||
// ReadAt blocks until either all the data is available or an error occurs.
|
||||
// In this respect ReadAt is different from Read.
|
||||
//
|
||||
// If the n = len(p) bytes returned by ReadAt are at the end of the
|
||||
// input source, ReadAt may return either err == EOF or err == nil.
|
||||
//
|
||||
// If ReadAt is reading from an input source with a seek offset,
|
||||
// ReadAt should not affect nor be affected by the underlying
|
||||
// seek offset.
|
||||
//
|
||||
// Clients of ReadAt can execute parallel ReadAt calls on the
|
||||
// same input source.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
type ReaderAt interface {
|
||||
ReadAt(ctx context.Context, p []byte, off int64) (n int, err error)
|
||||
}
|
||||
|
||||
// WriterAt is the interface that wraps the basic WriteAt method.
|
||||
//
|
||||
// WriteAt writes len(p) bytes from p to the underlying data stream
|
||||
// at offset off. It returns the number of bytes written from p (0 <= n <= len(p))
|
||||
// and any error encountered that caused the write to stop early.
|
||||
// WriteAt must return a non-nil error if it returns n < len(p).
|
||||
//
|
||||
// If WriteAt is writing to a destination with a seek offset,
|
||||
// WriteAt should not affect nor be affected by the underlying
|
||||
// seek offset.
|
||||
//
|
||||
// Clients of WriteAt can execute parallel WriteAt calls on the same
|
||||
// destination if the ranges do not overlap.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
type WriterAt interface {
|
||||
WriteAt(ctx context.Context, p []byte, off int64) (n int, err error)
|
||||
}
|
||||
|
||||
// StringWriter is the interface that wraps the WriteString method.
|
||||
type StringWriter interface {
|
||||
WriteString(s string) (n int, err error)
|
||||
}
|
||||
|
||||
// WriteString writes the contents of the string s to w, which accepts a slice of bytes.
|
||||
// If w implements [StringWriter], [StringWriter.WriteString] is invoked directly.
|
||||
// Otherwise, [Writer.Write] is called exactly once.
|
||||
func WriteString(ctx context.Context, w Writer, s string) (n int, err error) {
|
||||
if sw, ok := w.(StringWriter); ok {
|
||||
return sw.WriteString(s)
|
||||
}
|
||||
return w.Write(ctx, []byte(s))
|
||||
}
|
||||
|
||||
// ReadAtLeast reads from r into buf until it has read at least min bytes.
|
||||
// It returns the number of bytes copied and an error if fewer bytes were read.
|
||||
// The error is EOF only if no bytes were read.
|
||||
// If an EOF happens after reading fewer than min bytes,
|
||||
// ReadAtLeast returns [ErrUnexpectedEOF].
|
||||
// If min is greater than the length of buf, ReadAtLeast returns [ErrShortBuffer].
|
||||
// On return, n >= min if and only if err == nil.
|
||||
// If r returns an error having read at least min bytes, the error is dropped.
|
||||
func ReadAtLeast(ctx context.Context, r Reader, buf []byte, min int) (n int, err error) {
|
||||
if len(buf) < min {
|
||||
return 0, ErrShortBuffer
|
||||
}
|
||||
for n < min && err == nil {
|
||||
var nn int
|
||||
nn, err = r.Read(ctx, buf[n:])
|
||||
n += nn
|
||||
}
|
||||
if n >= min {
|
||||
err = nil
|
||||
} else if n > 0 && err == EOF {
|
||||
err = ErrUnexpectedEOF
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ReadFull reads exactly len(buf) bytes from r into buf.
|
||||
// It returns the number of bytes copied and an error if fewer bytes were read.
|
||||
// The error is EOF only if no bytes were read.
|
||||
// If an EOF happens after reading some but not all the bytes,
|
||||
// ReadFull returns [ErrUnexpectedEOF].
|
||||
// On return, n == len(buf) if and only if err == nil.
|
||||
// If r returns an error having read at least len(buf) bytes, the error is dropped.
|
||||
func ReadFull(ctx context.Context, r Reader, buf []byte) (n int, err error) {
|
||||
return ReadAtLeast(ctx, r, buf, len(buf))
|
||||
}
|
||||
|
||||
// CopyN copies n bytes (or until an error) from src to dst.
|
||||
// It returns the number of bytes copied and the earliest
|
||||
// error encountered while copying.
|
||||
// On return, written == n if and only if err == nil.
|
||||
//
|
||||
// If dst implements [ReaderFrom], the copy is implemented using it.
|
||||
func CopyN(ctx context.Context, dst Writer, src Reader, n int64) (written int64, err error) {
|
||||
written, err = Copy(ctx, dst, LimitReader(src, n))
|
||||
if written == n {
|
||||
return n, nil
|
||||
}
|
||||
if written < n && err == nil {
|
||||
// src stopped early; must have been EOF.
|
||||
err = EOF
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Copy copies from src to dst until either EOF is reached
|
||||
// on src or an error occurs. It returns the number of bytes
|
||||
// copied and the first error encountered while copying, if any.
|
||||
//
|
||||
// A successful Copy returns err == nil, not err == EOF.
|
||||
// Because Copy is defined to read from src until EOF, it does
|
||||
// not treat an EOF from Read as an error to be reported.
|
||||
//
|
||||
// If src implements [WriterTo],
|
||||
// the copy is implemented by calling src.WriteTo(dst).
|
||||
// Otherwise, if dst implements [ReaderFrom],
|
||||
// the copy is implemented by calling dst.ReadFrom(src).
|
||||
func Copy(ctx context.Context, dst Writer, src Reader) (written int64, err error) {
|
||||
return copyBuffer(ctx, dst, src, nil)
|
||||
}
|
||||
|
||||
// CopyBuffer is identical to Copy except that it stages through the
|
||||
// provided buffer (if one is required) rather than allocating a
|
||||
// temporary one. If buf is nil, one is allocated; otherwise if it has
|
||||
// zero length, CopyBuffer panics.
|
||||
//
|
||||
// If either src implements [WriterTo] or dst implements [ReaderFrom],
|
||||
// buf will not be used to perform the copy.
|
||||
func CopyBuffer(ctx context.Context, dst Writer, src Reader, buf []byte) (written int64, err error) {
|
||||
if buf != nil && len(buf) == 0 {
|
||||
panic("empty buffer in CopyBuffer")
|
||||
}
|
||||
return copyBuffer(ctx, dst, src, buf)
|
||||
}
|
||||
|
||||
// copyBuffer is the actual implementation of Copy and CopyBuffer.
// if buf is nil, one is allocated.
// This is a context-aware port of the standard library's io.copyBuffer;
// the transfer loop and its error-precedence rules are kept identical.
func copyBuffer(ctx context.Context, dst Writer, src Reader, buf []byte) (written int64, err error) {
	// If the reader has a WriteTo method, use it to do the copy.
	// Avoids an allocation and a copy.
	if wt, ok := src.(WriterTo); ok {
		return wt.WriteTo(ctx, dst)
	}
	// Similarly, if the writer has a ReadFrom method, use it to do the copy.
	if rt, ok := dst.(ReaderFrom); ok {
		return rt.ReadFrom(ctx, src)
	}
	if buf == nil {
		// Default staging buffer; shrink it when src is a LimitedReader
		// with a smaller remaining budget (minimum 1 byte).
		size := 32 * 1024
		if l, ok := src.(*LimitedReader); ok && int64(size) > l.N {
			if l.N < 1 {
				size = 1
			} else {
				size = int(l.N)
			}
		}
		buf = make([]byte, size)
	}
	for {
		nr, er := src.Read(ctx, buf)
		if nr > 0 {
			nw, ew := dst.Write(ctx, buf[0:nr])
			// A negative count or more bytes written than read is an
			// impossible result from a conforming Writer.
			if nw < 0 || nr < nw {
				nw = 0
				if ew == nil {
					ew = errInvalidWrite
				}
			}
			written += int64(nw)
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = ErrShortWrite
				break
			}
		}
		if er != nil {
			// EOF from src is the normal termination, not an error.
			if er != EOF {
				err = er
			}
			break
		}
	}
	return written, err
}
|
||||
|
||||
// LimitReader returns a Reader that reads from r
|
||||
// but stops with EOF after n bytes.
|
||||
// The underlying implementation is a *LimitedReader.
|
||||
func LimitReader(r Reader, n int64) Reader { return &LimitedReader{r, n} }
|
||||
|
||||
// A LimitedReader reads from R but limits the amount of
|
||||
// data returned to just N bytes. Each call to Read
|
||||
// updates N to reflect the new amount remaining.
|
||||
// Read returns EOF when N <= 0 or when the underlying R returns EOF.
|
||||
type LimitedReader struct {
|
||||
R Reader // underlying reader
|
||||
N int64 // max bytes remaining
|
||||
}
|
||||
|
||||
func (l *LimitedReader) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
if l.N <= 0 {
|
||||
return 0, EOF
|
||||
}
|
||||
if int64(len(p)) > l.N {
|
||||
p = p[0:l.N]
|
||||
}
|
||||
n, err = l.R.Read(ctx, p)
|
||||
l.N -= int64(n)
|
||||
return
|
||||
}
|
||||
|
||||
// NewSectionReader returns a [SectionReader] that reads from r
|
||||
// starting at offset off and stops with EOF after n bytes.
|
||||
func NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader {
|
||||
var remaining int64
|
||||
const maxint64 = 1<<63 - 1
|
||||
if off <= maxint64-n {
|
||||
remaining = n + off
|
||||
} else {
|
||||
// Overflow, with no way to return error.
|
||||
// Assume we can read up to an offset of 1<<63 - 1.
|
||||
remaining = maxint64
|
||||
}
|
||||
return &SectionReader{r, off, off, remaining, n}
|
||||
}
|
||||
|
||||
// SectionReader implements Read, Seek, and ReadAt on a section
// of an underlying [ReaderAt].
type SectionReader struct {
	r     ReaderAt // constant after creation
	base  int64    // constant after creation; absolute start of the section
	off   int64    // current absolute read offset, in [base, limit]
	limit int64    // constant after creation; absolute end of the section
	n     int64    // constant after creation; requested section length
}
|
||||
|
||||
func (s *SectionReader) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
if s.off >= s.limit {
|
||||
return 0, EOF
|
||||
}
|
||||
if max := s.limit - s.off; int64(len(p)) > max {
|
||||
p = p[0:max]
|
||||
}
|
||||
n, err = s.r.ReadAt(ctx, p, s.off)
|
||||
s.off += int64(n)
|
||||
return
|
||||
}
|
||||
|
||||
// errWhence and errOffset mirror the sentinel errors used by the
// standard library io.SectionReader for invalid Seek arguments.
var errWhence = errors.New("Seek: invalid whence")
var errOffset = errors.New("Seek: invalid offset")
|
||||
|
||||
func (s *SectionReader) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
default:
|
||||
return 0, errWhence
|
||||
case SeekStart:
|
||||
offset += s.base
|
||||
case SeekCurrent:
|
||||
offset += s.off
|
||||
case SeekEnd:
|
||||
offset += s.limit
|
||||
}
|
||||
if offset < s.base {
|
||||
return 0, errOffset
|
||||
}
|
||||
s.off = offset
|
||||
return offset - s.base, nil
|
||||
}
|
||||
|
||||
func (s *SectionReader) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
if off < 0 || off >= s.Size() {
|
||||
return 0, EOF
|
||||
}
|
||||
off += s.base
|
||||
if max := s.limit - off; int64(len(p)) > max {
|
||||
p = p[0:max]
|
||||
n, err = s.r.ReadAt(ctx, p, off)
|
||||
if err == nil {
|
||||
err = EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
return s.r.ReadAt(ctx, p, off)
|
||||
}
|
||||
|
||||
// Size returns the size of the section in bytes.
//
// NOTE(review): this is limit-base, which saturates if the requested
// length overflowed in NewSectionReader; s.n holds the original request.
func (s *SectionReader) Size() int64 { return s.limit - s.base }
|
||||
|
||||
// Outer returns the underlying [ReaderAt] and offsets for the section.
//
// The returned values are the same that were passed to [NewSectionReader]
// when the [SectionReader] was created. In particular n is the requested
// length, which may exceed Size() if the length overflowed at creation.
func (s *SectionReader) Outer() (r ReaderAt, off int64, n int64) {
	return s.r, s.base, s.n
}
|
||||
|
||||
// An OffsetWriter maps writes at offset base to offset base+off in the underlying writer.
type OffsetWriter struct {
	w    WriterAt
	base int64 // the original offset; constant after creation
	off  int64 // the current offset, advanced by Write and Seek
}
|
||||
|
||||
// NewOffsetWriter returns an [OffsetWriter] that writes to w
|
||||
// starting at offset off.
|
||||
func NewOffsetWriter(w WriterAt, off int64) *OffsetWriter {
|
||||
return &OffsetWriter{w, off, off}
|
||||
}
|
||||
|
||||
func (o *OffsetWriter) Write(ctx context.Context, p []byte) (n int, err error) {
|
||||
n, err = o.w.WriteAt(ctx, p, o.off)
|
||||
o.off += int64(n)
|
||||
return
|
||||
}
|
||||
|
||||
func (o *OffsetWriter) WriteAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
if off < 0 {
|
||||
return 0, errOffset
|
||||
}
|
||||
|
||||
off += o.base
|
||||
return o.w.WriteAt(ctx, p, off)
|
||||
}
|
||||
|
||||
func (o *OffsetWriter) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
default:
|
||||
return 0, errWhence
|
||||
case SeekStart:
|
||||
offset += o.base
|
||||
case SeekCurrent:
|
||||
offset += o.off
|
||||
}
|
||||
if offset < o.base {
|
||||
return 0, errOffset
|
||||
}
|
||||
o.off = offset
|
||||
return offset - o.base, nil
|
||||
}
|
||||
|
||||
// TeeReader returns a [Reader] that writes to w what it reads from r.
|
||||
// All reads from r performed through it are matched with
|
||||
// corresponding writes to w. There is no internal buffering -
|
||||
// the write must complete before the read completes.
|
||||
// Any error encountered while writing is reported as a read error.
|
||||
func TeeReader(r Reader, w Writer) Reader {
|
||||
return &teeReader{r, w}
|
||||
}
|
||||
|
||||
// teeReader is the concrete implementation behind TeeReader: every
// successful read from r is mirrored to w before being returned.
type teeReader struct {
	r Reader
	w Writer
}
|
||||
|
||||
func (t *teeReader) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
n, err = t.r.Read(ctx, p)
|
||||
if n > 0 {
|
||||
if n, err := t.w.Write(ctx, p[:n]); err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Discard is a [Writer] on which all Write calls succeed
// without doing anything.
var Discard Writer = discard{}

// discard is the zero-size type backing Discard.
type discard struct{}

// discard implements ReaderFrom as an optimization so Copy to
// io.Discard can avoid doing unnecessary work.
var _ ReaderFrom = discard{}
|
||||
|
||||
// Write discards p, reporting that all of it was written.
func (discard) Write(ctx context.Context, p []byte) (int, error) {
	return len(p), nil
}

// WriteString discards s, reporting that all of it was written.
func (discard) WriteString(ctx context.Context, s string) (int, error) {
	return len(s), nil
}

// blackHolePool recycles scratch buffers for discard.ReadFrom so that
// draining a reader does not allocate per call.
var blackHolePool = sync.Pool{
	New: func() any {
		b := make([]byte, 8192)
		return &b
	},
}
|
||||
|
||||
func (discard) ReadFrom(ctx context.Context, r Reader) (n int64, err error) {
|
||||
bufp := blackHolePool.Get().(*[]byte)
|
||||
readSize := 0
|
||||
for {
|
||||
readSize, err = r.Read(ctx, *bufp)
|
||||
n += int64(readSize)
|
||||
if err != nil {
|
||||
blackHolePool.Put(bufp)
|
||||
if err == EOF {
|
||||
return n, nil
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NopCloser returns a [ReadCloser] with a no-op Close method wrapping
|
||||
// the provided [Reader] r.
|
||||
// If r implements [WriterTo], the returned [ReadCloser] will implement [WriterTo]
|
||||
// by forwarding calls to r.
|
||||
func NopCloser(r Reader) ReadCloser {
|
||||
if _, ok := r.(WriterTo); ok {
|
||||
return nopCloserWriterTo{r}
|
||||
}
|
||||
return nopCloser{r}
|
||||
}
|
||||
|
||||
// nopCloser adds a no-op Close to an embedded Reader.
type nopCloser struct {
	Reader
}

// Close implements Closer and does nothing.
func (nopCloser) Close(ctx context.Context) error { return nil }
|
||||
|
||||
// nopCloserWriterTo is like nopCloser but additionally forwards
// WriteTo to the wrapped Reader, which is known to implement WriterTo.
type nopCloserWriterTo struct {
	Reader
}

// Close implements Closer and does nothing.
func (nopCloserWriterTo) Close(ctx context.Context) error { return nil }

// WriteTo forwards to the wrapped Reader's WriterTo implementation.
// The type assertion is safe: NopCloser only constructs this type when
// the Reader implements WriterTo.
func (c nopCloserWriterTo) WriteTo(ctx context.Context, w Writer) (n int64, err error) {
	return c.Reader.(WriterTo).WriteTo(ctx, w)
}
|
||||
|
||||
// ReadAll reads from r until an error or EOF and returns the data it read.
|
||||
// A successful call returns err == nil, not err == EOF. Because ReadAll is
|
||||
// defined to read from src until EOF, it does not treat an EOF from Read
|
||||
// as an error to be reported.
|
||||
func ReadAll(ctx context.Context, r Reader) ([]byte, error) {
|
||||
b := make([]byte, 0, 512)
|
||||
for {
|
||||
n, err := r.Read(ctx, b[len(b):cap(b)])
|
||||
b = b[:len(b)+n]
|
||||
if err != nil {
|
||||
if err == EOF {
|
||||
err = nil
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
|
||||
if len(b) == cap(b) {
|
||||
// Add more capacity (let append pick how much).
|
||||
b = append(b, 0)[:len(b)]
|
||||
}
|
||||
}
|
||||
}
|
|
@ -5,28 +5,22 @@ import (
|
|||
"io"
|
||||
)
|
||||
|
||||
type ReaderAtCloser interface {
|
||||
type FileReader interface {
|
||||
Reader
|
||||
ReaderAt
|
||||
Closer
|
||||
}
|
||||
|
||||
type ReaderAt interface {
|
||||
ReadAt(ctx context.Context, p []byte, off int64) (n int, err error)
|
||||
}
|
||||
|
||||
type Reader interface {
|
||||
Read(ctx context.Context, p []byte) (n int, err error)
|
||||
}
|
||||
|
||||
type Closer interface {
|
||||
Close(ctx context.Context) error
|
||||
}
|
||||
// contextReader adapts a context-aware Reader to the standard io.Reader
// interface by capturing ctx at construction time.
//
// NOTE(review): storing a context in a struct is normally discouraged;
// here it is required to satisfy io.Reader's context-free signature.
type contextReader struct {
	ctx context.Context
	r   Reader
}

// Read implements io.Reader. It fails fast if the captured context is
// already done, otherwise delegates to the wrapped Reader.
func (r *contextReader) Read(p []byte) (n int, err error) {
	if r.ctx.Err() != nil {
		return 0, r.ctx.Err()
	}

	return r.r.Read(r.ctx, p)
}
|
||||
|
||||
|
@ -40,9 +34,31 @@ type contextReaderAt struct {
|
|||
}
|
||||
|
||||
// ReadAt implements io.ReaderAt. It fails fast if the captured context
// is already done, otherwise delegates to the wrapped ReaderAt.
func (c *contextReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
	if c.ctx.Err() != nil {
		return 0, c.ctx.Err()
	}

	return c.r.ReadAt(c.ctx, p, off)
}
|
||||
|
||||
// IoReader adapts a context-aware Reader to a standard io.Reader by
// binding ctx to every subsequent Read call.
func IoReader(ctx context.Context, r Reader) io.Reader {
	return &contextReader{ctx: ctx, r: r}
}

// WrapIoReader adapts a standard io.Reader to the context-aware Reader
// interface; the supplied context is only checked for cancellation.
func WrapIoReader(r io.Reader) Reader {
	return &wrapReader{r: r}
}
|
||||
|
||||
type wrapReader struct {
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
var _ Reader = (*wrapReader)(nil)
|
||||
|
||||
// Read implements Reader.
|
||||
func (c *wrapReader) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
if ctx.Err() != nil {
|
||||
return 0, ctx.Err()
|
||||
}
|
||||
return c.r.Read(p)
|
||||
}
|
||||
|
|
|
@ -59,10 +59,10 @@ type ioSeekerCloserWrapper struct {
|
|||
pos int64
|
||||
size int64
|
||||
|
||||
r ReaderAtCloser
|
||||
r FileReader
|
||||
}
|
||||
|
||||
func IoReadSeekCloserWrapper(ctx context.Context, r ReaderAtCloser, size int64) io.ReadSeekCloser {
|
||||
func IoReadSeekCloserWrapper(ctx context.Context, r FileReader, size int64) io.ReadSeekCloser {
|
||||
return &ioSeekerCloserWrapper{
|
||||
ctx: ctx,
|
||||
r: r,
|
||||
|
|
20
pkg/ctxio/teereader.go
Normal file
20
pkg/ctxio/teereader.go
Normal file
|
@ -0,0 +1,20 @@
|
|||
package ctxio
|
||||
|
||||
// func TeeReader(r Reader, w Writer) Reader {
|
||||
// return &teeReader{r, w}
|
||||
// }
|
||||
|
||||
// type teeReader struct {
|
||||
// r Reader
|
||||
// w Writer
|
||||
// }
|
||||
|
||||
// func (t *teeReader) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
// n, err = t.r.Read(ctx, p)
|
||||
// if n > 0 {
|
||||
// if n, err := t.w.Write(ctx, p[:n]); err != nil {
|
||||
// return n, err
|
||||
// }
|
||||
// }
|
||||
// return
|
||||
// }
|
11
pkg/go-nfs/.github/dependabot.yml
vendored
Normal file
11
pkg/go-nfs/.github/dependabot.yml
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
# To get started with Dependabot version updates, you'll need to specify which
|
||||
# package ecosystems to update and where the package manifests are located.
|
||||
# Please see the documentation for all configuration options:
|
||||
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
|
||||
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/" # Location of package manifests
|
||||
schedule:
|
||||
interval: "daily"
|
51
pkg/go-nfs/.github/workflows/codeql-analysis.yml
vendored
Normal file
51
pkg/go-nfs/.github/workflows/codeql-analysis.yml
vendored
Normal file
|
@ -0,0 +1,51 @@
|
|||
name: "Code scanning - action"
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
schedule:
|
||||
- cron: '0 18 * * 3'
|
||||
|
||||
jobs:
|
||||
CodeQL-Build:
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We must fetch at least the immediate parents so that if this is
|
||||
# a pull request then we can checkout the head.
|
||||
fetch-depth: 2
|
||||
|
||||
# If this run was triggered by a pull request event, then checkout
|
||||
# the head of the pull request instead of the merge commit.
|
||||
- run: git checkout HEAD^2
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v1
|
||||
# Override language selection by uncommenting this and choosing your languages
|
||||
# with:
|
||||
# languages: go, javascript, csharp, python, cpp, java
|
||||
|
||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||
# If this step fails, then you should remove it and run the build manually (see below)
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v1
|
||||
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 https://git.io/JvXDl
|
||||
|
||||
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
|
||||
# and modify them (or add more) to build your code if your project
|
||||
# uses a compiled language
|
||||
|
||||
#- run: |
|
||||
# make bootstrap
|
||||
# make release
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v1
|
36
pkg/go-nfs/.github/workflows/go.yml
vendored
Normal file
36
pkg/go-nfs/.github/workflows/go.yml
vendored
Normal file
|
@ -0,0 +1,36 @@
|
|||
name: Go
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Go 1.x
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ^1.19
|
||||
id: go
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get dependencies
|
||||
run: go get -v -t -d ./...
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./...
|
||||
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
|
||||
- name: Test
|
||||
run: go test -v .
|
11
pkg/go-nfs/CONTRIBUTING.md
Normal file
11
pkg/go-nfs/CONTRIBUTING.md
Normal file
|
@ -0,0 +1,11 @@
|
|||
# Contributing Guidelines
|
||||
|
||||
We appreciate your interest in improving go-nfs!
|
||||
|
||||
## Looking for ways to contribute?
|
||||
|
||||
There are several ways you can contribute:
|
||||
- Start contributing immediately via the [opened](https://github.com/willscott/go-nfs/issues) issues on GitHub.
|
||||
Defined issues provide an excellent starting point.
|
||||
- Reporting issues, bugs, mistakes, or inconsistencies.
|
||||
As with many open source projects, we are short-staffed; we thus kindly ask you to be open to contributing a fix for discovered issues.
|
202
pkg/go-nfs/LICENSE
Normal file
202
pkg/go-nfs/LICENSE
Normal file
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
96
pkg/go-nfs/README.md
Normal file
96
pkg/go-nfs/README.md
Normal file
|
@ -0,0 +1,96 @@
|
|||
Golang Network File Server
|
||||
===
|
||||
|
||||
NFSv3 protocol implementation in pure Golang.
|
||||
|
||||
Current Status:
|
||||
* Minimally tested
|
||||
* Mounts, read-only and read-write support
|
||||
|
||||
Usage
|
||||
===
|
||||
|
||||
The most interesting demo is currently in `example/osview`.
|
||||
|
||||
Start the server
|
||||
`go run ./example/osview .`.
|
||||
|
||||
The local folder at `.` will be the initial view in the mount. Mutations to metadata or contents
will be stored purely in memory and not written back to the OS. When run, this
demo will print the port it is listening on.
|
||||
|
||||
The mount can be accessed using a command similar to
|
||||
`mount -o port=<n>,mountport=<n> -t nfs localhost:/mount <mountpoint>` (For Mac users)
|
||||
|
||||
or
|
||||
|
||||
`mount -o port=<n>,mountport=<n>,nfsvers=3,noacl,tcp -t nfs localhost:/mount <mountpoint>` (For Linux users)
|
||||
|
||||
API
|
||||
===
|
||||
|
||||
The NFS server runs on a `net.Listener` to export a file system to NFS clients.
|
||||
Usage is structured similarly to many other golang network servers.
|
||||
|
||||
```golang
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
|
||||
"github.com/go-git/go-billy/v5/memfs"
|
||||
nfs "github.com/willscott/go-nfs"
|
||||
nfshelper "github.com/willscott/go-nfs/helpers"
|
||||
)
|
||||
|
||||
func main() {
|
||||
listener, err := net.Listen("tcp", ":0")
|
||||
panicOnErr(err, "starting TCP listener")
|
||||
fmt.Printf("Server running at %s\n", listener.Addr())
|
||||
mem := memfs.New()
|
||||
f, err := mem.Create("hello.txt")
|
||||
panicOnErr(err, "creating file")
|
||||
_, err = f.Write([]byte("hello world"))
|
||||
panicOnErr(err, "writing data")
|
||||
f.Close()
|
||||
handler := nfshelper.NewNullAuthHandler(mem)
|
||||
cacheHelper := nfshelper.NewCachingHandler(handler, 1)
|
||||
panicOnErr(nfs.Serve(listener, cacheHelper), "serving nfs")
|
||||
}
|
||||
|
||||
func panicOnErr(err error, desc ...interface{}) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
log.Println(desc...)
|
||||
log.Panicln(err)
|
||||
}
|
||||
```
|
||||
|
||||
Notes
|
||||
---
|
||||
|
||||
* Ports are typically determined through portmap. The need for running portmap
|
||||
(which is the only part that needs a privileged listening port) can be avoided
|
||||
through specific mount options. e.g.
|
||||
`mount -o port=n,mountport=n -t nfs host:/mount /localmount`
|
||||
|
||||
* This server currently uses [billy](https://github.com/go-git/go-billy/) to
|
||||
provide a file system abstraction layer. There are some edges of the NFS protocol
|
||||
which do not translate to this abstraction.
|
||||
* NFS expects access to an `inode` or equivalent unique identifier to reference
|
||||
files in a file system. These are considered opaque identifiers here, which
|
||||
means they will not work as expected in cases of hard linking.
|
||||
* The billy abstraction layer does not extend to exposing `uid` and `gid`
|
||||
ownership of files. If ownership is important to your file system, you
|
||||
will need to ensure that the `os.FileInfo` meets additional constraints.
|
||||
In particular, the `Sys()` escape hatch is queried by this library, and
|
||||
if your file system populates a [`syscall.Stat_t`](https://golang.org/pkg/syscall/#Stat_t)
|
||||
concrete struct, the ownership specified in that object will be used.
|
||||
|
||||
* Relevant RFCS:
|
||||
[5531 - RPC protocol](https://tools.ietf.org/html/rfc5531),
|
||||
[1813 - NFSv3](https://tools.ietf.org/html/rfc1813),
|
||||
[1094 - NFS](https://tools.ietf.org/html/rfc1094)
|
11
pkg/go-nfs/SECURITY.md
Normal file
11
pkg/go-nfs/SECURITY.md
Normal file
|
@ -0,0 +1,11 @@
|
|||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
The latest release reflects the current best recommendation / supported version at this time.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Please email Will (the git commit author) if you need to report issues privately.
|
||||
I will endeavor to respond within a day, but if I am offline, responses may be delayed longer than that.
|
||||
If you need a stronger SLA to have confidence in using this code, feel free to reach out.
|
9
pkg/go-nfs/capability_check.go
Normal file
9
pkg/go-nfs/capability_check.go
Normal file
|
@ -0,0 +1,9 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
billy "github.com/go-git/go-billy/v5"
|
||||
)
|
||||
|
||||
// CapabilityCheck reports whether fs supports the given billy capability.
//
// NOTE(review): this implementation unconditionally reports support and
// ignores both arguments; confirm that callers tolerate an optimistic
// answer for filesystems that lack the capability.
func CapabilityCheck(fs Filesystem, cap billy.Capability) bool {
	return true
}
|
335
pkg/go-nfs/conn.go
Normal file
335
pkg/go-nfs/conn.go
Normal file
|
@ -0,0 +1,335 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
xdr2 "github.com/rasky/go-xdr/xdr2"
|
||||
"github.com/willscott/go-nfs-client/nfs/rpc"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
"go.opentelemetry.io/otel"
|
||||
)
|
||||
|
||||
var (
	// ErrInputInvalid is returned when input cannot be parsed
	ErrInputInvalid = errors.New("invalid input")
	// ErrAlreadySent is returned when writing a header/status multiple times
	ErrAlreadySent = errors.New("response already started")
)

// ResponseCode is a combination of accept_stat and reject_stat.
type ResponseCode uint32

// ResponseCode Codes. The ordering matches the RPC accept_stat values,
// starting from success (0).
const (
	ResponseCodeSuccess ResponseCode = iota
	ResponseCodeProgUnavailable
	ResponseCodeProcUnavailable
	ResponseCodeGarbageArgs
	ResponseCodeSystemErr
	ResponseCodeRPCMismatch
	ResponseCodeAuthError
)
|
||||
|
||||
// conn is the per-connection state of the NFS server: it embeds the
// owning Server for configuration/handler lookup and the accepted
// net.Conn for I/O.
type conn struct {
	*Server
	// writeSerializer funnels all outgoing response frames through a
	// single goroutine (serializeWrites) so concurrent handlers never
	// interleave bytes on the wire.
	writeSerializer chan []byte
	net.Conn
}

// tracer is the OpenTelemetry tracer for spans emitted by this package.
var tracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/pkg/go-nfs")
|
||||
|
||||
func (c *conn) serve() {
|
||||
ctx := context.Background() // TODO implement correct timeout on serve side
|
||||
|
||||
c.writeSerializer = make(chan []byte, 1)
|
||||
go c.serializeWrites(ctx)
|
||||
|
||||
bio := bufio.NewReader(c.Conn)
|
||||
for {
|
||||
w, err := c.readRequestHeader(ctx, bio)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
// Clean close.
|
||||
c.Close()
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
Log.Tracef("request: %v", w.req)
|
||||
err = c.handle(ctx, w)
|
||||
respErr := w.finish(ctx)
|
||||
if err != nil {
|
||||
Log.Errorf("error handling req: %v", err)
|
||||
// failure to handle at a level needing to close the connection.
|
||||
c.Close()
|
||||
return
|
||||
}
|
||||
if respErr != nil {
|
||||
Log.Errorf("error sending response: %v", respErr)
|
||||
c.Close()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *conn) serializeWrites(ctx context.Context) {
|
||||
// todo: maybe don't need the extra buffer
|
||||
writer := bufio.NewWriter(c.Conn)
|
||||
var fragmentBuf [4]byte
|
||||
var fragmentInt uint32
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case msg, ok := <-c.writeSerializer:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// prepend the fragmentation header
|
||||
fragmentInt = uint32(len(msg))
|
||||
fragmentInt |= (1 << 31)
|
||||
binary.BigEndian.PutUint32(fragmentBuf[:], fragmentInt)
|
||||
n, err := writer.Write(fragmentBuf[:])
|
||||
if n < 4 || err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writer.Write(msg)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if n < len(msg) {
|
||||
panic("todo: ensure writes complete fully.")
|
||||
}
|
||||
if err = writer.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle a request. errors from this method indicate a failure to read or
// write on the network stream, and trigger a disconnection of the connection.
// Application-level failures are converted into RPC error replies instead.
func (c *conn) handle(ctx context.Context, w *response) error {
	ctx, span := tracer.Start(ctx, fmt.Sprintf("nfs.handle.%s", NFSProcedure(w.req.Header.Proc).String()))
	defer span.End()

	handler := c.Server.handlerFor(w.req.Header.Prog, w.req.Header.Proc)
	if handler == nil {
		Log.Errorf("No handler for %d.%d", w.req.Header.Prog, w.req.Header.Proc)
		// The unread request body must still be consumed so the stream
		// stays aligned for the next request on this connection.
		if err := w.drain(ctx); err != nil {
			return err
		}
		return c.err(ctx, w, &ResponseCodeProcUnavailableError{})
	}
	// Run the procedure, then drain whatever request bytes the handler
	// left unread before deciding how to respond.
	appError := handler(ctx, w, c.Server.Handler)
	if drainErr := w.drain(ctx); drainErr != nil {
		return drainErr
	}
	if appError != nil && !w.responded {
		Log.Errorf("call to %+v failed: %v", handler, appError)
		if err := c.err(ctx, w, appError); err != nil {
			return err
		}
	}
	// A handler must either write a reply or report an error; falling
	// through both is a bug, answered with a generic system error.
	if !w.responded {
		Log.Errorf("Handler did not indicate response status via writing or erroring")
		if err := c.err(ctx, w, &ResponseCodeSystemError{}); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
func (c *conn) err(ctx context.Context, w *response, err error) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
if w.err == nil {
|
||||
w.err = err
|
||||
}
|
||||
|
||||
if w.responded {
|
||||
return nil
|
||||
}
|
||||
|
||||
rpcErr := w.errorFmt(err)
|
||||
if writeErr := w.writeHeader(rpcErr.Code()); writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
|
||||
body, _ := rpcErr.MarshalBinary()
|
||||
return w.Write(body)
|
||||
}
|
||||
|
||||
type request struct {
|
||||
xid uint32
|
||||
rpc.Header
|
||||
Body io.Reader
|
||||
}
|
||||
|
||||
func (r *request) String() string {
|
||||
if r.Header.Prog == nfsServiceID {
|
||||
return fmt.Sprintf("RPC #%d (nfs.%s)", r.xid, NFSProcedure(r.Header.Proc))
|
||||
} else if r.Header.Prog == mountServiceID {
|
||||
return fmt.Sprintf("RPC #%d (mount.%s)", r.xid, MountProcedure(r.Header.Proc))
|
||||
}
|
||||
return fmt.Sprintf("RPC #%d (%d.%d)", r.xid, r.Header.Prog, r.Header.Proc)
|
||||
}
|
||||
|
||||
// response accumulates one RPC reply in memory before finish hands the
// completed frame to the connection's write serializer.
type response struct {
	*conn
	// writer buffers the whole reply frame (header + body).
	writer *bytes.Buffer
	// responded is set once the reply header has been written; it guards
	// against emitting a second header for the same request.
	responded bool
	// err records the first error reported for this response.
	err error
	// errorFmt converts handler errors into wire-level RPC errors.
	errorFmt func(error) RPCError
	req      *request
}

// writeXdrHeader emits the xid of the request being answered followed
// by the XDR message type (1 = reply).
func (w *response) writeXdrHeader() error {
	err := xdr.Write(w.writer, &w.req.xid)
	if err != nil {
		return err
	}
	respType := uint32(1)
	err = xdr.Write(w.writer, &respType)
	if err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
func (w *response) writeHeader(code ResponseCode) error {
|
||||
if w.responded {
|
||||
return ErrAlreadySent
|
||||
}
|
||||
w.responded = true
|
||||
if err := w.writeXdrHeader(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
status := rpc.MsgAccepted
|
||||
if code == ResponseCodeAuthError || code == ResponseCodeRPCMismatch {
|
||||
status = rpc.MsgDenied
|
||||
}
|
||||
|
||||
err := xdr.Write(w.writer, &status)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if status == rpc.MsgAccepted {
|
||||
// Write opaque_auth header.
|
||||
err = xdr.Write(w.writer, &rpc.AuthNull)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return xdr.Write(w.writer, &code)
|
||||
}
|
||||
|
||||
// Write a response to an xdr message
|
||||
func (w *response) Write(dat []byte) error {
|
||||
if !w.responded {
|
||||
if err := w.writeHeader(ResponseCodeSuccess); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
acc := 0
|
||||
for acc < len(dat) {
|
||||
n, err := w.writer.Write(dat[acc:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
acc += n
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// drain reads the rest of the request frame if not consumed by the handler.
// Leaving unread bytes would desynchronize framing for the next request on
// this connection. A body that is not an *io.LimitedReader has unknown
// remaining length and is reported as io.ErrUnexpectedEOF.
func (w *response) drain(ctx context.Context) error {
	if reader, ok := w.req.Body.(*io.LimitedReader); ok {
		if reader.N == 0 {
			return nil
		}
		// todo: wrap body in a context reader.
		// Discard whatever the handler left unread; EOF here simply
		// means the frame ended early and is not an error.
		_, err := io.CopyN(io.Discard, w.req.Body, reader.N)
		if err == nil || err == io.EOF {
			return nil
		}
		return err
	}
	return io.ErrUnexpectedEOF
}
|
||||
|
||||
// finish queues the buffered reply frame for transmission by the
// connection's write-serializer goroutine, or gives up with ctx.Err()
// when the context is canceled first.
func (w *response) finish(ctx context.Context) error {
	select {
	case w.conn.writeSerializer <- w.writer.Bytes():
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
|
||||
|
||||
func (c *conn) readRequestHeader(ctx context.Context, reader *bufio.Reader) (w *response, err error) {
|
||||
fragment, err := xdr.ReadUint32(reader)
|
||||
if err != nil {
|
||||
if xdrErr, ok := err.(*xdr2.UnmarshalError); ok {
|
||||
if xdrErr.Err == io.EOF {
|
||||
return nil, io.EOF
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if fragment&(1<<31) == 0 {
|
||||
Log.Warnf("Warning: haven't implemented fragment reconstruction.\n")
|
||||
return nil, ErrInputInvalid
|
||||
}
|
||||
reqLen := fragment - uint32(1<<31)
|
||||
if reqLen < 40 {
|
||||
return nil, ErrInputInvalid
|
||||
}
|
||||
|
||||
r := io.LimitedReader{R: reader, N: int64(reqLen)}
|
||||
|
||||
xid, err := xdr.ReadUint32(&r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reqType, err := xdr.ReadUint32(&r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if reqType != 0 { // 0 = request, 1 = response
|
||||
return nil, ErrInputInvalid
|
||||
}
|
||||
|
||||
req := request{
|
||||
xid,
|
||||
rpc.Header{},
|
||||
&r,
|
||||
}
|
||||
if err = xdr.Read(&r, &req.Header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w = &response{
|
||||
conn: c,
|
||||
req: &req,
|
||||
errorFmt: basicErrorFormatter,
|
||||
// TODO: use a pool for these.
|
||||
writer: bytes.NewBuffer([]byte{}),
|
||||
}
|
||||
return w, nil
|
||||
}
|
230
pkg/go-nfs/errors.go
Normal file
230
pkg/go-nfs/errors.go
Normal file
|
@ -0,0 +1,230 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// RPCError provides the error interface for errors thrown by
// procedures to be transmitted over the XDR RPC channel. Responses use
// it (via response.errorFmt) to pick the reply code and error body.
type RPCError interface {
	// An RPCError is an `error` with this method
	Error() string
	// Code is the RPC Response code to send
	Code() ResponseCode
	// BinaryMarshaler is the on-wire representation of this error
	encoding.BinaryMarshaler
}
|
||||
|
||||
// AuthStat is an enumeration of why authentication has failed.
// The values follow the ONC RPC auth_stat enumeration order.
type AuthStat uint32

// AuthStat Codes
const (
	AuthStatOK AuthStat = iota
	AuthStatBadCred
	AuthStatRejectedCred
	AuthStatBadVerifier
	AuthStatRejectedVerfier
	AuthStatTooWeak
	AuthStatInvalidResponse
	AuthStatFailed
	AuthStatKerbGeneric
	AuthStatTimeExpire
	AuthStatTktFile
	AuthStatDecode
	AuthStatNetAddr
	AuthStatRPCGSSCredProblem
	AuthStatRPCGSSCTXProblem
)
|
||||
|
||||
// AuthError is an RPCError
|
||||
type AuthError struct {
|
||||
AuthStat
|
||||
}
|
||||
|
||||
// Code for AuthErrors is ResponseCodeAuthError
|
||||
func (a *AuthError) Code() ResponseCode {
|
||||
return ResponseCodeAuthError
|
||||
}
|
||||
|
||||
// Error is a textual representaiton of the auth error. From the RFC
|
||||
func (a *AuthError) Error() string {
|
||||
switch a.AuthStat {
|
||||
case AuthStatOK:
|
||||
return "Auth Status: OK"
|
||||
case AuthStatBadCred:
|
||||
return "Auth Status: bad credential"
|
||||
case AuthStatRejectedCred:
|
||||
return "Auth Status: client must begin new session"
|
||||
case AuthStatBadVerifier:
|
||||
return "Auth Status: bad verifier"
|
||||
case AuthStatRejectedVerfier:
|
||||
return "Auth Status: verifier expired or replayed"
|
||||
case AuthStatTooWeak:
|
||||
return "Auth Status: rejected for security reasons"
|
||||
case AuthStatInvalidResponse:
|
||||
return "Auth Status: bogus response verifier"
|
||||
case AuthStatFailed:
|
||||
return "Auth Status: reason unknown"
|
||||
case AuthStatKerbGeneric:
|
||||
return "Auth Status: kerberos generic error"
|
||||
case AuthStatTimeExpire:
|
||||
return "Auth Status: time of credential expired"
|
||||
case AuthStatTktFile:
|
||||
return "Auth Status: problem with ticket file"
|
||||
case AuthStatDecode:
|
||||
return "Auth Status: can't decode authenticator"
|
||||
case AuthStatNetAddr:
|
||||
return "Auth Status: wrong net address in ticket"
|
||||
case AuthStatRPCGSSCredProblem:
|
||||
return "Auth Status: no credentials for user"
|
||||
case AuthStatRPCGSSCTXProblem:
|
||||
return "Auth Status: problem with context"
|
||||
}
|
||||
return "Auth Status: Unknown"
|
||||
}
|
||||
|
||||
// MarshalBinary sends the specific auth status
|
||||
func (a *AuthError) MarshalBinary() (data []byte, err error) {
|
||||
var resp [4]byte
|
||||
binary.LittleEndian.PutUint32(resp[:], uint32(a.AuthStat))
|
||||
return resp[:], nil
|
||||
}
|
||||
|
||||
// RPCMismatchError is an RPCError
|
||||
type RPCMismatchError struct {
|
||||
Low uint32
|
||||
High uint32
|
||||
}
|
||||
|
||||
// Code for RPCMismatchError is ResponseCodeRPCMismatch
|
||||
func (r *RPCMismatchError) Code() ResponseCode {
|
||||
return ResponseCodeRPCMismatch
|
||||
}
|
||||
|
||||
func (r *RPCMismatchError) Error() string {
|
||||
return fmt.Sprintf("RPC Mismatch: Expected version between %d and %d.", r.Low, r.High)
|
||||
}
|
||||
|
||||
// MarshalBinary sends the specific rpc mismatch range
|
||||
func (r *RPCMismatchError) MarshalBinary() (data []byte, err error) {
|
||||
var resp [8]byte
|
||||
binary.LittleEndian.PutUint32(resp[0:4], uint32(r.Low))
|
||||
binary.LittleEndian.PutUint32(resp[4:8], uint32(r.High))
|
||||
return resp[:], nil
|
||||
}
|
||||
|
||||
// ResponseCodeProcUnavailableError is an RPCError
|
||||
type ResponseCodeProcUnavailableError struct {
|
||||
}
|
||||
|
||||
// Code for ResponseCodeProcUnavailableError
|
||||
func (r *ResponseCodeProcUnavailableError) Code() ResponseCode {
|
||||
return ResponseCodeProcUnavailable
|
||||
}
|
||||
|
||||
func (r *ResponseCodeProcUnavailableError) Error() string {
|
||||
return "The requested procedure is unexported"
|
||||
}
|
||||
|
||||
// MarshalBinary - this error has no associated body
|
||||
func (r *ResponseCodeProcUnavailableError) MarshalBinary() (data []byte, err error) {
|
||||
return []byte{}, nil
|
||||
}
|
||||
|
||||
// ResponseCodeSystemError is an RPCError
|
||||
type ResponseCodeSystemError struct {
|
||||
}
|
||||
|
||||
// Code for ResponseCodeSystemError
|
||||
func (r *ResponseCodeSystemError) Code() ResponseCode {
|
||||
return ResponseCodeSystemErr
|
||||
}
|
||||
|
||||
func (r *ResponseCodeSystemError) Error() string {
|
||||
return "memory allocation failure"
|
||||
}
|
||||
|
||||
// MarshalBinary - this error has no associated body
|
||||
func (r *ResponseCodeSystemError) MarshalBinary() (data []byte, err error) {
|
||||
return []byte{}, nil
|
||||
}
|
||||
|
||||
// basicErrorFormatter is the default error handler for response errors.
|
||||
// if the error is already formatted, it is directly written. Otherwise,
|
||||
// ResponseCodeSystemError is sent to the client.
|
||||
func basicErrorFormatter(err error) RPCError {
|
||||
var rpcErr RPCError
|
||||
if errors.As(err, &rpcErr) {
|
||||
return rpcErr
|
||||
}
|
||||
return &ResponseCodeSystemError{}
|
||||
}
|
||||
|
||||
// NFSStatusError represents an error at the NFS level. Unlike transport
// errors it is carried on a *successful* RPC reply (see Code), with the
// NFS status as the body.
type NFSStatusError struct {
	NFSStatus
	// WrappedErr is the underlying cause, exposed via Unwrap.
	WrappedErr error
}

// Error is The wrapped error
func (s *NFSStatusError) Error() string {
	message := s.NFSStatus.String()
	if s.WrappedErr != nil {
		message = fmt.Sprintf("%s: %v", message, s.WrappedErr)
	}
	return message
}

// Code for NFS issues are successful RPC responses
func (s *NFSStatusError) Code() ResponseCode {
	return ResponseCodeSuccess
}

// MarshalBinary - The binary form of the code.
func (s *NFSStatusError) MarshalBinary() (data []byte, err error) {
	var resp [4]byte
	binary.BigEndian.PutUint32(resp[0:4], uint32(s.NFSStatus))
	return resp[:], nil
}

// Unwrap unpacks wrapped errors
func (s *NFSStatusError) Unwrap() error {
	return s.WrappedErr
}
|
||||
|
||||
// StatusErrorWithBody is an NFS error with a payload.
|
||||
type StatusErrorWithBody struct {
|
||||
NFSStatusError
|
||||
Body []byte
|
||||
}
|
||||
|
||||
// MarshalBinary provides the wire format of the error response
|
||||
func (s *StatusErrorWithBody) MarshalBinary() (data []byte, err error) {
|
||||
head, err := s.NFSStatusError.MarshalBinary()
|
||||
return append(head, s.Body...), err
|
||||
}
|
||||
|
||||
// errFormatterWithBody appends a provided body to errors
|
||||
func errFormatterWithBody(body []byte) func(err error) RPCError {
|
||||
return func(err error) RPCError {
|
||||
if nerr, ok := err.(*NFSStatusError); ok {
|
||||
return &StatusErrorWithBody{*nerr, body[:]}
|
||||
}
|
||||
var rErr RPCError
|
||||
if errors.As(err, &rErr) {
|
||||
return rErr
|
||||
}
|
||||
return &ResponseCodeSystemError{}
|
||||
}
|
||||
}
|
||||
|
||||
var (
	// opAttrErrorBody is four zero bytes — an absent post_op_attr (its
	// presence flag set to 0, matching WritePostOpAttrs' nil case).
	opAttrErrorBody      = [4]byte{}
	opAttrErrorFormatter = errFormatterWithBody(opAttrErrorBody[:])
	// wccDataErrorBody is eight zero bytes — a wcc_data with both the
	// pre- and post-attribute halves absent (matching WriteWcc's nil cases).
	wccDataErrorBody      = [8]byte{}
	wccDataErrorFormatter = errFormatterWithBody(wccDataErrorBody[:])
)
|
52
pkg/go-nfs/example/helloworld/main.go
Normal file
52
pkg/go-nfs/example/helloworld/main.go
Normal file
|
@ -0,0 +1,52 @@
|
|||
package main

import (
	"context"
	"fmt"
	"net"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/memfs"

	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
	nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
)

// ROFS is an intercepter for the filesystem indicating it should
// be read only. The underlying billy.Memfs indicates it supports
// writing, but does not implement billy.Change to support
// modification of permissions / modTimes, and as such cannot be
// used as RW system.
type ROFS struct {
	nfs.Filesystem
}

// Capabilities exports the filesystem as readonly
func (ROFS) Capabilities() billy.Capability {
	return billy.ReadCapability | billy.SeekCapability
}

// main serves a single in-memory file over NFS on an OS-assigned port.
func main() {
	ctx := context.Background()

	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		fmt.Printf("Failed to listen: %v\n", err)
		return
	}
	fmt.Printf("Server running at %s\n", listener.Addr())

	// Seed the in-memory filesystem with one file to export.
	// Note: the helpers package was previously imported twice under two
	// names (helpers and nfshelper); a single alias is used now.
	mem := nfshelper.WrapBillyFS(memfs.New())
	f, err := mem.Create(ctx, "hello.txt")
	if err != nil {
		fmt.Printf("Failed to create file: %v\n", err)
		return
	}
	// Best-effort demo writes; errors are deliberately ignored here.
	_, _ = f.Write(ctx, []byte("hello world"))
	_ = f.Close(ctx)

	handler := nfshelper.NewNullAuthHandler(ROFS{mem})
	cacheHelper := nfshelper.NewCachingHandler(handler, 1024)
	fmt.Printf("%v", nfs.Serve(listener, cacheHelper))
}
|
38
pkg/go-nfs/example/osnfs/changeos.go
Normal file
38
pkg/go-nfs/example/osnfs/changeos.go
Normal file
|
@ -0,0 +1,38 @@
|
|||
package main

import (
	"os"
	"time"

	"github.com/go-git/go-billy/v5"
)

// NewChangeOSFS wraps billy osfs to add the change interface
func NewChangeOSFS(fs billy.Filesystem) billy.Filesystem {
	return COS{fs}
}

// COS or OSFS + Change wraps a billy.FS to not fail the `Change` interface.
// Each method resolves the name under the wrapped filesystem's root and
// delegates to the corresponding os call.
type COS struct {
	billy.Filesystem
}

// Chmod changes mode
func (fs COS) Chmod(name string, mode os.FileMode) error {
	return os.Chmod(fs.Join(fs.Root(), name), mode)
}

// Lchown changes ownership (without following symlinks)
func (fs COS) Lchown(name string, uid, gid int) error {
	return os.Lchown(fs.Join(fs.Root(), name), uid, gid)
}

// Chown changes ownership
func (fs COS) Chown(name string, uid, gid int) error {
	return os.Chown(fs.Join(fs.Root(), name), uid, gid)
}

// Chtimes changes access and modification times
func (fs COS) Chtimes(name string, atime time.Time, mtime time.Time) error {
	return os.Chtimes(fs.Join(fs.Root(), name), atime, mtime)
}
|
28
pkg/go-nfs/example/osnfs/changeos_unix.go
Normal file
28
pkg/go-nfs/example/osnfs/changeos_unix.go
Normal file
|
@ -0,0 +1,28 @@
|
|||
//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func (fs COS) Mknod(path string, mode uint32, major uint32, minor uint32) error {
|
||||
dev := unix.Mkdev(major, minor)
|
||||
return unix.Mknod(fs.Join(fs.Root(), path), mode, int(dev))
|
||||
}
|
||||
|
||||
func (fs COS) Mkfifo(path string, mode uint32) error {
|
||||
return unix.Mkfifo(fs.Join(fs.Root(), path), mode)
|
||||
}
|
||||
|
||||
func (fs COS) Link(path string, link string) error {
|
||||
return unix.Link(fs.Join(fs.Root(), path), link)
|
||||
}
|
||||
|
||||
func (fs COS) Socket(path string) error {
|
||||
fd, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return unix.Bind(fd, &unix.SockaddrUnix{Name: fs.Join(fs.Root(), path)})
|
||||
}
|
36
pkg/go-nfs/example/osnfs/main.go
Normal file
36
pkg/go-nfs/example/osnfs/main.go
Normal file
|
@ -0,0 +1,36 @@
|
|||
package main

import (
	"fmt"
	"net"
	"os"

	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
	nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
	osfs "github.com/go-git/go-billy/v5/osfs"
)

// main serves the directory named by the first argument over NFS on the
// optional port given as the second argument (OS-assigned otherwise).
func main() {
	port := ""
	if len(os.Args) < 2 {
		fmt.Printf("Usage: osnfs </path/to/folder> [port]\n")
		return
	} else if len(os.Args) == 3 {
		port = os.Args[2]
	}

	listener, err := net.Listen("tcp", ":"+port)
	if err != nil {
		fmt.Printf("Failed to listen: %v\n", err)
		return
	}
	fmt.Printf("osnfs server running at %s\n", listener.Addr())

	// Wrap the OS-backed billy filesystem so it also satisfies the
	// Change interface (see changeos.go). The helpers package was
	// previously imported twice under two names; one alias is used now.
	bfs := osfs.New(os.Args[1])
	bfsPlusChange := nfshelper.WrapBillyFS(NewChangeOSFS(bfs))

	handler := nfshelper.NewNullAuthHandler(bfsPlusChange)
	cacheHelper := nfshelper.NewCachingHandler(handler, 1024)
	fmt.Printf("%v", nfs.Serve(listener, cacheHelper))
}
|
37
pkg/go-nfs/example/osview/main.go
Normal file
37
pkg/go-nfs/example/osview/main.go
Normal file
|
@ -0,0 +1,37 @@
|
|||
package main

import (
	"fmt"
	"net"
	"os"

	"github.com/willscott/memphis"

	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
	nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
)

// main exports a copy-on-write in-memory view of the directory named by
// the first argument over NFS, on the optional port given second.
func main() {
	port := ""
	if len(os.Args) < 2 {
		fmt.Printf("Usage: osview </path/to/folder> [port]\n")
		return
	} else if len(os.Args) == 3 {
		port = os.Args[2]
	}

	listener, err := net.Listen("tcp", ":"+port)
	if err != nil {
		fmt.Printf("Failed to listen: %v\n", err)
		return
	}
	fmt.Printf("Server running at %s\n", listener.Addr())

	// The helpers package was previously imported twice under two names
	// (helpers and nfshelper); a single alias is used for everything now.
	fs := memphis.FromOS(os.Args[1])
	bfs := nfshelper.WrapBillyFS(fs.AsBillyFS(0, 0))

	handler := nfshelper.NewNullAuthHandler(bfs)
	cacheHelper := nfshelper.NewCachingHandler(handler, 1024)
	fmt.Printf("%v", nfs.Serve(listener, cacheHelper))
}
|
377
pkg/go-nfs/file.go
Normal file
377
pkg/go-nfs/file.go
Normal file
|
@ -0,0 +1,377 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/go-nfs/file"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// FileAttribute holds metadata about a filesystem object, mirroring the
// NFSv3 fattr3 structure (see ToFileAttribute for how it is populated
// from an os.FileInfo).
type FileAttribute struct {
	Type     FileType
	FileMode uint32
	Nlink    uint32
	UID      uint32
	GID      uint32
	Filesize uint64
	Used     uint64
	// SpecData is the device major/minor pair for device nodes.
	SpecData [2]uint32
	FSID     uint64
	Fileid   uint64
	Atime, Mtime, Ctime FileTime
}
|
||||
|
||||
// FileType represents a NFS File Type
type FileType uint32

// Enumeration of NFS FileTypes
const (
	FileTypeRegular FileType = iota + 1
	FileTypeDirectory
	FileTypeBlock
	FileTypeCharacter
	FileTypeLink
	FileTypeSocket
	FileTypeFIFO
)

// String names the file type for logs and diagnostics.
func (f FileType) String() string {
	if f < FileTypeRegular || f > FileTypeFIFO {
		return "Unknown"
	}
	// Indexed by the contiguous constant block above.
	return [...]string{
		"Regular",
		"Directory",
		"Block Device",
		"Character Device",
		"Symbolic Link",
		"Socket",
		"FIFO",
	}[f-FileTypeRegular]
}
|
||||
|
||||
// Mode provides the OS interpreted mode of the file attributes
func (f *FileAttribute) Mode() os.FileMode {
	return os.FileMode(f.FileMode)
}

// FileCacheAttribute is the subset of FileAttribute used by
// wcc_attr (the pre-operation half of wcc_data).
type FileCacheAttribute struct {
	Filesize     uint64
	Mtime, Ctime FileTime
}

// AsCache provides the wcc view of the file attributes,
// copying just the size and change-relevant timestamps.
func (f FileAttribute) AsCache() *FileCacheAttribute {
	wcc := FileCacheAttribute{
		Filesize: f.Filesize,
		Mtime:    f.Mtime,
		Ctime:    f.Ctime,
	}
	return &wcc
}
|
||||
|
||||
// ToFileAttribute creates an NFS fattr3 struct from an OS.FileInfo
|
||||
func ToFileAttribute(info os.FileInfo, filePath string) *FileAttribute {
|
||||
f := FileAttribute{}
|
||||
|
||||
m := info.Mode()
|
||||
f.FileMode = uint32(m)
|
||||
if info.IsDir() {
|
||||
f.Type = FileTypeDirectory
|
||||
} else if m&os.ModeSymlink != 0 {
|
||||
f.Type = FileTypeLink
|
||||
} else if m&os.ModeCharDevice != 0 {
|
||||
f.Type = FileTypeCharacter
|
||||
} else if m&os.ModeDevice != 0 {
|
||||
f.Type = FileTypeBlock
|
||||
} else if m&os.ModeSocket != 0 {
|
||||
f.Type = FileTypeSocket
|
||||
} else if m&os.ModeNamedPipe != 0 {
|
||||
f.Type = FileTypeFIFO
|
||||
} else {
|
||||
f.Type = FileTypeRegular
|
||||
}
|
||||
// The number of hard links to the file.
|
||||
f.Nlink = 1
|
||||
|
||||
if a := file.GetInfo(info); a != nil {
|
||||
f.Nlink = a.Nlink
|
||||
f.UID = a.UID
|
||||
f.GID = a.GID
|
||||
f.SpecData = [2]uint32{a.Major, a.Minor}
|
||||
f.Fileid = a.Fileid
|
||||
} else {
|
||||
hasher := fnv.New64()
|
||||
_, _ = hasher.Write([]byte(filePath))
|
||||
f.Fileid = hasher.Sum64()
|
||||
}
|
||||
|
||||
f.Filesize = uint64(info.Size())
|
||||
f.Used = uint64(info.Size())
|
||||
f.Atime = ToNFSTime(info.ModTime())
|
||||
f.Mtime = f.Atime
|
||||
f.Ctime = f.Atime
|
||||
return &f
|
||||
}
|
||||
|
||||
// tryStat attempts to create a FileAttribute from a path.
// Failures are logged and reported as nil rather than returned, since
// callers treat the resulting attributes as optional.
func tryStat(ctx context.Context, fs Filesystem, path []string) *FileAttribute {
	fullPath := fs.Join(path...)
	attrs, err := fs.Lstat(ctx, fullPath)
	if err != nil || attrs == nil {
		Log.Errorf("err loading attrs for %s: %v", fs.Join(path...), err)
		return nil
	}
	return ToFileAttribute(attrs, fullPath)
}
|
||||
|
||||
// WriteWcc writes the `wcc_data` representation of an object.
|
||||
func WriteWcc(writer io.Writer, pre *FileCacheAttribute, post *FileAttribute) error {
|
||||
if pre == nil {
|
||||
if err := xdr.Write(writer, uint32(0)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := xdr.Write(writer, uint32(1)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := xdr.Write(writer, *pre); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if post == nil {
|
||||
if err := xdr.Write(writer, uint32(0)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := xdr.Write(writer, uint32(1)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := xdr.Write(writer, *post); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WritePostOpAttrs writes the `post_op_attr` representation of a files attributes
|
||||
func WritePostOpAttrs(writer io.Writer, post *FileAttribute) error {
|
||||
if post == nil {
|
||||
if err := xdr.Write(writer, uint32(0)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := xdr.Write(writer, uint32(1)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := xdr.Write(writer, *post); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetFileAttributes represents a command to update some metadata
// about a file. Nil fields are left unchanged (sattr3 semantics);
// see Apply for how each field is enacted.
type SetFileAttributes struct {
	// SetMode is the requested permission bits; only os.ModePerm bits
	// are honored (see Mode and Apply).
	SetMode *uint32
	SetUID  *uint32
	SetGID  *uint32
	// SetSize truncates the file to the given length.
	SetSize  *uint64
	SetAtime *time.Time
	SetMtime *time.Time
}
|
||||
|
||||
// Apply uses a `Change` implementation to set defined attributes on a
|
||||
// provided file.
|
||||
func (s *SetFileAttributes) Apply(ctx context.Context, changer Change, fs Filesystem, file string) error {
|
||||
curOS, err := fs.Lstat(ctx, file)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return &NFSStatusError{NFSStatusNoEnt, os.ErrNotExist}
|
||||
} else if errors.Is(err, os.ErrPermission) {
|
||||
return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
|
||||
} else if err != nil {
|
||||
return nil
|
||||
}
|
||||
curr := ToFileAttribute(curOS, file)
|
||||
|
||||
if s.SetMode != nil {
|
||||
mode := os.FileMode(*s.SetMode) & os.ModePerm
|
||||
if mode != curr.Mode().Perm() {
|
||||
if changer == nil {
|
||||
return &NFSStatusError{NFSStatusNotSupp, os.ErrPermission}
|
||||
}
|
||||
if err := changer.Chmod(ctx, file, mode); err != nil {
|
||||
if errors.Is(err, os.ErrPermission) {
|
||||
return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if s.SetUID != nil || s.SetGID != nil {
|
||||
euid := curr.UID
|
||||
if s.SetUID != nil {
|
||||
euid = *s.SetUID
|
||||
}
|
||||
egid := curr.GID
|
||||
if s.SetGID != nil {
|
||||
egid = *s.SetGID
|
||||
}
|
||||
if euid != curr.UID || egid != curr.GID {
|
||||
if changer == nil {
|
||||
return &NFSStatusError{NFSStatusNotSupp, os.ErrPermission}
|
||||
}
|
||||
if err := changer.Lchown(ctx, file, int(euid), int(egid)); err != nil {
|
||||
if errors.Is(err, os.ErrPermission) {
|
||||
return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if s.SetSize != nil {
|
||||
if curr.Mode()&os.ModeSymlink != 0 {
|
||||
return &NFSStatusError{NFSStatusNotSupp, os.ErrInvalid}
|
||||
}
|
||||
fp, err := fs.OpenFile(ctx, file, os.O_WRONLY|os.O_EXCL, 0)
|
||||
if errors.Is(err, os.ErrPermission) {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
if *s.SetSize > math.MaxInt64 {
|
||||
return &NFSStatusError{NFSStatusInval, os.ErrInvalid}
|
||||
}
|
||||
if err := fp.Truncate(ctx, int64(*s.SetSize)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := fp.Close(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if s.SetAtime != nil || s.SetMtime != nil {
|
||||
atime := curr.Atime.Native()
|
||||
if s.SetAtime != nil {
|
||||
atime = s.SetAtime
|
||||
}
|
||||
mtime := curr.Mtime.Native()
|
||||
if s.SetMtime != nil {
|
||||
mtime = s.SetMtime
|
||||
}
|
||||
if atime != curr.Atime.Native() || mtime != curr.Mtime.Native() {
|
||||
if changer == nil {
|
||||
return &NFSStatusError{NFSStatusNotSupp, os.ErrPermission}
|
||||
}
|
||||
if err := changer.Chtimes(ctx, file, *atime, *mtime); err != nil {
|
||||
if errors.Is(err, os.ErrPermission) {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mode returns a mode if specified or the provided default mode.
// Only the permission bits of a requested mode are honored, matching
// the masking performed in Apply.
func (s *SetFileAttributes) Mode(def os.FileMode) os.FileMode {
	if s.SetMode != nil {
		return os.FileMode(*s.SetMode) & os.ModePerm
	}
	return def
}
|
||||
|
||||
// ReadSetFileAttributes reads an sattr3 xdr stream into a go struct.
|
||||
func ReadSetFileAttributes(r io.Reader) (*SetFileAttributes, error) {
|
||||
attrs := SetFileAttributes{}
|
||||
hasMode, err := xdr.ReadUint32(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if hasMode != 0 {
|
||||
mode, err := xdr.ReadUint32(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attrs.SetMode = &mode
|
||||
}
|
||||
hasUID, err := xdr.ReadUint32(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if hasUID != 0 {
|
||||
uid, err := xdr.ReadUint32(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attrs.SetUID = &uid
|
||||
}
|
||||
hasGID, err := xdr.ReadUint32(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if hasGID != 0 {
|
||||
gid, err := xdr.ReadUint32(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attrs.SetGID = &gid
|
||||
}
|
||||
hasSize, err := xdr.ReadUint32(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if hasSize != 0 {
|
||||
var size uint64
|
||||
attrs.SetSize = &size
|
||||
if err := xdr.Read(r, &size); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
aTime, err := xdr.ReadUint32(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if aTime == 1 {
|
||||
now := time.Now()
|
||||
attrs.SetAtime = &now
|
||||
} else if aTime == 2 {
|
||||
t := FileTime{}
|
||||
if err := xdr.Read(r, &t); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attrs.SetAtime = t.Native()
|
||||
}
|
||||
mTime, err := xdr.ReadUint32(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if mTime == 1 {
|
||||
now := time.Now()
|
||||
attrs.SetMtime = &now
|
||||
} else if mTime == 2 {
|
||||
t := FileTime{}
|
||||
if err := xdr.Read(r, &t); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attrs.SetMtime = t.Native()
|
||||
}
|
||||
return &attrs, nil
|
||||
}
|
17
pkg/go-nfs/file/file.go
Normal file
17
pkg/go-nfs/file/file.go
Normal file
|
@ -0,0 +1,17 @@
|
|||
package file
|
||||
|
||||
import "os"
|
||||
|
||||
// FileInfo carries unix-specific file metadata (link count, ownership,
// device numbers, and inode) extracted from an os.FileInfo's Sys() value.
type FileInfo struct {
	Nlink  uint32 // number of hard links
	UID    uint32 // owner user id
	GID    uint32 // owner group id
	Major  uint32 // device major number (meaningful for device nodes)
	Minor  uint32 // device minor number (meaningful for device nodes)
	Fileid uint64 // inode number
}

// GetInfo extracts some non-standardized items from the result of a Stat call.
// It returns nil when the platform cannot provide this information (see the
// platform-specific getInfo implementations).
func GetInfo(fi os.FileInfo) *FileInfo {
	return getInfo(fi)
}
|
24
pkg/go-nfs/file/file_unix.go
Normal file
24
pkg/go-nfs/file/file_unix.go
Normal file
|
@ -0,0 +1,24 @@
|
|||
//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris
|
||||
|
||||
package file
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func getInfo(info os.FileInfo) *FileInfo {
|
||||
fi := &FileInfo{}
|
||||
if s, ok := info.Sys().(*syscall.Stat_t); ok {
|
||||
fi.Nlink = uint32(s.Nlink)
|
||||
fi.UID = s.Uid
|
||||
fi.GID = s.Gid
|
||||
fi.Major = unix.Major(uint64(s.Rdev))
|
||||
fi.Minor = unix.Minor(uint64(s.Rdev))
|
||||
fi.Fileid = s.Ino
|
||||
return fi
|
||||
}
|
||||
return nil
|
||||
}
|
12
pkg/go-nfs/file/file_windows.go
Normal file
12
pkg/go-nfs/file/file_windows.go
Normal file
|
@ -0,0 +1,12 @@
|
|||
//go:build windows
|
||||
|
||||
package file
|
||||
|
||||
import "os"
|
||||
|
||||
// getInfo is the Windows implementation. The extra unix metadata is not
// populated here, so it always returns nil; GetInfo callers must handle nil.
func getInfo(info os.FileInfo) *FileInfo {
	// https://godoc.org/golang.org/x/sys/windows#GetFileInformationByHandle
	// can be potentially used to populate Nlink

	return nil
}
|
101
pkg/go-nfs/filesystem.go
Normal file
101
pkg/go-nfs/filesystem.go
Normal file
|
@ -0,0 +1,101 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
|
||||
)
|
||||
|
||||
// FSStat returns metadata about a file system
type FSStat struct {
	TotalSize      uint64 // total size of the file system, in bytes
	FreeSize       uint64 // free space, in bytes
	AvailableSize  uint64 // space available to the requesting user, in bytes
	TotalFiles     uint64 // total number of file slots
	FreeFiles      uint64 // free file slots
	AvailableFiles uint64 // file slots available to the requesting user
	// CacheHint is called "invarsec" in the nfs standard
	CacheHint time.Duration
}
|
||||
|
||||
// Filesystem is the context-aware virtual filesystem exposed over NFS.
type Filesystem interface {
	// Create creates the named file with mode 0666 (before umask), truncating
	// it if it already exists. If successful, methods on the returned File can
	// be used for I/O; the associated file descriptor has mode O_RDWR.
	Create(ctx context.Context, filename string) (File, error)
	// Open opens the named file for reading. If successful, methods on the
	// returned file can be used for reading; the associated file descriptor has
	// mode O_RDONLY.
	Open(ctx context.Context, filename string) (File, error)
	// OpenFile is the generalized open call; most users will use Open or Create
	// instead. It opens the named file with specified flag (O_RDONLY etc.) and
	// perm, (0666 etc.) if applicable. If successful, methods on the returned
	// File can be used for I/O.
	OpenFile(ctx context.Context, filename string, flag int, perm os.FileMode) (File, error)
	// Stat returns a FileInfo describing the named file.
	Stat(ctx context.Context, filename string) (os.FileInfo, error)
	// Rename renames (moves) oldpath to newpath. If newpath already exists and
	// is not a directory, Rename replaces it. OS-specific restrictions may
	// apply when oldpath and newpath are in different directories.
	Rename(ctx context.Context, oldpath, newpath string) error
	// Remove removes the named file or directory.
	Remove(ctx context.Context, filename string) error
	// Join joins any number of path elements into a single path, adding a
	// Separator if necessary. Join calls filepath.Clean on the result; in
	// particular, all empty strings are ignored. On Windows, the result is a
	// UNC path if and only if the first path element is a UNC path.
	Join(elem ...string) string

	// ReadDir reads the directory named by dirname and returns a list of
	// directory entries sorted by filename.
	ReadDir(ctx context.Context, path string) ([]os.FileInfo, error)
	// MkdirAll creates a directory named path, along with any necessary
	// parents, and returns nil, or else returns an error. The permission bits
	// perm are used for all directories that MkdirAll creates. If path is
	// already a directory, MkdirAll does nothing and returns nil.
	MkdirAll(ctx context.Context, filename string, perm os.FileMode) error

	// Lstat returns a FileInfo describing the named file. If the file is a
	// symbolic link, the returned FileInfo describes the symbolic link. Lstat
	// makes no attempt to follow the link.
	Lstat(ctx context.Context, filename string) (os.FileInfo, error)
	// Symlink creates a symbolic-link from link to target. target may be an
	// absolute or relative path, and need not refer to an existing node.
	// Parent directories of link are created as necessary.
	Symlink(ctx context.Context, target, link string) error
	// Readlink returns the target path of link.
	Readlink(ctx context.Context, link string) (string, error)
}

// File is an open handle combining context-aware I/O (ctxio) with seeking
// and truncation.
type File interface {
	// Name returns the name of the file as presented to Open.
	Name() string
	ctxio.Writer
	ctxio.Reader
	ctxio.ReaderAt
	io.Seeker
	ctxio.Closer

	// Truncate the file.
	Truncate(ctx context.Context, size int64) error
}

// Change abstract the FileInfo change related operations in a storage-agnostic
// interface as an extension to the Basic interface
type Change interface {
	// Chmod changes the mode of the named file to mode. If the file is a
	// symbolic link, it changes the mode of the link's target.
	Chmod(ctx context.Context, name string, mode os.FileMode) error
	// Lchown changes the numeric uid and gid of the named file. If the file is
	// a symbolic link, it changes the uid and gid of the link itself.
	Lchown(ctx context.Context, name string, uid, gid int) error
	// Chtimes changes the access and modification times of the named file,
	// similar to the Unix utime() or utimes() functions.
	//
	// The underlying filesystem may truncate or round the values to a less
	// precise time unit.
	Chtimes(ctx context.Context, name string, atime time.Time, mtime time.Time) error
}
|
52
pkg/go-nfs/handler.go
Normal file
52
pkg/go-nfs/handler.go
Normal file
|
@ -0,0 +1,52 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/fs"
|
||||
"net"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxbilly"
|
||||
)
|
||||
|
||||
// Handler represents the interface of the file system / vfs being exposed over NFS
type Handler interface {
	// Required methods

	// Mount handles a MOUNT request from the given connection and returns
	// the status, the filesystem to expose, and the accepted auth flavors.
	Mount(context.Context, net.Conn, MountRequest) (MountStatus, Filesystem, []AuthFlavor)

	// Change can return 'nil' if filesystem is read-only
	// If the returned value can be cast to `UnixChange`, mknod and link RPCs will be available.
	Change(Filesystem) Change

	// Optional methods - generic helpers or trivial implementations can be sufficient depending on use case.

	// Fill in information about a file system's free space.
	FSStat(context.Context, Filesystem, *FSStat) error

	// represent file objects as opaque references
	// Can be safely implemented via helpers/cachinghandler.
	ToHandle(fs Filesystem, path []string) []byte
	FromHandle(fh []byte) (Filesystem, []string, error)
	InvalidateHandle(Filesystem, []byte) error

	// How many handles can be safely maintained by the handler.
	HandleLimit() int
}

// UnixChange extends the billy `Change` interface with support for special files.
type UnixChange interface {
	ctxbilly.Change
	Mknod(ctx context.Context, path string, mode uint32, major uint32, minor uint32) error
	Mkfifo(ctx context.Context, path string, mode uint32) error
	Socket(ctx context.Context, path string) error
	Link(ctx context.Context, path string, link string) error
}

// CachingHandler represents the optional caching work that a user may wish to over-ride with
// their own implementations, but which can be otherwise provided through defaults.
type CachingHandler interface {
	// VerifierFor returns an opaque cookie verifier for the given directory
	// listing.
	VerifierFor(path string, contents []fs.FileInfo) uint64

	// fs.FileInfo needs to be sorted by Name(), nil in case of a cache-miss
	DataForVerifier(path string, verifier uint64) []fs.FileInfo
}
|
157
pkg/go-nfs/helpers/billlyfs.go
Normal file
157
pkg/go-nfs/helpers/billlyfs.go
Normal file
|
@ -0,0 +1,157 @@
|
|||
package helpers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/fs"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
|
||||
"github.com/go-git/go-billy/v5"
|
||||
)
|
||||
|
||||
func WrapBillyFS(bf billy.Filesystem) nfs.Filesystem {
|
||||
return &wrapFS{
|
||||
Filesystem: bf,
|
||||
}
|
||||
}
|
||||
|
||||
type wrapFS struct {
|
||||
billy.Filesystem
|
||||
}
|
||||
|
||||
var _ nfs.Filesystem = (*wrapFS)(nil)
|
||||
|
||||
// Create implements Filesystem.
|
||||
// Subtle: this method shadows the method (Filesystem).Create of MemFS.Filesystem.
|
||||
func (m *wrapFS) Create(ctx context.Context, filename string) (nfs.File, error) {
|
||||
bf, err := m.Filesystem.Create(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &wrapFile{bf}, nil
|
||||
}
|
||||
|
||||
// Lstat implements Filesystem. The ctx is ignored; the wrapped billy
// filesystem is not context-aware.
// Subtle: this method shadows the method (Filesystem).Lstat of MemFS.Filesystem.
func (m *wrapFS) Lstat(ctx context.Context, filename string) (fs.FileInfo, error) {
	return m.Filesystem.Lstat(filename)
}

// MkdirAll implements Filesystem. The ctx is ignored.
// Subtle: this method shadows the method (Filesystem).MkdirAll of MemFS.Filesystem.
func (m *wrapFS) MkdirAll(ctx context.Context, filename string, perm fs.FileMode) error {
	return m.Filesystem.MkdirAll(filename, perm)
}

// Open implements Filesystem, wrapping the billy.File into an nfs.File.
// Subtle: this method shadows the method (Filesystem).Open of MemFS.Filesystem.
func (m *wrapFS) Open(ctx context.Context, filename string) (nfs.File, error) {
	bf, err := m.Filesystem.Open(filename)
	if err != nil {
		return nil, err
	}
	return WrapFile(bf), nil
}

// OpenFile implements Filesystem, wrapping the billy.File into an nfs.File.
// Subtle: this method shadows the method (Filesystem).OpenFile of MemFS.Filesystem.
func (m *wrapFS) OpenFile(ctx context.Context, filename string, flag int, perm fs.FileMode) (nfs.File, error) {
	bf, err := m.Filesystem.OpenFile(filename, flag, perm)
	if err != nil {
		return nil, err
	}
	return WrapFile(bf), nil
}

// ReadDir implements Filesystem. The ctx is ignored.
// Subtle: this method shadows the method (Filesystem).ReadDir of MemFS.Filesystem.
func (m *wrapFS) ReadDir(ctx context.Context, path string) ([]fs.FileInfo, error) {
	return m.Filesystem.ReadDir(path)
}

// Readlink implements Filesystem. The ctx is ignored.
// Subtle: this method shadows the method (Filesystem).Readlink of MemFS.Filesystem.
func (m *wrapFS) Readlink(ctx context.Context, link string) (string, error) {
	return m.Filesystem.Readlink(link)
}

// Remove implements Filesystem. The ctx is ignored.
// Subtle: this method shadows the method (Filesystem).Remove of MemFS.Filesystem.
func (m *wrapFS) Remove(ctx context.Context, filename string) error {
	return m.Filesystem.Remove(filename)
}

// Rename implements Filesystem. The ctx is ignored.
// Subtle: this method shadows the method (Filesystem).Rename of MemFS.Filesystem.
func (m *wrapFS) Rename(ctx context.Context, oldpath string, newpath string) error {
	return m.Filesystem.Rename(oldpath, newpath)
}

// Stat implements Filesystem. The ctx is ignored.
// Subtle: this method shadows the method (Filesystem).Stat of MemFS.Filesystem.
func (m *wrapFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
	return m.Filesystem.Stat(filename)
}

// Symlink implements Filesystem. The ctx is ignored.
// Subtle: this method shadows the method (Filesystem).Symlink of MemFS.Filesystem.
func (m *wrapFS) Symlink(ctx context.Context, target string, link string) error {
	return m.Filesystem.Symlink(target, link)
}
|
||||
|
||||
// WrapFile adapts a billy.File to the nfs.File interface by accepting (and
// ignoring) a context on each I/O method.
func WrapFile(bf billy.File) nfs.File {
	return &wrapFile{File: bf}
}

// wrapFile embeds the wrapped billy.File and shadows its methods with
// context-taking variants.
type wrapFile struct {
	billy.File
}

var _ nfs.File = (*wrapFile)(nil)

// Close implements File. The ctx is ignored.
// Subtle: this method shadows the method (File).Close of MemFile.File.
func (m *wrapFile) Close(ctx context.Context) error {
	return m.File.Close()
}

// Lock implements File.
// Subtle: this method shadows the method (File).Lock of MemFile.File.
func (m *wrapFile) Lock() error {
	return m.File.Lock()
}

// Name implements File.
// Subtle: this method shadows the method (File).Name of MemFile.File.
func (m *wrapFile) Name() string {
	return m.File.Name()
}

// Truncate implements File. The ctx is ignored.
// Subtle: this method shadows the method (File).Truncate of memFile.File.
func (m *wrapFile) Truncate(ctx context.Context, size int64) error {
	return m.File.Truncate(size)
}

// Read implements File. The ctx is ignored.
// Subtle: this method shadows the method (File).Read of MemFile.File.
func (m *wrapFile) Read(ctx context.Context, p []byte) (n int, err error) {
	return m.File.Read(p)
}

// ReadAt implements File. The ctx is ignored.
// Subtle: this method shadows the method (File).ReadAt of MemFile.File.
func (m *wrapFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
	return m.File.ReadAt(p, off)
}

// Unlock implements File.
// Subtle: this method shadows the method (File).Unlock of MemFile.File.
func (m *wrapFile) Unlock() error {
	return m.File.Unlock()
}

// Write implements File. The ctx is ignored.
// Subtle: this method shadows the method (File).Write of MemFile.File.
func (m *wrapFile) Write(ctx context.Context, p []byte) (n int, err error) {
	return m.File.Write(p)
}
|
|
@ -1,15 +1,13 @@
|
|||
package nfs
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"io/fs"
|
||||
"reflect"
|
||||
"slices"
|
||||
|
||||
"github.com/willscott/go-nfs"
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
|
||||
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/google/uuid"
|
||||
lru "github.com/hashicorp/golang-lru/v2"
|
||||
)
|
||||
|
@ -46,14 +44,14 @@ type CachingHandler struct {
|
|||
}
|
||||
|
||||
type entry struct {
|
||||
f billy.Filesystem
|
||||
f nfs.Filesystem
|
||||
p []string
|
||||
}
|
||||
|
||||
// ToHandle takes a file and represents it with an opaque handle to reference it.
|
||||
// In stateless nfs (when it's serving a unix fs) this can be the device + inode
|
||||
// but we can generalize with a stateful local cache of handed out IDs.
|
||||
func (c *CachingHandler) ToHandle(f billy.Filesystem, path []string) []byte {
|
||||
func (c *CachingHandler) ToHandle(f nfs.Filesystem, path []string) []byte {
|
||||
joinedPath := f.Join(path...)
|
||||
|
||||
if handle := c.searchReverseCache(f, joinedPath); handle != nil {
|
||||
|
@ -81,7 +79,7 @@ func (c *CachingHandler) ToHandle(f billy.Filesystem, path []string) []byte {
|
|||
}
|
||||
|
||||
// FromHandle converts from an opaque handle to the file it represents
|
||||
func (c *CachingHandler) FromHandle(fh []byte) (billy.Filesystem, []string, error) {
|
||||
func (c *CachingHandler) FromHandle(fh []byte) (nfs.Filesystem, []string, error) {
|
||||
id, err := uuid.FromBytes(fh)
|
||||
if err != nil {
|
||||
return nil, []string{}, err
|
||||
|
@ -94,13 +92,16 @@ func (c *CachingHandler) FromHandle(fh []byte) (billy.Filesystem, []string, erro
|
|||
_, _ = c.activeHandles.Get(k)
|
||||
}
|
||||
}
|
||||
|
||||
return f.f, slices.Clone(f.p), nil
|
||||
if ok {
|
||||
newP := make([]string, len(f.p))
|
||||
copy(newP, f.p)
|
||||
return f.f, newP, nil
|
||||
}
|
||||
}
|
||||
return nil, []string{}, &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
|
||||
}
|
||||
|
||||
func (c *CachingHandler) searchReverseCache(f billy.Filesystem, path string) []byte {
|
||||
func (c *CachingHandler) searchReverseCache(f nfs.Filesystem, path string) []byte {
|
||||
uuids, exists := c.reverseHandles[path]
|
||||
|
||||
if !exists {
|
||||
|
@ -133,7 +134,7 @@ func (c *CachingHandler) evictReverseCache(path string, handle uuid.UUID) {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *CachingHandler) InvalidateHandle(fs billy.Filesystem, handle []byte) error {
|
||||
func (c *CachingHandler) InvalidateHandle(fs nfs.Filesystem, handle []byte) error {
|
||||
//Remove from cache
|
||||
id, _ := uuid.FromBytes(handle)
|
||||
entry, ok := c.activeHandles.Get(id)
|
414
pkg/go-nfs/helpers/memfs/memfs.go
Normal file
414
pkg/go-nfs/helpers/memfs/memfs.go
Normal file
|
@ -0,0 +1,414 @@
|
|||
// Package memfs is a variant of "github.com/go-git/go-billy/v5/memfs" with
|
||||
// stable mtimes for items.
|
||||
package memfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/go-git/go-billy/v5/helper/chroot"
|
||||
"github.com/go-git/go-billy/v5/util"
|
||||
)
|
||||
|
||||
const separator = filepath.Separator
|
||||
|
||||
// Memory is an in-memory filesystem backed by a flat path-keyed storage.
// Unlike the upstream go-billy memfs, mtimes of items are stable.
type Memory struct {
	s *storage // flat map of cleaned paths to file nodes
}

// New returns a new Memory filesystem.
func New() billy.Filesystem {
	fs := &Memory{s: newStorage()}
	// Chroot at the root separator so callers see a rooted filesystem.
	return chroot.New(fs, string(separator))
}
|
||||
|
||||
// Create opens filename for reading and writing, creating it if necessary
// and truncating any existing content (mode 0666 before umask).
func (fs *Memory) Create(filename string) (billy.File, error) {
	return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
}

// Open opens filename read-only.
func (fs *Memory) Open(filename string) (billy.File, error) {
	return fs.OpenFile(filename, os.O_RDONLY, 0)
}
|
||||
|
||||
// OpenFile opens filename with the given flags and permissions. A missing
// file is created only when O_CREATE is set; an existing file fails when
// O_EXCL is set. Symlinks are resolved before opening, and directories
// cannot be opened as files.
func (fs *Memory) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
	f, has := fs.s.Get(filename)
	if !has {
		if !isCreate(flag) {
			return nil, os.ErrNotExist
		}

		var err error
		// Register a fresh node (and its parents) in the storage tree.
		f, err = fs.s.New(filename, perm, flag)
		if err != nil {
			return nil, err
		}
	} else {
		if isExclusive(flag) {
			return nil, os.ErrExist
		}

		// Follow a symlink to its target instead of opening the link node.
		if target, isLink := fs.resolveLink(filename, f); isLink {
			return fs.OpenFile(target, flag, perm)
		}
	}

	if f.mode.IsDir() {
		return nil, fmt.Errorf("cannot open directory: %s", filename)
	}

	// Each open returns an independent handle (its own position and flags)
	// that shares the underlying content (see file.Duplicate).
	return f.Duplicate(filename, perm, flag), nil
}
|
||||
|
||||
// resolveLink returns the symlink target of f, joined relative to
// fullpath's directory when the target is not absolute. For non-symlinks
// it returns fullpath unchanged with isLink == false.
func (fs *Memory) resolveLink(fullpath string, f *file) (target string, isLink bool) {
	if !isSymlink(f.mode) {
		return fullpath, false
	}

	// The link target is stored as the file's content (see Memory.Symlink).
	target = string(f.content.bytes)
	if !isAbs(target) {
		target = fs.Join(filepath.Dir(fullpath), target)
	}

	return target, true
}

// isAbs reports whether path is absolute. On Windows, filepath.IsAbs only
// accepts paths that start with a volume (e.g. `C:\`), but in this mem
// implementation any path starting with `separator` is also considered
// absolute.
func isAbs(path string) bool {
	return filepath.IsAbs(path) || strings.HasPrefix(path, string(separator))
}
|
||||
|
||||
// Stat returns file info for filename, following symlinks. The returned
// name is always the base of the requested path, even when a link was
// followed.
func (fs *Memory) Stat(filename string) (os.FileInfo, error) {
	f, has := fs.s.Get(filename)
	if !has {
		return nil, os.ErrNotExist
	}

	fi, _ := f.Stat()

	var err error
	if target, isLink := fs.resolveLink(filename, f); isLink {
		fi, err = fs.Stat(target)
		if err != nil {
			return nil, err
		}
	}

	// the name of the file should always be the name of the stated file, so
	// we overwrite the Stat returned from the storage with it, since the
	// filename may belong to a link.
	fi.(*fileInfo).name = filepath.Base(filename)
	return fi, nil
}

// Lstat returns file info for filename without following symlinks.
func (fs *Memory) Lstat(filename string) (os.FileInfo, error) {
	f, has := fs.s.Get(filename)
	if !has {
		return nil, os.ErrNotExist
	}

	return f.Stat()
}
|
||||
|
||||
type ByName []os.FileInfo
|
||||
|
||||
func (a ByName) Len() int { return len(a) }
|
||||
func (a ByName) Less(i, j int) bool { return a[i].Name() < a[j].Name() }
|
||||
func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
func (fs *Memory) ReadDir(path string) ([]os.FileInfo, error) {
|
||||
if f, has := fs.s.Get(path); has {
|
||||
if target, isLink := fs.resolveLink(path, f); isLink {
|
||||
return fs.ReadDir(target)
|
||||
}
|
||||
} else {
|
||||
return nil, &os.PathError{Op: "open", Path: path, Err: syscall.ENOENT}
|
||||
}
|
||||
|
||||
var entries []os.FileInfo
|
||||
for _, f := range fs.s.Children(path) {
|
||||
fi, _ := f.Stat()
|
||||
entries = append(entries, fi)
|
||||
}
|
||||
|
||||
sort.Sort(ByName(entries))
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// MkdirAll creates the directory path together with any missing parents.
func (fs *Memory) MkdirAll(path string, perm os.FileMode) error {
	_, err := fs.s.New(path, perm|os.ModeDir, 0)
	return err
}

// TempFile creates a uniquely named temporary file in dir using prefix.
func (fs *Memory) TempFile(dir, prefix string) (billy.File, error) {
	return util.TempFile(fs, dir, prefix)
}

// Rename moves from to to, including any nested children.
func (fs *Memory) Rename(from, to string) error {
	return fs.s.Rename(from, to)
}

// Remove deletes the file or (empty) directory at filename.
func (fs *Memory) Remove(filename string) error {
	return fs.s.Remove(filename)
}

// Join joins path elements using the OS separator and cleans the result.
func (fs *Memory) Join(elem ...string) string {
	return filepath.Join(elem...)
}
|
||||
|
||||
// Symlink creates a symbolic link at link pointing to target. The target
// is stored as the link file's content. It fails with os.ErrExist when
// link already resolves to an existing file.
func (fs *Memory) Symlink(target, link string) error {
	_, err := fs.Stat(link)
	if err == nil {
		return os.ErrExist
	}

	if !os.IsNotExist(err) {
		return err
	}

	return util.WriteFile(fs, link, []byte(target), 0777|os.ModeSymlink)
}

// Readlink returns the target of the symlink at link. It returns a
// *os.PathError when the path exists but is not a symlink.
func (fs *Memory) Readlink(link string) (string, error) {
	f, has := fs.s.Get(link)
	if !has {
		return "", os.ErrNotExist
	}

	if !isSymlink(f.mode) {
		return "", &os.PathError{
			Op:   "readlink",
			Path: link,
			Err:  fmt.Errorf("not a symlink"),
		}
	}

	// The stored content is the link target written by Symlink.
	return string(f.content.bytes), nil
}
|
||||
|
||||
// Capabilities implements the Capable interface.
// Everything except locking is supported (file.Lock/Unlock are no-ops).
func (fs *Memory) Capabilities() billy.Capability {
	return billy.WriteCapability |
		billy.ReadCapability |
		billy.ReadAndWriteCapability |
		billy.SeekCapability |
		billy.TruncateCapability
}
|
||||
|
||||
// file is one open handle within the in-memory filesystem. Handles created
// by Duplicate share the same *content but keep independent positions,
// flags, and modes.
type file struct {
	name     string
	content  *content    // byte payload, shared across duplicated handles
	position int64       // current read/write offset
	flag     int         // open flags (O_RDONLY, O_RDWR, ...)
	mode     os.FileMode
	mtime    time.Time   // last modification time of this handle

	isClosed bool
}

// Name returns the name of the file as presented to Open.
func (f *file) Name() string {
	return f.name
}
|
||||
|
||||
// Read reads from the current position and advances it by the number of
// bytes read. A partial read at the end of content returns n > 0 with a
// nil error; io.EOF is reported only when nothing was read.
func (f *file) Read(b []byte) (int, error) {
	n, err := f.ReadAt(b, f.position)
	f.position += int64(n)

	if err == io.EOF && n != 0 {
		err = nil
	}

	return n, err
}

// ReadAt reads up to len(b) bytes starting at offset off without moving
// the file position. It fails on closed handles and on handles opened
// without read access.
func (f *file) ReadAt(b []byte, off int64) (int, error) {
	if f.isClosed {
		return 0, os.ErrClosed
	}

	if !isReadAndWrite(f.flag) && !isReadOnly(f.flag) {
		return 0, errors.New("read not supported")
	}

	n, err := f.content.ReadAt(b, off)

	return n, err
}
|
||||
|
||||
func (f *file) Seek(offset int64, whence int) (int64, error) {
|
||||
if f.isClosed {
|
||||
return 0, os.ErrClosed
|
||||
}
|
||||
|
||||
switch whence {
|
||||
case io.SeekCurrent:
|
||||
f.position += offset
|
||||
case io.SeekStart:
|
||||
f.position = offset
|
||||
case io.SeekEnd:
|
||||
f.position = int64(f.content.Len()) + offset
|
||||
}
|
||||
|
||||
return f.position, nil
|
||||
}
|
||||
|
||||
// Write writes p at the current position and advances it (see WriteAt).
func (f *file) Write(p []byte) (int, error) {
	return f.WriteAt(p, f.position)
}

// WriteAt writes p at offset off, moves the position to the end of the
// written range, and refreshes the modification time. It fails on closed
// handles and on handles opened without write access.
func (f *file) WriteAt(p []byte, off int64) (int, error) {
	if f.isClosed {
		return 0, os.ErrClosed
	}

	if !isReadAndWrite(f.flag) && !isWriteOnly(f.flag) {
		return 0, errors.New("write not supported")
	}

	n, err := f.content.WriteAt(p, off)
	f.position = off + int64(n)
	f.mtime = time.Now()

	return n, err
}
|
||||
|
||||
// Close marks the handle closed; further reads/writes/seeks fail with
// os.ErrClosed. Closing an already-closed handle is an error.
func (f *file) Close() error {
	if f.isClosed {
		return os.ErrClosed
	}

	f.isClosed = true
	return nil
}

// Truncate resizes the shared content to size, zero-filling when growing,
// and refreshes the modification time.
// NOTE(review): this mutates f.content.bytes without taking the content
// mutex that WriteAt/ReadAt use — confirm callers never truncate
// concurrently with I/O on a duplicated handle.
func (f *file) Truncate(size int64) error {
	if size < int64(len(f.content.bytes)) {
		f.content.bytes = f.content.bytes[:size]
	} else if more := int(size) - len(f.content.bytes); more > 0 {
		f.content.bytes = append(f.content.bytes, make([]byte, more)...)
	}
	f.mtime = time.Now()

	return nil
}
|
||||
|
||||
func (f *file) Duplicate(filename string, mode os.FileMode, flag int) billy.File {
|
||||
new := &file{
|
||||
name: filename,
|
||||
content: f.content,
|
||||
mode: mode,
|
||||
flag: flag,
|
||||
mtime: time.Now(),
|
||||
}
|
||||
|
||||
if isTruncate(flag) {
|
||||
new.content.Truncate()
|
||||
}
|
||||
|
||||
if isAppend(flag) {
|
||||
new.position = int64(new.content.Len())
|
||||
}
|
||||
|
||||
return new
|
||||
}
|
||||
|
||||
// Stat returns a point-in-time snapshot of this handle's metadata.
func (f *file) Stat() (os.FileInfo, error) {
	return &fileInfo{
		name:  f.Name(),
		mode:  f.mode,
		size:  f.content.Len(),
		mtime: f.mtime,
	}, nil
}

// Lock is a no-op in memfs.
func (f *file) Lock() error {
	return nil
}

// Unlock is a no-op in memfs.
func (f *file) Unlock() error {
	return nil
}
}
|
||||
|
||||
// fileInfo is the os.FileInfo implementation returned by file.Stat.
type fileInfo struct {
	name  string
	size  int
	mode  os.FileMode
	mtime time.Time
}

// Name returns the base name of the file.
func (fi *fileInfo) Name() string {
	return fi.name
}

// Size returns the content length in bytes.
func (fi *fileInfo) Size() int64 {
	return int64(fi.size)
}

// Mode returns the file mode bits.
func (fi *fileInfo) Mode() os.FileMode {
	return fi.mode
}

// ModTime returns the last modification time.
func (fi *fileInfo) ModTime() time.Time {
	return fi.mtime
}

// IsDir reports whether the entry describes a directory.
func (fi *fileInfo) IsDir() bool {
	return fi.mode.IsDir()
}

// Sys always returns nil; there is no underlying system data source.
func (*fileInfo) Sys() interface{} {
	return nil
}
|
||||
|
||||
func (c *content) Truncate() {
|
||||
c.bytes = make([]byte, 0)
|
||||
}
|
||||
|
||||
func (c *content) Len() int {
|
||||
return len(c.bytes)
|
||||
}
|
||||
|
||||
func isCreate(flag int) bool {
|
||||
return flag&os.O_CREATE != 0
|
||||
}
|
||||
|
||||
func isExclusive(flag int) bool {
|
||||
return flag&os.O_EXCL != 0
|
||||
}
|
||||
|
||||
func isAppend(flag int) bool {
|
||||
return flag&os.O_APPEND != 0
|
||||
}
|
||||
|
||||
func isTruncate(flag int) bool {
|
||||
return flag&os.O_TRUNC != 0
|
||||
}
|
||||
|
||||
func isReadAndWrite(flag int) bool {
|
||||
return flag&os.O_RDWR != 0
|
||||
}
|
||||
|
||||
func isReadOnly(flag int) bool {
|
||||
return flag == os.O_RDONLY
|
||||
}
|
||||
|
||||
func isWriteOnly(flag int) bool {
|
||||
return flag&os.O_WRONLY != 0
|
||||
}
|
||||
|
||||
func isSymlink(m os.FileMode) bool {
|
||||
return m&os.ModeSymlink != 0
|
||||
}
|
243
pkg/go-nfs/helpers/memfs/storage.go
Normal file
243
pkg/go-nfs/helpers/memfs/storage.go
Normal file
|
@ -0,0 +1,243 @@
|
|||
package memfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// storage is the flat backing store for memfs: files maps a cleaned path
// to its node, and children maps a directory path to its direct entries
// keyed by base name.
type storage struct {
	files    map[string]*file
	children map[string]map[string]*file
}

// newStorage returns an empty storage.
func newStorage() *storage {
	return &storage{
		files:    make(map[string]*file, 0),
		children: make(map[string]map[string]*file, 0),
	}
}
||||
|
||||
func (s *storage) Has(path string) bool {
|
||||
path = clean(path)
|
||||
|
||||
_, ok := s.files[path]
|
||||
return ok
|
||||
}
|
||||
|
||||
// New registers a node at path with the given mode and flags, creating
// parent directories as needed. If path already exists as a directory,
// New returns (nil, nil); an existing non-directory is an error.
func (s *storage) New(path string, mode os.FileMode, flag int) (*file, error) {
	path = clean(path)
	if s.Has(path) {
		if !s.MustGet(path).mode.IsDir() {
			return nil, fmt.Errorf("file already exists %q", path)
		}

		// Existing directory: nothing to do. Callers must be prepared for
		// a nil *file alongside a nil error here.
		return nil, nil
	}

	name := filepath.Base(path)

	f := &file{
		name:    name,
		content: &content{name: name},
		mode:    mode,
		flag:    flag,
		mtime:   time.Now(),
	}

	s.files[path] = f
	if err := s.createParent(path, mode, f); err != nil {
		return nil, err
	}
	return f, nil
}

// createParent ensures the parent directory of path exists (recursively,
// via New) and records f among its children. The root entry terminates
// the recursion.
func (s *storage) createParent(path string, mode os.FileMode, f *file) error {
	base := filepath.Dir(path)
	base = clean(base)
	if f.Name() == string(separator) {
		return nil
	}

	if _, err := s.New(base, mode.Perm()|os.ModeDir, 0); err != nil {
		return err
	}

	if _, ok := s.children[base]; !ok {
		s.children[base] = make(map[string]*file, 0)
	}

	s.children[base][f.Name()] = f
	return nil
}
|
||||
|
||||
func (s *storage) Children(path string) []*file {
|
||||
path = clean(path)
|
||||
|
||||
l := make([]*file, 0)
|
||||
for _, f := range s.children[path] {
|
||||
l = append(l, f)
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// MustGet returns the file at path, panicking when it does not exist.
// Only call it for paths already known to be present (e.g. after Has).
func (s *storage) MustGet(path string) *file {
	f, ok := s.Get(path)
	if !ok {
		panic(fmt.Errorf("couldn't find %q", path))
	}

	return f
}
|
||||
|
||||
func (s *storage) Get(path string) (*file, bool) {
|
||||
path = clean(path)
|
||||
if !s.Has(path) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
file, ok := s.files[path]
|
||||
return file, ok
|
||||
}
|
||||
|
||||
func (s *storage) Rename(from, to string) error {
|
||||
from = clean(from)
|
||||
to = clean(to)
|
||||
|
||||
if !s.Has(from) {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
move := [][2]string{{from, to}}
|
||||
|
||||
for pathFrom := range s.files {
|
||||
if pathFrom == from || !strings.HasPrefix(pathFrom, from) {
|
||||
continue
|
||||
}
|
||||
|
||||
rel, _ := filepath.Rel(from, pathFrom)
|
||||
pathTo := filepath.Join(to, rel)
|
||||
|
||||
move = append(move, [2]string{pathFrom, pathTo})
|
||||
}
|
||||
|
||||
for _, ops := range move {
|
||||
from := ops[0]
|
||||
to := ops[1]
|
||||
|
||||
if err := s.move(from, to); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// move relocates a single entry from -> to, re-linking it under the new
// parent (via createParent) and dropping the old bookkeeping afterwards.
func (s *storage) move(from, to string) error {
	s.files[to] = s.files[from]
	s.files[to].name = filepath.Base(to)
	s.children[to] = s.children[from]

	// Clean up the old entries only after createParent below has run.
	defer func() {
		delete(s.children, from)
		delete(s.files, from)
		delete(s.children[filepath.Dir(from)], filepath.Base(from))
	}()

	return s.createParent(to, 0644, s.files[to])
}

// Remove deletes the entry at path. Removing a non-empty directory is an
// error.
func (s *storage) Remove(path string) error {
	path = clean(path)

	f, has := s.Get(path)
	if !has {
		return os.ErrNotExist
	}

	if f.mode.IsDir() && len(s.children[path]) != 0 {
		return fmt.Errorf("dir: %s contains files", path)
	}

	base, file := filepath.Split(path)
	base = filepath.Clean(base)

	// Unlink from the parent's child map and the flat file map.
	delete(s.children[base], file)
	delete(s.files, path)
	return nil
}
|
||||
|
||||
// clean converts path to the host separator convention and normalizes it
// with filepath.Clean.
func clean(path string) string {
	path = filepath.FromSlash(path)
	return filepath.Clean(path)
}
|
||||
|
||||
// content is an in-memory, growable byte store supporting concurrent
// random-access reads and writes, guarded by an RWMutex.
type content struct {
	name  string
	bytes []byte

	m sync.RWMutex
}

// WriteAt writes p at offset off, zero-filling any gap past the current
// end. The buffer never shrinks, and len(p) is always reported written.
func (c *content) WriteAt(p []byte, off int64) (int, error) {
	if off < 0 {
		return 0, &os.PathError{
			Op:   "writeat",
			Path: c.name,
			Err:  errors.New("negative offset"),
		}
	}

	c.m.Lock()
	defer c.m.Unlock()

	prev := len(c.bytes)

	// Extend with zeros when writing past the current end.
	if gap := int(off) - prev; gap > 0 {
		c.bytes = append(c.bytes, make([]byte, gap)...)
	}

	c.bytes = append(c.bytes[:off], p...)
	// A write landing inside the buffer must not truncate it.
	if len(c.bytes) < prev {
		c.bytes = c.bytes[:prev]
	}

	return len(p), nil
}

// ReadAt reads into b starting at offset off. It returns io.EOF when off
// is at or past the end, or when fewer than len(b) bytes were available.
func (c *content) ReadAt(b []byte, off int64) (int, error) {
	if off < 0 {
		return 0, &os.PathError{
			Op:   "readat",
			Path: c.name,
			Err:  errors.New("negative offset"),
		}
	}

	c.m.RLock()
	defer c.m.RUnlock()

	size := int64(len(c.bytes))
	if off >= size {
		return 0, io.EOF
	}

	end := off + int64(len(b))
	if end > size {
		end = size
	}

	n := copy(b, c.bytes[off:end])
	if n < len(b) {
		return n, io.EOF
	}
	return n, nil
}
|
59
pkg/go-nfs/helpers/nullauthhandler.go
Normal file
59
pkg/go-nfs/helpers/nullauthhandler.go
Normal file
|
@ -0,0 +1,59 @@
|
|||
package helpers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxbilly"
|
||||
nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
|
||||
)
|
||||
|
||||
// NewNullAuthHandler creates a handler for the provided filesystem
|
||||
func NewNullAuthHandler(fs nfs.Filesystem) nfs.Handler {
|
||||
return &NullAuthHandler{fs}
|
||||
}
|
||||
|
||||
// NullAuthHandler returns a NFS backing that exposes a given file system
// in response to all mount requests, performing no authentication.
type NullAuthHandler struct {
	fs nfs.Filesystem // the single filesystem served to every client
}
|
||||
|
||||
// Mount backs Mount RPC Requests, allowing for access control policies.
|
||||
func (h *NullAuthHandler) Mount(ctx context.Context, conn net.Conn, req nfs.MountRequest) (status nfs.MountStatus, hndl nfs.Filesystem, auths []nfs.AuthFlavor) {
|
||||
status = nfs.MountStatusOk
|
||||
hndl = h.fs
|
||||
auths = []nfs.AuthFlavor{nfs.AuthFlavorNull}
|
||||
return
|
||||
}
|
||||
|
||||
// Change provides an interface for updating file attributes.
|
||||
func (h *NullAuthHandler) Change(fs nfs.Filesystem) nfs.Change {
|
||||
if c, ok := h.fs.(ctxbilly.Change); ok {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FSStat provides information about a filesystem. The null handler keeps
// the defaults already populated by the server and reports no overrides.
func (h *NullAuthHandler) FSStat(ctx context.Context, f nfs.Filesystem, s *nfs.FSStat) error {
	return nil
}
|
||||
|
||||
// ToHandle handled by CachingHandler; this stub returns an empty handle,
// so the NullAuthHandler must be wrapped to serve real file handles.
func (h *NullAuthHandler) ToHandle(f nfs.Filesystem, s []string) []byte {
	return []byte{}
}
|
||||
|
||||
// FromHandle handled by CachingHandler; this stub resolves nothing and
// reports no error, so it must be wrapped to decode real file handles.
func (h *NullAuthHandler) FromHandle([]byte) (nfs.Filesystem, []string, error) {
	return nil, []string{}, nil
}
|
||||
|
||||
func (c *NullAuthHandler) InvalidateHandle(nfs.Filesystem, []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleLimit handled by cachingHandler; -1 means no limit imposed here.
func (h *NullAuthHandler) HandleLimit() int {
	return -1
}
|
216
pkg/go-nfs/log.go
Normal file
216
pkg/go-nfs/log.go
Normal file
|
@ -0,0 +1,216 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
var (
	// Log is the package-wide logger used by all NFS handlers; replace
	// it with SetLogger.
	Log Logger = &DefaultLogger{}
)
|
||||
|
||||
// LogLevel orders logging verbosity; higher values emit more output.
type LogLevel int

const (
	PanicLevel LogLevel = iota
	FatalLevel
	ErrorLevel
	WarnLevel
	InfoLevel
	DebugLevel
	TraceLevel

	// Message prefixes corresponding to each level above.
	panicLevelStr string = "[PANIC] "
	fatalLevelStr string = "[FATAL] "
	errorLevelStr string = "[ERROR] "
	warnLevelStr  string = "[WARN] "
	infoLevelStr  string = "[INFO] "
	debugLevelStr string = "[DEBUG] "
	traceLevelStr string = "[TRACE] "
)
|
||||
|
||||
// Logger is the leveled-logging contract used throughout the package;
// an implementation may be installed globally via SetLogger.
type Logger interface {
	SetLevel(level LogLevel)
	GetLevel() LogLevel
	ParseLevel(level string) (LogLevel, error)

	Panic(args ...interface{})
	Fatal(args ...interface{})
	Error(args ...interface{})
	Warn(args ...interface{})
	Info(args ...interface{})
	Debug(args ...interface{})
	Trace(args ...interface{})
	Print(args ...interface{})

	Panicf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
	Warnf(format string, args ...interface{})
	Infof(format string, args ...interface{})
	Debugf(format string, args ...interface{})
	Tracef(format string, args ...interface{})
	Printf(format string, args ...interface{})
}
|
||||
|
||||
// DefaultLogger implements Logger on top of the standard library log
// package, filtering messages by Level.
type DefaultLogger struct {
	Level LogLevel // messages above this level are suppressed
}
|
||||
|
||||
// SetLogger installs logger as the package-wide Log.
func SetLogger(logger Logger) {
	Log = logger
}
|
||||
|
||||
func init() {
|
||||
if os.Getenv("LOG_LEVEL") != "" {
|
||||
if level, err := Log.ParseLevel(os.Getenv("LOG_LEVEL")); err == nil {
|
||||
Log.SetLevel(level)
|
||||
}
|
||||
} else {
|
||||
// set default log level to info
|
||||
Log.SetLevel(InfoLevel)
|
||||
}
|
||||
}
|
||||
|
||||
// GetLevel reports the current logging level.
func (l *DefaultLogger) GetLevel() LogLevel {
	return l.Level
}
|
||||
|
||||
// SetLevel changes the logging level; messages above it are suppressed.
func (l *DefaultLogger) SetLevel(level LogLevel) {
	l.Level = level
}
|
||||
|
||||
func (l *DefaultLogger) ParseLevel(level string) (LogLevel, error) {
|
||||
switch level {
|
||||
case "panic":
|
||||
return PanicLevel, nil
|
||||
case "fatal":
|
||||
return FatalLevel, nil
|
||||
case "error":
|
||||
return ErrorLevel, nil
|
||||
case "warn":
|
||||
return WarnLevel, nil
|
||||
case "info":
|
||||
return InfoLevel, nil
|
||||
case "debug":
|
||||
return DebugLevel, nil
|
||||
case "trace":
|
||||
return TraceLevel, nil
|
||||
}
|
||||
var ll LogLevel
|
||||
return ll, fmt.Errorf("invalid log level %q", level)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Panic(args ...interface{}) {
|
||||
if l.Level < PanicLevel {
|
||||
return
|
||||
}
|
||||
args = append([]interface{}{panicLevelStr}, args...)
|
||||
log.Print(args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Panicf(format string, args ...interface{}) {
|
||||
if l.Level < PanicLevel {
|
||||
return
|
||||
}
|
||||
log.Printf(panicLevelStr+format, args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Fatal(args ...interface{}) {
|
||||
if l.Level < FatalLevel {
|
||||
return
|
||||
}
|
||||
args = append([]interface{}{fatalLevelStr}, args...)
|
||||
log.Print(args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Fatalf(format string, args ...interface{}) {
|
||||
if l.Level < FatalLevel {
|
||||
return
|
||||
}
|
||||
log.Printf(fatalLevelStr+format, args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Error(args ...interface{}) {
|
||||
if l.Level < ErrorLevel {
|
||||
return
|
||||
}
|
||||
args = append([]interface{}{errorLevelStr}, args...)
|
||||
log.Print(args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Errorf(format string, args ...interface{}) {
|
||||
if l.Level < ErrorLevel {
|
||||
return
|
||||
}
|
||||
log.Printf(errorLevelStr+format, args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Warn(args ...interface{}) {
|
||||
if l.Level < WarnLevel {
|
||||
return
|
||||
}
|
||||
args = append([]interface{}{warnLevelStr}, args...)
|
||||
log.Print(args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Warnf(format string, args ...interface{}) {
|
||||
if l.Level < WarnLevel {
|
||||
return
|
||||
}
|
||||
log.Printf(warnLevelStr+format, args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Info(args ...interface{}) {
|
||||
if l.Level < InfoLevel {
|
||||
return
|
||||
}
|
||||
args = append([]interface{}{infoLevelStr}, args...)
|
||||
log.Print(args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Infof(format string, args ...interface{}) {
|
||||
if l.Level < InfoLevel {
|
||||
return
|
||||
}
|
||||
log.Printf(infoLevelStr+format, args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Debug(args ...interface{}) {
|
||||
if l.Level < DebugLevel {
|
||||
return
|
||||
}
|
||||
args = append([]interface{}{debugLevelStr}, args...)
|
||||
log.Print(args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Debugf(format string, args ...interface{}) {
|
||||
if l.Level < DebugLevel {
|
||||
return
|
||||
}
|
||||
log.Printf(debugLevelStr+format, args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Trace(args ...interface{}) {
|
||||
if l.Level < TraceLevel {
|
||||
return
|
||||
}
|
||||
args = append([]interface{}{traceLevelStr}, args...)
|
||||
log.Print(args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Tracef(format string, args ...interface{}) {
|
||||
if l.Level < TraceLevel {
|
||||
return
|
||||
}
|
||||
log.Printf(traceLevelStr+format, args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Print(args ...interface{}) {
|
||||
log.Print(args...)
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) Printf(format string, args ...interface{}) {
|
||||
log.Printf(format, args...)
|
||||
}
|
58
pkg/go-nfs/mount.go
Normal file
58
pkg/go-nfs/mount.go
Normal file
|
@ -0,0 +1,58 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
const (
	// mountServiceID is the ONC RPC program number of the mount service.
	mountServiceID = 100005
)
|
||||
|
||||
// init registers the supported mount-service procedures (NULL, MNT,
// UMNT) with the RPC dispatch table; registration errors are ignored
// because the table is empty for these keys at package init.
func init() {
	_ = RegisterMessageHandler(mountServiceID, uint32(MountProcNull), onMountNull)
	_ = RegisterMessageHandler(mountServiceID, uint32(MountProcMount), onMount)
	_ = RegisterMessageHandler(mountServiceID, uint32(MountProcUmnt), onUMount)
}
|
||||
|
||||
// onMountNull answers the mount NULL procedure: a no-op ping that only
// acknowledges success.
func onMountNull(ctx context.Context, w *response, userHandle Handler) error {
	return w.writeHeader(ResponseCodeSuccess)
}
|
||||
|
||||
func onMount(ctx context.Context, w *response, userHandle Handler) error {
|
||||
// TODO: auth check.
|
||||
dirpath, err := xdr.ReadOpaque(w.req.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mountReq := MountRequest{Header: w.req.Header, Dirpath: dirpath}
|
||||
status, handle, flavors := userHandle.Mount(ctx, w.conn, mountReq)
|
||||
|
||||
if err := w.writeHeader(ResponseCodeSuccess); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(status)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rootHndl := userHandle.ToHandle(handle, []string{})
|
||||
|
||||
if status == MountStatusOk {
|
||||
_ = xdr.Write(writer, rootHndl)
|
||||
_ = xdr.Write(writer, flavors)
|
||||
}
|
||||
return w.Write(writer.Bytes())
|
||||
}
|
||||
|
||||
// onUMount answers the UMNT procedure. The server keeps no per-mount
// state, so it merely drains the dirpath argument and acknowledges.
func onUMount(ctx context.Context, w *response, userHandle Handler) error {
	_, err := xdr.ReadOpaque(w.req.Body)
	if err != nil {
		return err
	}

	return w.writeHeader(ResponseCodeSuccess)
}
|
90
pkg/go-nfs/mountinterface.go
Normal file
90
pkg/go-nfs/mountinterface.go
Normal file
|
@ -0,0 +1,90 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"github.com/willscott/go-nfs-client/nfs/rpc"
|
||||
)
|
||||
|
||||
// FHSize is the maximum size of a FileHandle
const FHSize = 64

// MNTNameLen is the maximum size of a mount name
const MNTNameLen = 255

// MntPathLen is the maximum size of a mount path
const MntPathLen = 1024

// FileHandle maps to a fhandle3
type FileHandle []byte
|
||||
|
||||
// MountStatus defines the response to the Mount Procedure
type MountStatus uint32

// MountStatus Codes (values mirror the corresponding errno-style codes
// of the mount protocol).
const (
	MountStatusOk             MountStatus = 0
	MountStatusErrPerm        MountStatus = 1
	MountStatusErrNoEnt       MountStatus = 2
	MountStatusErrIO          MountStatus = 5
	MountStatusErrAcces       MountStatus = 13
	MountStatusErrNotDir      MountStatus = 20
	MountStatusErrInval       MountStatus = 22
	MountStatusErrNameTooLong MountStatus = 63
	MountStatusErrNotSupp     MountStatus = 10004
	MountStatusErrServerFault MountStatus = 10006
)
|
||||
|
||||
// MountProcedure is the valid RPC calls for the mount service.
type MountProcedure uint32

// MountProcedure Codes
const (
	MountProcNull MountProcedure = iota
	MountProcMount
	MountProcDump
	MountProcUmnt
	MountProcUmntAll
	MountProcExport
)
|
||||
|
||||
func (m MountProcedure) String() string {
|
||||
switch m {
|
||||
case MountProcNull:
|
||||
return "Null"
|
||||
case MountProcMount:
|
||||
return "Mount"
|
||||
case MountProcDump:
|
||||
return "Dump"
|
||||
case MountProcUmnt:
|
||||
return "Umnt"
|
||||
case MountProcUmntAll:
|
||||
return "UmntAll"
|
||||
case MountProcExport:
|
||||
return "Export"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// AuthFlavor is a form of authentication, per rfc1057 section 7.2
type AuthFlavor uint32

// AuthFlavor Codes
const (
	AuthFlavorNull  AuthFlavor = 0
	AuthFlavorUnix  AuthFlavor = 1
	AuthFlavorShort AuthFlavor = 2
	AuthFlavorDES   AuthFlavor = 3
)
|
||||
|
||||
// MountRequest contains the format of a client request to open a mount.
type MountRequest struct {
	rpc.Header
	Dirpath []byte // requested export path, opaque per the protocol
}
|
||||
|
||||
// MountResponse is the server's response with status `MountStatusOk`
type MountResponse struct {
	rpc.Header
	FileHandle        // root handle of the freshly mounted export
	AuthFlavors []int // auth flavors the server will accept
}
|
38
pkg/go-nfs/nfs.go
Normal file
38
pkg/go-nfs/nfs.go
Normal file
|
@ -0,0 +1,38 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
const (
	// nfsServiceID is the ONC RPC program number of the NFS service.
	nfsServiceID = 100003
)
|
||||
|
||||
// init registers every NFSv3 procedure (numbers 0-21) with the RPC
// dispatch table; registration errors are ignored because the table is
// empty for these keys at package init.
func init() {
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureNull), onNull)               // 0
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureGetAttr), onGetAttr)         // 1
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureSetAttr), onSetAttr)         // 2
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureLookup), onLookup)           // 3
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureAccess), onAccess)           // 4
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureReadlink), onReadLink)       // 5
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureRead), onRead)               // 6
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureWrite), onWrite)             // 7
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureCreate), onCreate)           // 8
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureMkDir), onMkdir)             // 9
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureSymlink), onSymlink)         // 10
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureMkNod), onMknod)             // 11
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureRemove), onRemove)           // 12
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureRmDir), onRmDir)             // 13
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureRename), onRename)           // 14
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureLink), onLink)               // 15
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureReadDir), onReadDir)         // 16
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureReadDirPlus), onReadDirPlus) // 17
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureFSStat), onFSStat)           // 18
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureFSInfo), onFSInfo)           // 19
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedurePathConf), onPathConf)       // 20
	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureCommit), onCommit)           // 21
}
|
||||
|
||||
// onNull answers the NFS NULL procedure: a no-op ping with an empty body.
func onNull(ctx context.Context, w *response, userHandle Handler) error {
	return w.Write([]byte{})
}
|
45
pkg/go-nfs/nfs_onaccess.go
Normal file
45
pkg/go-nfs/nfs_onaccess.go
Normal file
|
@ -0,0 +1,45 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
billy "github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// onAccess implements the ACCESS procedure: it echoes back the subset of
// the requested access-bit mask the server will permit on the object.
func onAccess(ctx context.Context, w *response, userHandle Handler) error {
	w.errorFmt = opAttrErrorFormatter
	roothandle, err := xdr.ReadOpaque(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}
	fs, path, err := userHandle.FromHandle(roothandle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}
	mask, err := xdr.ReadUint32(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	// Read-only filesystems keep only the read (0x1), lookup (0x2) and
	// execute (0x20) bits of the requested mask.
	if !CapabilityCheck(fs, billy.WriteCapability) {
		mask = mask & (1 | 2 | 0x20)
	}

	if err := xdr.Write(writer, mask); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
|
51
pkg/go-nfs/nfs_oncommit.go
Normal file
51
pkg/go-nfs/nfs_oncommit.go
Normal file
|
@ -0,0 +1,51 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
|
||||
billy "github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// onCommit - note this is a no-op, as we always push writes to the backing store.
// It still validates the handle, rejects read-only filesystems, and
// replies with post-op attributes plus the server's write verifier.
func onCommit(ctx context.Context, w *response, userHandle Handler) error {
	w.errorFmt = wccDataErrorFormatter
	handle, err := xdr.ReadOpaque(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}
	// The conn will drain the unread offset and count arguments.

	fs, path, err := userHandle.FromHandle(handle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}

	if !CapabilityCheck(fs, billy.WriteCapability) {
		return &NFSStatusError{NFSStatusServerFault, os.ErrPermission}
	}

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return err
	}

	// no pre-op cache data.
	if err := xdr.Write(writer, uint32(0)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	// write the 8 bytes of write verification.
	if err := xdr.Write(writer, w.Server.ID); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
|
125
pkg/go-nfs/nfs_oncreate.go
Normal file
125
pkg/go-nfs/nfs_oncreate.go
Normal file
|
@ -0,0 +1,125 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
|
||||
billy "github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// CREATE procedure modes (createmode3): unchecked overwrites an existing
// file, guarded fails if one exists, exclusive uses a client verifier.
const (
	createModeUnchecked = 0
	createModeGuarded   = 1
	createModeExclusive = 2
)
|
||||
|
||||
// onCreate implements the CREATE procedure: it parses the directory
// handle, filename, create mode and attributes, creates the file, applies
// the requested attributes, and replies with the new handle plus post-op
// attributes. 'exclusive' mode is not supported and is rejected.
func onCreate(ctx context.Context, w *response, userHandle Handler) error {
	w.errorFmt = wccDataErrorFormatter
	obj := DirOpArg{}
	err := xdr.Read(w.req.Body, &obj)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}
	how, err := xdr.ReadUint32(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}
	var attrs *SetFileAttributes
	if how == createModeUnchecked || how == createModeGuarded {
		sattr, err := ReadSetFileAttributes(w.req.Body)
		if err != nil {
			return &NFSStatusError{NFSStatusInval, err}
		}
		attrs = sattr
	} else if how == createModeExclusive {
		// read createverf3 (drained but unused, since the mode is rejected)
		var verf [8]byte
		if err := xdr.Read(w.req.Body, &verf); err != nil {
			return &NFSStatusError{NFSStatusInval, err}
		}
		Log.Errorf("failing create to indicate lack of support for 'exclusive' mode.")
		// TODO: support 'exclusive' mode.
		return &NFSStatusError{NFSStatusNotSupp, os.ErrPermission}
	} else {
		// invalid
		return &NFSStatusError{NFSStatusNotSupp, os.ErrInvalid}
	}

	fs, path, err := userHandle.FromHandle(obj.Handle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}

	if !CapabilityCheck(fs, billy.WriteCapability) {
		return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
	}

	if len(string(obj.Filename)) > PathNameMax {
		return &NFSStatusError{NFSStatusNameTooLong, nil}
	}

	newFile := append(path, string(obj.Filename))
	newFilePath := fs.Join(newFile...)
	if s, err := fs.Stat(ctx, newFilePath); err == nil {
		// Target exists: directories can never be overwritten, and
		// guarded mode forbids overwriting files.
		if s.IsDir() {
			return &NFSStatusError{NFSStatusExist, nil}
		}
		if how == createModeGuarded {
			return &NFSStatusError{NFSStatusExist, os.ErrPermission}
		}
	} else {
		// Target absent: the parent must exist and be a directory.
		if s, err := fs.Stat(ctx, fs.Join(path...)); err != nil {
			return &NFSStatusError{NFSStatusAccess, err}
		} else if !s.IsDir() {
			return &NFSStatusError{NFSStatusNotDir, nil}
		}
	}

	file, err := fs.Create(ctx, newFilePath)
	if err != nil {
		Log.Errorf("Error Creating: %v", err)
		return &NFSStatusError{NFSStatusAccess, err}
	}
	if err := file.Close(ctx); err != nil {
		Log.Errorf("Error Creating: %v", err)
		return &NFSStatusError{NFSStatusAccess, err}
	}

	fp := userHandle.ToHandle(fs, newFile)
	changer := userHandle.Change(fs)
	if err := attrs.Apply(ctx, changer, fs, newFilePath); err != nil {
		Log.Errorf("Error applying attributes: %v\n", err)
		return &NFSStatusError{NFSStatusIO, err}
	}

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	// "handle follows"
	if err := xdr.Write(writer, uint32(1)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := xdr.Write(writer, fp); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, []string{file.Name()})); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	// dir_wcc (we don't include pre_op_attr)
	if err := xdr.Write(writer, uint32(0)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
|
89
pkg/go-nfs/nfs_onfsinfo.go
Normal file
89
pkg/go-nfs/nfs_onfsinfo.go
Normal file
|
@ -0,0 +1,89 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// FSINFO property bit flags advertised to clients.
const (
	// FSInfoPropertyLink does the FS support hard links?
	FSInfoPropertyLink = 0x0001
	// FSInfoPropertySymlink does the FS support soft links?
	FSInfoPropertySymlink = 0x0002
	// FSInfoPropertyHomogeneous does the FS need PATHCONF calls for each file
	FSInfoPropertyHomogeneous = 0x0008
	// FSInfoPropertyCanSetTime can the FS support setting access/mod times?
	FSInfoPropertyCanSetTime = 0x0010
)
|
||||
|
||||
// onFSInfo implements the FSINFO procedure: it reports static transfer
// limits and capability flags for the filesystem behind the handle.
func onFSInfo(ctx context.Context, w *response, userHandle Handler) error {
	roothandle, err := xdr.ReadOpaque(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}
	fs, path, err := userHandle.FromHandle(roothandle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	// fsinfores mirrors the FSINFO3resok wire layout.
	type fsinfores struct {
		Rtmax       uint32
		Rtpref      uint32
		Rtmult      uint32
		Wtmax       uint32
		Wtpref      uint32
		Wtmult      uint32
		Dtpref      uint32
		Maxfilesize uint64
		TimeDelta   uint64
		Properties  uint32
	}

	res := fsinfores{
		Rtmax:       1 << 30,
		Rtpref:      1 << 30,
		Rtmult:      4096,
		Wtmax:       1 << 30,
		Wtpref:      1 << 30,
		Wtmult:      4096,
		Dtpref:      8192,
		Maxfilesize: 1 << 62, // wild guess. this seems big.
		TimeDelta:   1,       // nanosecond precision.
		Properties:  0,
	}

	// TODO: these aren't great indications of support, really.
	// if _, ok := fs.(billy.Symlink); ok {
	// 	res.Properties |= FSInfoPropertyLink
	// 	res.Properties |= FSInfoPropertySymlink
	// }
	// TODO: if the nfs share spans multiple virtual mounts, may need
	// to support granular PATHINFO responses.
	res.Properties |= FSInfoPropertyHomogeneous
	// TODO: not a perfect indicator

	if CapabilityCheck(fs, billy.WriteCapability) {
		res.Properties |= FSInfoPropertyCanSetTime
	}

	// TODO: this whole struct should be specifiable by the userhandler.

	if err := xdr.Write(writer, res); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
|
59
pkg/go-nfs/nfs_onfsstat.go
Normal file
59
pkg/go-nfs/nfs_onfsstat.go
Normal file
|
@ -0,0 +1,59 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
billy "github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// onFSStat implements the FSSTAT procedure: it seeds very large default
// usage figures, lets the user handler adjust them, and replies with the
// result plus post-op attributes.
func onFSStat(ctx context.Context, w *response, userHandle Handler) error {
	roothandle, err := xdr.ReadOpaque(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}
	fs, path, err := userHandle.FromHandle(roothandle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}

	defaults := FSStat{
		TotalSize:      1 << 62,
		FreeSize:       1 << 62,
		AvailableSize:  1 << 62,
		TotalFiles:     1 << 62,
		FreeFiles:      1 << 62,
		AvailableFiles: 1 << 62,
		CacheHint:      0,
	}

	// Read-only filesystems advertise no available space or file slots.
	if !CapabilityCheck(fs, billy.WriteCapability) {
		defaults.AvailableFiles = 0
		defaults.AvailableSize = 0
	}

	err = userHandle.FSStat(ctx, fs, &defaults)
	if err != nil {
		// Preserve status errors from the handler; wrap everything else.
		if _, ok := err.(*NFSStatusError); ok {
			return err
		}
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := xdr.Write(writer, defaults); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
|
48
pkg/go-nfs/nfs_ongetattr.go
Normal file
48
pkg/go-nfs/nfs_ongetattr.go
Normal file
|
@ -0,0 +1,48 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// onGetAttr implements the GETATTR procedure: it Lstats the object behind
// the handle and replies with its file attributes. Missing files map to
// NFSStatusNoEnt and context deadline expiry to NFSStatusJukebox (retry).
func onGetAttr(ctx context.Context, w *response, userHandle Handler) error {
	handle, err := xdr.ReadOpaque(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}

	fs, path, err := userHandle.FromHandle(handle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}

	fullPath := fs.Join(path...)
	// Lstat (not Stat) so symlinks report their own attributes.
	info, err := fs.Lstat(ctx, fullPath)
	if err != nil {
		if os.IsNotExist(err) {
			return &NFSStatusError{NFSStatusNoEnt, err}
		}
		if errors.Is(err, context.DeadlineExceeded) {
			return &NFSStatusError{NFSStatusJukebox, err}
		}
		return &NFSStatusError{NFSStatusIO, err}
	}
	attr := ToFileAttribute(info, fullPath)

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := xdr.Write(writer, attr); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
|
94
pkg/go-nfs/nfs_onlink.go
Normal file
94
pkg/go-nfs/nfs_onlink.go
Normal file
|
@ -0,0 +1,94 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// Backing billy.FS doesn't support hard links
|
||||
func onLink(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = wccDataErrorFormatter
|
||||
obj := DirOpArg{}
|
||||
err := xdr.Read(w.req.Body, &obj)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
attrs, err := ReadSetFileAttributes(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
target, err := xdr.ReadOpaque(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
fs, path, err := userHandle.FromHandle(obj.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
if !CapabilityCheck(fs, billy.WriteCapability) {
|
||||
return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
|
||||
}
|
||||
|
||||
if len(string(obj.Filename)) > PathNameMax {
|
||||
return &NFSStatusError{NFSStatusNameTooLong, os.ErrInvalid}
|
||||
}
|
||||
|
||||
newFilePath := fs.Join(append(path, string(obj.Filename))...)
|
||||
if _, err := fs.Stat(ctx, newFilePath); err == nil {
|
||||
return &NFSStatusError{NFSStatusExist, os.ErrExist}
|
||||
}
|
||||
if s, err := fs.Stat(ctx, fs.Join(path...)); err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
} else if !s.IsDir() {
|
||||
return &NFSStatusError{NFSStatusNotDir, nil}
|
||||
}
|
||||
|
||||
fp := userHandle.ToHandle(fs, append(path, string(obj.Filename)))
|
||||
changer := userHandle.Change(fs)
|
||||
if changer == nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
cos, ok := changer.(UnixChange)
|
||||
if !ok {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
|
||||
err = cos.Link(ctx, string(target), newFilePath)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if err := attrs.Apply(ctx, changer, fs, newFilePath); err != nil {
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
// "handle follows"
|
||||
if err := xdr.Write(writer, uint32(1)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := xdr.Write(writer, fp); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, append(path, string(obj.Filename)))); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := WriteWcc(writer, nil, tryStat(ctx, fs, path)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
86
pkg/go-nfs/nfs_onlookup.go
Normal file
86
pkg/go-nfs/nfs_onlookup.go
Normal file
|
@ -0,0 +1,86 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
func lookupSuccessResponse(ctx context.Context, handle []byte, entPath, dirPath []string, fs Filesystem) ([]byte, error) {
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := xdr.Write(writer, handle); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, entPath)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, dirPath)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return writer.Bytes(), nil
|
||||
}
|
||||
|
||||
func onLookup(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = opAttrErrorFormatter
|
||||
obj := DirOpArg{}
|
||||
err := xdr.Read(w.req.Body, &obj)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
fs, p, err := userHandle.FromHandle(obj.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
dirInfo, err := fs.Lstat(ctx, fs.Join(p...))
|
||||
if err != nil || !dirInfo.IsDir() {
|
||||
return &NFSStatusError{NFSStatusNotDir, err}
|
||||
}
|
||||
|
||||
// Special cases for "." and ".."
|
||||
if bytes.Equal(obj.Filename, []byte(".")) {
|
||||
resp, err := lookupSuccessResponse(ctx, obj.Handle, p, p, fs)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := w.Write(resp); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if bytes.Equal(obj.Filename, []byte("..")) {
|
||||
if len(p) == 0 {
|
||||
return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
|
||||
}
|
||||
pPath := p[0 : len(p)-1]
|
||||
pHandle := userHandle.ToHandle(fs, pPath)
|
||||
resp, err := lookupSuccessResponse(ctx, pHandle, pPath, p, fs)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := w.Write(resp); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
reqPath := append(p, string(obj.Filename))
|
||||
if _, err = fs.Lstat(ctx, fs.Join(reqPath...)); err != nil {
|
||||
return &NFSStatusError{NFSStatusNoEnt, os.ErrNotExist}
|
||||
}
|
||||
|
||||
newHandle := userHandle.ToHandle(fs, reqPath)
|
||||
resp, err := lookupSuccessResponse(ctx, newHandle, reqPath, p, fs)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := w.Write(resp); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
94
pkg/go-nfs/nfs_onmkdir.go
Normal file
94
pkg/go-nfs/nfs_onmkdir.go
Normal file
|
@ -0,0 +1,94 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
const (
	// mkdirDefaultMode is the permission mode applied to directories
	// created by MKDIR when the client does not specify one. Written in
	// octal: rwxr-xr-x. BUG FIX: the previous value was decimal 755,
	// which is the nonsensical mode 0o1363.
	mkdirDefaultMode = 0o755
)
|
||||
|
||||
func onMkdir(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = wccDataErrorFormatter
|
||||
obj := DirOpArg{}
|
||||
err := xdr.Read(w.req.Body, &obj)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
attrs, err := ReadSetFileAttributes(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
fs, path, err := userHandle.FromHandle(obj.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
if !CapabilityCheck(fs, billy.WriteCapability) {
|
||||
return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
|
||||
}
|
||||
|
||||
if len(string(obj.Filename)) > PathNameMax {
|
||||
return &NFSStatusError{NFSStatusNameTooLong, os.ErrInvalid}
|
||||
}
|
||||
if string(obj.Filename) == "." || string(obj.Filename) == ".." {
|
||||
return &NFSStatusError{NFSStatusExist, os.ErrExist}
|
||||
}
|
||||
|
||||
newFolder := append(path, string(obj.Filename))
|
||||
newFolderPath := fs.Join(newFolder...)
|
||||
if s, err := fs.Stat(ctx, newFolderPath); err == nil {
|
||||
if s.IsDir() {
|
||||
return &NFSStatusError{NFSStatusExist, nil}
|
||||
}
|
||||
} else {
|
||||
if s, err := fs.Stat(ctx, fs.Join(path...)); err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
} else if !s.IsDir() {
|
||||
return &NFSStatusError{NFSStatusNotDir, nil}
|
||||
}
|
||||
}
|
||||
|
||||
if err := fs.MkdirAll(ctx, newFolderPath, attrs.Mode(mkdirDefaultMode)); err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
|
||||
fp := userHandle.ToHandle(fs, newFolder)
|
||||
changer := userHandle.Change(fs)
|
||||
if changer != nil {
|
||||
if err := attrs.Apply(ctx, changer, fs, newFolderPath); err != nil {
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
// "handle follows"
|
||||
if err := xdr.Write(writer, uint32(1)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := xdr.Write(writer, fp); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, newFolder)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := WriteWcc(writer, nil, tryStat(ctx, fs, path)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
158
pkg/go-nfs/nfs_onmknod.go
Normal file
158
pkg/go-nfs/nfs_onmknod.go
Normal file
|
@ -0,0 +1,158 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
|
||||
billy "github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// nfs_ftype mirrors the NFSv3 ftype3 enumeration (RFC 1813, section 2.5).
type nfs_ftype int32

const (
	FTYPE_NF3REG  nfs_ftype = 1 // regular file
	FTYPE_NF3DIR  nfs_ftype = 2 // directory
	FTYPE_NF3BLK  nfs_ftype = 3 // block special device
	FTYPE_NF3CHR  nfs_ftype = 4 // character special device
	FTYPE_NF3LNK  nfs_ftype = 5 // symbolic link
	FTYPE_NF3SOCK nfs_ftype = 6 // socket
	FTYPE_NF3FIFO nfs_ftype = 7 // named pipe (fifo)
)
|
||||
|
||||
// Backing billy.FS doesn't support creation of
|
||||
// char, block, socket, or fifo pipe nodes
|
||||
func onMknod(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = wccDataErrorFormatter
|
||||
obj := DirOpArg{}
|
||||
err := xdr.Read(w.req.Body, &obj)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
ftype, err := xdr.ReadUint32(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
// see if the filesystem supports mknod
|
||||
fs, path, err := userHandle.FromHandle(obj.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
|
||||
if !CapabilityCheck(fs, billy.WriteCapability) {
|
||||
return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
|
||||
}
|
||||
|
||||
c := userHandle.Change(fs)
|
||||
if c == nil {
|
||||
return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
|
||||
}
|
||||
cu, ok := c.(UnixChange)
|
||||
if !ok {
|
||||
return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
|
||||
}
|
||||
|
||||
if len(string(obj.Filename)) > PathNameMax {
|
||||
return &NFSStatusError{NFSStatusNameTooLong, os.ErrInvalid}
|
||||
}
|
||||
|
||||
newFilePath := fs.Join(append(path, string(obj.Filename))...)
|
||||
if _, err := fs.Stat(ctx, newFilePath); err == nil {
|
||||
return &NFSStatusError{NFSStatusExist, os.ErrExist}
|
||||
}
|
||||
parent, err := fs.Stat(ctx, fs.Join(path...))
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
} else if !parent.IsDir() {
|
||||
return &NFSStatusError{NFSStatusNotDir, nil}
|
||||
}
|
||||
fp := userHandle.ToHandle(fs, append(path, string(obj.Filename)))
|
||||
|
||||
switch nfs_ftype(ftype) {
|
||||
case FTYPE_NF3CHR:
|
||||
case FTYPE_NF3BLK:
|
||||
// read devicedata3 = {sattr3, specdata3}
|
||||
attrs, err := ReadSetFileAttributes(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
specData1, err := xdr.ReadUint32(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
specData2, err := xdr.ReadUint32(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
err = cu.Mknod(ctx, newFilePath, uint32(attrs.Mode(parent.Mode())), specData1, specData2)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if err = attrs.Apply(ctx, cu, fs, newFilePath); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
case FTYPE_NF3SOCK:
|
||||
// read sattr3
|
||||
attrs, err := ReadSetFileAttributes(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
if err := cu.Socket(ctx, newFilePath); err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if err = attrs.Apply(ctx, cu, fs, newFilePath); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
case FTYPE_NF3FIFO:
|
||||
// read sattr3
|
||||
attrs, err := ReadSetFileAttributes(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
err = cu.Mkfifo(ctx, newFilePath, uint32(attrs.Mode(parent.Mode())))
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if err = attrs.Apply(ctx, cu, fs, newFilePath); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
default:
|
||||
return &NFSStatusError{NFSStatusBadType, os.ErrInvalid}
|
||||
// end of input.
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
// "handle follows"
|
||||
if err := xdr.Write(writer, uint32(1)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
// fh3
|
||||
if err := xdr.Write(writer, fp); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
// attr
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, append(path, string(obj.Filename)))); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
// wcc
|
||||
if err := WriteWcc(writer, nil, tryStat(ctx, fs, path)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
55
pkg/go-nfs/nfs_onpathconf.go
Normal file
55
pkg/go-nfs/nfs_onpathconf.go
Normal file
|
@ -0,0 +1,55 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// PathNameMax is the maximum length for a file name
|
||||
const PathNameMax = 255
|
||||
|
||||
func onPathConf(ctx context.Context, w *response, userHandle Handler) error {
|
||||
roothandle, err := xdr.ReadOpaque(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
fs, path, err := userHandle.FromHandle(roothandle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
type PathConf struct {
|
||||
LinkMax uint32
|
||||
NameMax uint32
|
||||
NoTrunc uint32
|
||||
ChownRestricted uint32
|
||||
CaseInsensitive uint32
|
||||
CasePreserving uint32
|
||||
}
|
||||
|
||||
defaults := PathConf{
|
||||
LinkMax: 1,
|
||||
NameMax: PathNameMax,
|
||||
NoTrunc: 1,
|
||||
ChownRestricted: 0,
|
||||
CaseInsensitive: 0,
|
||||
CasePreserving: 1,
|
||||
}
|
||||
if err := xdr.Write(writer, defaults); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
97
pkg/go-nfs/nfs_onread.go
Normal file
97
pkg/go-nfs/nfs_onread.go
Normal file
|
@ -0,0 +1,97 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
type nfsReadArgs struct {
|
||||
Handle []byte
|
||||
Offset uint64
|
||||
Count uint32
|
||||
}
|
||||
|
||||
type nfsReadResponse struct {
|
||||
Count uint32
|
||||
EOF uint32
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// MaxRead is the advertised largest buffer the server is willing to read
|
||||
const MaxRead = 1 << 24
|
||||
|
||||
// CheckRead is a size where - if a request to read is larger than this,
|
||||
// the server will stat the file to learn it's actual size before allocating
|
||||
// a buffer to read into.
|
||||
const CheckRead = 1 << 15
|
||||
|
||||
func onRead(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = opAttrErrorFormatter
|
||||
var obj nfsReadArgs
|
||||
err := xdr.Read(w.req.Body, &obj)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
fs, path, err := userHandle.FromHandle(obj.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
|
||||
fh, err := fs.Open(ctx, fs.Join(path...))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &NFSStatusError{NFSStatusNoEnt, err}
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return &NFSStatusError{NFSStatusJukebox, err}
|
||||
}
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
|
||||
resp := nfsReadResponse{}
|
||||
|
||||
if obj.Count > CheckRead {
|
||||
info, err := fs.Stat(ctx, fs.Join(path...))
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if info.Size()-int64(obj.Offset) < int64(obj.Count) {
|
||||
obj.Count = uint32(uint64(info.Size()) - obj.Offset)
|
||||
}
|
||||
}
|
||||
if obj.Count > MaxRead {
|
||||
obj.Count = MaxRead
|
||||
}
|
||||
resp.Data = make([]byte, obj.Count)
|
||||
// todo: multiple reads if size isn't full
|
||||
cnt, err := fh.ReadAt(ctx, resp.Data, int64(obj.Offset))
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
resp.Count = uint32(cnt)
|
||||
resp.Data = resp.Data[:resp.Count]
|
||||
if errors.Is(err, io.EOF) {
|
||||
resp.EOF = 1
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := xdr.Write(writer, resp); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
195
pkg/go-nfs/nfs_onreaddir.go
Normal file
195
pkg/go-nfs/nfs_onreaddir.go
Normal file
|
@ -0,0 +1,195 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
type readDirArgs struct {
|
||||
Handle []byte
|
||||
Cookie uint64
|
||||
CookieVerif uint64
|
||||
Count uint32
|
||||
}
|
||||
|
||||
type readDirEntity struct {
|
||||
FileID uint64
|
||||
Name []byte
|
||||
Cookie uint64
|
||||
Next bool
|
||||
}
|
||||
|
||||
func onReadDir(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = opAttrErrorFormatter
|
||||
obj := readDirArgs{}
|
||||
err := xdr.Read(w.req.Body, &obj)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
if obj.Count < 1024 {
|
||||
return &NFSStatusError{NFSStatusTooSmall, io.ErrShortBuffer}
|
||||
}
|
||||
|
||||
fs, p, err := userHandle.FromHandle(obj.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
|
||||
contents, verifier, err := getDirListingWithVerifier(ctx, userHandle, obj.Handle, obj.CookieVerif)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if obj.Cookie > 0 && obj.CookieVerif > 0 && verifier != obj.CookieVerif {
|
||||
return &NFSStatusError{NFSStatusBadCookie, nil}
|
||||
}
|
||||
|
||||
entities := make([]readDirEntity, 0)
|
||||
maxBytes := uint32(100) // conservative overhead measure
|
||||
|
||||
started := obj.Cookie == 0
|
||||
if started {
|
||||
// add '.' and '..' to entities
|
||||
dotdotFileID := uint64(0)
|
||||
if len(p) > 0 {
|
||||
dda := tryStat(ctx, fs, p[0:len(p)-1])
|
||||
if dda != nil {
|
||||
dotdotFileID = dda.Fileid
|
||||
}
|
||||
}
|
||||
dotFileID := uint64(0)
|
||||
da := tryStat(ctx, fs, p)
|
||||
if da != nil {
|
||||
dotFileID = da.Fileid
|
||||
}
|
||||
entities = append(entities,
|
||||
readDirEntity{Name: []byte("."), Cookie: 0, Next: true, FileID: dotFileID},
|
||||
readDirEntity{Name: []byte(".."), Cookie: 1, Next: true, FileID: dotdotFileID},
|
||||
)
|
||||
}
|
||||
|
||||
eof := true
|
||||
maxEntities := userHandle.HandleLimit() / 2
|
||||
for i, c := range contents {
|
||||
// cookie equates to index within contents + 2 (for '.' and '..')
|
||||
cookie := uint64(i + 2)
|
||||
if started {
|
||||
maxBytes += 512 // TODO: better estimation.
|
||||
if maxBytes > obj.Count || len(entities) > maxEntities {
|
||||
eof = false
|
||||
break
|
||||
}
|
||||
|
||||
attrs := ToFileAttribute(c, path.Join(append(p, c.Name())...))
|
||||
entities = append(entities, readDirEntity{
|
||||
FileID: attrs.Fileid,
|
||||
Name: []byte(c.Name()),
|
||||
Cookie: cookie,
|
||||
Next: true,
|
||||
})
|
||||
} else if cookie == obj.Cookie {
|
||||
started = true
|
||||
}
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, p)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := xdr.Write(writer, verifier); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := xdr.Write(writer, len(entities) > 0); err != nil { // next
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if len(entities) > 0 {
|
||||
entities[len(entities)-1].Next = false
|
||||
// no next for last entity
|
||||
|
||||
for _, e := range entities {
|
||||
if err := xdr.Write(writer, e); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := xdr.Write(writer, eof); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
// TODO: track writer size at this point to validate maxcount estimation and stop early if needed.
|
||||
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDirListingWithVerifier(ctx context.Context, userHandle Handler, fsHandle []byte, verifier uint64) ([]fs.FileInfo, uint64, error) {
|
||||
// figure out what directory it is.
|
||||
fs, p, err := userHandle.FromHandle(fsHandle)
|
||||
if err != nil {
|
||||
return nil, 0, &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
|
||||
path := fs.Join(p...)
|
||||
// see if the verifier has this dir cached:
|
||||
if vh, ok := userHandle.(CachingHandler); verifier != 0 && ok {
|
||||
entries := vh.DataForVerifier(path, verifier)
|
||||
if entries != nil {
|
||||
return entries, verifier, nil
|
||||
}
|
||||
}
|
||||
// load the entries.
|
||||
contents, err := fs.ReadDir(ctx, path)
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
return nil, 0, &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return nil, 0, &NFSStatusError{NFSStatusJukebox, err}
|
||||
}
|
||||
return nil, 0, &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
|
||||
sort.Slice(contents, func(i, j int) bool {
|
||||
return contents[i].Name() < contents[j].Name()
|
||||
})
|
||||
|
||||
if vh, ok := userHandle.(CachingHandler); ok {
|
||||
// let the user handler make a verifier if it can.
|
||||
v := vh.VerifierFor(path, contents)
|
||||
return contents, v, nil
|
||||
}
|
||||
|
||||
id := hashPathAndContents(path, contents)
|
||||
return contents, id, nil
|
||||
}
|
||||
|
||||
// hashPathAndContents derives a cookie verifier from a directory path
// and its entry names: the first 8 bytes of a SHA-256 over the path
// followed by each name, interpreted big-endian.
func hashPathAndContents(path string, contents []fs.FileInfo) uint64 {
	h := sha256.New()
	// Include the path so two directories with identical entry names
	// still produce distinct verifiers.
	h.Write([]byte(path))
	for _, entry := range contents {
		h.Write([]byte(entry.Name())) // hash.Hash.Write never fails
	}
	return binary.BigEndian.Uint64(h.Sum(nil)[:8])
}
|
153
pkg/go-nfs/nfs_onreaddirplus.go
Normal file
153
pkg/go-nfs/nfs_onreaddirplus.go
Normal file
|
@ -0,0 +1,153 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"path"
|
||||
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// readDirPlusArgs is the XDR-decoded body of a READDIRPLUS request;
// field order matches the wire layout.
type readDirPlusArgs struct {
	Handle      []byte
	Cookie      uint64
	CookieVerif uint64
	DirCount    uint32
	MaxCount    uint32
}

// readDirPlusEntity is one entryplus3 in a READDIRPLUS reply; field
// order and the xdr "optional" tags match the wire layout.
type readDirPlusEntity struct {
	FileID     uint64
	Name       []byte
	Cookie     uint64
	Attributes *FileAttribute `xdr:"optional"`
	Handle     *[]byte        `xdr:"optional"`
	Next       bool
}
|
||||
|
||||
// joinPath returns a new slice holding parent followed by elements,
// never aliasing either input's backing array.
func joinPath(parent []string, elements ...string) []string {
	out := make([]string, len(parent), len(parent)+len(elements))
	copy(out, parent)
	return append(out, elements...)
}
|
||||
|
||||
func onReadDirPlus(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = opAttrErrorFormatter
|
||||
obj := readDirPlusArgs{}
|
||||
if err := xdr.Read(w.req.Body, &obj); err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
// in case of test, nfs-client send:
|
||||
// DirCount = 512
|
||||
// MaxCount = 4096
|
||||
if obj.DirCount < 512 || obj.MaxCount < 4096 {
|
||||
return &NFSStatusError{NFSStatusTooSmall, nil}
|
||||
}
|
||||
|
||||
fs, p, err := userHandle.FromHandle(obj.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
|
||||
contents, verifier, err := getDirListingWithVerifier(ctx, userHandle, obj.Handle, obj.CookieVerif)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if obj.Cookie > 0 && obj.CookieVerif > 0 && verifier != obj.CookieVerif {
|
||||
return &NFSStatusError{NFSStatusBadCookie, nil}
|
||||
}
|
||||
|
||||
entities := make([]readDirPlusEntity, 0)
|
||||
dirBytes := uint32(0)
|
||||
maxBytes := uint32(100) // conservative overhead measure
|
||||
|
||||
started := obj.Cookie == 0
|
||||
if started {
|
||||
// add '.' and '..' to entities
|
||||
dotdotFileID := uint64(0)
|
||||
if len(p) > 0 {
|
||||
dda := tryStat(ctx, fs, p[0:len(p)-1])
|
||||
if dda != nil {
|
||||
dotdotFileID = dda.Fileid
|
||||
}
|
||||
}
|
||||
dotFileID := uint64(0)
|
||||
da := tryStat(ctx, fs, p)
|
||||
if da != nil {
|
||||
dotFileID = da.Fileid
|
||||
}
|
||||
entities = append(entities,
|
||||
readDirPlusEntity{Name: []byte("."), Cookie: 0, Next: true, FileID: dotFileID, Attributes: da},
|
||||
readDirPlusEntity{Name: []byte(".."), Cookie: 1, Next: true, FileID: dotdotFileID},
|
||||
)
|
||||
}
|
||||
|
||||
eof := true
|
||||
maxEntities := userHandle.HandleLimit() / 2
|
||||
fb := 0
|
||||
fss := 0
|
||||
for i, c := range contents {
|
||||
// cookie equates to index within contents + 2 (for '.' and '..')
|
||||
cookie := uint64(i + 2)
|
||||
fb++
|
||||
if started {
|
||||
fss++
|
||||
dirBytes += uint32(len(c.Name()) + 20)
|
||||
maxBytes += 512 // TODO: better estimation.
|
||||
if dirBytes > obj.DirCount || maxBytes > obj.MaxCount || len(entities) > maxEntities {
|
||||
eof = false
|
||||
break
|
||||
}
|
||||
|
||||
filePath := joinPath(p, c.Name())
|
||||
handle := userHandle.ToHandle(fs, filePath)
|
||||
attrs := ToFileAttribute(c, path.Join(filePath...))
|
||||
entities = append(entities, readDirPlusEntity{
|
||||
FileID: attrs.Fileid,
|
||||
Name: []byte(c.Name()),
|
||||
Cookie: cookie,
|
||||
Attributes: attrs,
|
||||
Handle: &handle,
|
||||
Next: true,
|
||||
})
|
||||
} else if cookie == obj.Cookie {
|
||||
started = true
|
||||
}
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, p)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := xdr.Write(writer, verifier); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := xdr.Write(writer, len(entities) > 0); err != nil { // next
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if len(entities) > 0 {
|
||||
entities[len(entities)-1].Next = false
|
||||
// no next for last entity
|
||||
|
||||
for _, e := range entities {
|
||||
if err := xdr.Write(writer, e); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := xdr.Write(writer, eof); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
// TODO: track writer size at this point to validate maxcount estimation and stop early if needed.
|
||||
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
55
pkg/go-nfs/nfs_onreadlink.go
Normal file
55
pkg/go-nfs/nfs_onreadlink.go
Normal file
|
@ -0,0 +1,55 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
func onReadLink(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = opAttrErrorFormatter
|
||||
handle, err := xdr.ReadOpaque(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
fs, path, err := userHandle.FromHandle(handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
|
||||
out, err := fs.Readlink(ctx, fs.Join(path...))
|
||||
if err != nil {
|
||||
if info, err := fs.Stat(ctx, fs.Join(path...)); err == nil {
|
||||
if info.Mode()&os.ModeSymlink == 0 {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return &NFSStatusError{NFSStatusNoEnt, err}
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return &NFSStatusError{NFSStatusJukebox, err}
|
||||
}
|
||||
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := xdr.Write(writer, out); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
85
pkg/go-nfs/nfs_onremove.go
Normal file
85
pkg/go-nfs/nfs_onremove.go
Normal file
|
@ -0,0 +1,85 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
func onRemove(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = wccDataErrorFormatter
|
||||
obj := DirOpArg{}
|
||||
if err := xdr.Read(w.req.Body, &obj); err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
fs, path, err := userHandle.FromHandle(obj.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
|
||||
// TODO
|
||||
// if !CapabilityCheck(fs, billy.WriteCapability) {
|
||||
// return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
|
||||
// }
|
||||
|
||||
if len(string(obj.Filename)) > PathNameMax {
|
||||
return &NFSStatusError{NFSStatusNameTooLong, nil}
|
||||
}
|
||||
|
||||
fullPath := fs.Join(path...)
|
||||
dirInfo, err := fs.Stat(ctx, fullPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &NFSStatusError{NFSStatusNoEnt, err}
|
||||
}
|
||||
if os.IsPermission(err) {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return &NFSStatusError{NFSStatusJukebox, err}
|
||||
}
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
if !dirInfo.IsDir() {
|
||||
return &NFSStatusError{NFSStatusNotDir, nil}
|
||||
}
|
||||
preCacheData := ToFileAttribute(dirInfo, fullPath).AsCache()
|
||||
|
||||
toDelete := fs.Join(append(path, string(obj.Filename))...)
|
||||
toDeleteHandle := userHandle.ToHandle(fs, append(path, string(obj.Filename)))
|
||||
|
||||
err = fs.Remove(ctx, toDelete)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &NFSStatusError{NFSStatusNoEnt, err}
|
||||
}
|
||||
if os.IsPermission(err) {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return &NFSStatusError{NFSStatusJukebox, err}
|
||||
}
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
|
||||
if err := userHandle.InvalidateHandle(fs, toDeleteHandle); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := WriteWcc(writer, preCacheData, tryStat(ctx, fs, path)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
120
pkg/go-nfs/nfs_onrename.go
Normal file
120
pkg/go-nfs/nfs_onrename.go
Normal file
|
@ -0,0 +1,120 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
var doubleWccErrorBody = [16]byte{}
|
||||
|
||||
func onRename(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = errFormatterWithBody(doubleWccErrorBody[:])
|
||||
from := DirOpArg{}
|
||||
err := xdr.Read(w.req.Body, &from)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
fs, fromPath, err := userHandle.FromHandle(from.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
|
||||
to := DirOpArg{}
|
||||
if err = xdr.Read(w.req.Body, &to); err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
fs2, toPath, err := userHandle.FromHandle(to.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
// check the two fs are the same
|
||||
if !reflect.DeepEqual(fs, fs2) {
|
||||
return &NFSStatusError{NFSStatusNotSupp, os.ErrPermission}
|
||||
}
|
||||
|
||||
if !CapabilityCheck(fs, billy.WriteCapability) {
|
||||
return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
|
||||
}
|
||||
|
||||
if len(string(from.Filename)) > PathNameMax || len(string(to.Filename)) > PathNameMax {
|
||||
return &NFSStatusError{NFSStatusNameTooLong, os.ErrInvalid}
|
||||
}
|
||||
|
||||
fromDirPath := fs.Join(fromPath...)
|
||||
fromDirInfo, err := fs.Stat(ctx, fromDirPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &NFSStatusError{NFSStatusNoEnt, err}
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return &NFSStatusError{NFSStatusJukebox, err}
|
||||
}
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
if !fromDirInfo.IsDir() {
|
||||
return &NFSStatusError{NFSStatusNotDir, nil}
|
||||
}
|
||||
preCacheData := ToFileAttribute(fromDirInfo, fromDirPath).AsCache()
|
||||
|
||||
toDirPath := fs.Join(toPath...)
|
||||
toDirInfo, err := fs.Stat(ctx, toDirPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &NFSStatusError{NFSStatusNoEnt, err}
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return &NFSStatusError{NFSStatusJukebox, err}
|
||||
}
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
if !toDirInfo.IsDir() {
|
||||
return &NFSStatusError{NFSStatusNotDir, nil}
|
||||
}
|
||||
preDestData := ToFileAttribute(toDirInfo, toDirPath).AsCache()
|
||||
|
||||
oldHandle := userHandle.ToHandle(fs, append(fromPath, string(from.Filename)))
|
||||
|
||||
fromLoc := fs.Join(append(fromPath, string(from.Filename))...)
|
||||
toLoc := fs.Join(append(toPath, string(to.Filename))...)
|
||||
|
||||
err = fs.Rename(ctx, fromLoc, toLoc)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &NFSStatusError{NFSStatusNoEnt, err}
|
||||
}
|
||||
if os.IsPermission(err) {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return &NFSStatusError{NFSStatusJukebox, err}
|
||||
}
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
|
||||
if err := userHandle.InvalidateHandle(fs, oldHandle); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := WriteWcc(writer, preCacheData, tryStat(ctx, fs, fromPath)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := WriteWcc(writer, preDestData, tryStat(ctx, fs, toPath)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
9
pkg/go-nfs/nfs_onrmdir.go
Normal file
9
pkg/go-nfs/nfs_onrmdir.go
Normal file
|
@ -0,0 +1,9 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
func onRmDir(ctx context.Context, w *response, userHandle Handler) error {
|
||||
return onRemove(ctx, w, userHandle)
|
||||
}
|
80
pkg/go-nfs/nfs_onsetattr.go
Normal file
80
pkg/go-nfs/nfs_onsetattr.go
Normal file
|
@ -0,0 +1,80 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
func onSetAttr(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = wccDataErrorFormatter
|
||||
handle, err := xdr.ReadOpaque(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
fs, path, err := userHandle.FromHandle(handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
attrs, err := ReadSetFileAttributes(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
fullPath := fs.Join(path...)
|
||||
info, err := fs.Lstat(ctx, fullPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &NFSStatusError{NFSStatusNoEnt, err}
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return &NFSStatusError{NFSStatusJukebox, err}
|
||||
}
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
|
||||
// see if there's a "guard"
|
||||
if guard, err := xdr.ReadUint32(w.req.Body); err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
} else if guard != 0 {
|
||||
// read the ctime.
|
||||
t := FileTime{}
|
||||
if err := xdr.Read(w.req.Body, &t); err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
attr := ToFileAttribute(info, fullPath)
|
||||
if t != attr.Ctime {
|
||||
return &NFSStatusError{NFSStatusNotSync, nil}
|
||||
}
|
||||
}
|
||||
|
||||
if !CapabilityCheck(fs, billy.WriteCapability) {
|
||||
return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
|
||||
}
|
||||
|
||||
changer := userHandle.Change(fs)
|
||||
if err := attrs.Apply(ctx, changer, fs, fs.Join(path...)); err != nil {
|
||||
// Already an nfsstatuserror
|
||||
return err
|
||||
}
|
||||
|
||||
preAttr := ToFileAttribute(info, fullPath).AsCache()
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := WriteWcc(writer, preAttr, tryStat(ctx, fs, path)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
88
pkg/go-nfs/nfs_onsymlink.go
Normal file
88
pkg/go-nfs/nfs_onsymlink.go
Normal file
|
@ -0,0 +1,88 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
func onSymlink(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = wccDataErrorFormatter
|
||||
obj := DirOpArg{}
|
||||
err := xdr.Read(w.req.Body, &obj)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
attrs, err := ReadSetFileAttributes(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
target, err := xdr.ReadOpaque(w.req.Body)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
fs, path, err := userHandle.FromHandle(obj.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
if !CapabilityCheck(fs, billy.WriteCapability) {
|
||||
return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
|
||||
}
|
||||
|
||||
if len(string(obj.Filename)) > PathNameMax {
|
||||
return &NFSStatusError{NFSStatusNameTooLong, os.ErrInvalid}
|
||||
}
|
||||
|
||||
newFilePath := fs.Join(append(path, string(obj.Filename))...)
|
||||
if _, err := fs.Stat(ctx, newFilePath); err == nil {
|
||||
return &NFSStatusError{NFSStatusExist, os.ErrExist}
|
||||
}
|
||||
if s, err := fs.Stat(ctx, fs.Join(path...)); err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
} else if !s.IsDir() {
|
||||
return &NFSStatusError{NFSStatusNotDir, nil}
|
||||
}
|
||||
|
||||
err = fs.Symlink(ctx, string(target), newFilePath)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
|
||||
fp := userHandle.ToHandle(fs, append(path, string(obj.Filename)))
|
||||
changer := userHandle.Change(fs)
|
||||
if changer != nil {
|
||||
if err := attrs.Apply(ctx, changer, fs, newFilePath); err != nil {
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
// "handle follows"
|
||||
if err := xdr.Write(writer, uint32(1)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := xdr.Write(writer, fp); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := WritePostOpAttrs(writer, tryStat(ctx, fs, append(path, string(obj.Filename)))); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := WriteWcc(writer, nil, tryStat(ctx, fs, path)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
116
pkg/go-nfs/nfs_onwrite.go
Normal file
116
pkg/go-nfs/nfs_onwrite.go
Normal file
|
@ -0,0 +1,116 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
// writeStability is the level of durability requested with the write
|
||||
type writeStability uint32
|
||||
|
||||
const (
|
||||
unstable writeStability = 0
|
||||
dataSync writeStability = 1
|
||||
fileSync writeStability = 2
|
||||
)
|
||||
|
||||
type writeArgs struct {
|
||||
Handle []byte
|
||||
Offset uint64
|
||||
Count uint32
|
||||
How uint32
|
||||
Data []byte
|
||||
}
|
||||
|
||||
func onWrite(ctx context.Context, w *response, userHandle Handler) error {
|
||||
w.errorFmt = wccDataErrorFormatter
|
||||
var req writeArgs
|
||||
if err := xdr.Read(w.req.Body, &req); err != nil {
|
||||
return &NFSStatusError{NFSStatusInval, err}
|
||||
}
|
||||
|
||||
fs, path, err := userHandle.FromHandle(req.Handle)
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusStale, err}
|
||||
}
|
||||
// TODO
|
||||
// if !CapabilityCheck(fs, billy.WriteCapability) {
|
||||
// return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
|
||||
// }
|
||||
if len(req.Data) > math.MaxInt32 || req.Count > math.MaxInt32 {
|
||||
return &NFSStatusError{NFSStatusFBig, os.ErrInvalid}
|
||||
}
|
||||
if req.How != uint32(unstable) && req.How != uint32(dataSync) && req.How != uint32(fileSync) {
|
||||
return &NFSStatusError{NFSStatusInval, os.ErrInvalid}
|
||||
}
|
||||
|
||||
// stat first for pre-op wcc.
|
||||
fullPath := fs.Join(path...)
|
||||
info, err := fs.Stat(ctx, fullPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return &NFSStatusError{NFSStatusNoEnt, err}
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return &NFSStatusError{NFSStatusJukebox, err}
|
||||
}
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if !info.Mode().IsRegular() {
|
||||
return &NFSStatusError{NFSStatusInval, os.ErrInvalid}
|
||||
}
|
||||
preOpCache := ToFileAttribute(info, fullPath).AsCache()
|
||||
|
||||
// now the actual op.
|
||||
file, err := fs.OpenFile(ctx, fs.Join(path...), os.O_RDWR, info.Mode().Perm())
|
||||
if err != nil {
|
||||
return &NFSStatusError{NFSStatusAccess, err}
|
||||
}
|
||||
if req.Offset > 0 {
|
||||
if _, err := file.Seek(int64(req.Offset), io.SeekStart); err != nil {
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
}
|
||||
end := req.Count
|
||||
if len(req.Data) < int(end) {
|
||||
end = uint32(len(req.Data))
|
||||
}
|
||||
writtenCount, err := file.Write(ctx, req.Data[:end])
|
||||
if err != nil {
|
||||
Log.Errorf("Error writing: %v", err)
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
if err := file.Close(ctx); err != nil {
|
||||
Log.Errorf("error closing: %v", err)
|
||||
return &NFSStatusError{NFSStatusIO, err}
|
||||
}
|
||||
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := WriteWcc(writer, preOpCache, tryStat(ctx, fs, path)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := xdr.Write(writer, uint32(writtenCount)); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := xdr.Write(writer, fileSync); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
if err := xdr.Write(writer, w.Server.ID); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
|
||||
if err := w.Write(writer.Bytes()); err != nil {
|
||||
return &NFSStatusError{NFSStatusServerFault, err}
|
||||
}
|
||||
return nil
|
||||
}
|
293
pkg/go-nfs/nfs_test.go
Normal file
293
pkg/go-nfs/nfs_test.go
Normal file
|
@ -0,0 +1,293 @@
|
|||
package nfs_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers/memfs"
|
||||
|
||||
nfsc "github.com/willscott/go-nfs-client/nfs"
|
||||
rpc "github.com/willscott/go-nfs-client/nfs/rpc"
|
||||
"github.com/willscott/go-nfs-client/nfs/util"
|
||||
"github.com/willscott/go-nfs-client/nfs/xdr"
|
||||
)
|
||||
|
||||
func TestNFS(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
if testing.Verbose() {
|
||||
util.DefaultLogger.SetDebug(true)
|
||||
}
|
||||
|
||||
// make an empty in-memory server.
|
||||
listener, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
mem := helpers.WrapBillyFS(memfs.New())
|
||||
// File needs to exist in the root for memfs to acknowledge the root exists.
|
||||
_, _ = mem.Create(ctx, "/test")
|
||||
|
||||
handler := helpers.NewNullAuthHandler(mem)
|
||||
cacheHelper := helpers.NewCachingHandler(handler, 1024)
|
||||
go func() {
|
||||
_ = nfs.Serve(listener, cacheHelper)
|
||||
}()
|
||||
|
||||
c, err := rpc.DialTCP(listener.Addr().Network(), listener.Addr().(*net.TCPAddr).String(), false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
var mounter nfsc.Mount
|
||||
mounter.Client = c
|
||||
target, err := mounter.Mount("/", rpc.AuthNull)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
_ = mounter.Unmount()
|
||||
}()
|
||||
|
||||
_, err = target.FSInfo()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Validate sample file creation
|
||||
_, err = target.Create("/helloworld.txt", 0666)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if info, err := mem.Stat(ctx, "/helloworld.txt"); err != nil {
|
||||
t.Fatal(err)
|
||||
} else {
|
||||
if info.Size() != 0 || info.Mode().Perm() != 0666 {
|
||||
t.Fatal("incorrect creation.")
|
||||
}
|
||||
}
|
||||
|
||||
// Validate writing to a file.
|
||||
f, err := target.OpenFile("/helloworld.txt", 0666)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := []byte("hello world")
|
||||
_, err = f.Write(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mf, _ := mem.Open(ctx, "/helloworld.txt")
|
||||
buf := make([]byte, len(b))
|
||||
if _, err = mf.Read(ctx, buf[:]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(buf, b) {
|
||||
t.Fatal("written does not match expected")
|
||||
}
|
||||
|
||||
// for test nfs.ReadDirPlus in case of many files
|
||||
dirF1, err := mem.ReadDir(ctx, "/")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
shouldBeNames := []string{}
|
||||
for _, f := range dirF1 {
|
||||
shouldBeNames = append(shouldBeNames, f.Name())
|
||||
}
|
||||
for i := 0; i < 2000; i++ {
|
||||
fName := fmt.Sprintf("f-%04d.txt", i)
|
||||
shouldBeNames = append(shouldBeNames, fName)
|
||||
f, err := mem.Create(ctx, fName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.Close(ctx)
|
||||
}
|
||||
|
||||
manyEntitiesPlus, err := target.ReadDirPlus("/")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
actualBeNamesPlus := []string{}
|
||||
for _, e := range manyEntitiesPlus {
|
||||
actualBeNamesPlus = append(actualBeNamesPlus, e.Name())
|
||||
}
|
||||
|
||||
as := sort.StringSlice(shouldBeNames)
|
||||
bs := sort.StringSlice(actualBeNamesPlus)
|
||||
as.Sort()
|
||||
bs.Sort()
|
||||
if !reflect.DeepEqual(as, bs) {
|
||||
t.Fatal("nfs.ReadDirPlus error")
|
||||
}
|
||||
|
||||
// for test nfs.ReadDir in case of many files
|
||||
manyEntities, err := readDir(target, "/")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
actualBeNames := []string{}
|
||||
for _, e := range manyEntities {
|
||||
actualBeNames = append(actualBeNames, e.FileName)
|
||||
}
|
||||
|
||||
as2 := sort.StringSlice(shouldBeNames)
|
||||
bs2 := sort.StringSlice(actualBeNames)
|
||||
as2.Sort()
|
||||
bs2.Sort()
|
||||
if !reflect.DeepEqual(as2, bs2) {
|
||||
fmt.Printf("should be %v\n", as2)
|
||||
fmt.Printf("actual be %v\n", bs2)
|
||||
t.Fatal("nfs.ReadDir error")
|
||||
}
|
||||
|
||||
// confirm rename works as expected
|
||||
oldFA, _, err := target.Lookup("/f-0010.txt", false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := target.Rename("/f-0010.txt", "/g-0010.txt"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
new, _, err := target.Lookup("/g-0010.txt", false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if new.Sys() != oldFA.Sys() {
|
||||
t.Fatal("rename failed to update")
|
||||
}
|
||||
_, _, err = target.Lookup("/f-0010.txt", false)
|
||||
if err == nil {
|
||||
t.Fatal("old handle should be invalid")
|
||||
}
|
||||
|
||||
// for test nfs.ReadDirPlus in case of empty directory
|
||||
_, err = target.Mkdir("/empty", 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
emptyEntitiesPlus, err := target.ReadDirPlus("/empty")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(emptyEntitiesPlus) != 0 {
|
||||
t.Fatal("nfs.ReadDirPlus error reading empty dir")
|
||||
}
|
||||
|
||||
// for test nfs.ReadDir in case of empty directory
|
||||
emptyEntities, err := readDir(target, "/empty")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(emptyEntities) != 0 {
|
||||
t.Fatal("nfs.ReadDir error reading empty dir")
|
||||
}
|
||||
}
|
||||
|
||||
type readDirEntry struct {
|
||||
FileId uint64
|
||||
FileName string
|
||||
Cookie uint64
|
||||
}
|
||||
|
||||
// readDir implementation "appropriated" from go-nfs-client implementation of READDIRPLUS
|
||||
func readDir(target *nfsc.Target, dir string) ([]*readDirEntry, error) {
|
||||
_, fh, err := target.Lookup(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
type readDirArgs struct {
|
||||
rpc.Header
|
||||
Handle []byte
|
||||
Cookie uint64
|
||||
CookieVerif uint64
|
||||
Count uint32
|
||||
}
|
||||
|
||||
type readDirList struct {
|
||||
IsSet bool `xdr:"union"`
|
||||
Entry readDirEntry `xdr:"unioncase=1"`
|
||||
}
|
||||
|
||||
type readDirListOK struct {
|
||||
DirAttrs nfsc.PostOpAttr
|
||||
CookieVerf uint64
|
||||
}
|
||||
|
||||
cookie := uint64(0)
|
||||
cookieVerf := uint64(0)
|
||||
eof := false
|
||||
|
||||
var entries []*readDirEntry
|
||||
for !eof {
|
||||
res, err := target.Call(&readDirArgs{
|
||||
Header: rpc.Header{
|
||||
Rpcvers: 2,
|
||||
Vers: nfsc.Nfs3Vers,
|
||||
Prog: nfsc.Nfs3Prog,
|
||||
Proc: uint32(nfs.NFSProcedureReadDir),
|
||||
Cred: rpc.AuthNull,
|
||||
Verf: rpc.AuthNull,
|
||||
},
|
||||
Handle: fh,
|
||||
Cookie: cookie,
|
||||
CookieVerif: cookieVerf,
|
||||
Count: 4096,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
status, err := xdr.ReadUint32(res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = nfsc.NFS3Error(status); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dirListOK := new(readDirListOK)
|
||||
if err = xdr.Read(res, dirListOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for {
|
||||
var item readDirList
|
||||
if err = xdr.Read(res, &item); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !item.IsSet {
|
||||
break
|
||||
}
|
||||
|
||||
cookie = item.Entry.Cookie
|
||||
if item.Entry.FileName == "." || item.Entry.FileName == ".." {
|
||||
continue
|
||||
}
|
||||
entries = append(entries, &item.Entry)
|
||||
}
|
||||
|
||||
if err = xdr.Read(res, &eof); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cookieVerf = dirListOK.CookieVerf
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
188
pkg/go-nfs/nfsinterface.go
Normal file
188
pkg/go-nfs/nfsinterface.go
Normal file
|
@ -0,0 +1,188 @@
|
|||
package nfs
|
||||
|
||||
// NFSProcedure is the valid RPC calls for the nfs service.
|
||||
type NFSProcedure uint32
|
||||
|
||||
// NfsProcedure Codes
|
||||
const (
|
||||
NFSProcedureNull NFSProcedure = iota
|
||||
NFSProcedureGetAttr
|
||||
NFSProcedureSetAttr
|
||||
NFSProcedureLookup
|
||||
NFSProcedureAccess
|
||||
NFSProcedureReadlink
|
||||
NFSProcedureRead
|
||||
NFSProcedureWrite
|
||||
NFSProcedureCreate
|
||||
NFSProcedureMkDir
|
||||
NFSProcedureSymlink
|
||||
NFSProcedureMkNod
|
||||
NFSProcedureRemove
|
||||
NFSProcedureRmDir
|
||||
NFSProcedureRename
|
||||
NFSProcedureLink
|
||||
NFSProcedureReadDir
|
||||
NFSProcedureReadDirPlus
|
||||
NFSProcedureFSStat
|
||||
NFSProcedureFSInfo
|
||||
NFSProcedurePathConf
|
||||
NFSProcedureCommit
|
||||
)
|
||||
|
||||
func (n NFSProcedure) String() string {
|
||||
switch n {
|
||||
case NFSProcedureNull:
|
||||
return "Null"
|
||||
case NFSProcedureGetAttr:
|
||||
return "GetAttr"
|
||||
case NFSProcedureSetAttr:
|
||||
return "SetAttr"
|
||||
case NFSProcedureLookup:
|
||||
return "Lookup"
|
||||
case NFSProcedureAccess:
|
||||
return "Access"
|
||||
case NFSProcedureReadlink:
|
||||
return "ReadLink"
|
||||
case NFSProcedureRead:
|
||||
return "Read"
|
||||
case NFSProcedureWrite:
|
||||
return "Write"
|
||||
case NFSProcedureCreate:
|
||||
return "Create"
|
||||
case NFSProcedureMkDir:
|
||||
return "Mkdir"
|
||||
case NFSProcedureSymlink:
|
||||
return "Symlink"
|
||||
case NFSProcedureMkNod:
|
||||
return "Mknod"
|
||||
case NFSProcedureRemove:
|
||||
return "Remove"
|
||||
case NFSProcedureRmDir:
|
||||
return "Rmdir"
|
||||
case NFSProcedureRename:
|
||||
return "Rename"
|
||||
case NFSProcedureLink:
|
||||
return "Link"
|
||||
case NFSProcedureReadDir:
|
||||
return "ReadDir"
|
||||
case NFSProcedureReadDirPlus:
|
||||
return "ReadDirPlus"
|
||||
case NFSProcedureFSStat:
|
||||
return "FSStat"
|
||||
case NFSProcedureFSInfo:
|
||||
return "FSInfo"
|
||||
case NFSProcedurePathConf:
|
||||
return "PathConf"
|
||||
case NFSProcedureCommit:
|
||||
return "Commit"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// NFSStatus (nfsstat3) is a result code for nfs rpc calls
|
||||
type NFSStatus uint32
|
||||
|
||||
// NFSStatus codes
|
||||
const (
|
||||
NFSStatusOk NFSStatus = 0
|
||||
NFSStatusPerm NFSStatus = 1
|
||||
NFSStatusNoEnt NFSStatus = 2
|
||||
NFSStatusIO NFSStatus = 5
|
||||
NFSStatusNXIO NFSStatus = 6
|
||||
NFSStatusAccess NFSStatus = 13
|
||||
NFSStatusExist NFSStatus = 17
|
||||
NFSStatusXDev NFSStatus = 18
|
||||
NFSStatusNoDev NFSStatus = 19
|
||||
NFSStatusNotDir NFSStatus = 20
|
||||
NFSStatusIsDir NFSStatus = 21
|
||||
NFSStatusInval NFSStatus = 22
|
||||
NFSStatusFBig NFSStatus = 27
|
||||
NFSStatusNoSPC NFSStatus = 28
|
||||
NFSStatusROFS NFSStatus = 30
|
||||
NFSStatusMlink NFSStatus = 31
|
||||
NFSStatusNameTooLong NFSStatus = 63
|
||||
NFSStatusNotEmpty NFSStatus = 66
|
||||
NFSStatusDQuot NFSStatus = 69
|
||||
NFSStatusStale NFSStatus = 70
|
||||
NFSStatusRemote NFSStatus = 71
|
||||
NFSStatusBadHandle NFSStatus = 10001
|
||||
NFSStatusNotSync NFSStatus = 10002
|
||||
NFSStatusBadCookie NFSStatus = 10003
|
||||
NFSStatusNotSupp NFSStatus = 10004
|
||||
NFSStatusTooSmall NFSStatus = 10005
|
||||
NFSStatusServerFault NFSStatus = 10006
|
||||
NFSStatusBadType NFSStatus = 10007
|
||||
NFSStatusJukebox NFSStatus = 10008
|
||||
)
|
||||
|
||||
func (s NFSStatus) String() string {
|
||||
switch s {
|
||||
case NFSStatusOk:
|
||||
return "Call Completed Successfull"
|
||||
case NFSStatusPerm:
|
||||
return "Not Owner"
|
||||
case NFSStatusNoEnt:
|
||||
return "No such file or directory"
|
||||
case NFSStatusIO:
|
||||
return "I/O error"
|
||||
case NFSStatusNXIO:
|
||||
return "I/O error: No such device"
|
||||
case NFSStatusAccess:
|
||||
return "Permission denied"
|
||||
case NFSStatusExist:
|
||||
return "File exists"
|
||||
case NFSStatusXDev:
|
||||
return "Attempt to do a cross device hard link"
|
||||
case NFSStatusNoDev:
|
||||
return "No such device"
|
||||
case NFSStatusNotDir:
|
||||
return "Not a directory"
|
||||
case NFSStatusIsDir:
|
||||
return "Is a directory"
|
||||
case NFSStatusInval:
|
||||
return "Invalid argument"
|
||||
case NFSStatusFBig:
|
||||
return "File too large"
|
||||
case NFSStatusNoSPC:
|
||||
return "No space left on device"
|
||||
case NFSStatusROFS:
|
||||
return "Read only file system"
|
||||
case NFSStatusMlink:
|
||||
return "Too many hard links"
|
||||
case NFSStatusNameTooLong:
|
||||
return "Name too long"
|
||||
case NFSStatusNotEmpty:
|
||||
return "Not empty"
|
||||
case NFSStatusDQuot:
|
||||
return "Resource quota exceeded"
|
||||
case NFSStatusStale:
|
||||
return "Invalid file handle"
|
||||
case NFSStatusRemote:
|
||||
return "Too many levels of remote in path"
|
||||
case NFSStatusBadHandle:
|
||||
return "Illegal NFS file handle"
|
||||
case NFSStatusNotSync:
|
||||
return "Synchronization mismatch"
|
||||
case NFSStatusBadCookie:
|
||||
return "Cookie is Stale"
|
||||
case NFSStatusNotSupp:
|
||||
return "Operation not supported"
|
||||
case NFSStatusTooSmall:
|
||||
return "Buffer or request too small"
|
||||
case NFSStatusServerFault:
|
||||
return "Unmapped error (EIO)"
|
||||
case NFSStatusBadType:
|
||||
return "Type not supported"
|
||||
case NFSStatusJukebox:
|
||||
return "Initiated, but too slow. Try again with new txn"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// DirOpArg is a common serialization used for referencing an object in a directory
|
||||
type DirOpArg struct {
|
||||
Handle []byte
|
||||
Filename []byte
|
||||
}
|
102
pkg/go-nfs/server.go
Normal file
102
pkg/go-nfs/server.go
Normal file
|
@ -0,0 +1,102 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Server is a handle to the listening NFS server.
|
||||
type Server struct {
|
||||
Handler
|
||||
ID [8]byte
|
||||
}
|
||||
|
||||
// RegisterMessageHandler registers a handler for a specific
|
||||
// XDR procedure.
|
||||
func RegisterMessageHandler(protocol uint32, proc uint32, handler HandleFunc) error {
|
||||
if registeredHandlers == nil {
|
||||
registeredHandlers = make(map[registeredHandlerID]HandleFunc)
|
||||
}
|
||||
for k := range registeredHandlers {
|
||||
if k.protocol == protocol && k.proc == proc {
|
||||
return errors.New("already registered")
|
||||
}
|
||||
}
|
||||
id := registeredHandlerID{protocol, proc}
|
||||
registeredHandlers[id] = handler
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleFunc represents a handler for a specific protocol message.
|
||||
type HandleFunc func(ctx context.Context, w *response, userHandler Handler) error
|
||||
|
||||
// TODO: store directly as a uint64 for more efficient lookups
|
||||
type registeredHandlerID struct {
|
||||
protocol uint32
|
||||
proc uint32
|
||||
}
|
||||
|
||||
var registeredHandlers map[registeredHandlerID]HandleFunc
|
||||
|
||||
// Serve listens on the provided listener port for incoming client requests.
|
||||
func (s *Server) Serve(l net.Listener) error {
|
||||
defer l.Close()
|
||||
if bytes.Equal(s.ID[:], []byte{0, 0, 0, 0, 0, 0, 0, 0}) {
|
||||
if _, err := rand.Reader.Read(s.ID[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var tempDelay time.Duration
|
||||
|
||||
for {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Timeout() {
|
||||
if tempDelay == 0 {
|
||||
tempDelay = 5 * time.Millisecond
|
||||
} else {
|
||||
tempDelay *= 2
|
||||
}
|
||||
if max := 1 * time.Second; tempDelay > max {
|
||||
tempDelay = max
|
||||
}
|
||||
time.Sleep(tempDelay)
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
tempDelay = 0
|
||||
c := s.newConn(conn)
|
||||
go c.serve()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) newConn(nc net.Conn) *conn {
|
||||
c := &conn{
|
||||
Server: s,
|
||||
Conn: nc,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// TODO: keep an immutable map for each server instance to have less
|
||||
// chance of races.
|
||||
func (s *Server) handlerFor(prog uint32, proc uint32) HandleFunc {
|
||||
for k, v := range registeredHandlers {
|
||||
if k.protocol == prog && k.proc == proc {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serve is a singleton listener paralleling http.Serve
|
||||
func Serve(l net.Listener, handler Handler) error {
|
||||
srv := &Server{Handler: handler}
|
||||
return srv.Serve(l)
|
||||
}
|
32
pkg/go-nfs/time.go
Normal file
32
pkg/go-nfs/time.go
Normal file
|
@ -0,0 +1,32 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileTime is the NFS wire time format
|
||||
// This is equivalent to go-nfs-client/nfs.NFS3Time
|
||||
type FileTime struct {
|
||||
Seconds uint32
|
||||
Nseconds uint32
|
||||
}
|
||||
|
||||
// ToNFSTime generates the nfs 64bit time format from a golang time.
|
||||
func ToNFSTime(t time.Time) FileTime {
|
||||
return FileTime{
|
||||
Seconds: uint32(t.Unix()),
|
||||
Nseconds: uint32(t.UnixNano() % int64(time.Second)),
|
||||
}
|
||||
}
|
||||
|
||||
// Native generates a golang time from an nfs time spec
|
||||
func (t FileTime) Native() *time.Time {
|
||||
ts := time.Unix(int64(t.Seconds), int64(t.Nseconds))
|
||||
return &ts
|
||||
}
|
||||
|
||||
// EqualTimespec returns if this time is equal to a local time spec
|
||||
func (t FileTime) EqualTimespec(sec int64, nsec int64) bool {
|
||||
// TODO: bounds check on sec/nsec overflow
|
||||
return t.Nseconds == uint32(nsec) && t.Seconds == uint32(sec)
|
||||
}
|
90
pkg/kvtrace/kvmetrics.go
Normal file
90
pkg/kvtrace/kvmetrics.go
Normal file
|
@ -0,0 +1,90 @@
|
|||
package kvtrace
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/royalcat/kv"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// tracer is the package-level OpenTelemetry tracer used to start a span
// around every wrapped store operation.
var tracer = otel.Tracer("github.com/royalcat/kv/tracer")

// traceSrtore decorates a kv.Store so each operation is recorded as an
// OpenTelemetry span carrying the configured attributes.
// NOTE(review): the type name contains a typo ("Srtore"); it is unexported,
// so renaming it would be a purely internal cleanup.
type traceSrtore[K, V any] struct {
	kv kv.Store[K, V] // the wrapped underlying store
	attrs []attribute.KeyValue // attributes attached to every span
}

// WrapTracing decorates kv with tracing instrumentation; attrs are attached
// to every span the wrapper creates.
func WrapTracing[K, V any](kv kv.Store[K, V], attrs ...attribute.KeyValue) kv.Store[K, V] {
	return &traceSrtore[K, V]{
		kv:    kv,
		attrs: attrs,
	}
}
|
||||
|
||||
// Close implements kv.Store.
|
||||
func (m *traceSrtore[K, V]) Close(ctx context.Context) error {
|
||||
ctx, span := tracer.Start(ctx, "Close", trace.WithAttributes(m.attrs...))
|
||||
defer span.End()
|
||||
|
||||
return m.kv.Close(ctx)
|
||||
}
|
||||
|
||||
// Delete implements kv.Store.
|
||||
func (m *traceSrtore[K, V]) Delete(ctx context.Context, k K) error {
|
||||
ctx, span := tracer.Start(ctx, "Delete", trace.WithAttributes(m.attrs...))
|
||||
defer span.End()
|
||||
|
||||
return m.kv.Delete(ctx, k)
|
||||
}
|
||||
|
||||
// Get implements kv.Store.
|
||||
func (m *traceSrtore[K, V]) Get(ctx context.Context, k K) (v V, found bool, err error) {
|
||||
ctx, span := tracer.Start(ctx, "Get", trace.WithAttributes(m.attrs...))
|
||||
defer span.End()
|
||||
|
||||
return m.kv.Get(ctx, k)
|
||||
}
|
||||
|
||||
// Range implements kv.Store.
|
||||
func (m *traceSrtore[K, V]) Range(ctx context.Context, iter kv.Iter[K, V]) error {
|
||||
ctx, span := tracer.Start(ctx, "Range", trace.WithAttributes(m.attrs...))
|
||||
defer span.End()
|
||||
|
||||
count := 0
|
||||
iterCount := func(k K, v V) bool {
|
||||
count++
|
||||
return iter(k, v)
|
||||
}
|
||||
|
||||
err := m.kv.Range(ctx, iterCount)
|
||||
span.SetAttributes(attribute.Int("count", count))
|
||||
return err
|
||||
}
|
||||
|
||||
// RangeWithPrefix implements kv.Store.
|
||||
func (m *traceSrtore[K, V]) RangeWithPrefix(ctx context.Context, k K, iter kv.Iter[K, V]) error {
|
||||
ctx, span := tracer.Start(ctx, "RangeWithPrefix", trace.WithAttributes(m.attrs...))
|
||||
defer span.End()
|
||||
|
||||
count := 0
|
||||
iterCount := func(k K, v V) bool {
|
||||
count++
|
||||
return iter(k, v)
|
||||
}
|
||||
|
||||
err := m.kv.Range(ctx, iterCount)
|
||||
span.SetAttributes(attribute.Int("count", count))
|
||||
return err
|
||||
}
|
||||
|
||||
// Set implements kv.Store.
|
||||
func (m *traceSrtore[K, V]) Set(ctx context.Context, k K, v V) error {
|
||||
ctx, span := tracer.Start(ctx, "Set", trace.WithAttributes(m.attrs...))
|
||||
defer span.End()
|
||||
|
||||
return m.kv.Set(ctx, k, v)
|
||||
}
|
||||
|
||||
var _ kv.Store[any, any] = (*traceSrtore[any, any])(nil)
|
|
@ -1,6 +1,6 @@
|
|||
package config
|
||||
|
||||
var defaultConfig = Config{
|
||||
var defaultConfig = Settings{
|
||||
SourceDir: "./data",
|
||||
WebUi: WebUi{
|
||||
Port: 4444,
|
||||
|
@ -21,8 +21,9 @@ var defaultConfig = Config{
|
|||
Enabled: false,
|
||||
},
|
||||
NFS: NFS{
|
||||
Enabled: false,
|
||||
Port: 8122,
|
||||
Enabled: false,
|
||||
Port: 8122,
|
||||
CachePath: "./nfs-cache",
|
||||
},
|
||||
},
|
||||
|
||||
|
|
|
@ -13,8 +13,9 @@ import (
|
|||
|
||||
var k = koanf.New(".")
|
||||
|
||||
func Load(path string) (*Config, error) {
|
||||
var Config = defaultConfig
|
||||
|
||||
func Load(path string) (*Settings, error) {
|
||||
err := k.Load(structs.Provider(defaultConfig, "koanf"), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -50,7 +51,7 @@ func Load(path string) (*Config, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
conf := Config{}
|
||||
conf := Settings{}
|
||||
err = k.Unmarshal("", &conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
package config
|
||||
|
||||
// Config is the main config object
|
||||
type Config struct {
|
||||
type Settings struct {
|
||||
WebUi WebUi `koanf:"webUi"`
|
||||
TorrentClient TorrentClient `koanf:"torrent"`
|
||||
Mounts Mounts `koanf:"mounts"`
|
||||
|
@ -67,8 +67,9 @@ type Mounts struct {
|
|||
}
|
||||
|
||||
type NFS struct {
|
||||
Enabled bool `koanf:"enabled"`
|
||||
Port int `koanf:"port"`
|
||||
Enabled bool `koanf:"enabled"`
|
||||
Port int `koanf:"port"`
|
||||
CachePath string `koanf:"cache_path"`
|
||||
}
|
||||
|
||||
type HttpFs struct {
|
||||
|
|
|
@ -57,6 +57,11 @@ type ComplexityRoot struct {
|
|||
Size func(childComplexity int) int
|
||||
}
|
||||
|
||||
CleanupResponse struct {
|
||||
Count func(childComplexity int) int
|
||||
List func(childComplexity int) int
|
||||
}
|
||||
|
||||
Dir struct {
|
||||
Name func(childComplexity int) int
|
||||
}
|
||||
|
@ -70,8 +75,14 @@ type ComplexityRoot struct {
|
|||
Size func(childComplexity int) int
|
||||
}
|
||||
|
||||
ListDirResponse struct {
|
||||
Entries func(childComplexity int) int
|
||||
Root func(childComplexity int) int
|
||||
}
|
||||
|
||||
Mutation struct {
|
||||
CleanupTorrents func(childComplexity int, files *bool, dryRun bool) int
|
||||
DedupeStorage func(childComplexity int) int
|
||||
DownloadTorrent func(childComplexity int, infohash string, file *string) int
|
||||
ValidateTorrents func(childComplexity int, filter model.TorrentFilter) int
|
||||
}
|
||||
|
@ -138,12 +149,13 @@ type ComplexityRoot struct {
|
|||
|
||||
type MutationResolver interface {
|
||||
ValidateTorrents(ctx context.Context, filter model.TorrentFilter) (bool, error)
|
||||
CleanupTorrents(ctx context.Context, files *bool, dryRun bool) (int64, error)
|
||||
CleanupTorrents(ctx context.Context, files *bool, dryRun bool) (*model.CleanupResponse, error)
|
||||
DownloadTorrent(ctx context.Context, infohash string, file *string) (*model.DownloadTorrentResponse, error)
|
||||
DedupeStorage(ctx context.Context) (int64, error)
|
||||
}
|
||||
type QueryResolver interface {
|
||||
Torrents(ctx context.Context, filter *model.TorrentsFilter, pagination *model.Pagination) ([]*model.Torrent, error)
|
||||
FsListDir(ctx context.Context, path string) ([]model.DirEntry, error)
|
||||
FsListDir(ctx context.Context, path string) (*model.ListDirResponse, error)
|
||||
}
|
||||
type SubscriptionResolver interface {
|
||||
TaskProgress(ctx context.Context, taskID string) (<-chan model.Progress, error)
|
||||
|
@ -190,6 +202,20 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
|
||||
return e.complexity.ArchiveFS.Size(childComplexity), true
|
||||
|
||||
case "CleanupResponse.count":
|
||||
if e.complexity.CleanupResponse.Count == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.CleanupResponse.Count(childComplexity), true
|
||||
|
||||
case "CleanupResponse.list":
|
||||
if e.complexity.CleanupResponse.List == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.CleanupResponse.List(childComplexity), true
|
||||
|
||||
case "Dir.name":
|
||||
if e.complexity.Dir.Name == nil {
|
||||
break
|
||||
|
@ -218,6 +244,20 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
|
||||
return e.complexity.File.Size(childComplexity), true
|
||||
|
||||
case "ListDirResponse.entries":
|
||||
if e.complexity.ListDirResponse.Entries == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.ListDirResponse.Entries(childComplexity), true
|
||||
|
||||
case "ListDirResponse.root":
|
||||
if e.complexity.ListDirResponse.Root == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.ListDirResponse.Root(childComplexity), true
|
||||
|
||||
case "Mutation.cleanupTorrents":
|
||||
if e.complexity.Mutation.CleanupTorrents == nil {
|
||||
break
|
||||
|
@ -230,6 +270,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
|
|||
|
||||
return e.complexity.Mutation.CleanupTorrents(childComplexity, args["files"].(*bool), args["dryRun"].(bool)), true
|
||||
|
||||
case "Mutation.dedupeStorage":
|
||||
if e.complexity.Mutation.DedupeStorage == nil {
|
||||
break
|
||||
}
|
||||
|
||||
return e.complexity.Mutation.DedupeStorage(childComplexity), true
|
||||
|
||||
case "Mutation.downloadTorrent":
|
||||
if e.complexity.Mutation.DownloadTorrent == nil {
|
||||
break
|
||||
|
@ -602,28 +649,34 @@ func (ec *executionContext) introspectType(name string) (*introspection.Type, er
|
|||
|
||||
var sources = []*ast.Source{
|
||||
{Name: "../../../graphql/mutation.graphql", Input: `type Mutation {
|
||||
validateTorrents(filter: TorrentFilter!): Boolean!
|
||||
cleanupTorrents(files: Boolean, dryRun: Boolean!): Int!
|
||||
downloadTorrent(infohash: String!, file: String): DownloadTorrentResponse
|
||||
validateTorrents(filter: TorrentFilter!): Boolean!
|
||||
cleanupTorrents(files: Boolean, dryRun: Boolean!): CleanupResponse!
|
||||
downloadTorrent(infohash: String!, file: String): DownloadTorrentResponse
|
||||
dedupeStorage: Int!
|
||||
}
|
||||
|
||||
|
||||
input TorrentFilter @oneOf {
|
||||
everything: Boolean
|
||||
infohash: String
|
||||
# pathGlob: String!
|
||||
everything: Boolean
|
||||
infohash: String
|
||||
# pathGlob: String!
|
||||
}
|
||||
|
||||
type DownloadTorrentResponse {
|
||||
task: Task
|
||||
task: Task
|
||||
}
|
||||
|
||||
type CleanupResponse {
|
||||
count: Int!
|
||||
list: [String!]!
|
||||
}
|
||||
|
||||
type Task {
|
||||
id: ID!
|
||||
}`, BuiltIn: false},
|
||||
id: ID!
|
||||
}
|
||||
`, BuiltIn: false},
|
||||
{Name: "../../../graphql/query.graphql", Input: `type Query {
|
||||
torrents(filter: TorrentsFilter, pagination: Pagination): [Torrent!]!
|
||||
fsListDir(path: String!): [DirEntry!]!
|
||||
fsListDir(path: String!): ListDirResponse!
|
||||
}
|
||||
|
||||
input TorrentsFilter {
|
||||
|
@ -634,6 +687,11 @@ input TorrentsFilter {
|
|||
peersCount: IntFilter
|
||||
}
|
||||
|
||||
type ListDirResponse {
|
||||
root: DirEntry!
|
||||
entries: [DirEntry!]!
|
||||
}
|
||||
|
||||
input Pagination {
|
||||
offset: Int!
|
||||
limit: Int!
|
||||
|
@ -1008,6 +1066,94 @@ func (ec *executionContext) fieldContext_ArchiveFS_size(ctx context.Context, fie
|
|||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _CleanupResponse_count(ctx context.Context, field graphql.CollectedField, obj *model.CleanupResponse) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_CleanupResponse_count(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.Count, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
if !graphql.HasFieldError(ctx, fc) {
|
||||
ec.Errorf(ctx, "must not be null")
|
||||
}
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(int64)
|
||||
fc.Result = res
|
||||
return ec.marshalNInt2int64(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_CleanupResponse_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "CleanupResponse",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type Int does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _CleanupResponse_list(ctx context.Context, field graphql.CollectedField, obj *model.CleanupResponse) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_CleanupResponse_list(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.List, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
if !graphql.HasFieldError(ctx, fc) {
|
||||
ec.Errorf(ctx, "must not be null")
|
||||
}
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.([]string)
|
||||
fc.Result = res
|
||||
return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_CleanupResponse_list(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "CleanupResponse",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type String does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _Dir_name(ctx context.Context, field graphql.CollectedField, obj *model.Dir) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_Dir_name(ctx, field)
|
||||
if err != nil {
|
||||
|
@ -1185,6 +1331,94 @@ func (ec *executionContext) fieldContext_File_size(ctx context.Context, field gr
|
|||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _ListDirResponse_root(ctx context.Context, field graphql.CollectedField, obj *model.ListDirResponse) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_ListDirResponse_root(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.Root, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
if !graphql.HasFieldError(ctx, fc) {
|
||||
ec.Errorf(ctx, "must not be null")
|
||||
}
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(model.DirEntry)
|
||||
fc.Result = res
|
||||
return ec.marshalNDirEntry2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDirEntry(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_ListDirResponse_root(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "ListDirResponse",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _ListDirResponse_entries(ctx context.Context, field graphql.CollectedField, obj *model.ListDirResponse) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_ListDirResponse_entries(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return obj.Entries, nil
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
if !graphql.HasFieldError(ctx, fc) {
|
||||
ec.Errorf(ctx, "must not be null")
|
||||
}
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.([]model.DirEntry)
|
||||
fc.Result = res
|
||||
return ec.marshalNDirEntry2ᚕgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDirEntryᚄ(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_ListDirResponse_entries(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "ListDirResponse",
|
||||
Field: field,
|
||||
IsMethod: false,
|
||||
IsResolver: false,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _Mutation_validateTorrents(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_Mutation_validateTorrents(ctx, field)
|
||||
if err != nil {
|
||||
|
@ -1266,9 +1500,9 @@ func (ec *executionContext) _Mutation_cleanupTorrents(ctx context.Context, field
|
|||
}
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(int64)
|
||||
res := resTmp.(*model.CleanupResponse)
|
||||
fc.Result = res
|
||||
return ec.marshalNInt2int64(ctx, field.Selections, res)
|
||||
return ec.marshalNCleanupResponse2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐCleanupResponse(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_Mutation_cleanupTorrents(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
|
@ -1278,7 +1512,13 @@ func (ec *executionContext) fieldContext_Mutation_cleanupTorrents(ctx context.Co
|
|||
IsMethod: true,
|
||||
IsResolver: true,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type Int does not have child fields")
|
||||
switch field.Name {
|
||||
case "count":
|
||||
return ec.fieldContext_CleanupResponse_count(ctx, field)
|
||||
case "list":
|
||||
return ec.fieldContext_CleanupResponse_list(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type CleanupResponse", field.Name)
|
||||
},
|
||||
}
|
||||
defer func() {
|
||||
|
@ -1351,6 +1591,50 @@ func (ec *executionContext) fieldContext_Mutation_downloadTorrent(ctx context.Co
|
|||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _Mutation_dedupeStorage(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_Mutation_dedupeStorage(ctx, field)
|
||||
if err != nil {
|
||||
return graphql.Null
|
||||
}
|
||||
ctx = graphql.WithFieldContext(ctx, fc)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ec.Error(ctx, ec.Recover(ctx, r))
|
||||
ret = graphql.Null
|
||||
}
|
||||
}()
|
||||
resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
|
||||
ctx = rctx // use context from middleware stack in children
|
||||
return ec.resolvers.Mutation().DedupeStorage(rctx)
|
||||
})
|
||||
if err != nil {
|
||||
ec.Error(ctx, err)
|
||||
return graphql.Null
|
||||
}
|
||||
if resTmp == nil {
|
||||
if !graphql.HasFieldError(ctx, fc) {
|
||||
ec.Errorf(ctx, "must not be null")
|
||||
}
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.(int64)
|
||||
fc.Result = res
|
||||
return ec.marshalNInt2int64(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_Mutation_dedupeStorage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
fc = &graphql.FieldContext{
|
||||
Object: "Mutation",
|
||||
Field: field,
|
||||
IsMethod: true,
|
||||
IsResolver: true,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("field of type Int does not have child fields")
|
||||
},
|
||||
}
|
||||
return fc, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) _Query_torrents(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
|
||||
fc, err := ec.fieldContext_Query_torrents(ctx, field)
|
||||
if err != nil {
|
||||
|
@ -1450,9 +1734,9 @@ func (ec *executionContext) _Query_fsListDir(ctx context.Context, field graphql.
|
|||
}
|
||||
return graphql.Null
|
||||
}
|
||||
res := resTmp.([]model.DirEntry)
|
||||
res := resTmp.(*model.ListDirResponse)
|
||||
fc.Result = res
|
||||
return ec.marshalNDirEntry2ᚕgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDirEntryᚄ(ctx, field.Selections, res)
|
||||
return ec.marshalNListDirResponse2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐListDirResponse(ctx, field.Selections, res)
|
||||
}
|
||||
|
||||
func (ec *executionContext) fieldContext_Query_fsListDir(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
|
||||
|
@ -1462,7 +1746,13 @@ func (ec *executionContext) fieldContext_Query_fsListDir(ctx context.Context, fi
|
|||
IsMethod: true,
|
||||
IsResolver: true,
|
||||
Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
|
||||
return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE")
|
||||
switch field.Name {
|
||||
case "root":
|
||||
return ec.fieldContext_ListDirResponse_root(ctx, field)
|
||||
case "entries":
|
||||
return ec.fieldContext_ListDirResponse_entries(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type ListDirResponse", field.Name)
|
||||
},
|
||||
}
|
||||
defer func() {
|
||||
|
@ -1723,6 +2013,8 @@ func (ec *executionContext) fieldContext_Schema_mutation(ctx context.Context, fi
|
|||
return ec.fieldContext_Mutation_cleanupTorrents(ctx, field)
|
||||
case "downloadTorrent":
|
||||
return ec.fieldContext_Mutation_downloadTorrent(ctx, field)
|
||||
case "dedupeStorage":
|
||||
return ec.fieldContext_Mutation_dedupeStorage(ctx, field)
|
||||
}
|
||||
return nil, fmt.Errorf("no field named %q was found under type Mutation", field.Name)
|
||||
},
|
||||
|
@ -5400,6 +5692,50 @@ func (ec *executionContext) _ArchiveFS(ctx context.Context, sel ast.SelectionSet
|
|||
return out
|
||||
}
|
||||
|
||||
var cleanupResponseImplementors = []string{"CleanupResponse"}
|
||||
|
||||
func (ec *executionContext) _CleanupResponse(ctx context.Context, sel ast.SelectionSet, obj *model.CleanupResponse) graphql.Marshaler {
|
||||
fields := graphql.CollectFields(ec.OperationContext, sel, cleanupResponseImplementors)
|
||||
|
||||
out := graphql.NewFieldSet(fields)
|
||||
deferred := make(map[string]*graphql.FieldSet)
|
||||
for i, field := range fields {
|
||||
switch field.Name {
|
||||
case "__typename":
|
||||
out.Values[i] = graphql.MarshalString("CleanupResponse")
|
||||
case "count":
|
||||
out.Values[i] = ec._CleanupResponse_count(ctx, field, obj)
|
||||
if out.Values[i] == graphql.Null {
|
||||
out.Invalids++
|
||||
}
|
||||
case "list":
|
||||
out.Values[i] = ec._CleanupResponse_list(ctx, field, obj)
|
||||
if out.Values[i] == graphql.Null {
|
||||
out.Invalids++
|
||||
}
|
||||
default:
|
||||
panic("unknown field " + strconv.Quote(field.Name))
|
||||
}
|
||||
}
|
||||
out.Dispatch(ctx)
|
||||
if out.Invalids > 0 {
|
||||
return graphql.Null
|
||||
}
|
||||
|
||||
atomic.AddInt32(&ec.deferred, int32(len(deferred)))
|
||||
|
||||
for label, dfs := range deferred {
|
||||
ec.processDeferredGroup(graphql.DeferredGroup{
|
||||
Label: label,
|
||||
Path: graphql.GetPath(ctx),
|
||||
FieldSet: dfs,
|
||||
Context: ctx,
|
||||
})
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
var dirImplementors = []string{"Dir", "DirEntry"}
|
||||
|
||||
func (ec *executionContext) _Dir(ctx context.Context, sel ast.SelectionSet, obj *model.Dir) graphql.Marshaler {
|
||||
|
@ -5519,6 +5855,50 @@ func (ec *executionContext) _File(ctx context.Context, sel ast.SelectionSet, obj
|
|||
return out
|
||||
}
|
||||
|
||||
var listDirResponseImplementors = []string{"ListDirResponse"}
|
||||
|
||||
func (ec *executionContext) _ListDirResponse(ctx context.Context, sel ast.SelectionSet, obj *model.ListDirResponse) graphql.Marshaler {
|
||||
fields := graphql.CollectFields(ec.OperationContext, sel, listDirResponseImplementors)
|
||||
|
||||
out := graphql.NewFieldSet(fields)
|
||||
deferred := make(map[string]*graphql.FieldSet)
|
||||
for i, field := range fields {
|
||||
switch field.Name {
|
||||
case "__typename":
|
||||
out.Values[i] = graphql.MarshalString("ListDirResponse")
|
||||
case "root":
|
||||
out.Values[i] = ec._ListDirResponse_root(ctx, field, obj)
|
||||
if out.Values[i] == graphql.Null {
|
||||
out.Invalids++
|
||||
}
|
||||
case "entries":
|
||||
out.Values[i] = ec._ListDirResponse_entries(ctx, field, obj)
|
||||
if out.Values[i] == graphql.Null {
|
||||
out.Invalids++
|
||||
}
|
||||
default:
|
||||
panic("unknown field " + strconv.Quote(field.Name))
|
||||
}
|
||||
}
|
||||
out.Dispatch(ctx)
|
||||
if out.Invalids > 0 {
|
||||
return graphql.Null
|
||||
}
|
||||
|
||||
atomic.AddInt32(&ec.deferred, int32(len(deferred)))
|
||||
|
||||
for label, dfs := range deferred {
|
||||
ec.processDeferredGroup(graphql.DeferredGroup{
|
||||
Label: label,
|
||||
Path: graphql.GetPath(ctx),
|
||||
FieldSet: dfs,
|
||||
Context: ctx,
|
||||
})
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
var mutationImplementors = []string{"Mutation"}
|
||||
|
||||
func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler {
|
||||
|
@ -5556,6 +5936,13 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet)
|
|||
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
|
||||
return ec._Mutation_downloadTorrent(ctx, field)
|
||||
})
|
||||
case "dedupeStorage":
|
||||
out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
|
||||
return ec._Mutation_dedupeStorage(ctx, field)
|
||||
})
|
||||
if out.Values[i] == graphql.Null {
|
||||
out.Invalids++
|
||||
}
|
||||
default:
|
||||
panic("unknown field " + strconv.Quote(field.Name))
|
||||
}
|
||||
|
@ -6551,6 +6938,20 @@ func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.Se
|
|||
return res
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalNCleanupResponse2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐCleanupResponse(ctx context.Context, sel ast.SelectionSet, v model.CleanupResponse) graphql.Marshaler {
|
||||
return ec._CleanupResponse(ctx, sel, &v)
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalNCleanupResponse2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐCleanupResponse(ctx context.Context, sel ast.SelectionSet, v *model.CleanupResponse) graphql.Marshaler {
|
||||
if v == nil {
|
||||
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
|
||||
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
|
||||
}
|
||||
return graphql.Null
|
||||
}
|
||||
return ec._CleanupResponse(ctx, sel, v)
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalNDirEntry2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDirEntry(ctx context.Context, sel ast.SelectionSet, v model.DirEntry) graphql.Marshaler {
|
||||
if v == nil {
|
||||
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
|
||||
|
@ -6650,6 +7051,20 @@ func (ec *executionContext) marshalNInt2int64(ctx context.Context, sel ast.Selec
|
|||
return res
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalNListDirResponse2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐListDirResponse(ctx context.Context, sel ast.SelectionSet, v model.ListDirResponse) graphql.Marshaler {
|
||||
return ec._ListDirResponse(ctx, sel, &v)
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalNListDirResponse2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐListDirResponse(ctx context.Context, sel ast.SelectionSet, v *model.ListDirResponse) graphql.Marshaler {
|
||||
if v == nil {
|
||||
if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
|
||||
ec.Errorf(ctx, "the requested element is null which the schema does not allow")
|
||||
}
|
||||
return graphql.Null
|
||||
}
|
||||
return ec._ListDirResponse(ctx, sel, v)
|
||||
}
|
||||
|
||||
func (ec *executionContext) unmarshalNString2string(ctx context.Context, v interface{}) (string, error) {
|
||||
res, err := graphql.UnmarshalString(v)
|
||||
return res, graphql.ErrorOnPath(ctx, err)
|
||||
|
@ -6665,6 +7080,38 @@ func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.S
|
|||
return res
|
||||
}
|
||||
|
||||
func (ec *executionContext) unmarshalNString2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) {
|
||||
var vSlice []interface{}
|
||||
if v != nil {
|
||||
vSlice = graphql.CoerceList(v)
|
||||
}
|
||||
var err error
|
||||
res := make([]string, len(vSlice))
|
||||
for i := range vSlice {
|
||||
ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
|
||||
res[i], err = ec.unmarshalNString2string(ctx, vSlice[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler {
|
||||
ret := make(graphql.Array, len(v))
|
||||
for i := range v {
|
||||
ret[i] = ec.marshalNString2string(ctx, sel, v[i])
|
||||
}
|
||||
|
||||
for _, e := range ret {
|
||||
if e == graphql.Null {
|
||||
return graphql.Null
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (ec *executionContext) marshalNTorrent2ᚕᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Torrent) graphql.Marshaler {
|
||||
ret := make(graphql.Array, len(v))
|
||||
var wg sync.WaitGroup
|
||||
|
|
|
@ -32,6 +32,11 @@ type BooleanFilter struct {
|
|||
Eq *bool `json:"eq,omitempty"`
|
||||
}
|
||||
|
||||
type CleanupResponse struct {
|
||||
Count int64 `json:"count"`
|
||||
List []string `json:"list"`
|
||||
}
|
||||
|
||||
type DateTimeFilter struct {
|
||||
Eq *time.Time `json:"eq,omitempty"`
|
||||
Gt *time.Time `json:"gt,omitempty"`
|
||||
|
@ -68,6 +73,11 @@ type IntFilter struct {
|
|||
In []int64 `json:"in,omitempty"`
|
||||
}
|
||||
|
||||
type ListDirResponse struct {
|
||||
Root DirEntry `json:"root"`
|
||||
Entries []DirEntry `json:"entries"`
|
||||
}
|
||||
|
||||
type Mutation struct {
|
||||
}
|
||||
|
||||
|
|
|
@ -46,18 +46,24 @@ func (r *mutationResolver) ValidateTorrents(ctx context.Context, filter model.To
|
|||
}
|
||||
|
||||
// CleanupTorrents is the resolver for the cleanupTorrents field.
|
||||
func (r *mutationResolver) CleanupTorrents(ctx context.Context, files *bool, dryRun bool) (int64, error) {
|
||||
func (r *mutationResolver) CleanupTorrents(ctx context.Context, files *bool, dryRun bool) (*model.CleanupResponse, error) {
|
||||
torrents, err := r.Service.ListTorrents(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if files != nil && *files {
|
||||
r, err := r.Service.Storage.CleanupFiles(ctx, torrents, dryRun)
|
||||
return int64(r), err
|
||||
return &model.CleanupResponse{
|
||||
Count: int64(len(r)),
|
||||
List: r,
|
||||
}, err
|
||||
} else {
|
||||
r, err := r.Service.Storage.CleanupDirs(ctx, torrents, dryRun)
|
||||
return int64(r), err
|
||||
return &model.CleanupResponse{
|
||||
Count: int64(len(r)),
|
||||
List: r,
|
||||
}, err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -80,6 +86,15 @@ func (r *mutationResolver) DownloadTorrent(ctx context.Context, infohash string,
|
|||
return &model.DownloadTorrentResponse{}, nil
|
||||
}
|
||||
|
||||
// DedupeStorage is the resolver for the dedupeStorage field.
|
||||
func (r *mutationResolver) DedupeStorage(ctx context.Context) (int64, error) {
|
||||
deduped, err := r.Service.Storage.Dedupe(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int64(deduped), nil
|
||||
}
|
||||
|
||||
// Mutation returns graph.MutationResolver implementation.
|
||||
func (r *Resolver) Mutation() graph.MutationResolver { return &mutationResolver{r} }
|
||||
|
||||
|
|
|
@ -6,6 +6,7 @@ package resolver
|
|||
|
||||
import (
|
||||
"context"
|
||||
"io/fs"
|
||||
|
||||
graph "git.kmsign.ru/royalcat/tstor/src/delivery/graphql"
|
||||
"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model"
|
||||
|
@ -64,51 +65,75 @@ func (r *queryResolver) Torrents(ctx context.Context, filter *model.TorrentsFilt
|
|||
return tr, nil
|
||||
}
|
||||
|
||||
type dirEntry interface {
|
||||
Name() string
|
||||
IsDir() bool
|
||||
}
|
||||
|
||||
func fillDirEntry(e dirEntry) model.DirEntry {
|
||||
switch e.(type) {
|
||||
case *vfs.ArchiveFS:
|
||||
e := e.(*vfs.ArchiveFS)
|
||||
return model.ArchiveFs{
|
||||
Name: e.Name(),
|
||||
Size: e.Size(),
|
||||
}
|
||||
case *vfs.ResolverFS:
|
||||
e := e.(*vfs.ResolverFS)
|
||||
return model.ResolverFs{
|
||||
Name: e.Name(),
|
||||
}
|
||||
case *vfs.TorrentFs:
|
||||
e := e.(*vfs.TorrentFs)
|
||||
return model.TorrentFs{
|
||||
Name: e.Name(),
|
||||
Torrent: model.MapTorrent(e.Torrent),
|
||||
}
|
||||
default:
|
||||
if e.IsDir() {
|
||||
return model.Dir{
|
||||
Name: e.Name(),
|
||||
}
|
||||
}
|
||||
if de, ok := e.(fs.DirEntry); ok {
|
||||
info, _ := de.Info()
|
||||
return model.File{
|
||||
Name: e.Name(),
|
||||
Size: info.Size(),
|
||||
}
|
||||
}
|
||||
|
||||
if fe, ok := e.(fs.FileInfo); ok {
|
||||
return model.File{
|
||||
Name: fe.Name(),
|
||||
Size: fe.Size(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
panic("this dir entry is strange af")
|
||||
}
|
||||
|
||||
// FsListDir is the resolver for the fsListDir field.
|
||||
func (r *queryResolver) FsListDir(ctx context.Context, path string) ([]model.DirEntry, error) {
|
||||
func (r *queryResolver) FsListDir(ctx context.Context, path string) (*model.ListDirResponse, error) {
|
||||
root, err := r.VFS.Stat(ctx, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
entries, err := r.VFS.ReadDir(ctx, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := []model.DirEntry{}
|
||||
for _, e := range entries {
|
||||
switch e.(type) {
|
||||
case *vfs.ArchiveFS:
|
||||
e := e.(*vfs.ArchiveFS)
|
||||
out = append(out, model.ArchiveFs{
|
||||
Name: e.Name(),
|
||||
Size: e.Size,
|
||||
})
|
||||
case *vfs.ResolverFS:
|
||||
e := e.(*vfs.ResolverFS)
|
||||
out = append(out, model.ResolverFs{
|
||||
Name: e.Name(),
|
||||
})
|
||||
case *vfs.TorrentFs:
|
||||
e := e.(*vfs.TorrentFs)
|
||||
out = append(out, model.TorrentFs{
|
||||
Name: e.Name(),
|
||||
Torrent: model.MapTorrent(e.Torrent),
|
||||
})
|
||||
default:
|
||||
if e.IsDir() {
|
||||
out = append(out, model.Dir{Name: e.Name()})
|
||||
} else {
|
||||
info, err := e.Info()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out = append(out, model.File{
|
||||
Name: e.Name(),
|
||||
Size: info.Size(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
out = append(out, fillDirEntry(e))
|
||||
}
|
||||
|
||||
return out, nil
|
||||
return &model.ListDirResponse{
|
||||
Root: fillDirEntry(root),
|
||||
Entries: out,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Query returns graph.QueryResolver implementation.
|
||||
|
|
|
@ -15,7 +15,7 @@ import (
|
|||
"github.com/shurcooL/httpfs/html/vfstemplate"
|
||||
)
|
||||
|
||||
func New(fc *filecache.Cache, ss *service.Stats, s *service.Service, vfs vfs.Filesystem, logPath string, cfg *config.Config) error {
|
||||
func New(fc *filecache.Cache, ss *service.Stats, s *service.Service, vfs vfs.Filesystem, logPath string, cfg *config.Settings) error {
|
||||
log := slog.With()
|
||||
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/resolver"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/service"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"github.com/99designs/gqlgen/graphql"
|
||||
"github.com/99designs/gqlgen/graphql/handler"
|
||||
"github.com/99designs/gqlgen/graphql/handler/extension"
|
||||
"github.com/99designs/gqlgen/graphql/handler/lru"
|
||||
|
@ -33,7 +34,11 @@ func GraphQLHandler(service *service.Service, vfs vfs.Filesystem) http.Handler {
|
|||
graphqlHandler.SetQueryCache(lru.New(1000))
|
||||
graphqlHandler.Use(extension.Introspection{})
|
||||
graphqlHandler.Use(extension.AutomaticPersistedQuery{Cache: lru.New(100)})
|
||||
graphqlHandler.Use(otelgqlgen.Middleware())
|
||||
graphqlHandler.Use(otelgqlgen.Middleware(
|
||||
otelgqlgen.WithCreateSpanFromFields(func(ctx *graphql.FieldContext) bool {
|
||||
return ctx.Field.Directives.ForName("link") != nil
|
||||
}),
|
||||
))
|
||||
|
||||
return graphqlHandler
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@ func (hfs *HTTPFS) filesToFileInfo(name string) ([]fs.FileInfo, error) {
|
|||
return out, nil
|
||||
}
|
||||
|
||||
var _ http.File = &httpFile{}
|
||||
var _ http.File = (*httpFile)(nil)
|
||||
|
||||
type httpFile struct {
|
||||
f vfs.File
|
||||
|
@ -128,5 +128,5 @@ func (f *httpFile) Readdir(count int) ([]fs.FileInfo, error) {
|
|||
}
|
||||
|
||||
func (f *httpFile) Stat() (fs.FileInfo, error) {
|
||||
return f.f.Stat()
|
||||
return f.f.Info()
|
||||
}
|
||||
|
|
|
@ -2,11 +2,12 @@ package nfs
|
|||
|
||||
import (
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
|
||||
nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"git.kmsign.ru/royalcat/tstor/src/log"
|
||||
nfs "github.com/willscott/go-nfs"
|
||||
nfshelper "github.com/willscott/go-nfs/helpers"
|
||||
)
|
||||
|
||||
func NewNFSv3Handler(fs vfs.Filesystem) (nfs.Handler, error) {
|
||||
|
@ -14,10 +15,13 @@ func NewNFSv3Handler(fs vfs.Filesystem) (nfs.Handler, error) {
|
|||
nfs.SetLogger(log.NewNFSLog(nfslog))
|
||||
nfs.Log.SetLevel(nfs.InfoLevel)
|
||||
|
||||
bfs := &billyFsWrapper{fs: fs, log: nfslog}
|
||||
bfs := &fsWrapper{fs: fs, log: nfslog, timeout: time.Minute}
|
||||
handler := nfshelper.NewNullAuthHandler(bfs)
|
||||
|
||||
cacheHelper := nfshelper.NewCachingHandler(handler, 1024)
|
||||
cacheHelper, err := NewKvHandler(handler, bfs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// cacheHelper := NewCachingHandler(handler)
|
||||
|
||||
|
|
127
src/export/nfs/kvhandler.go
Normal file
127
src/export/nfs/kvhandler.go
Normal file
|
@ -0,0 +1,127 @@
|
|||
package nfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
|
||||
"git.kmsign.ru/royalcat/tstor/src/config"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/royalcat/kv"
|
||||
)
|
||||
|
||||
const lifetime = time.Hour * 24
|
||||
|
||||
// NewKvHandler provides a basic to/from-file handle cache that can be tuned with a smaller cache of active directory listings.
|
||||
func NewKvHandler(h nfs.Handler, fs nfs.Filesystem) (nfs.Handler, error) {
|
||||
activeHandles, err := kv.NewBadgerKVMarhsler[uuid.UUID, []string](path.Join(config.Config.Mounts.NFS.CachePath, "handlers"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if s, ok := activeHandles.(kv.BadgerStore); ok {
|
||||
// db := s.BadgerDB()
|
||||
// enable with managed database
|
||||
// go func() {
|
||||
// for n := range time.NewTimer(lifetime / 2).C {
|
||||
// db.SetDiscardTs(uint64(n.Add(-lifetime).Unix()))
|
||||
// }
|
||||
// }()
|
||||
// }
|
||||
|
||||
return &CachingHandler{
|
||||
Handler: h,
|
||||
fs: fs,
|
||||
activeHandles: activeHandles,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CachingHandler implements to/from handle via an LRU cache.
|
||||
type CachingHandler struct {
|
||||
nfs.Handler
|
||||
|
||||
fs nfs.Filesystem
|
||||
activeHandles kv.Store[uuid.UUID, []string]
|
||||
}
|
||||
|
||||
// ToHandle takes a file and represents it with an opaque handle to reference it.
|
||||
// In stateless nfs (when it's serving a unix fs) this can be the device + inode
|
||||
// but we can generalize with a stateful local cache of handed out IDs.
|
||||
func (c *CachingHandler) ToHandle(_ nfs.Filesystem, path []string) []byte {
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
var id uuid.UUID
|
||||
c.activeHandles.Range(ctx, func(k uuid.UUID, v []string) bool {
|
||||
if slices.Equal(path, v) {
|
||||
id = k
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
if id != uuid.Nil {
|
||||
return id[:]
|
||||
}
|
||||
|
||||
id = uuid.New()
|
||||
|
||||
c.activeHandles.Set(ctx, id, path)
|
||||
|
||||
return id[:]
|
||||
}
|
||||
|
||||
// FromHandle converts from an opaque handle to the file it represents
|
||||
func (c *CachingHandler) FromHandle(fh []byte) (nfs.Filesystem, []string, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
id, err := uuid.FromBytes(fh)
|
||||
if err != nil {
|
||||
return nil, []string{}, err
|
||||
}
|
||||
|
||||
paths, found, err := c.activeHandles.Get(ctx, id)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("kv error: %w", err)
|
||||
}
|
||||
|
||||
if found {
|
||||
return c.fs, paths, nil
|
||||
}
|
||||
|
||||
return nil, []string{}, &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
|
||||
}
|
||||
|
||||
func (c *CachingHandler) InvalidateHandle(fs nfs.Filesystem, handle []byte) error {
|
||||
ctx := context.Background()
|
||||
//Remove from cache
|
||||
id, err := uuid.FromBytes(handle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.activeHandles.Delete(ctx, id)
|
||||
return nil
|
||||
}
|
||||
|
||||
const maxInt = int(^uint(0) >> 1)
|
||||
|
||||
// HandleLimit exports how many file handles can be safely stored by this cache.
|
||||
func (c *CachingHandler) HandleLimit() int {
|
||||
return maxInt
|
||||
}
|
||||
|
||||
func hasPrefix(path, prefix []string) bool {
|
||||
if len(prefix) > len(path) {
|
||||
return false
|
||||
}
|
||||
for i, e := range prefix {
|
||||
if path[i] != e {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
|
@ -6,47 +6,45 @@ import (
|
|||
"io/fs"
|
||||
"log/slog"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxbilly"
|
||||
nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
var billyFsTracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/src/export/nfs.billyFsWrapper")
|
||||
|
||||
type billyFsWrapper struct {
|
||||
type fsWrapper struct {
|
||||
fs vfs.Filesystem
|
||||
log *slog.Logger
|
||||
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
var _ billy.Filesystem = (*billyFsWrapper)(nil)
|
||||
var _ billy.Dir = (*billyFsWrapper)(nil)
|
||||
var _ nfs.Filesystem = (*fsWrapper)(nil)
|
||||
|
||||
func (*billyFsWrapper) ctx() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
// var _ ctxbilly.Dir = (*billyFsWrapper)(nil)
|
||||
|
||||
// Chroot implements billy.Filesystem.
|
||||
func (*billyFsWrapper) Chroot(path string) (billy.Filesystem, error) {
|
||||
func (*fsWrapper) Chroot(path string) (nfs.Filesystem, error) {
|
||||
return nil, billy.ErrNotSupported
|
||||
}
|
||||
|
||||
// Create implements billy.Filesystem.
|
||||
func (*billyFsWrapper) Create(filename string) (billy.File, error) {
|
||||
func (*fsWrapper) Create(ctx context.Context, filename string) (nfs.File, error) {
|
||||
return nil, billy.ErrNotSupported
|
||||
}
|
||||
|
||||
// Join implements billy.Filesystem.
|
||||
func (*billyFsWrapper) Join(elem ...string) string {
|
||||
func (*fsWrapper) Join(elem ...string) string {
|
||||
return filepath.Join(elem...)
|
||||
}
|
||||
|
||||
// Lstat implements billy.Filesystem.
|
||||
func (fs *billyFsWrapper) Lstat(filename string) (fs.FileInfo, error) {
|
||||
ctx, span := billyFsTracer.Start(fs.ctx(), "Lstat", trace.WithAttributes(attribute.String("filename", filename)))
|
||||
defer span.End()
|
||||
func (fs *fsWrapper) Lstat(ctx context.Context, filename string) (fs.FileInfo, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, fs.timeout)
|
||||
defer cancel()
|
||||
|
||||
info, err := fs.fs.Stat(ctx, filename)
|
||||
if err != nil {
|
||||
|
@ -56,16 +54,14 @@ func (fs *billyFsWrapper) Lstat(filename string) (fs.FileInfo, error) {
|
|||
}
|
||||
|
||||
// MkdirAll implements billy.Filesystem.
|
||||
func (*billyFsWrapper) MkdirAll(filename string, perm fs.FileMode) error {
|
||||
func (*fsWrapper) MkdirAll(ctx context.Context, filename string, perm fs.FileMode) error {
|
||||
return billy.ErrNotSupported
|
||||
}
|
||||
|
||||
// Open implements billy.Filesystem.
|
||||
func (fs *billyFsWrapper) Open(filename string) (billy.File, error) {
|
||||
ctx, span := billyFsTracer.Start(fs.ctx(), "Open",
|
||||
trace.WithAttributes(attribute.String("filename", filename)),
|
||||
)
|
||||
defer span.End()
|
||||
func (fs *fsWrapper) Open(ctx context.Context, filename string) (nfs.File, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, fs.timeout)
|
||||
defer cancel()
|
||||
|
||||
file, err := fs.fs.Open(ctx, filename)
|
||||
if err != nil {
|
||||
|
@ -79,11 +75,9 @@ func (fs *billyFsWrapper) Open(filename string) (billy.File, error) {
|
|||
}
|
||||
|
||||
// OpenFile implements billy.Filesystem.
|
||||
func (fs *billyFsWrapper) OpenFile(filename string, flag int, perm fs.FileMode) (billy.File, error) {
|
||||
ctx, span := billyFsTracer.Start(fs.ctx(), "OpenFile",
|
||||
trace.WithAttributes(attribute.String("filename", filename)),
|
||||
)
|
||||
defer span.End()
|
||||
func (fs *fsWrapper) OpenFile(ctx context.Context, filename string, flag int, perm fs.FileMode) (nfs.File, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, fs.timeout)
|
||||
defer cancel()
|
||||
|
||||
file, err := fs.fs.Open(ctx, filename)
|
||||
if err != nil {
|
||||
|
@ -97,11 +91,9 @@ func (fs *billyFsWrapper) OpenFile(filename string, flag int, perm fs.FileMode)
|
|||
}
|
||||
|
||||
// ReadDir implements billy.Filesystem.
|
||||
func (bfs *billyFsWrapper) ReadDir(path string) ([]fs.FileInfo, error) {
|
||||
ctx, span := billyFsTracer.Start(bfs.ctx(), "OpenFile",
|
||||
trace.WithAttributes(attribute.String("path", path)),
|
||||
)
|
||||
defer span.End()
|
||||
func (bfs *fsWrapper) ReadDir(ctx context.Context, path string) ([]fs.FileInfo, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, bfs.timeout)
|
||||
defer cancel()
|
||||
|
||||
ffs, err := bfs.fs.ReadDir(ctx, path)
|
||||
if err != nil {
|
||||
|
@ -125,36 +117,32 @@ func (bfs *billyFsWrapper) ReadDir(path string) ([]fs.FileInfo, error) {
|
|||
}
|
||||
|
||||
// Readlink implements billy.Filesystem.
|
||||
func (*billyFsWrapper) Readlink(link string) (string, error) {
|
||||
func (*fsWrapper) Readlink(ctx context.Context, link string) (string, error) {
|
||||
return "", billy.ErrNotSupported
|
||||
}
|
||||
|
||||
// Remove implements billy.Filesystem.
|
||||
func (bfs *billyFsWrapper) Remove(filename string) error {
|
||||
ctx, span := billyFsTracer.Start(bfs.ctx(), "Remove",
|
||||
trace.WithAttributes(attribute.String("filename", filename)),
|
||||
)
|
||||
defer span.End()
|
||||
func (bfs *fsWrapper) Remove(ctx context.Context, filename string) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, bfs.timeout)
|
||||
defer cancel()
|
||||
|
||||
return bfs.fs.Unlink(ctx, filename)
|
||||
}
|
||||
|
||||
// Rename implements billy.Filesystem.
|
||||
func (*billyFsWrapper) Rename(oldpath string, newpath string) error {
|
||||
func (*fsWrapper) Rename(ctx context.Context, oldpath string, newpath string) error {
|
||||
return billy.ErrNotSupported
|
||||
}
|
||||
|
||||
// Root implements billy.Filesystem.
|
||||
func (*billyFsWrapper) Root() string {
|
||||
func (*fsWrapper) Root() string {
|
||||
return "/"
|
||||
}
|
||||
|
||||
// Stat implements billy.Filesystem.
|
||||
func (bfs *billyFsWrapper) Stat(filename string) (fs.FileInfo, error) {
|
||||
ctx, span := billyFsTracer.Start(bfs.ctx(), "Remove",
|
||||
trace.WithAttributes(attribute.String("filename", filename)),
|
||||
)
|
||||
defer span.End()
|
||||
func (bfs *fsWrapper) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, bfs.timeout)
|
||||
defer cancel()
|
||||
|
||||
info, err := bfs.fs.Stat(ctx, filename)
|
||||
if err != nil {
|
||||
|
@ -164,28 +152,21 @@ func (bfs *billyFsWrapper) Stat(filename string) (fs.FileInfo, error) {
|
|||
}
|
||||
|
||||
// Symlink implements billy.Filesystem.
|
||||
func (fs *billyFsWrapper) Symlink(target string, link string) error {
|
||||
func (fs *fsWrapper) Symlink(ctx context.Context, target string, link string) error {
|
||||
return billyErr(nil, vfs.ErrNotImplemented, fs.log)
|
||||
}
|
||||
|
||||
// TempFile implements billy.Filesystem.
|
||||
func (fs *billyFsWrapper) TempFile(dir string, prefix string) (billy.File, error) {
|
||||
return nil, billyErr(nil, vfs.ErrNotImplemented, fs.log)
|
||||
}
|
||||
|
||||
type billyFile struct {
|
||||
ctx context.Context
|
||||
|
||||
name string
|
||||
file vfs.File
|
||||
log *slog.Logger
|
||||
}
|
||||
|
||||
var _ billy.File = (*billyFile)(nil)
|
||||
var _ ctxbilly.File = (*billyFile)(nil)
|
||||
|
||||
// Close implements billy.File.
|
||||
func (f *billyFile) Close() error {
|
||||
return f.Close()
|
||||
func (f *billyFile) Close(ctx context.Context) error {
|
||||
return f.file.Close(ctx)
|
||||
}
|
||||
|
||||
// Name implements billy.File.
|
||||
|
@ -194,31 +175,12 @@ func (f *billyFile) Name() string {
|
|||
}
|
||||
|
||||
// Read implements billy.File.
|
||||
func (bf *billyFile) Read(p []byte) (n int, err error) {
|
||||
ctx, span := billyFsTracer.Start(bf.ctx, "Read",
|
||||
trace.WithAttributes(attribute.Int("length", len(p))),
|
||||
)
|
||||
defer func() {
|
||||
span.SetAttributes(attribute.Int("read", n))
|
||||
span.End()
|
||||
}()
|
||||
|
||||
func (bf *billyFile) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
return bf.file.Read(ctx, p)
|
||||
}
|
||||
|
||||
// ReadAt implements billy.File.
|
||||
func (bf *billyFile) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
ctx, span := billyFsTracer.Start(bf.ctx, "Read",
|
||||
trace.WithAttributes(
|
||||
attribute.Int("length", len(p)),
|
||||
attribute.Int64("offset", off),
|
||||
),
|
||||
)
|
||||
defer func() {
|
||||
span.SetAttributes(attribute.Int("read", n))
|
||||
span.End()
|
||||
}()
|
||||
|
||||
func (bf *billyFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
return bf.file.ReadAt(ctx, p, off)
|
||||
}
|
||||
|
||||
|
@ -228,12 +190,12 @@ func (f *billyFile) Seek(offset int64, whence int) (int64, error) {
|
|||
}
|
||||
|
||||
// Truncate implements billy.File.
|
||||
func (f *billyFile) Truncate(size int64) error {
|
||||
func (f *billyFile) Truncate(ctx context.Context, size int64) error {
|
||||
return billyErr(nil, vfs.ErrNotImplemented, f.log)
|
||||
}
|
||||
|
||||
// Write implements billy.File.
|
||||
func (f *billyFile) Write(p []byte) (n int, err error) {
|
||||
func (f *billyFile) Write(ctx context.Context, p []byte) (n int, err error) {
|
||||
return 0, billyErr(nil, vfs.ErrNotImplemented, f.log)
|
||||
}
|
||||
|
|
@ -96,6 +96,7 @@ type webDAVFile struct {
|
|||
func newFile(ctx context.Context, name string, f vfs.File, df func() ([]os.FileInfo, error)) *webDAVFile {
|
||||
return &webDAVFile{
|
||||
ctx: ctx,
|
||||
f: f,
|
||||
fi: newFileInfo(name, f.Size(), f.IsDir()),
|
||||
dirFunc: df,
|
||||
}
|
||||
|
|
|
@ -13,13 +13,12 @@ import (
|
|||
"github.com/anacrolix/torrent/storage"
|
||||
"github.com/anacrolix/torrent/types/infohash"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/royalcat/kv"
|
||||
)
|
||||
|
||||
// NOT USED
|
||||
type PieceStorage struct {
|
||||
basePath string
|
||||
completion storage.PieceCompletion
|
||||
dirInfohash kv.Store[string, infohash.T]
|
||||
basePath string
|
||||
completion storage.PieceCompletion
|
||||
}
|
||||
|
||||
func NewPieceStorage(path string, completion storage.PieceCompletion) *PieceStorage {
|
||||
|
@ -29,8 +28,6 @@ func NewPieceStorage(path string, completion storage.PieceCompletion) *PieceStor
|
|||
}
|
||||
}
|
||||
|
||||
var _ DataStorage = (*PieceStorage)(nil)
|
||||
|
||||
// OpenTorrent implements FileStorageDeleter.
|
||||
func (p *PieceStorage) OpenTorrent(info *metainfo.Info, infoHash infohash.T) (storage.TorrentImpl, error) {
|
||||
torrentPath := path.Join(p.basePath, infoHash.HexString())
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"github.com/anacrolix/torrent/storage"
|
||||
)
|
||||
|
||||
func Setup(cfg config.TorrentClient) (DataStorage, storage.PieceCompletion, error) {
|
||||
func Setup(cfg config.TorrentClient) (*DataStorage, storage.PieceCompletion, error) {
|
||||
pcp := filepath.Join(cfg.MetadataFolder, "piece-completion")
|
||||
if err := os.MkdirAll(pcp, 0744); err != nil {
|
||||
return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
|
||||
|
|
|
@ -2,49 +2,62 @@ package datastorage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/rlog"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/controller"
|
||||
"github.com/anacrolix/torrent"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/anacrolix/torrent/storage"
|
||||
"github.com/dustin/go-humanize"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type DataStorage interface {
|
||||
storage.ClientImplCloser
|
||||
DeleteFile(file *torrent.File) error
|
||||
CleanupDirs(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error)
|
||||
CleanupFiles(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error)
|
||||
}
|
||||
// type DataStorage interface {
|
||||
// storage.ClientImplCloser
|
||||
// DeleteFile(file *torrent.File) error
|
||||
// CleanupDirs(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error)
|
||||
// CleanupFiles(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error)
|
||||
// }
|
||||
|
||||
var tracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/src/host/datastorage")
|
||||
|
||||
// NewFileStorage creates a new ClientImplCloser that stores files using the OS native filesystem.
|
||||
func NewFileStorage(baseDir string, pc storage.PieceCompletion) *FileStorage {
|
||||
return &FileStorage{
|
||||
baseDir: baseDir,
|
||||
func NewFileStorage(baseDir string, pc storage.PieceCompletion) *DataStorage {
|
||||
return &DataStorage{
|
||||
ClientImplCloser: storage.NewFileOpts(storage.NewFileClientOpts{
|
||||
ClientBaseDir: baseDir,
|
||||
PieceCompletion: pc,
|
||||
TorrentDirMaker: torrentDir,
|
||||
FilePathMaker: filePath,
|
||||
}),
|
||||
baseDir: baseDir,
|
||||
pieceCompletion: pc,
|
||||
log: slog.With("component", "torrent-client"),
|
||||
}
|
||||
}
|
||||
|
||||
// File-based storage for torrents, that isn't yet bound to a particular torrent.
|
||||
type FileStorage struct {
|
||||
type DataStorage struct {
|
||||
baseDir string
|
||||
storage.ClientImplCloser
|
||||
pieceCompletion storage.PieceCompletion
|
||||
log *slog.Logger
|
||||
}
|
||||
|
||||
func (me *FileStorage) Close() error {
|
||||
func (me *DataStorage) Close() error {
|
||||
return me.pieceCompletion.Close()
|
||||
}
|
||||
|
||||
|
@ -61,14 +74,14 @@ func filePath(opts storage.FilePathMakerOpts) string {
|
|||
return filepath.Join(opts.File.Path...)
|
||||
}
|
||||
|
||||
func (fs *FileStorage) filePath(info *metainfo.Info, infoHash metainfo.Hash, fileInfo *metainfo.FileInfo) string {
|
||||
func (fs *DataStorage) filePath(info *metainfo.Info, infoHash metainfo.Hash, fileInfo *metainfo.FileInfo) string {
|
||||
return filepath.Join(torrentDir(fs.baseDir, info, infoHash), filePath(storage.FilePathMakerOpts{
|
||||
Info: info,
|
||||
File: fileInfo,
|
||||
}))
|
||||
}
|
||||
|
||||
func (fs *FileStorage) DeleteFile(file *torrent.File) error {
|
||||
func (fs *DataStorage) DeleteFile(file *torrent.File) error {
|
||||
info := file.Torrent().Info()
|
||||
infoHash := file.Torrent().InfoHash()
|
||||
torrentDir := torrentDir(fs.baseDir, info, infoHash)
|
||||
|
@ -88,7 +101,7 @@ func (fs *FileStorage) DeleteFile(file *torrent.File) error {
|
|||
return os.Remove(filePath)
|
||||
}
|
||||
|
||||
func (fs *FileStorage) CleanupDirs(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error) {
|
||||
func (fs *DataStorage) CleanupDirs(ctx context.Context, expected []*controller.Torrent, dryRun bool) ([]string, error) {
|
||||
log := fs.log.With("function", "CleanupDirs", "expectedTorrents", len(expected), "dryRun", dryRun)
|
||||
|
||||
expectedEntries := []string{}
|
||||
|
@ -98,7 +111,7 @@ func (fs *FileStorage) CleanupDirs(ctx context.Context, expected []*controller.T
|
|||
|
||||
entries, err := os.ReadDir(fs.baseDir)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
toDelete := []string{}
|
||||
|
@ -109,7 +122,7 @@ func (fs *FileStorage) CleanupDirs(ctx context.Context, expected []*controller.T
|
|||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return 0, ctx.Err()
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
log.Info("deleting trash data", "dirsCount", len(toDelete))
|
||||
|
@ -119,40 +132,34 @@ func (fs *FileStorage) CleanupDirs(ctx context.Context, expected []*controller.T
|
|||
log.Warn("deleting trash data", "path", p)
|
||||
err := os.RemoveAll(p)
|
||||
if err != nil {
|
||||
return i, err
|
||||
return toDelete[:i], err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return len(toDelete), nil
|
||||
return toDelete, nil
|
||||
}
|
||||
|
||||
// func (fs *FileStorage) IsCompatable(ctx context.Context, addition *controller.Torrent, dryRun bool) (bool, error) {
|
||||
// log := fs.log.With("function", "IsCompatable", "addition", addition.Name())
|
||||
|
||||
// ifp
|
||||
// }
|
||||
|
||||
func (fs *FileStorage) CleanupFiles(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error) {
|
||||
log := fs.log.With("function", "CleanupFiles", "expectedTorrents", len(expected), "dryRun", dryRun)
|
||||
func (s *DataStorage) CleanupFiles(ctx context.Context, expected []*controller.Torrent, dryRun bool) ([]string, error) {
|
||||
log := s.log.With("function", "CleanupFiles", "expectedTorrents", len(expected), "dryRun", dryRun)
|
||||
|
||||
expectedEntries := []string{}
|
||||
{
|
||||
for _, e := range expected {
|
||||
files, err := e.Files(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
expectedEntries = append(expectedEntries, fs.filePath(e.Torrent().Info(), e.Torrent().InfoHash(), ptr(f.FileInfo())))
|
||||
expectedEntries = append(expectedEntries, s.filePath(e.Torrent().Info(), e.Torrent().InfoHash(), ptr(f.FileInfo())))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
entries := []string{}
|
||||
err := filepath.Walk(fs.baseDir,
|
||||
func(path string, info os.FileInfo, err error) error {
|
||||
err := filepath.WalkDir(s.baseDir,
|
||||
func(path string, info fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -167,7 +174,7 @@ func (fs *FileStorage) CleanupFiles(ctx context.Context, expected []*controller.
|
|||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
toDelete := []string{}
|
||||
|
@ -178,20 +185,243 @@ func (fs *FileStorage) CleanupFiles(ctx context.Context, expected []*controller.
|
|||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return len(toDelete), ctx.Err()
|
||||
return toDelete, ctx.Err()
|
||||
}
|
||||
|
||||
log.Info("deleting trash data", "filesCount", len(toDelete))
|
||||
if !dryRun {
|
||||
for i, p := range toDelete {
|
||||
fs.log.Warn("deleting trash data", "path", p)
|
||||
s.log.Warn("deleting trash data", "path", p)
|
||||
err := os.Remove(p)
|
||||
if err != nil {
|
||||
return i, err
|
||||
return toDelete[i:], err
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(toDelete), nil
|
||||
return toDelete, nil
|
||||
}
|
||||
|
||||
func (s *DataStorage) iterFiles(ctx context.Context, iter func(ctx context.Context, path string, entry fs.FileInfo) error) error {
|
||||
return filepath.Walk(s.baseDir,
|
||||
func(path string, info fs.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return iter(ctx, path, info)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *DataStorage) Dedupe(ctx context.Context) (uint64, error) {
|
||||
ctx, span := tracer.Start(ctx, fmt.Sprintf("Dedupe"))
|
||||
defer span.End()
|
||||
|
||||
log := rlog.FunctionLog(s.log, "Dedupe")
|
||||
|
||||
sizeMap := map[int64][]string{}
|
||||
err := s.iterFiles(ctx, func(ctx context.Context, path string, info fs.FileInfo) error {
|
||||
size := info.Size()
|
||||
sizeMap[size] = append(sizeMap[size], path)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
maps.DeleteFunc(sizeMap, func(k int64, v []string) bool {
|
||||
return len(v) <= 1
|
||||
})
|
||||
|
||||
span.AddEvent("collected files with same size", trace.WithAttributes(
|
||||
attribute.Int("count", len(sizeMap)),
|
||||
))
|
||||
|
||||
var deduped uint64 = 0
|
||||
|
||||
i := 0
|
||||
for _, paths := range sizeMap {
|
||||
if i%100 == 0 {
|
||||
log.Info("deduping in progress", "current", i, "total", len(sizeMap))
|
||||
}
|
||||
i++
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return deduped, ctx.Err()
|
||||
}
|
||||
|
||||
slices.Sort(paths)
|
||||
paths = slices.Compact(paths)
|
||||
if len(paths) <= 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
paths, err = applyErr(paths, filepath.Abs)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
dedupedGroup, err := s.dedupeFiles(ctx, paths)
|
||||
if err != nil {
|
||||
log.Error("Error applying dedupe", "files", paths, "error", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
if dedupedGroup > 0 {
|
||||
deduped += dedupedGroup
|
||||
log.Info("deduped file group",
|
||||
slog.String("files", fmt.Sprint(paths)),
|
||||
slog.String("deduped", humanize.Bytes(dedupedGroup)),
|
||||
slog.String("deduped_total", humanize.Bytes(deduped)),
|
||||
)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
func applyErr[E, O any](in []E, apply func(E) (O, error)) ([]O, error) {
|
||||
out := make([]O, 0, len(in))
|
||||
for _, p := range in {
|
||||
o, err := apply(p)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
out = append(out, o)
|
||||
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// const blockSize uint64 = 4096
|
||||
|
||||
func (s *DataStorage) dedupeFiles(ctx context.Context, paths []string) (deduped uint64, err error) {
|
||||
ctx, span := tracer.Start(ctx, fmt.Sprintf("dedupeFiles"), trace.WithAttributes(
|
||||
attribute.StringSlice("files", paths),
|
||||
))
|
||||
defer func() {
|
||||
span.SetAttributes(attribute.Int64("deduped", int64(deduped)))
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}()
|
||||
|
||||
log := rlog.FunctionLog(s.log, "dedupeFiles")
|
||||
|
||||
srcF, err := os.Open(paths[0])
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
defer srcF.Close()
|
||||
srcStat, err := srcF.Stat()
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
srcFd := int(srcF.Fd())
|
||||
srcSize := srcStat.Size()
|
||||
|
||||
fsStat := unix.Statfs_t{}
|
||||
err = unix.Fstatfs(srcFd, &fsStat)
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
srcHash, err := filehash(srcF)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
if fsStat.Bsize > srcSize { // for btrfs it means file in residing in not deduplicatable metadata
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
blockSize := uint64((srcSize % fsStat.Bsize) * fsStat.Bsize)
|
||||
|
||||
span.SetAttributes(attribute.Int64("blocksize", int64(blockSize)))
|
||||
|
||||
rng := unix.FileDedupeRange{
|
||||
Src_offset: 0,
|
||||
Src_length: blockSize,
|
||||
Info: []unix.FileDedupeRangeInfo{},
|
||||
}
|
||||
|
||||
for _, dst := range paths[1:] {
|
||||
if ctx.Err() != nil {
|
||||
return deduped, ctx.Err()
|
||||
}
|
||||
|
||||
destF, err := os.OpenFile(dst, os.O_RDWR, os.ModePerm)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
defer destF.Close()
|
||||
|
||||
dstHash, err := filehash(destF)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
if srcHash != dstHash {
|
||||
destF.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
rng.Info = append(rng.Info, unix.FileDedupeRangeInfo{
|
||||
Dest_fd: int64(destF.Fd()),
|
||||
Dest_offset: 0,
|
||||
})
|
||||
}
|
||||
|
||||
if len(rng.Info) == 0 {
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
log.Info("found same files, deduping", "files", paths, "size", humanize.Bytes(uint64(srcStat.Size())))
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return deduped, ctx.Err()
|
||||
}
|
||||
|
||||
rng.Src_offset = 0
|
||||
for i := range rng.Info {
|
||||
rng.Info[i].Dest_offset = 0
|
||||
}
|
||||
|
||||
err = unix.IoctlFileDedupeRange(srcFd, &rng)
|
||||
if err != nil {
|
||||
return deduped, err
|
||||
}
|
||||
|
||||
for i := range rng.Info {
|
||||
deduped += rng.Info[i].Bytes_deduped
|
||||
|
||||
rng.Info[i].Status = 0
|
||||
rng.Info[i].Bytes_deduped = 0
|
||||
}
|
||||
|
||||
return deduped, nil
|
||||
}
|
||||
|
||||
const compareBlockSize = 1024 * 128
|
||||
|
||||
func filehash(r io.Reader) ([20]byte, error) {
|
||||
buf := make([]byte, compareBlockSize)
|
||||
_, err := r.Read(buf)
|
||||
if err != nil && err != io.EOF {
|
||||
return [20]byte{}, err
|
||||
}
|
||||
|
||||
return sha1.Sum(buf), nil
|
||||
}
|
||||
|
||||
func ptr[D any](v D) *D {
|
||||
|
|
|
@ -1,21 +1,27 @@
|
|||
package service
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/rlog"
|
||||
"git.kmsign.ru/royalcat/tstor/src/config"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/controller"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/datastorage"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/store"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/tkv"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"go.uber.org/multierr"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
|
@ -27,6 +33,8 @@ import (
|
|||
"github.com/royalcat/kv"
|
||||
)
|
||||
|
||||
var tracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/src/service")
|
||||
|
||||
type DirAquire struct {
|
||||
Name string
|
||||
Hashes []infohash.T
|
||||
|
@ -39,9 +47,11 @@ type Service struct {
|
|||
|
||||
torrentLoaded chan struct{}
|
||||
|
||||
loadMutex sync.Mutex
|
||||
|
||||
// stats *Stats
|
||||
DefaultPriority types.PiecePriority
|
||||
Storage datastorage.DataStorage
|
||||
Storage *datastorage.DataStorage
|
||||
SourceDir string
|
||||
|
||||
dirsAquire kv.Store[string, DirAquire]
|
||||
|
@ -50,9 +60,9 @@ type Service struct {
|
|||
}
|
||||
|
||||
func NewService(sourceDir string, cfg config.TorrentClient, c *torrent.Client,
|
||||
storage datastorage.DataStorage, excludedFiles *store.FilesMappings, infoBytes *store.InfoBytes,
|
||||
storage *datastorage.DataStorage, excludedFiles *store.FilesMappings, infoBytes *store.InfoBytes,
|
||||
) (*Service, error) {
|
||||
dirsAcquire, err := kv.NewBadgerKV[string, DirAquire](path.Join(cfg.MetadataFolder, "dir-acquire"))
|
||||
dirsAcquire, err := tkv.New[string, DirAquire](cfg.MetadataFolder, "dir-acquire")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -66,12 +76,15 @@ func NewService(sourceDir string, cfg config.TorrentClient, c *torrent.Client,
|
|||
Storage: storage,
|
||||
SourceDir: sourceDir,
|
||||
torrentLoaded: make(chan struct{}),
|
||||
loadMutex: sync.Mutex{},
|
||||
dirsAquire: dirsAcquire,
|
||||
|
||||
// stats: newStats(), // TODO persistent
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := s.loadTorrentFiles(context.Background())
|
||||
ctx := context.Background()
|
||||
err := s.loadTorrentFiles(ctx)
|
||||
if err != nil {
|
||||
s.log.Error("initial torrent load failed", "error", err)
|
||||
}
|
||||
|
@ -89,20 +102,32 @@ func (s *Service) Close() error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (s *Service) AddTorrent(ctx context.Context, f vfs.File) (*torrent.Torrent, error) {
|
||||
func (s *Service) LoadTorrent(ctx context.Context, f vfs.File) (*torrent.Torrent, error) {
|
||||
ctx, span := tracer.Start(ctx, "LoadTorrent")
|
||||
defer span.End()
|
||||
|
||||
log := rlog.FunctionLog(s.log, "LoadTorrent")
|
||||
|
||||
defer f.Close(ctx)
|
||||
|
||||
stat, err := f.Stat()
|
||||
stat, err := f.Info()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("call stat failed: %w", err)
|
||||
}
|
||||
|
||||
mi, err := metainfo.Load(ctxio.IoReader(ctx, f))
|
||||
span.SetAttributes(attribute.String("filename", stat.Name()))
|
||||
|
||||
mi, err := metainfo.Load(bufio.NewReader(ctxio.IoReader(ctx, f)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading torrent metadata from file %s, error: %w", stat.Name(), err)
|
||||
}
|
||||
|
||||
t, ok := s.c.Torrent(mi.HashInfoBytes())
|
||||
if !ok {
|
||||
|
||||
span.AddEvent("torrent not found, loading from file")
|
||||
log.InfoContext(ctx, "torrent not found, loading from file")
|
||||
|
||||
spec, err := torrent.TorrentSpecFromMetaInfoErr(mi)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse spec from metadata: %w", err)
|
||||
|
@ -110,33 +135,18 @@ func (s *Service) AddTorrent(ctx context.Context, f vfs.File) (*torrent.Torrent,
|
|||
infoBytes := spec.InfoBytes
|
||||
|
||||
if !isValidInfoHashBytes(infoBytes) {
|
||||
log.WarnContext(ctx, "info loaded from spec not valid")
|
||||
infoBytes = nil
|
||||
}
|
||||
|
||||
if len(infoBytes) == 0 {
|
||||
log.InfoContext(ctx, "no info loaded from file, try to load from cache")
|
||||
infoBytes, err = s.infoBytes.GetBytes(spec.InfoHash)
|
||||
if err != nil && err != store.ErrNotFound {
|
||||
return nil, fmt.Errorf("get info bytes from database: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var info metainfo.Info
|
||||
err = bencode.Unmarshal(infoBytes, &info)
|
||||
if err != nil {
|
||||
infoBytes = nil
|
||||
} else {
|
||||
compatable, _, err := s.checkTorrentCompatable(ctx, spec.InfoHash, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !compatable {
|
||||
return nil, fmt.Errorf(
|
||||
"torrent with name '%s' not compatable existing infohash: %s, new: %s",
|
||||
t.Name(), t.InfoHash().HexString(), spec.InfoHash.HexString(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
t, _ = s.c.AddTorrentOpt(torrent.AddTorrentOpts{
|
||||
InfoHash: spec.InfoHash,
|
||||
Storage: s.Storage,
|
||||
|
@ -146,18 +156,33 @@ func (s *Service) AddTorrent(ctx context.Context, f vfs.File) (*torrent.Torrent,
|
|||
t.AllowDataDownload()
|
||||
t.AllowDataUpload()
|
||||
|
||||
span.AddEvent("torrent added to client")
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, fmt.Errorf("creating torrent timed out")
|
||||
return nil, ctx.Err()
|
||||
case <-t.GotInfo():
|
||||
err := s.infoBytes.Set(t.InfoHash(), t.Metainfo())
|
||||
if err != nil {
|
||||
s.log.Error("error setting info bytes for torrent %s: %s", t.Name(), err.Error())
|
||||
}
|
||||
for _, f := range t.Files() {
|
||||
f.SetPriority(s.DefaultPriority)
|
||||
}
|
||||
}
|
||||
span.AddEvent("got info")
|
||||
|
||||
info := t.Info()
|
||||
if info == nil {
|
||||
return nil, fmt.Errorf("info is nil")
|
||||
}
|
||||
|
||||
compatable, _, err := s.checkTorrentCompatable(ctx, spec.InfoHash, *info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !compatable {
|
||||
return nil, fmt.Errorf(
|
||||
"torrent with name '%s' not compatable existing infohash: %s, new: %s",
|
||||
t.Name(), t.InfoHash().HexString(), spec.InfoHash.HexString(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -271,15 +296,15 @@ func (s *Service) checkTorrentFilesCompatable(aq DirAquire, existingFiles, newFi
|
|||
return true
|
||||
}
|
||||
|
||||
func (s *Service) getTorrentsByName(name string) []*torrent.Torrent {
|
||||
out := []*torrent.Torrent{}
|
||||
for _, t := range s.c.Torrents() {
|
||||
if t.Name() == name {
|
||||
out = append(out, t)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
// func (s *Service) getTorrentsByName(name string) []*torrent.Torrent {
|
||||
// out := []*torrent.Torrent{}
|
||||
// for _, t := range s.c.Torrents() {
|
||||
// if t.Name() == name {
|
||||
// out = append(out, t)
|
||||
// }
|
||||
// }
|
||||
// return out
|
||||
// }
|
||||
|
||||
func isValidInfoHashBytes(d []byte) bool {
|
||||
var info metainfo.Info
|
||||
|
@ -290,12 +315,12 @@ func isValidInfoHashBytes(d []byte) bool {
|
|||
func (s *Service) NewTorrentFs(ctx context.Context, f vfs.File) (vfs.Filesystem, error) {
|
||||
defer f.Close(ctx)
|
||||
|
||||
info, err := f.Stat()
|
||||
info, err := f.Info()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t, err := s.AddTorrent(ctx, f)
|
||||
t, err := s.LoadTorrent(ctx, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -311,7 +336,46 @@ func (s *Service) GetStats() torrent.ConnStats {
|
|||
return s.c.ConnStats()
|
||||
}
|
||||
|
||||
const loadWorkers = 5
|
||||
|
||||
func (s *Service) loadTorrentFiles(ctx context.Context) error {
|
||||
ctx, span := tracer.Start(ctx, "loadTorrentFiles", trace.WithAttributes(
|
||||
attribute.Int("workers", loadWorkers),
|
||||
))
|
||||
defer span.End()
|
||||
|
||||
log := rlog.FunctionLog(s.log, "loadTorrentFiles")
|
||||
|
||||
loaderPaths := make(chan string)
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
defer func() {
|
||||
close(loaderPaths)
|
||||
wg.Wait()
|
||||
}()
|
||||
|
||||
loaderWorker := func() {
|
||||
wg.Add(1)
|
||||
for path := range loaderPaths {
|
||||
file, err := vfs.NewLazyOsFile(path)
|
||||
if err != nil {
|
||||
log.Error("error opening torrent file", "filename", path, rlog.Err(err))
|
||||
continue
|
||||
}
|
||||
defer file.Close(ctx)
|
||||
|
||||
_, err = s.LoadTorrent(ctx, file)
|
||||
if err != nil {
|
||||
s.log.Error("failed adding torrent", "error", err)
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
for range loadWorkers {
|
||||
go loaderWorker()
|
||||
}
|
||||
|
||||
return filepath.Walk(s.SourceDir, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("fs walk error: %w", err)
|
||||
|
@ -326,13 +390,7 @@ func (s *Service) loadTorrentFiles(ctx context.Context) error {
|
|||
}
|
||||
|
||||
if strings.HasSuffix(path, ".torrent") {
|
||||
file := vfs.NewLazyOsFile(path)
|
||||
defer file.Close(ctx)
|
||||
|
||||
_, err = s.AddTorrent(ctx, file)
|
||||
if err != nil {
|
||||
s.log.Error("failed adding torrent", "error", err)
|
||||
}
|
||||
loaderPaths <- path
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
21
src/host/tkv/new.go
Normal file
21
src/host/tkv/new.go
Normal file
|
@ -0,0 +1,21 @@
|
|||
package tkv
|
||||
|
||||
import (
|
||||
"path"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/kvtrace"
|
||||
"github.com/royalcat/kv"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
func New[K kv.Bytes, V any](dbdir, name string) (store kv.Store[K, V], err error) {
|
||||
dir := path.Join(dbdir, name)
|
||||
store, err = kv.NewBadgerKV[K, V](dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
store = kvtrace.WrapTracing(store, attribute.String("collection", name), attribute.String("database", "badger"))
|
||||
|
||||
return store, err
|
||||
}
|
|
@ -3,40 +3,40 @@ package vfs
|
|||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
|
||||
"git.kmsign.ru/royalcat/tstor/src/iio"
|
||||
"github.com/bodgit/sevenzip"
|
||||
"github.com/nwaples/rardecode/v2"
|
||||
)
|
||||
|
||||
var ArchiveFactories = map[string]FsFactory{
|
||||
".zip": func(ctx context.Context, f File) (Filesystem, error) {
|
||||
stat, err := f.Stat()
|
||||
stat, err := f.Info()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewArchive(ctx, stat.Name(), f, stat.Size(), ZipLoader), nil
|
||||
return NewArchive(ctx, stat.Name(), f, stat.Size(), ZipLoader)
|
||||
},
|
||||
".rar": func(ctx context.Context, f File) (Filesystem, error) {
|
||||
stat, err := f.Stat()
|
||||
stat, err := f.Info()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewArchive(ctx, stat.Name(), f, stat.Size(), RarLoader), nil
|
||||
return NewArchive(ctx, stat.Name(), f, stat.Size(), RarLoader)
|
||||
},
|
||||
".7z": func(ctx context.Context, f File) (Filesystem, error) {
|
||||
stat, err := f.Stat()
|
||||
stat, err := f.Info()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewArchive(ctx, stat.Name(), f, stat.Size(), SevenZipLoader), nil
|
||||
return NewArchive(ctx, stat.Name(), f, stat.Size(), SevenZipLoader)
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -47,52 +47,73 @@ var _ Filesystem = &ArchiveFS{}
|
|||
type ArchiveFS struct {
|
||||
name string
|
||||
|
||||
r ctxio.ReaderAt
|
||||
size int64
|
||||
|
||||
Size int64
|
||||
|
||||
files func() (map[string]File, error)
|
||||
files map[string]File
|
||||
}
|
||||
|
||||
func NewArchive(ctx context.Context, name string, r ctxio.ReaderAt, size int64, loader archiveLoader) *ArchiveFS {
|
||||
return &ArchiveFS{
|
||||
name: name,
|
||||
r: r,
|
||||
Size: size,
|
||||
files: OnceValueWOErr(func() (map[string]File, error) {
|
||||
zipFiles, err := loader(ctx, r, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO make optional
|
||||
singleDir := true
|
||||
for k := range zipFiles {
|
||||
if !strings.HasPrefix(k, "/"+name+"/") {
|
||||
singleDir = false
|
||||
break
|
||||
}
|
||||
}
|
||||
// ModTime implements Filesystem.
|
||||
func (a *ArchiveFS) ModTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
files := make(map[string]File, len(zipFiles))
|
||||
for k, v := range zipFiles {
|
||||
// TODO make optional
|
||||
if strings.Contains(k, "/__MACOSX/") {
|
||||
continue
|
||||
}
|
||||
// Mode implements Filesystem.
|
||||
func (a *ArchiveFS) Mode() fs.FileMode {
|
||||
return fs.ModeDir
|
||||
}
|
||||
|
||||
if singleDir {
|
||||
k, _ = strings.CutPrefix(k, "/"+name)
|
||||
}
|
||||
// Size implements Filesystem.
|
||||
func (a *ArchiveFS) Size() int64 {
|
||||
return int64(a.size)
|
||||
}
|
||||
|
||||
files[k] = v
|
||||
}
|
||||
// Sys implements Filesystem.
|
||||
func (a *ArchiveFS) Sys() any {
|
||||
return nil
|
||||
}
|
||||
|
||||
// FIXME
|
||||
files["/.forcegallery"] = NewMemoryFile(".forcegallery", []byte{})
|
||||
// FsName implements Filesystem.
|
||||
func (a *ArchiveFS) FsName() string {
|
||||
return "archivefs"
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}),
|
||||
func NewArchive(ctx context.Context, name string, r ctxio.ReaderAt, size int64, loader archiveLoader) (*ArchiveFS, error) {
|
||||
archiveFiles, err := loader(ctx, r, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO make optional
|
||||
singleDir := true
|
||||
for k := range archiveFiles {
|
||||
if !strings.HasPrefix(k, "/"+name+"/") {
|
||||
singleDir = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
files := make(map[string]File, len(archiveFiles))
|
||||
for k, v := range archiveFiles {
|
||||
// TODO make optional
|
||||
if strings.Contains(k, "/__MACOSX/") {
|
||||
continue
|
||||
}
|
||||
|
||||
if singleDir {
|
||||
k, _ = strings.CutPrefix(k, "/"+name)
|
||||
}
|
||||
|
||||
files[k] = v
|
||||
}
|
||||
|
||||
// FIXME
|
||||
files["/.forcegallery"] = NewMemoryFile(".forcegallery", []byte{})
|
||||
|
||||
return &ArchiveFS{
|
||||
name: name,
|
||||
size: size,
|
||||
files: files,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Unlink implements Filesystem.
|
||||
|
@ -101,35 +122,21 @@ func (a *ArchiveFS) Unlink(ctx context.Context, filename string) error {
|
|||
}
|
||||
|
||||
func (a *ArchiveFS) Open(ctx context.Context, filename string) (File, error) {
|
||||
files, err := a.files()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return getFile(files, filename)
|
||||
return getFile(a.files, filename)
|
||||
}
|
||||
|
||||
func (fs *ArchiveFS) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
|
||||
files, err := fs.files()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return listDirFromFiles(files, path)
|
||||
func (a *ArchiveFS) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
|
||||
return listDirFromFiles(a.files, path)
|
||||
}
|
||||
|
||||
// Stat implements Filesystem.
|
||||
func (afs *ArchiveFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
|
||||
files, err := afs.files()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
if file, ok := afs.files[filename]; ok {
|
||||
return file.Info()
|
||||
}
|
||||
|
||||
if file, ok := files[filename]; ok {
|
||||
return file.Stat()
|
||||
}
|
||||
|
||||
for p, _ := range files {
|
||||
for p, _ := range afs.files {
|
||||
if strings.HasPrefix(p, filename) {
|
||||
return newDirInfo(path.Base(filename)), nil
|
||||
}
|
||||
|
@ -140,11 +147,7 @@ func (afs *ArchiveFS) Stat(ctx context.Context, filename string) (fs.FileInfo, e
|
|||
|
||||
// Info implements Filesystem.
|
||||
func (a *ArchiveFS) Info() (fs.FileInfo, error) {
|
||||
return &fileInfo{
|
||||
name: a.name,
|
||||
size: a.Size,
|
||||
isDir: true,
|
||||
}, nil
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// IsDir implements Filesystem.
|
||||
|
@ -162,42 +165,46 @@ func (a *ArchiveFS) Type() fs.FileMode {
|
|||
return fs.ModeDir
|
||||
}
|
||||
|
||||
var _ File = &archiveFile{}
|
||||
var _ File = (*archiveFile)(nil)
|
||||
|
||||
func NewArchiveFile(name string, readerFunc func() (iio.Reader, error), size int64) *archiveFile {
|
||||
func NewArchiveFile(name string, size int64, af archiveFileReaderFactory) *archiveFile {
|
||||
return &archiveFile{
|
||||
name: name,
|
||||
readerFunc: readerFunc,
|
||||
size: size,
|
||||
name: name,
|
||||
size: size,
|
||||
af: af,
|
||||
|
||||
buffer: ctxio.NewFileBuffer(nil),
|
||||
}
|
||||
}
|
||||
|
||||
const readahead = 1024 * 16
|
||||
|
||||
type archiveFile struct {
|
||||
name string
|
||||
size int64
|
||||
af archiveFileReaderFactory
|
||||
|
||||
readerFunc func() (iio.Reader, error)
|
||||
reader iio.Reader
|
||||
size int64
|
||||
m sync.Mutex
|
||||
|
||||
offset int64
|
||||
readen int64
|
||||
buffer *ctxio.FileBuffer
|
||||
}
|
||||
|
||||
func (d *archiveFile) Stat() (fs.FileInfo, error) {
|
||||
// Name implements File.
|
||||
func (d *archiveFile) Name() string {
|
||||
return d.name
|
||||
}
|
||||
|
||||
// Type implements File.
|
||||
func (d *archiveFile) Type() fs.FileMode {
|
||||
return roMode
|
||||
}
|
||||
|
||||
func (d *archiveFile) Info() (fs.FileInfo, error) {
|
||||
return newFileInfo(d.name, d.size), nil
|
||||
}
|
||||
|
||||
func (d *archiveFile) load() error {
|
||||
if d.reader != nil {
|
||||
return nil
|
||||
}
|
||||
r, err := d.readerFunc()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.reader = r
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *archiveFile) Size() int64 {
|
||||
return d.size
|
||||
}
|
||||
|
@ -206,31 +213,60 @@ func (d *archiveFile) IsDir() bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func (d *archiveFile) Close(ctx context.Context) (err error) {
|
||||
if d.reader != nil {
|
||||
err = d.reader.Close()
|
||||
d.reader = nil
|
||||
func (d *archiveFile) Close(ctx context.Context) error {
|
||||
return d.buffer.Close(ctx)
|
||||
}
|
||||
|
||||
func (d *archiveFile) loadMore(ctx context.Context, to int64) error {
|
||||
d.m.Lock()
|
||||
defer d.m.Unlock()
|
||||
|
||||
if to < d.readen {
|
||||
return nil
|
||||
}
|
||||
|
||||
return
|
||||
reader, err := d.af(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get file reader: %w", err)
|
||||
}
|
||||
_, err = d.buffer.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to seek to start of the file: %w", err)
|
||||
}
|
||||
d.readen, err = ctxio.CopyN(ctx, d.buffer, ctxio.WrapIoReader(reader), to+readahead)
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("error copying from archive file reader: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *archiveFile) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
if err := d.load(); err != nil {
|
||||
return 0, err
|
||||
err = d.loadMore(ctx, d.offset+int64(len(p)))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to load more from archive file: %w", err)
|
||||
}
|
||||
|
||||
return d.reader.Read(p)
|
||||
n, err = d.buffer.Read(ctx, p)
|
||||
if err != nil && err != io.EOF {
|
||||
return n, fmt.Errorf("failed to read from buffer: %w", err)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (d *archiveFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
if err := d.load(); err != nil {
|
||||
return 0, err
|
||||
err = d.loadMore(ctx, off+int64(len(p)))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to load more from archive file: %w", err)
|
||||
}
|
||||
|
||||
return d.reader.ReadAt(p, off)
|
||||
n, err = d.buffer.ReadAt(ctx, p, off)
|
||||
if err != nil && err != io.EOF {
|
||||
return n, fmt.Errorf("failed to read from buffer: %w", err)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
type archiveFileReaderFactory func(ctx context.Context) (io.ReadCloser, error)
|
||||
|
||||
var _ archiveLoader = ZipLoader
|
||||
|
||||
func ZipLoader(ctx context.Context, ctxreader ctxio.ReaderAt, size int64) (map[string]*archiveFile, error) {
|
||||
|
@ -248,16 +284,24 @@ func ZipLoader(ctx context.Context, ctxreader ctxio.ReaderAt, size int64) (map[s
|
|||
continue
|
||||
}
|
||||
|
||||
rf := func() (iio.Reader, error) {
|
||||
zr, err := zipFile.Open()
|
||||
i := i
|
||||
af := func(ctx context.Context) (io.ReadCloser, error) {
|
||||
reader := ctxio.IoReaderAt(ctx, ctxreader)
|
||||
|
||||
zr, err := zip.NewReader(reader, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return iio.NewDiskTeeReader(zr)
|
||||
rc, err := zr.File[i].Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
out[AbsPath(zipFile.Name)] = NewArchiveFile(zipFile.Name, rf, zipFile.FileInfo().Size())
|
||||
out[AbsPath(zipFile.Name)] = NewArchiveFile(zipFile.Name, zipFile.FileInfo().Size(), af)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
|
@ -274,25 +318,29 @@ func SevenZipLoader(ctx context.Context, ctxreader ctxio.ReaderAt, size int64) (
|
|||
}
|
||||
|
||||
out := make(map[string]*archiveFile)
|
||||
for _, f := range r.File {
|
||||
for i, f := range r.File {
|
||||
f := f
|
||||
if f.FileInfo().IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
rf := func() (iio.Reader, error) {
|
||||
zr, err := f.Open()
|
||||
i := i
|
||||
af := func(ctx context.Context) (io.ReadCloser, error) {
|
||||
reader := ctxio.IoReaderAt(ctx, ctxreader)
|
||||
zr, err := sevenzip.NewReader(reader, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return iio.NewDiskTeeReader(zr)
|
||||
rc, err := zr.File[i].Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
af := NewArchiveFile(f.Name, rf, f.FileInfo().Size())
|
||||
n := filepath.Join(string(os.PathSeparator), f.Name)
|
||||
|
||||
out[n] = af
|
||||
out[AbsPath(f.Name)] = NewArchiveFile(f.Name, f.FileInfo().Size(), af)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
|
@ -318,15 +366,26 @@ func RarLoader(ctx context.Context, ctxreader ctxio.ReaderAt, size int64) (map[s
|
|||
return nil, err
|
||||
}
|
||||
|
||||
rf := func() (iio.Reader, error) {
|
||||
return iio.NewDiskTeeReader(r)
|
||||
name := header.Name
|
||||
af := func(ctx context.Context) (io.ReadCloser, error) {
|
||||
reader := ctxio.IoReadSeekerWrapper(ctx, ctxreader, size)
|
||||
r, err := rardecode.NewReader(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for header, err := r.Next(); err != io.EOF; header, err = r.Next() {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if header.Name == name {
|
||||
return io.NopCloser(r), nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("file with name '%s' not found", name)
|
||||
}
|
||||
|
||||
n := filepath.Join(string(os.PathSeparator), header.Name)
|
||||
|
||||
af := NewArchiveFile(header.Name, rf, header.UnPackedSize)
|
||||
|
||||
out[n] = af
|
||||
out[AbsPath(header.Name)] = NewArchiveFile(header.Name, header.UnPackedSize, af)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
package vfs
|
||||
package vfs_test
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
|
@ -8,9 +8,35 @@ import (
|
|||
"testing"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
|
||||
"git.kmsign.ru/royalcat/tstor/src/host/vfs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TODO
|
||||
// func TestArchiveFactories(t *testing.T) {
|
||||
// t.Parallel()
|
||||
|
||||
// ctx := context.Background()
|
||||
|
||||
// require := require.New(t)
|
||||
|
||||
// require.Contains(vfs.ArchiveFactories, ".zip")
|
||||
// require.Contains(vfs.ArchiveFactories, ".rar")
|
||||
// require.Contains(vfs.ArchiveFactories, ".7z")
|
||||
|
||||
// fs, err := vfs.ArchiveFactories[".zip"](ctx, &vfs.DummyFile{})
|
||||
// require.NoError(err)
|
||||
// require.NotNil(fs)
|
||||
|
||||
// fs, err = vfs.ArchiveFactories[".rar"](ctx, &vfs.DummyFile{})
|
||||
// require.NoError(err)
|
||||
// require.NotNil(fs)
|
||||
|
||||
// fs, err = vfs.ArchiveFactories[".7z"](ctx, &vfs.DummyFile{})
|
||||
// require.NoError(err)
|
||||
// require.NotNil(fs)
|
||||
// }
|
||||
|
||||
var fileContent []byte = []byte("Hello World")
|
||||
|
||||
func TestZipFilesystem(t *testing.T) {
|
||||
|
@ -22,7 +48,8 @@ func TestZipFilesystem(t *testing.T) {
|
|||
ctx := context.Background()
|
||||
|
||||
// TODO add single dir collapse test
|
||||
zfs := NewArchive(ctx, "test", zReader, size, ZipLoader)
|
||||
zfs, err := vfs.NewArchive(ctx, "test", zReader, size, vfs.ZipLoader)
|
||||
require.NoError(err)
|
||||
|
||||
files, err := zfs.ReadDir(ctx, "/path/to/test/file")
|
||||
require.NoError(err)
|
||||
|
|
|
@ -6,39 +6,54 @@ import (
|
|||
"path"
|
||||
)
|
||||
|
||||
var _ File = &dir{}
|
||||
var _ File = &dirFile{}
|
||||
|
||||
func NewDir(name string) File {
|
||||
return &dir{
|
||||
func newDirFile(name string) File {
|
||||
return &dirFile{
|
||||
name: path.Base(name),
|
||||
}
|
||||
}
|
||||
|
||||
type dir struct {
|
||||
type dirFile struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// Info implements File.
|
||||
func (d *dir) Stat() (fs.FileInfo, error) {
|
||||
return newDirInfo(d.name), nil
|
||||
}
|
||||
|
||||
func (d *dir) Size() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *dir) IsDir() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *dir) Close(ctx context.Context) error {
|
||||
// Close implements File.
|
||||
func (d *dirFile) Close(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dir) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
return 0, nil
|
||||
// Info implements File.
|
||||
func (d *dirFile) Info() (fs.FileInfo, error) {
|
||||
return newDirInfo(d.name), nil
|
||||
}
|
||||
|
||||
func (d *dir) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
return 0, nil
|
||||
// IsDir implements File.
|
||||
func (d *dirFile) IsDir() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Name implements File.
|
||||
func (d *dirFile) Name() string {
|
||||
return d.name
|
||||
}
|
||||
|
||||
// Read implements File.
|
||||
func (d *dirFile) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
return 0, fs.ErrInvalid
|
||||
}
|
||||
|
||||
// ReadAt implements File.
|
||||
func (d *dirFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
return 0, fs.ErrInvalid
|
||||
}
|
||||
|
||||
// Size implements File.
|
||||
func (d *dirFile) Size() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Type implements File.
|
||||
func (d *dirFile) Type() fs.FileMode {
|
||||
return roMode | fs.ModeDir
|
||||
}
|
||||
|
|
125
src/host/vfs/dummy.go
Normal file
125
src/host/vfs/dummy.go
Normal file
|
@ -0,0 +1,125 @@
|
|||
package vfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
var _ Filesystem = &DummyFs{}
|
||||
|
||||
type DummyFs struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// ModTime implements Filesystem.
|
||||
func (d *DummyFs) ModTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// Mode implements Filesystem.
|
||||
func (d *DummyFs) Mode() fs.FileMode {
|
||||
return fs.ModeDir
|
||||
}
|
||||
|
||||
// Size implements Filesystem.
|
||||
func (d *DummyFs) Size() int64 {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
// Sys implements Filesystem.
|
||||
func (d *DummyFs) Sys() any {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
// FsName implements Filesystem.
|
||||
func (d *DummyFs) FsName() string {
|
||||
return "dummyfs"
|
||||
}
|
||||
|
||||
// Stat implements Filesystem.
|
||||
func (*DummyFs) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
|
||||
return newFileInfo(path.Base(filename), 0), nil // TODO
|
||||
}
|
||||
|
||||
func (d *DummyFs) Open(ctx context.Context, filename string) (File, error) {
|
||||
return &DummyFile{}, nil
|
||||
}
|
||||
|
||||
func (d *DummyFs) Unlink(ctx context.Context, filename string) error {
|
||||
return ErrNotImplemented
|
||||
}
|
||||
|
||||
func (d *DummyFs) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
|
||||
if path == "/dir/here" {
|
||||
return []fs.DirEntry{
|
||||
newFileInfo("file1.txt", 0),
|
||||
newFileInfo("file2.txt", 0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
// Info implements Filesystem.
|
||||
func (d *DummyFs) Info() (fs.FileInfo, error) {
|
||||
return newDirInfo(d.name), nil
|
||||
}
|
||||
|
||||
// IsDir implements Filesystem.
|
||||
func (d *DummyFs) IsDir() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Name implements Filesystem.
|
||||
func (d *DummyFs) Name() string {
|
||||
return d.name
|
||||
}
|
||||
|
||||
// Type implements Filesystem.
|
||||
func (d *DummyFs) Type() fs.FileMode {
|
||||
return fs.ModeDir
|
||||
}
|
||||
|
||||
var _ File = &DummyFile{}
|
||||
|
||||
type DummyFile struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// Name implements File.
|
||||
func (d *DummyFile) Name() string {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
// Type implements File.
|
||||
func (d *DummyFile) Type() fs.FileMode {
|
||||
panic("unimplemented")
|
||||
}
|
||||
|
||||
// Stat implements File.
|
||||
func (d *DummyFile) Info() (fs.FileInfo, error) {
|
||||
return newFileInfo(d.name, 0), nil
|
||||
}
|
||||
|
||||
func (d *DummyFile) Size() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (d *DummyFile) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *DummyFile) Close(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *DummyFile) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (d *DummyFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
return 0, nil
|
||||
}
|
|
@ -8,12 +8,14 @@ import (
|
|||
"time"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
|
||||
"go.opentelemetry.io/otel"
|
||||
)
|
||||
|
||||
type File interface {
|
||||
IsDir() bool
|
||||
Size() int64
|
||||
Stat() (fs.FileInfo, error)
|
||||
|
||||
fs.DirEntry
|
||||
|
||||
ctxio.Reader
|
||||
ctxio.ReaderAt
|
||||
|
@ -22,6 +24,8 @@ type File interface {
|
|||
|
||||
var ErrNotImplemented = errors.New("not implemented")
|
||||
|
||||
var tracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/src/host/vfs")
|
||||
|
||||
type Filesystem interface {
|
||||
// Open opens the named file for reading. If successful, methods on the
|
||||
// returned file can be used for reading; the associated file descriptor has
|
||||
|
@ -35,10 +39,12 @@ type Filesystem interface {
|
|||
Stat(ctx context.Context, filename string) (fs.FileInfo, error)
|
||||
Unlink(ctx context.Context, filename string) error
|
||||
|
||||
// As filesystem mounted to some path, make sense to have the filesystem implement DirEntry
|
||||
fs.DirEntry
|
||||
}
|
||||
|
||||
const defaultMode = fs.FileMode(0555)
|
||||
// readonly
|
||||
const roMode = fs.FileMode(0555)
|
||||
|
||||
type fileInfo struct {
|
||||
name string
|
||||
|
@ -87,10 +93,10 @@ func (fi *fileInfo) Size() int64 {
|
|||
|
||||
func (fi *fileInfo) Mode() fs.FileMode {
|
||||
if fi.isDir {
|
||||
return defaultMode | fs.ModeDir
|
||||
return roMode | fs.ModeDir
|
||||
}
|
||||
|
||||
return defaultMode
|
||||
return roMode
|
||||
}
|
||||
|
||||
func (fi *fileInfo) ModTime() time.Time {
|
||||
|
|
|
@ -37,7 +37,7 @@ func TestDirInfo(t *testing.T) {
|
|||
require.NotNil(fi.ModTime())
|
||||
require.NotZero(fi.Type() & fs.ModeDir)
|
||||
require.NotZero(fi.Mode() & fs.ModeDir)
|
||||
require.Equal(defaultMode|fs.ModeDir, fi.Mode())
|
||||
require.Equal(roMode|fs.ModeDir, fi.Mode())
|
||||
require.Nil(fi.Sys())
|
||||
|
||||
}
|
||||
|
|
|
@ -5,22 +5,62 @@ import (
|
|||
"io/fs"
|
||||
"log/slog"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"git.kmsign.ru/royalcat/tstor/pkg/rlog"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
type LogFS struct {
|
||||
fs Filesystem
|
||||
log *slog.Logger
|
||||
|
||||
timeout time.Duration
|
||||
readTimeout time.Duration
|
||||
}
|
||||
|
||||
var _ Filesystem = (*LogFS)(nil)
|
||||
|
||||
func WrapLogFS(fs Filesystem, log *slog.Logger) *LogFS {
|
||||
func WrapLogFS(fs Filesystem) *LogFS {
|
||||
return &LogFS{
|
||||
fs: fs,
|
||||
log: log.With("component", "fs"),
|
||||
fs: fs,
|
||||
log: rlog.ComponentLog("fs"),
|
||||
timeout: time.Minute * 3,
|
||||
readTimeout: time.Minute,
|
||||
}
|
||||
}
|
||||
|
||||
// ModTime implements Filesystem.
|
||||
func (lfs *LogFS) ModTime() time.Time {
|
||||
return lfs.ModTime()
|
||||
}
|
||||
|
||||
// Mode implements Filesystem.
|
||||
func (lfs *LogFS) Mode() fs.FileMode {
|
||||
return lfs.Mode()
|
||||
}
|
||||
|
||||
// Size implements Filesystem.
|
||||
func (lfs *LogFS) Size() int64 {
|
||||
return lfs.Size()
|
||||
}
|
||||
|
||||
// Sys implements Filesystem.
|
||||
func (lfs *LogFS) Sys() any {
|
||||
return lfs.Sys()
|
||||
}
|
||||
|
||||
func (fs *LogFS) FsName() string {
|
||||
return "logfs"
|
||||
}
|
||||
|
||||
func (fs *LogFS) traceAttrs(add ...attribute.KeyValue) trace.SpanStartOption {
|
||||
return trace.WithAttributes(append([]attribute.KeyValue{
|
||||
attribute.String("fs", fs.FsName()),
|
||||
}, add...)...)
|
||||
}
|
||||
|
||||
// Info implements Filesystem.
|
||||
func (fs *LogFS) Info() (fs.FileInfo, error) {
|
||||
return fs.fs.Info()
|
||||
|
@ -42,36 +82,84 @@ func (fs *LogFS) Type() fs.FileMode {
|
|||
}
|
||||
|
||||
// Open implements Filesystem.
|
||||
func (fs *LogFS) Open(ctx context.Context, filename string) (File, error) {
|
||||
file, err := fs.fs.Open(ctx, filename)
|
||||
func (fs *LogFS) Open(ctx context.Context, filename string) (file File, err error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, fs.timeout)
|
||||
defer cancel()
|
||||
ctx, span := tracer.Start(ctx, "Open",
|
||||
fs.traceAttrs(attribute.String("filename", filename)),
|
||||
)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}()
|
||||
|
||||
file, err = fs.fs.Open(ctx, filename)
|
||||
if err != nil {
|
||||
fs.log.With("filename", filename).Error("Failed to open file")
|
||||
}
|
||||
file = WrapLogFile(file, filename, fs.log)
|
||||
file = WrapLogFile(file, filename, fs.log, fs.readTimeout)
|
||||
return file, err
|
||||
}
|
||||
|
||||
// ReadDir implements Filesystem.
|
||||
func (fs *LogFS) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
|
||||
file, err := fs.fs.ReadDir(ctx, path)
|
||||
func (fs *LogFS) ReadDir(ctx context.Context, path string) (entries []fs.DirEntry, err error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, fs.timeout)
|
||||
defer cancel()
|
||||
ctx, span := tracer.Start(ctx, "ReadDir",
|
||||
fs.traceAttrs(attribute.String("path", path)),
|
||||
)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}()
|
||||
|
||||
entries, err = fs.fs.ReadDir(ctx, path)
|
||||
if err != nil {
|
||||
fs.log.ErrorContext(ctx, "Failed to read dir", "path", path, "error", err.Error(), "fs-type", reflect.TypeOf(fs.fs).Name())
|
||||
}
|
||||
return file, err
|
||||
return entries, err
|
||||
}
|
||||
|
||||
// Stat implements Filesystem.
|
||||
func (fs *LogFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
|
||||
file, err := fs.fs.Stat(ctx, filename)
|
||||
func (fs *LogFS) Stat(ctx context.Context, filename string) (info fs.FileInfo, err error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, fs.timeout)
|
||||
defer cancel()
|
||||
ctx, span := tracer.Start(ctx, "Stat",
|
||||
fs.traceAttrs(attribute.String("filename", filename)),
|
||||
)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}()
|
||||
|
||||
info, err = fs.fs.Stat(ctx, filename)
|
||||
if err != nil {
|
||||
fs.log.Error("Failed to stat", "filename", filename, "error", err)
|
||||
}
|
||||
return file, err
|
||||
return info, err
|
||||
}
|
||||
|
||||
// Unlink implements Filesystem.
|
||||
func (fs *LogFS) Unlink(ctx context.Context, filename string) error {
|
||||
err := fs.fs.Unlink(ctx, filename)
|
||||
func (fs *LogFS) Unlink(ctx context.Context, filename string) (err error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, fs.timeout)
|
||||
defer cancel()
|
||||
ctx, span := tracer.Start(ctx, "Unlink",
|
||||
fs.traceAttrs(attribute.String("filename", filename)),
|
||||
)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}()
|
||||
|
||||
err = fs.fs.Unlink(ctx, filename)
|
||||
if err != nil {
|
||||
fs.log.Error("Failed to stat", "filename", filename, "error", err)
|
||||
}
|
||||
|
@ -79,24 +167,51 @@ func (fs *LogFS) Unlink(ctx context.Context, filename string) error {
|
|||
}
|
||||
|
||||
type LogFile struct {
|
||||
f File
|
||||
log *slog.Logger
|
||||
filename string
|
||||
f File
|
||||
|
||||
log *slog.Logger
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// Name implements File.
|
||||
func (f *LogFile) Name() string {
|
||||
return f.f.Name()
|
||||
}
|
||||
|
||||
// Type implements File.
|
||||
func (f *LogFile) Type() fs.FileMode {
|
||||
return f.f.Type()
|
||||
}
|
||||
|
||||
var _ File = (*LogFile)(nil)
|
||||
|
||||
func WrapLogFile(f File, filename string, log *slog.Logger) *LogFile {
|
||||
func WrapLogFile(f File, filename string, log *slog.Logger, timeout time.Duration) *LogFile {
|
||||
return &LogFile{
|
||||
f: f,
|
||||
log: log.With("filename", filename),
|
||||
filename: filename,
|
||||
f: f,
|
||||
log: log.With("filename", filename),
|
||||
timeout: timeout,
|
||||
}
|
||||
}
|
||||
|
||||
// Close implements File.
|
||||
func (f *LogFile) Close(ctx context.Context) error {
|
||||
err := f.f.Close(ctx)
|
||||
func (f *LogFile) Close(ctx context.Context) (err error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, f.timeout)
|
||||
defer cancel()
|
||||
ctx, span := tracer.Start(ctx, "Close",
|
||||
trace.WithAttributes(attribute.String("filename", f.filename)),
|
||||
)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}()
|
||||
|
||||
err = f.f.Close(ctx)
|
||||
if err != nil {
|
||||
f.log.Error("Failed to close", "error", err)
|
||||
f.log.ErrorContext(ctx, "Failed to close", "error", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
@ -108,6 +223,22 @@ func (f *LogFile) IsDir() bool {
|
|||
|
||||
// Read implements File.
|
||||
func (f *LogFile) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, f.timeout)
|
||||
defer cancel()
|
||||
ctx, span := tracer.Start(ctx, "Read",
|
||||
trace.WithAttributes(
|
||||
attribute.String("filename", f.filename),
|
||||
attribute.Int("length", len(p)),
|
||||
),
|
||||
)
|
||||
defer func() {
|
||||
span.SetAttributes(attribute.Int("read", n))
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}()
|
||||
|
||||
n, err = f.f.Read(ctx, p)
|
||||
if err != nil {
|
||||
f.log.Error("Failed to read", "error", err)
|
||||
|
@ -117,6 +248,22 @@ func (f *LogFile) Read(ctx context.Context, p []byte) (n int, err error) {
|
|||
|
||||
// ReadAt implements File.
|
||||
func (f *LogFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, f.timeout)
|
||||
defer cancel()
|
||||
ctx, span := tracer.Start(ctx, "ReadAt",
|
||||
trace.WithAttributes(
|
||||
attribute.String("filename", f.filename),
|
||||
attribute.Int("length", len(p)),
|
||||
),
|
||||
)
|
||||
defer func() {
|
||||
span.SetAttributes(attribute.Int("read", n))
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
}
|
||||
span.End()
|
||||
}()
|
||||
|
||||
n, err = f.f.ReadAt(ctx, p, off)
|
||||
if err != nil {
|
||||
f.log.Error("Failed to read", "offset", off, "error", err)
|
||||
|
@ -130,8 +277,8 @@ func (f *LogFile) Size() int64 {
|
|||
}
|
||||
|
||||
// Stat implements File.
|
||||
func (f *LogFile) Stat() (fs.FileInfo, error) {
|
||||
info, err := f.f.Stat()
|
||||
func (f *LogFile) Info() (fs.FileInfo, error) {
|
||||
info, err := f.f.Info()
|
||||
if err != nil {
|
||||
f.log.Error("Failed to read", "error", err)
|
||||
}
|
||||
|
|
|
@ -5,15 +5,41 @@ import (
|
|||
"context"
|
||||
"io/fs"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
var _ Filesystem = &MemoryFs{}
|
||||
|
||||
type MemoryFs struct {
|
||||
name string
|
||||
files map[string]*MemoryFile
|
||||
}
|
||||
|
||||
var _ Filesystem = (*MemoryFs)(nil)
|
||||
|
||||
// ModTime implements Filesystem.
|
||||
func (mfs *MemoryFs) ModTime() time.Time {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// Mode implements Filesystem.
|
||||
func (mfs *MemoryFs) Mode() fs.FileMode {
|
||||
return fs.ModeDir
|
||||
}
|
||||
|
||||
// Size implements Filesystem.
|
||||
func (fs *MemoryFs) Size() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Sys implements Filesystem.
|
||||
func (fs *MemoryFs) Sys() any {
|
||||
return nil
|
||||
}
|
||||
|
||||
// FsKind implements Filesystem.
|
||||
func (fs *MemoryFs) FsName() string {
|
||||
return "memoryfs"
|
||||
}
|
||||
|
||||
// Info implements Filesystem.
|
||||
func (fs *MemoryFs) Info() (fs.FileInfo, error) {
|
||||
return newDirInfo(fs.name), nil
|
||||
|
@ -77,7 +103,17 @@ func NewMemoryFile(name string, data []byte) *MemoryFile {
|
|||
}
|
||||
}
|
||||
|
||||
func (d *MemoryFile) Stat() (fs.FileInfo, error) {
|
||||
// Name implements File.
|
||||
func (d *MemoryFile) Name() string {
|
||||
return d.name
|
||||
}
|
||||
|
||||
// Type implements File.
|
||||
func (d *MemoryFile) Type() fs.FileMode {
|
||||
return roMode
|
||||
}
|
||||
|
||||
func (d *MemoryFile) Info() (fs.FileInfo, error) {
|
||||
return newFileInfo(d.name, int64(d.data.Len())), nil
|
||||
}
|
||||
|
||||
|
|
|
@ -12,13 +12,19 @@ type OsFS struct {
|
|||
hostDir string
|
||||
}
|
||||
|
||||
var _ Filesystem = (*OsFS)(nil)
|
||||
|
||||
// Stat implements Filesystem.
|
||||
func (fs *OsFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
|
||||
if path.Clean(filename) == Separator {
|
||||
return newDirInfo(Separator), nil
|
||||
}
|
||||
|
||||
return os.Stat(path.Join(fs.hostDir, filename))
|
||||
info, err := os.Stat(path.Join(fs.hostDir, filename))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// Unlink implements Filesystem.
|
||||
|
@ -28,11 +34,11 @@ func (fs *OsFS) Unlink(ctx context.Context, filename string) error {
|
|||
|
||||
// Open implements Filesystem.
|
||||
func (fs *OsFS) Open(ctx context.Context, filename string) (File, error) {
|
||||
if path.Clean(filename) == Separator {
|
||||
return NewDir(filename), nil
|
||||
if isRoot(filename) {
|
||||
return newDirFile(fs.Name()), nil
|
||||
}
|
||||
|
||||
return NewLazyOsFile(path.Join(fs.hostDir, filename)), nil
|
||||
return NewLazyOsFile(path.Join(fs.hostDir, filename))
|
||||
}
|
||||
|
||||
// ReadDir implements Filesystem.
|
||||
|
@ -42,7 +48,7 @@ func (o *OsFS) ReadDir(ctx context.Context, dir string) ([]fs.DirEntry, error) {
|
|||
|
||||
// Info implements Filesystem.
|
||||
func (fs *OsFS) Info() (fs.FileInfo, error) {
|
||||
return newDirInfo(path.Base(fs.hostDir)), nil
|
||||
return newDirInfo(fs.Name()), nil
|
||||
}
|
||||
|
||||
// IsDir implements Filesystem.
|
||||
|
@ -68,56 +74,16 @@ func NewOsFs(osDir string) *OsFS {
|
|||
|
||||
var _ Filesystem = &OsFS{}
|
||||
|
||||
type OsFile struct {
|
||||
f *os.File
|
||||
}
|
||||
|
||||
func NewOsFile(f *os.File) *OsFile {
|
||||
return &OsFile{f: f}
|
||||
}
|
||||
|
||||
var _ File = &OsFile{}
|
||||
|
||||
// Info implements File.
|
||||
func (f *OsFile) Info() (fs.FileInfo, error) {
|
||||
return f.f.Stat()
|
||||
}
|
||||
|
||||
// Close implements File.
|
||||
func (f *OsFile) Close(ctx context.Context) error {
|
||||
return f.f.Close()
|
||||
}
|
||||
|
||||
// Read implements File.
|
||||
func (f *OsFile) Read(ctx context.Context, p []byte) (n int, err error) {
|
||||
return f.f.Read(p)
|
||||
}
|
||||
|
||||
// ReadAt implements File.
|
||||
func (f *OsFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
|
||||
return f.f.ReadAt(p, off)
|
||||
}
|
||||
|
||||
func (f *OsFile) Stat() (fs.FileInfo, error) {
|
||||
return f.f.Stat()
|
||||
}
|
||||
|
||||
// Size implements File.
|
||||
func (f *OsFile) Size() int64 {
|
||||
stat, err := f.Stat()
|
||||
func NewLazyOsFile(path string) (*LazyOsFile, error) {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return 0
|
||||
return nil, err
|
||||
}
|
||||
return stat.Size()
|
||||
}
|
||||
|
||||
// IsDir implements File.
|
||||
func (f *OsFile) IsDir() bool {
|
||||
stat, err := f.Stat()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return stat.IsDir()
|
||||
return &LazyOsFile{
|
||||
path: path,
|
||||
info: info,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type LazyOsFile struct {
|
||||
|
@ -125,15 +91,10 @@ type LazyOsFile struct {
|
|||
path string
|
||||
file *os.File
|
||||
|
||||
// cached field
|
||||
info fs.FileInfo
|
||||
}
|
||||
|
||||
func NewLazyOsFile(path string) *LazyOsFile {
|
||||
return &LazyOsFile{path: path}
|
||||
}
|
||||
|
||||
var _ File = &OsFile{}
|
||||
var _ File = (*LazyOsFile)(nil)
|
||||
|
||||
func (f *LazyOsFile) open() error {
|
||||
f.m.Lock()
|
||||
|
@ -151,6 +112,16 @@ func (f *LazyOsFile) open() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Name implements File.
|
||||
func (f *LazyOsFile) Name() string {
|
||||
return path.Base(f.path)
|
||||
}
|
||||
|
||||
// Type implements File.
|
||||
func (f *LazyOsFile) Type() fs.FileMode {
|
||||
return f.info.Mode()
|
||||
}
|
||||
|
||||
// Close implements File.
|
||||
func (f *LazyOsFile) Close(ctx context.Context) error {
|
||||
if f.file == nil {
|
||||
|
@ -177,41 +148,17 @@ func (f *LazyOsFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, er
|
|||
return f.file.ReadAt(p, off)
|
||||
}
|
||||
|
||||
func (f *LazyOsFile) Stat() (fs.FileInfo, error) {
|
||||
f.m.Lock()
|
||||
defer f.m.Unlock()
|
||||
if f.info == nil {
|
||||
if f.file == nil {
|
||||
info, err := os.Stat(f.path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.info = info
|
||||
} else {
|
||||
info, err := f.file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.info = info
|
||||
}
|
||||
}
|
||||
func (f *LazyOsFile) Info() (fs.FileInfo, error) {
|
||||
|
||||
return f.info, nil
|
||||
}
|
||||
|
||||
// Size implements File.
|
||||
func (f *LazyOsFile) Size() int64 {
|
||||
stat, err := f.Stat()
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return stat.Size()
|
||||
return f.info.Size()
|
||||
}
|
||||
|
||||
// IsDir implements File.
|
||||
func (f *LazyOsFile) IsDir() bool {
|
||||
stat, err := f.Stat()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return stat.IsDir()
|
||||
return f.info.IsDir()
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue