diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
deleted file mode 100644
index 46a25bc..0000000
--- a/.github/FUNDING.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-# These are supported funding model platforms
-
-github: [ajnavarro]
-# patreon: # Replace with a single Patreon username
-# open_collective: # Replace with a single Open Collective username
-# ko_fi: # Replace with a single Ko-fi username
-# tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
-# community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
-# liberapay: # Replace with a single Liberapay username
-# issuehunt: # Replace with a single IssueHunt username
-# otechie: # Replace with a single Otechie username
-# custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
deleted file mode 100644
index 5fa1be4..0000000
--- a/.github/workflows/codeql-analysis.yml
+++ /dev/null
@@ -1,59 +0,0 @@
-name: "CodeQL"
-
-on:
-  push:
-    branches: [ master ]
-  pull_request:
-    # The branches below must be a subset of the branches above
-    branches: [ master ]
-  schedule:
-    - cron: '26 14 * * 1'
-
-jobs:
-  analyze:
-    name: Analyze
-    runs-on: ubuntu-latest
-    permissions:
-      actions: read
-      contents: read
-      security-events: write
-
-    strategy:
-      fail-fast: false
-      matrix:
-        language: [ 'go', 'javascript' ]
-        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
-        # Learn more about CodeQL language support at https://git.io/codeql-language-support
-
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v4
-
-    # Initializes the CodeQL tools for scanning.
-    - name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
-      with:
-        languages: ${{ matrix.language }}
-        # If you wish to specify custom queries, you can do so here or in a config file.
-        # By default, queries listed here will override any specified in a config file.
-        # Prefix the list here with "+" to use these queries and those in the config file.
-        # queries: ./path/to/local/query, your-org/your-repo/queries@main
-
-    # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).
-    # If this step fails, then you should remove it and run the build manually (see below)
-    - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
-
-    # â„šī¸ Command-line programs to run using the OS shell.
-    # 📚 https://git.io/JvXDl
-
-    # âœī¸ If the Autobuild fails above, remove it and uncomment the following three lines
-    #    and modify them (or add more) to build your code if your project
-    #    uses a compiled language
-
-    #- run: |
-    #   make bootstrap
-    #   make release
-
-    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml
index 3c7e138..10e270f 100644
--- a/.github/workflows/docker.yaml
+++ b/.github/workflows/docker.yaml
@@ -9,6 +9,9 @@ on:
 
 jobs:
   build-docker:
+    permissions:
+      contents: read
+      packages: write
     strategy:
       fail-fast: false
       matrix:
@@ -16,24 +19,22 @@ jobs:
           - linux/amd64
           - linux/arm64
           - linux/386
-          - inux/arm/v6
+          - linux/arm/v5
           - linux/arm/v7
-          - linux/arm64
+          - linux/arm64/v8
+          - linux/riscv64
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
         uses: actions/checkout@v3
 
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
 
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v2
+      - name: Login to Container Registry
+        uses: docker/login-action@v3
         with:
-          registry: ghcr.io
+          registry: git.kmsign.ru
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
@@ -42,7 +43,7 @@ jobs:
         uses: docker/metadata-action@v5
         with:
           # list of Docker images to use as base name for tags
-          images: ghcr.io/${{ github.repository }}
+          images: git.kmsign.ru/${{ github.repository }}
           # generate Docker tags based on the following events/attributes
           tags: |
             type=schedule
@@ -54,7 +55,7 @@ jobs:
             type=sha
 
       - name: Build and push Docker image
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
         with:
           context: .
           push: true
@@ -63,6 +64,6 @@ jobs:
           labels: ${{ steps.meta.outputs.labels }}
           sbom: true
           provenance: true
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          # cache-from: type=gha
+          # cache-to: type=gha,mode=max
           tags: ${{ steps.meta.outputs.tags }}
diff --git a/.gitignore b/.gitignore
index d5f5981..5bbfe5f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,6 @@ tstor-data
 httpfs_vfsdata.go
 bin/
 coverage.out
-bin
\ No newline at end of file
+bin
+build
+deploy-debug.sh
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..8d56cb2
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,52 @@
+linters:
+  enable:
+    - revive
+    - exhaustruct
+    - nakedret
+    - gomoddirectives
+    - importas
+    - misspell
+    - promlinter
+    - prealloc
+    - predeclared
+    - stylecheck
+    - ineffassign
+    - dupl
+    - govet
+    - staticcheck
+    - unused
+    - asasalint
+    - asciicheck
+    - bidichk
+    - bodyclose
+    - containedctx
+    - durationcheck
+    - errcheck
+    - nakedret
+    - testifylint
+
+linters-settings:
+  revive:
+    ignore-generated-header: true
+    severity: warning
+    rules:
+      - name: blank-imports
+        disabled: true
+  staticcheck:
+    checks:
+      - "-SA4006"
+  gosimple:
+    checks:
+      - "-S1002"
+  exhaustruct:
+    include:
+      - ".*Service"
+      - ".*Server.*"
+    exclude:
+      - ".*mutex"
+      - ".*mutex"
+  stylecheck:
+    checks:
+      - "-ST1003"
+  dupl:
+    threshold: 180
diff --git a/.gqlgen.yml b/.gqlgen.yml
new file mode 100644
index 0000000..7363cf7
--- /dev/null
+++ b/.gqlgen.yml
@@ -0,0 +1,48 @@
+schema:
+  - graphql/*.graphql
+  - graphql/**/*.graphql
+
+exec:
+  filename: src/delivery/graphql/generated.go
+  package: graph
+
+model:
+  filename: src/delivery/graphql/model/models_gen.go
+  package: model
+
+resolver:
+  layout: follow-schema
+  dir: src/delivery/graphql/resolver
+  package: resolver
+  filename_template: "{name}.resolvers.go"
+
+models:
+  DateTime:
+    model: github.com/99designs/gqlgen/graphql.Time
+  Int:
+    model: github.com/99designs/gqlgen/graphql.Int64
+  Torrent:
+    fields:
+      name:
+        resolver: true
+      files:
+        resolver: true
+      excludedFiles:
+        resolver: true
+      peers:
+        resolver: true
+    extraFields:
+      T:
+        type: "*git.kmsign.ru/royalcat/tstor/src/host/controller.Torrent"
+  TorrentFile:
+    extraFields:
+      F:
+        type: "*github.com/anacrolix/torrent.File"
+  TorrentPeer:
+    extraFields:
+      F:
+        type: "*github.com/anacrolix/torrent.PeerConn"
+  # TorrentProgress:
+  #   fields:
+  #     torrent:
+  #       resolver: true
diff --git a/.graphqlrc.yaml b/.graphqlrc.yaml
new file mode 100644
index 0000000..37aea9c
--- /dev/null
+++ b/.graphqlrc.yaml
@@ -0,0 +1,4 @@
+schema:
+    - graphql/schema.graphql
+    - graphql/*.graphql
+    - graphql/**/*.graphql
diff --git a/.vscode/launch.json b/.vscode/launch.json
index b9d624c..bc0b8c4 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -4,13 +4,20 @@
     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
     "version": "0.2.0",
     "configurations": [
+        {
+            "name": "Launch file",
+            "type": "go",
+            "request": "launch",
+            "mode": "debug",
+            "program": "${file}"
+        },
         {
             "name": "Launch Package",
             "type": "go",
             "request": "launch",
             "mode": "auto",
             "program": "${workspaceFolder}/cmd/tstor/main.go",
-            "cwd": "${workspaceFolder}/bin",
+            "cwd": "${workspaceFolder}/bin"
         }
     ]
 }
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index e40c94c..0000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-    "yaml.schemas": {
-        "https://json.schemastore.org/github-workflow.json": "file:///home/royalcat/projects/distribyted/.github/workflows/mkdocs.yml"
-    }
-}
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index d75f316..ebc4e1b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,33 +1,24 @@
-#===============
-# Stage 1: Build
-#===============
+FROM golang:1.21 as builder
 
-FROM golang:1.20 as builder
+WORKDIR /app
 
-ENV BIN_REPO=git.kmsign.ru/royalcat/tstor
-ENV BIN_PATH=$GOPATH/src/$BIN_REPO
+COPY go.mod ./
+COPY go.sum ./
+RUN go mod download
 
-COPY . $BIN_PATH
-WORKDIR $BIN_PATH
+COPY ./src ./src
+COPY ./cmd ./cmd
+COPY ./assets ./assets
+COPY ./templates ./templates
+COPY embed.go embed.go
 
-RUN apk add fuse-dev git gcc libc-dev g++ make
+RUN go generate ./...
+RUN CGO_ENABLED=0 go build -tags timetzdata -o /tstor ./cmd/tstor/main.go 
 
-RUN BIN_OUTPUT=/bin/tstor make build
 
-#===============
-# Stage 2: Run
-#===============
+FROM scratch
 
-FROM alpine:3
+COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+COPY --from=builder /tstor /tstor
 
-RUN apk add gcc libc-dev fuse-dev
-
-COPY --from=builder /bin/tstor /bin/tstor
-RUN chmod +x /bin/tstor
-
-RUN mkdir /tstor-data
-
-RUN echo "user_allow_other" >> /etc/fuse.conf
-ENV tstor_FUSE_ALLOW_OTHER=true
-
-ENTRYPOINT ["./bin/tstor"]
+ENTRYPOINT ["/tstor"]
diff --git a/Makefile b/Makefile
index beef8f0..028fa69 100644
--- a/Makefile
+++ b/Makefile
@@ -34,11 +34,7 @@ go-generate:
 	@echo "  >  Generating code files..."
 	go generate ./...
 
-.PHONY: help
-all: help
-help: Makefile
-	@echo
-	@echo " Choose a command run in "$(PROJECTNAME)":"
-	@echo
-	@sed -n 's/^##//p' $< | column -t -s ':' |  sed -e 's/^/ /'
-	@echo
+generate-graphql: src/delivery/graph/generated.go
+
+src/delivery/graph/generated.go: .gqlgen.yml graphql/* graphql/types/* cmd/generate-graphql/*
+	go run cmd/generate-graphql/main.go
diff --git a/README.md b/README.md
index 795d95a..9a7f163 100644
--- a/README.md
+++ b/README.md
@@ -1,93 +1,18 @@
-[![Releases][releases-shield]][releases-url]
-[![Contributors][contributors-shield]][contributors-url]
-[![Forks][forks-shield]][forks-url]
-[![Stargazers][stars-shield]][stars-url]
-[![Issues][issues-shield]][issues-url]
-[![GPL3 License][license-shield]][license-url]
-[![Coveralls][coveralls-shield]][coveralls-url]
-[![Docker Image][docker-pulls-shield]][docker-pulls-url]
+# tstor (WIP)
 
-<!-- PROJECT LOGO -->
-<br />
-<p align="center">
-  <a href="https://git.kmsign.ru/royalcat/tstor">
-    <img src="mkdocs/docs/images/tstor_icon.png" alt="Logo" width="100">
-  </a>
+tstor is an advanced remote torrent client for self-hosting enthusiasts.
 
-  <h3 align="center">tstor</h3>
+It exposes a virtual filesystem with torrents and archives presented as fully featured directories with a limited amount of mutability. The virtual filesystem can be exported via WebDAV, an HTTP endpoint, or NFS (WIP).
 
-  <p align="center">
-    Torrent client with on-demand file downloading as a filesystem.
-    <br />
-    <br />
-    <a href="https://git.kmsign.ru/royalcat/tstor/issues">Report a Bug</a>
-    ¡
-    <a href="https://git.kmsign.ru/royalcat/tstor/issues">Request Feature</a>
-  </p>
-</p>
-
-## About The Project
-
-![tstor Screen Shot][product-screenshot]
-
-tstor is an alternative torrent client.
-It can expose torrent files as a standard FUSE, webDAV or HTTP endpoint and download them on demand, allowing random reads using a fixed amount of disk space.
-
-tstor tries to make easier integrations with other applications using torrent files, presenting them as a standard filesystem.
-
-**Note that tstor is in beta version, it is a proof of concept with a lot of bugs.**
-
-## Use Cases
-
-- Play **multimedia files** on your favorite video or audio player. These files will be downloaded on demand and only the needed parts.
-- Explore TBs of data from public **datasets** only downloading the parts you need. Use **Jupyter Notebooks** directly to process or analyze this data.
-- Give access to your latest dataset creation just by sharing a magnet link. People will start using your data in seconds.
-- Play your **ROM backups** directly from the torrent file. You can have virtually GBs in games and only downloaded the needed ones.
-
-## Documentation
-
-Check [here][main-url] for further documentation.
-
-## Contributing
-
-Contributions are what make the open-source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
-
-Some areas need more care than others:
-
-- Windows and macOS tests and compatibility. I don't have any easy way to test tstor on these operating systems.
-- Web interface. Web development is not my _forte_.
-- Tutorials. Share with the community your use case!
+tstor is based on the amazing [distribyted](https://github.com/distribyted/distribyted), but has more focus on storing torrent data while streaming it.
 
 ## Special thanks
 
-- [Anacrolix BitTorrent client package and utilities][torrent-repo-url]. An amazing torrent library with file seek support.
-- [Nwaples rardecode library, experimental branch][rardecode-repo-url]. The only go library that is able to seek over rar files and avoid to use `io.Discard`.
-- [Bodgit 7zip library][sevenzip-repo-url]. Amazing library to decode 7zip files.
+- [distribyted](https://github.com/distribyted/distribyted)
+- [Anacrolix BitTorrent client package and utilities](https://github.com/anacrolix/torrent). An amazing torrent library with file seek support.
+- [Nwaples rardecode library, experimental branch](https://github.com/nwaples/rardecode/tree/experimental). The only Go library that is able to seek over rar files and avoid using `io.Discard`.
+- [Bodgit 7zip library](https://github.com/bodgit/sevenzip). Amazing library to decode 7zip files.
 
 ## License
 
 Distributed under the GPL3 license. See `LICENSE` for more information.
-
-[sevenzip-repo-url]: https://github.com/bodgit/sevenzip
-[rardecode-repo-url]: https://github.com/nwaples/rardecode/tree/experimental
-[torrent-repo-url]: https://github.com/anacrolix/torrent
-[main-url]: https://tstor.com
-[releases-shield]: https://img.shields.io/github/v/release/tstor/tstor.svg?style=flat-square
-[releases-url]: https://git.kmsign.ru/royalcat/tstor/releases
-[docker-pulls-shield]: https://img.shields.io/docker/pulls/tstor/tstor.svg?style=flat-square
-[docker-pulls-url]: https://hub.docker.com/r/tstor/tstor
-[contributors-shield]: https://img.shields.io/github/contributors/tstor/tstor.svg?style=flat-square
-[contributors-url]: https://git.kmsign.ru/royalcat/tstor/graphs/contributors
-[forks-shield]: https://img.shields.io/github/forks/tstor/tstor.svg?style=flat-square
-[forks-url]: https://git.kmsign.ru/royalcat/tstor/network/members
-[stars-shield]: https://img.shields.io/github/stars/tstor/tstor.svg?style=flat-square
-[stars-url]: https://git.kmsign.ru/royalcat/tstor/stargazers
-[issues-shield]: https://img.shields.io/github/issues/tstor/tstor.svg?style=flat-square
-[issues-url]: https://git.kmsign.ru/royalcat/tstor/issues
-[releases-url]: https://git.kmsign.ru/royalcat/tstor/releases
-[license-shield]: https://img.shields.io/github/license/tstor/tstor.svg?style=flat-square
-[license-url]: https://git.kmsign.ru/royalcat/tstor/blob/master/LICENSE
-[product-screenshot]: mkdocs/docs/images/tstor.gif
-[example-config]: https://git.kmsign.ru/royalcat/tstor/blob/master/examples/conf_example.yaml
-[coveralls-shield]: https://img.shields.io/coveralls/github/tstor/tstor?style=flat-square
-[coveralls-url]: https://coveralls.io/github/tstor/tstor
diff --git a/build_tools/Dockerfile b/build_tools/Dockerfile
deleted file mode 100644
index d21a348..0000000
--- a/build_tools/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM techknowlogick/xgo:go-1.17.x
-
-# add 32-bit and 64-bit architectures and install 7zip
-RUN \
-    dpkg --add-architecture i386 && \
-    dpkg --add-architecture amd64 && \
-    apt-get update && \
-    apt-get install -y --no-install-recommends p7zip-full
-
-# install LIBFUSE
-RUN \
-    apt-get update && \
-    apt-get install -y --no-install-recommends libfuse-dev:i386 && \
-    apt-get install -y --no-install-recommends libfuse-dev:amd64 && \
-    apt-get download libfuse-dev:i386 && \
-    dpkg -x libfuse-dev*i386*.deb /
-
-ENV \
-    OSXCROSS_NO_INCLUDE_PATH_WARNINGS 1
diff --git a/cmd/generate-graphql/main.go b/cmd/generate-graphql/main.go
new file mode 100644
index 0000000..ea2d6aa
--- /dev/null
+++ b/cmd/generate-graphql/main.go
@@ -0,0 +1,65 @@
+// https://github.com/99designs/gqlgen/issues/2281#issuecomment-1506561381
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/99designs/gqlgen/api"
+	"github.com/99designs/gqlgen/codegen"
+	"github.com/99designs/gqlgen/codegen/config"
+)
+
+type fieldDirectiveFix struct {
+}
+
+func (fieldDirectiveFix) Name() string {
+	return "Fix Directive hook called with wrong object"
+}
+
+func (fieldDirectiveFix) GenerateCode(cfg *codegen.Data) error {
+	for _, input := range cfg.Inputs {
+		for _, field := range input.Fields {
+			if field.GoFieldType == codegen.GoFieldVariable {
+				directiveMap := make(map[string]int, len(field.TypeReference.Definition.Directives)+len(field.Object.Directives))
+				for _, v := range field.TypeReference.Definition.Directives {
+					directiveMap[v.Name]++
+				}
+				// for _, v := range field.Object.Directives {
+				// 	directiveMap[v.Name]++
+				// }
+
+				directive := make([]*codegen.Directive, 0, len(field.Directives))
+				for _, v := range field.Directives {
+					if count := directiveMap[v.Name]; count > 0 {
+						directiveMap[v.Name] = count - 1
+						fmt.Printf("Ignore field %s{%s} directive: @%s\n", input.Name, field.Name, v.Name)
+						continue
+					}
+
+					directive = append(directive, v)
+				}
+
+				field.Directives = directive
+			}
+		}
+	}
+
+	return nil
+}
+
+func main() {
+	cfg, err := config.LoadConfigFromDefaultLocations()
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "failed to load config", err.Error())
+		os.Exit(2)
+	}
+
+	err = api.Generate(cfg,
+		api.AddPlugin(&fieldDirectiveFix{}),
+	)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err.Error())
+		os.Exit(3)
+	}
+}
diff --git a/cmd/tstor/main.go b/cmd/tstor/main.go
index 979118d..b430181 100644
--- a/cmd/tstor/main.go
+++ b/cmd/tstor/main.go
@@ -1,33 +1,39 @@
 package main
 
 import (
-	"bufio"
+	"context"
 	"fmt"
+
+	"net"
+	nethttp "net/http"
+	_ "net/http/pprof"
 	"os"
 	"os/signal"
 	"path/filepath"
-	"runtime"
 	"syscall"
 	"time"
 
+	wnfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+	"git.kmsign.ru/royalcat/tstor/pkg/rlog"
 	"git.kmsign.ru/royalcat/tstor/src/config"
+	"git.kmsign.ru/royalcat/tstor/src/delivery"
 	"git.kmsign.ru/royalcat/tstor/src/host"
-	"git.kmsign.ru/royalcat/tstor/src/host/torrent"
-	"github.com/anacrolix/torrent/storage"
-	"github.com/gin-gonic/gin"
-	"github.com/rs/zerolog/log"
+	"git.kmsign.ru/royalcat/tstor/src/host/datastorage"
+	"git.kmsign.ru/royalcat/tstor/src/host/service"
+	"git.kmsign.ru/royalcat/tstor/src/host/store"
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+	"git.kmsign.ru/royalcat/tstor/src/telemetry"
 	"github.com/urfave/cli/v2"
 
-	"git.kmsign.ru/royalcat/tstor/src/http"
-	dlog "git.kmsign.ru/royalcat/tstor/src/log"
-	"git.kmsign.ru/royalcat/tstor/src/mounts/fuse"
-	"git.kmsign.ru/royalcat/tstor/src/mounts/httpfs"
-	"git.kmsign.ru/royalcat/tstor/src/mounts/webdav"
+	_ "git.kmsign.ru/royalcat/tstor/pkg/rlog"
+	"git.kmsign.ru/royalcat/tstor/src/export/fuse"
+	"git.kmsign.ru/royalcat/tstor/src/export/httpfs"
+	"git.kmsign.ru/royalcat/tstor/src/export/nfs"
+	"git.kmsign.ru/royalcat/tstor/src/export/webdav"
 )
 
 const (
 	configFlag     = "config"
-	fuseAllowOther = "fuse-allow-other"
 	portFlag       = "http-port"
 	webDAVPortFlag = "webdav-port"
 )
@@ -45,179 +51,210 @@ func main() {
 		},
 
 		Action: func(c *cli.Context) error {
-			err := load(c.String(configFlag), c.Int(portFlag), c.Int(webDAVPortFlag), c.Bool(fuseAllowOther))
-
-			// stop program execution on errors to avoid flashing consoles
-			if err != nil && runtime.GOOS == "windows" {
-				log.Error().Err(err).Msg("problem starting application")
-				fmt.Print("Press 'Enter' to continue...")
-				bufio.NewReader(os.Stdin).ReadBytes('\n')
-			}
-
-			return err
+			return run(c.String(configFlag))
 		},
 
 		HideHelpCommand: true,
 	}
 
 	if err := app.Run(os.Args); err != nil {
-		log.Fatal().Err(err).Msg("problem starting application")
+		print("problem starting application: ", err.Error())
 	}
 }
 
-func setupStorage(tcfg config.TorrentClient) (storage.ClientImplCloser, storage.PieceCompletion, error) {
-	pcp := filepath.Join(tcfg.DataFolder, "piece-completion")
-	if err := os.MkdirAll(pcp, 0744); err != nil {
-		return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
-	}
-	pc, err := storage.NewBoltPieceCompletion(pcp)
-	if err != nil {
-		return nil, nil, fmt.Errorf("error creating servers piece completion: %w", err)
-	}
-
-	// TODO implement cache/storage switching
-	// cacheDir := filepath.Join(tcfg.DataFolder, "cache")
-	// if err := os.MkdirAll(cacheDir, 0744); err != nil {
-	// 	return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
-	// }
-	// fc, err := filecache.NewCache(cacheDir)
-	// if err != nil {
-	// 	return nil, nil, fmt.Errorf("error creating cache: %w", err)
-	// }
-	// log.Info().Msg(fmt.Sprintf("setting cache size to %d MB", 1024))
-	// fc.SetCapacity(1024 * 1024 * 1024)
-
-	// rp := storage.NewResourcePieces(fc.AsResourceProvider())
-	// st := &stc{rp}
-
-	filesDir := filepath.Join(tcfg.DataFolder, "files")
-	if err := os.MkdirAll(pcp, 0744); err != nil {
-		return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
-	}
-
-	st := storage.NewFileWithCompletion(filesDir, pc)
-
-	return st, pc, nil
-}
-
-type stc struct {
-	storage.ClientImpl
-}
-
-func (s *stc) Close() error {
-	return nil
-}
-
-func load(configPath string, port, webDAVPort int, fuseAllowOther bool) error {
+func run(configPath string) error {
 	conf, err := config.Load(configPath)
 	if err != nil {
 		return fmt.Errorf("error loading configuration: %w", err)
 	}
+	// dlog.Load(&conf.Log)
 
-	dlog.Load(&conf.Log)
+	if conf.OtelHttp != "" {
+		ctx := context.Background()
+		client, err := telemetry.Setup(ctx, conf.OtelHttp)
+		if err != nil {
+			return err
+		}
+
+		defer client.Shutdown(ctx)
+	}
+
+	log := rlog.ComponentLog("run")
+
+	// TODO make optional
+	err = syscall.Setpriority(syscall.PRIO_PGRP, 0, 19)
+	if err != nil {
+		log.Error("set priority failed", "error", err)
+	}
 
 	if err := os.MkdirAll(conf.TorrentClient.MetadataFolder, 0744); err != nil {
 		return fmt.Errorf("error creating metadata folder: %w", err)
 	}
 
-	fis, err := torrent.NewFileItemStore(filepath.Join(conf.TorrentClient.MetadataFolder, "items"), 2*time.Hour)
+	fis, err := store.NewFileItemStore(filepath.Join(conf.TorrentClient.MetadataFolder, "items"), 2*time.Hour)
 	if err != nil {
 		return fmt.Errorf("error starting item store: %w", err)
 	}
+	defer fis.Close()
 
-	id, err := torrent.GetOrCreatePeerID(filepath.Join(conf.TorrentClient.MetadataFolder, "ID"))
+	id, err := store.GetOrCreatePeerID(filepath.Join(conf.TorrentClient.MetadataFolder, "ID"))
 	if err != nil {
 		return fmt.Errorf("error creating node ID: %w", err)
 	}
 
-	st, _, err := setupStorage(conf.TorrentClient)
+	st, _, err := datastorage.Setup(conf.TorrentClient)
+	if err != nil {
+		return err
+	}
+	defer st.Close()
+
+	excludedFilesStore, err := store.NewFileMappings(conf.TorrentClient.MetadataFolder, st)
 	if err != nil {
 		return err
 	}
 
-	c, err := torrent.NewClient(st, fis, &conf.TorrentClient, id)
+	infoBytesStore, err := store.NewInfoBytes(conf.TorrentClient.MetadataFolder)
+	if err != nil {
+		return err
+	}
+
+	c, err := store.NewClient(st, fis, &conf.TorrentClient, id)
 	if err != nil {
 		return fmt.Errorf("error starting torrent client: %w", err)
 	}
 	c.AddDhtNodes(conf.TorrentClient.DHTNodes)
+	defer c.Close()
 
-	ts := torrent.NewService(c, conf.TorrentClient.AddTimeout, conf.TorrentClient.ReadTimeout)
+	ts, err := service.NewService(
+		conf.SourceDir, conf.TorrentClient,
+		c, st, excludedFilesStore, infoBytesStore,
+	)
+	if err != nil {
+		return fmt.Errorf("error creating service: %w", err)
+	}
 
-	if err := os.MkdirAll(conf.DataFolder, 0744); err != nil {
+	if err := os.MkdirAll(conf.SourceDir, 0744); err != nil {
 		return fmt.Errorf("error creating data folder: %w", err)
 	}
-	cfs := host.NewStorage(conf.DataFolder, ts)
+	sfs := host.NewTorrentStorage(conf.SourceDir, ts)
+	sfs = vfs.WrapLogFS(sfs)
+
+	// TODO make separate function
+	// {
+	// 	if st, ok := st.(storage.FileStorageDeleter); ok {
+	// 		log.Info().Msg("listing files")
+	// 		files, err := listFilesRecursive(conf.SourceDir)
+	// 		if err != nil {
+	// 			return fmt.Errorf("error listing files: %w", err)
+	// 		}
+
+	// 		torrentFiles := []string{}
+	// 		for _, v := range files {
+	// 			if strings.HasSuffix(v, ".torrent") {
+	// 				torrentFiles = append(torrentFiles, v)
+	// 			}
+	// 		}
+
+	// 		log.Info().Int("count", len(torrentFiles)).Msg("loading torrent files")
+	// 		torrentList := []*torrent.Torrent{}
+	// 		for _, tf := range torrentFiles {
+	// 			t, err := c.AddTorrentFromFile(tf)
+	// 			if err != nil {
+	// 				return err
+	// 			}
+	// 			<-t.GotInfo()
+	// 			torrentList = append(torrentList, t)
+	// 		}
+	// 		log.Info().Msg("staring cleanup")
+	// 		err = st.Cleanup(torrentList)
+	// 		if err != nil {
+	// 			return fmt.Errorf("cleanup error: %w", err)
+	// 		}
+	// 	}
+
+	// }
 
-	var mh *fuse.Handler
 	if conf.Mounts.Fuse.Enabled {
-		mh = fuse.NewHandler(conf.Mounts.Fuse.AllowOther, conf.Mounts.Fuse.Path)
+		mh := fuse.NewHandler(conf.Mounts.Fuse.AllowOther, conf.Mounts.Fuse.Path)
+		err := mh.Mount(sfs)
+		if err != nil {
+			return fmt.Errorf("mount fuse error: %w", err)
+		}
+		defer mh.Unmount()
 	}
 
-	sigChan := make(chan os.Signal)
-	signal.Notify(sigChan, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
-	go func() {
-
-		<-sigChan
-		log.Info().Msg("closing servers...")
-		// for _, s := range servers {
-		// 	if err := s.Close(); err != nil {
-		// 		log.Warn().Err(err).Msg("problem closing server")
-		// 	}
-		// }
-		log.Info().Msg("closing items database...")
-		fis.Close()
-		log.Info().Msg("closing torrent client...")
-		c.Close()
-		if mh != nil {
-			log.Info().Msg("unmounting fuse filesystem...")
-			mh.Unmount()
-		}
-
-		log.Info().Msg("exiting")
-		os.Exit(1)
-	}()
-
-	go func() {
-		if mh == nil {
-			return
-		}
-
-		if err := mh.Mount(cfs); err != nil {
-			log.Info().Err(err).Msg("error mounting filesystems")
-		}
-	}()
-
 	if conf.Mounts.WebDAV.Enabled {
 		go func() {
-			if err := webdav.NewWebDAVServer(cfs, conf.Mounts.WebDAV.Port, conf.Mounts.WebDAV.User, conf.Mounts.WebDAV.Pass); err != nil {
-				log.Error().Err(err).Msg("error starting webDAV")
+			if err := webdav.NewWebDAVServer(sfs, conf.Mounts.WebDAV.Port, conf.Mounts.WebDAV.User, conf.Mounts.WebDAV.Pass); err != nil {
+				log.Error("error starting webDAV", "error", err)
 			}
 
-			log.Warn().Msg("webDAV configuration not found!")
+			log.Warn("webDAV configuration not found!")
 		}()
 	}
 	if conf.Mounts.HttpFs.Enabled {
 		go func() {
-			httpfs := httpfs.NewHTTPFS(cfs)
+			httpfs := httpfs.NewHTTPFS(sfs)
+			err = nethttp.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", conf.Mounts.HttpFs.Port), nethttp.FileServer(httpfs))
+			if err != nil {
+				log.Error("error starting HTTPFS", "error", err)
+			}
+			// r := gin.New()
 
-			r := gin.New()
+			// r.GET("*filepath", func(c *gin.Context) {
+			// 	path := c.Param("filepath")
+			// 	c.FileFromFS(path, httpfs)
+			// })
 
-			r.GET("*filepath", func(c *gin.Context) {
-				path := c.Param("filepath")
-				c.FileFromFS(path, httpfs)
-			})
+			log.Info("starting HTTPFS", "host", fmt.Sprintf("0.0.0.0:%d", conf.Mounts.HttpFs.Port))
+			// if err := r.Run(fmt.Sprintf("0.0.0.0:%d", conf.Mounts.HttpFs.Port)); err != nil {
+			// 	log.Error().Err(err).Msg("error starting HTTPFS")
+			// }
+		}()
+	}
 
-			log.Info().Str("host", fmt.Sprintf("0.0.0.0:%d", conf.Mounts.HttpFs.Port)).Msg("starting HTTPFS")
-			if err := r.Run(fmt.Sprintf("0.0.0.0:%d", conf.Mounts.HttpFs.Port)); err != nil {
-				log.Error().Err(err).Msg("error starting HTTPFS")
+	if conf.Mounts.NFS.Enabled {
+		go func() {
+			log := log.With("component", "NFS")
+			listener, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%d", conf.Mounts.NFS.Port))
+			if err != nil {
+				log.Error("failed to start TCP  listener", err)
+				return
+			}
+			log.Info("starting NFS server", "host", listener.Addr().String())
+			handler, err := nfs.NewNFSv3Handler(sfs)
+			if err != nil {
+				log.Error("failed to create NFS handler", "error", err)
+				return
+			}
+
+			err = wnfs.Serve(listener, handler)
+			if err != nil {
+				log.Error("error serving nfs", "error", err)
+				return
 			}
 		}()
 	}
 
-	logFilename := filepath.Join(conf.Log.Path, dlog.FileName)
+	go func() {
+		err := webdav.NewDirServer(conf.SourceDir, 36912, conf.Mounts.WebDAV.User, conf.Mounts.WebDAV.Pass)
+		if err != nil {
+			log.Error("error starting webDAV", "error", err)
+		}
+	}()
 
-	err = http.New(nil, nil, ts, logFilename, conf)
-	log.Error().Err(err).Msg("error initializing HTTP server")
-	return err
+	go func() {
+		logFilename := filepath.Join(conf.Log.Path, "logs")
+
+		err := delivery.New(nil, service.NewStats(), ts, sfs, logFilename, conf)
+		if err != nil {
+			log.Error("error initializing HTTP server", "error", err)
+		}
+	}()
+
+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+	<-sigChan
+
+	return ts.Close()
 }
diff --git a/http_fs.go b/embed.go
similarity index 100%
rename from http_fs.go
rename to embed.go
diff --git a/go.mod b/go.mod
index 421dc8d..6cbec8e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,61 +1,90 @@
 module git.kmsign.ru/royalcat/tstor
 
-go 1.21
+go 1.22.1
 
 require (
-	github.com/anacrolix/dht/v2 v2.20.0
-	github.com/anacrolix/log v0.14.3-0.20230823030427-4b296d71a6b4
-	github.com/anacrolix/missinggo/v2 v2.7.2
-	github.com/anacrolix/torrent v1.52.6-0.20230929044811-45c91b322ad1
+	github.com/99designs/gqlgen v0.17.43
+	github.com/agoda-com/opentelemetry-go/otelslog v0.1.1
+	github.com/agoda-com/opentelemetry-logs-go v0.3.0
+	github.com/anacrolix/dht/v2 v2.21.1
+	github.com/anacrolix/log v0.14.6-0.20231202035202-ed7a02cad0b4
+	github.com/anacrolix/missinggo/v2 v2.7.3
+	github.com/anacrolix/torrent v1.55.0
 	github.com/billziss-gh/cgofuse v1.5.0
-	github.com/bodgit/sevenzip v1.4.3
+	github.com/bodgit/sevenzip v1.4.5
 	github.com/dgraph-io/badger/v4 v4.2.0
+	github.com/dgraph-io/ristretto v0.1.1
+	github.com/dustin/go-humanize v1.0.0
+	github.com/gin-contrib/pprof v1.4.0
 	github.com/gin-gonic/gin v1.9.1
+	github.com/go-git/go-billy/v5 v5.5.0
+	github.com/gofrs/uuid/v5 v5.0.0
+	github.com/google/uuid v1.5.0
+	github.com/hashicorp/go-multierror v1.1.1
+	github.com/hashicorp/golang-lru/v2 v2.0.7
 	github.com/knadh/koanf/parsers/yaml v0.1.0
 	github.com/knadh/koanf/providers/env v0.1.0
 	github.com/knadh/koanf/providers/file v0.1.0
 	github.com/knadh/koanf/providers/structs v0.1.0
 	github.com/knadh/koanf/v2 v2.0.1
-	github.com/mattn/go-colorable v0.1.13
 	github.com/nwaples/rardecode/v2 v2.0.0-beta.2
-	github.com/rs/zerolog v1.31.0
+	github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93
+	github.com/ravilushqa/otelgqlgen v0.15.0
+	github.com/royalcat/kv v0.0.0-20240327213417-8cf5696b2389
+	github.com/rs/zerolog v1.32.0
+	github.com/samber/slog-multi v1.0.2
+	github.com/samber/slog-zerolog v1.0.0
 	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
 	github.com/stretchr/testify v1.8.4
-	github.com/urfave/cli/v2 v2.25.7
-	golang.org/x/net v0.16.0
-	gopkg.in/natefinch/lumberjack.v2 v2.2.1
+	github.com/urfave/cli/v2 v2.27.0
+	github.com/vektah/gqlparser/v2 v2.5.11
+	github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00
+	github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e
+	go.opentelemetry.io/otel v1.24.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0
+	go.opentelemetry.io/otel/exporters/prometheus v0.46.0
+	go.opentelemetry.io/otel/sdk v1.24.0
+	go.opentelemetry.io/otel/sdk/metric v1.24.0
+	go.opentelemetry.io/otel/trace v1.24.0
+	go.uber.org/multierr v1.11.0
+	golang.org/x/exp v0.0.0-20231226003508-02704c960a9b
+	golang.org/x/net v0.19.0
+	golang.org/x/sys v0.17.0
 )
 
 require (
 	github.com/RoaringBitmap/roaring v1.2.3 // indirect
+	github.com/agnivade/levenshtein v1.1.1 // indirect
 	github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 // indirect
 	github.com/alecthomas/atomic v0.1.0-alpha2 // indirect
 	github.com/anacrolix/chansync v0.3.0 // indirect
 	github.com/anacrolix/envpprof v1.3.0 // indirect
-	github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45 // indirect
+	github.com/anacrolix/generics v0.0.0-20230911070922-5dd7545c6b13 // indirect
 	github.com/anacrolix/go-libutp v1.3.1 // indirect
 	github.com/anacrolix/missinggo v1.3.0 // indirect
 	github.com/anacrolix/missinggo/perf v1.0.0 // indirect
 	github.com/anacrolix/mmsg v1.0.0 // indirect
 	github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7 // indirect
 	github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496 // indirect
-	github.com/anacrolix/sync v0.4.1-0.20230926072150-b8cd7cfb92d0 // indirect
+	github.com/anacrolix/sync v0.5.1 // indirect
 	github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 // indirect
 	github.com/anacrolix/utp v0.1.0 // indirect
-	github.com/andybalholm/brotli v1.0.5 // indirect
+	github.com/andybalholm/brotli v1.0.6 // indirect
 	github.com/bahlo/generic-list-go v0.2.0 // indirect
 	github.com/benbjohnson/immutable v0.4.1-0.20221220213129-8932b999621d // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bits-and-blooms/bitset v1.2.2 // indirect
 	github.com/bodgit/plumbing v1.3.0 // indirect
 	github.com/bodgit/windows v1.0.1 // indirect
 	github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
 	github.com/bytedance/sonic v1.9.1 // indirect
+	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+	github.com/cespare/xxhash v1.1.0 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
+	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/dgraph-io/ristretto v0.1.1 // indirect
-	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/edsrzf/mmap-go v1.1.0 // indirect
 	github.com/fatih/structs v1.1.0 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
@@ -63,30 +92,31 @@ require (
 	github.com/gin-contrib/sse v0.1.0 // indirect
 	github.com/go-llsqlite/adapter v0.0.0-20230927005056-7f5ce7f0c916 // indirect
 	github.com/go-llsqlite/crawshaw v0.4.0 // indirect
-	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
 	github.com/go-playground/validator/v10 v10.14.0 // indirect
 	github.com/goccy/go-json v0.10.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/glog v1.0.0 // indirect
+	github.com/golang/glog v1.1.2 // indirect
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/golang/snappy v0.0.3 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/btree v1.1.2 // indirect
-	github.com/google/flatbuffers v1.12.1 // indirect
-	github.com/google/uuid v1.3.0 // indirect
+	github.com/google/flatbuffers v2.0.8+incompatible // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
 	github.com/hashicorp/errwrap v1.0.0 // indirect
-	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/huandu/xstrings v1.3.2 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.16.6 // indirect
+	github.com/klauspost/compress v1.17.4 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
 	github.com/knadh/koanf/maps v0.1.1 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -94,7 +124,7 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/mschoch/smat v0.2.0 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.8 // indirect
-	github.com/pierrec/lz4/v4 v4.1.18 // indirect
+	github.com/pierrec/lz4/v4 v4.1.19 // indirect
 	github.com/pion/datachannel v1.5.2 // indirect
 	github.com/pion/dtls/v2 v2.2.4 // indirect
 	github.com/pion/ice/v2 v2.2.6 // indirect
@@ -115,28 +145,43 @@ require (
 	github.com/pion/webrtc/v3 v3.1.42 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/polydawn/go-timeless-api v0.0.0-20220821201550-b93919e12c56 // indirect
+	github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
+	github.com/polydawn/rio v0.0.0-20220823181337-7c31ad9831a4 // indirect
+	github.com/prometheus/client_golang v1.18.0 // indirect
+	github.com/prometheus/client_model v0.6.0 // indirect
+	github.com/prometheus/common v0.45.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
 	github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
+	github.com/samber/lo v1.38.1 // indirect
+	github.com/sosodev/duration v1.2.0 // indirect
 	github.com/tidwall/btree v1.6.0 // indirect
 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
 	github.com/ugorji/go/codec v1.2.11 // indirect
 	github.com/ulikunitz/xz v0.5.11 // indirect
+	github.com/warpfork/go-errcat v0.0.0-20180917083543-335044ffc86e // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
-	go.opencensus.io v0.22.5 // indirect
-	go.opentelemetry.io/otel v1.8.0 // indirect
-	go.opentelemetry.io/otel/trace v1.8.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/contrib v1.21.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 // indirect
+	go.opentelemetry.io/otel/metric v1.24.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
 	go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
 	golang.org/x/arch v0.3.0 // indirect
-	golang.org/x/crypto v0.14.0 // indirect
-	golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect
-	golang.org/x/sync v0.3.0 // indirect
-	golang.org/x/sys v0.13.0 // indirect
-	golang.org/x/text v0.13.0 // indirect
-	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
-	google.golang.org/protobuf v1.30.0 // indirect
+	golang.org/x/crypto v0.17.0 // indirect
+	golang.org/x/mod v0.14.0 // indirect
+	golang.org/x/sync v0.5.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/tools v0.16.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect
+	google.golang.org/grpc v1.61.1 // indirect
+	google.golang.org/protobuf v1.32.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	modernc.org/libc v1.22.3 // indirect
 	modernc.org/mathutil v1.5.0 // indirect
diff --git a/go.sum b/go.sum
index 14db9e5..2cf862e 100644
--- a/go.sum
+++ b/go.sum
@@ -19,8 +19,12 @@ crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU=
 filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
+github.com/99designs/gqlgen v0.17.43 h1:I4SYg6ahjowErAQcHFVKy5EcWuwJ3+Xw9z2fLpuFCPo=
+github.com/99designs/gqlgen v0.17.43/go.mod h1:lO0Zjy8MkZgBdv4T1U91x09r0e0WFOdhVUutlQs1Rsc=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
 github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI=
 github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
@@ -28,6 +32,12 @@ github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVO
 github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
+github.com/agoda-com/opentelemetry-go/otelslog v0.1.1 h1:6nV8PZCzySHuh9kP/HZ2OJqGucwQiM+yZRugKDvtzj4=
+github.com/agoda-com/opentelemetry-go/otelslog v0.1.1/go.mod h1:CSc0veIcY/HsIfH7l5PGtIpRvBttk09QUQlweVkD2PI=
+github.com/agoda-com/opentelemetry-logs-go v0.3.0 h1:d2lMVUfCDeLzVgTxMeSU8IWaMXjwD4sVKigEZBGwcsw=
+github.com/agoda-com/opentelemetry-logs-go v0.3.0/go.mod h1:k3QR1O5AOl+dFC7pkrK9wWmoD72jjDONPFHi9dAgLJc=
 github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 h1:byYvvbfSo3+9efR4IeReh77gVs4PnNDR3AMOE9NJ7a0=
 github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0/go.mod h1:q37NoqncT41qKc048STsifIt69LfUJ8SrWWcz/yam5k=
 github.com/alecthomas/assert/v2 v2.0.0-alpha3 h1:pcHeMvQ3OMstAWgaeaXIAL8uzB9xMm2zlxt+/4ml8lk=
@@ -42,23 +52,23 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/anacrolix/chansync v0.3.0 h1:lRu9tbeuw3wl+PhMu/r+JJCRu5ArFXIluOgdF0ao6/U=
 github.com/anacrolix/chansync v0.3.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k=
-github.com/anacrolix/dht/v2 v2.20.0 h1:eDx9lfE9iCSf5sPK0290GToHURNhEFuUGN8iyvhvJDk=
-github.com/anacrolix/dht/v2 v2.20.0/go.mod h1:SDGC+sEs1pnO2sJGYuhvIis7T8749dDHNfcjtdH4e3g=
+github.com/anacrolix/dht/v2 v2.21.1 h1:s1rKkfLLcmBHKv4v/mtMkIeHIEptzEFiB6xVu54+5/o=
+github.com/anacrolix/dht/v2 v2.21.1/go.mod h1:SDGC+sEs1pnO2sJGYuhvIis7T8749dDHNfcjtdH4e3g=
 github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
 github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
 github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
 github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk=
 github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0=
-github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45 h1:Kmcl3I9K2+5AdnnR7hvrnVT0TLeFWWMa9bxnm55aVIg=
-github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8=
+github.com/anacrolix/generics v0.0.0-20230911070922-5dd7545c6b13 h1:qwOprPTDMM3BASJRf84mmZnTXRsPGGJ8xoHKQS7m3so=
+github.com/anacrolix/generics v0.0.0-20230911070922-5dd7545c6b13/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8=
 github.com/anacrolix/go-libutp v1.3.1 h1:idJzreNLl+hNjGC3ZnUOjujEaryeOGgkwHLqSGoige0=
 github.com/anacrolix/go-libutp v1.3.1/go.mod h1:heF41EC8kN0qCLMokLBVkB8NXiLwx3t8R8810MTNI5o=
 github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
 github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU=
 github.com/anacrolix/log v0.10.1-0.20220123034749-3920702c17f8/go.mod h1:GmnE2c0nvz8pOIPUSC9Rawgefy1sDXqposC2wgtBZE4=
 github.com/anacrolix/log v0.13.1/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68=
-github.com/anacrolix/log v0.14.3-0.20230823030427-4b296d71a6b4 h1:01OE3pdiBGIZGyQb6cIAu+QfaNhBR9k5MVmLsl+DVbE=
-github.com/anacrolix/log v0.14.3-0.20230823030427-4b296d71a6b4/go.mod h1:1OmJESOtxQGNMlUO5rcv96Vpp9mfMqXXbe2RdinFLdY=
+github.com/anacrolix/log v0.14.6-0.20231202035202-ed7a02cad0b4 h1:CdVK9IoqoqklXQQ4+L2aew64xsz14KdOD+rnKdTQajg=
+github.com/anacrolix/log v0.14.6-0.20231202035202-ed7a02cad0b4/go.mod h1:1OmJESOtxQGNMlUO5rcv96Vpp9mfMqXXbe2RdinFLdY=
 github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62 h1:P04VG6Td13FHMgS5ZBcJX23NPC/fiC4cp9bXwYujdYM=
 github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62/go.mod h1:66cFKPCO7Sl4vbFnAaSq7e4OXtdMhRSBagJGWgmpJbM=
 github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s=
@@ -72,8 +82,8 @@ github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5ur
 github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY=
 github.com/anacrolix/missinggo/v2 v2.5.1/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA=
 github.com/anacrolix/missinggo/v2 v2.5.2/go.mod h1:yNvsLrtZYRYCOI+KRH/JM8TodHjtIE/bjOGhQaLOWIE=
-github.com/anacrolix/missinggo/v2 v2.7.2 h1:XGia0kZVC8DDY6XVl15fjtdEyUF39tWkdtsH1VjuAHg=
-github.com/anacrolix/missinggo/v2 v2.7.2/go.mod h1:mIEtp9pgaXqt8VQ3NQxFOod/eQ1H0D1XsZzKUQfwtac=
+github.com/anacrolix/missinggo/v2 v2.7.3 h1:Ee//CmZBMadeNiYB/hHo9ly2PFOEZ4Fhsbnug3rDAIE=
+github.com/anacrolix/missinggo/v2 v2.7.3/go.mod h1:mIEtp9pgaXqt8VQ3NQxFOod/eQ1H0D1XsZzKUQfwtac=
 github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw=
 github.com/anacrolix/mmsg v1.0.0 h1:btC7YLjOn29aTUAExJiVUhQOuf/8rhm+/nWCMAnL3Hg=
 github.com/anacrolix/mmsg v1.0.0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc=
@@ -84,20 +94,24 @@ github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496 h1:aMiRi2kOOd+nG64
 github.com/anacrolix/stm v0.4.1-0.20221221005312-96d17df0e496/go.mod h1:DBm8/1OXm4A4RZ6Xa9u/eOsjeAXCaoRYvd2JzlskXeM=
 github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk=
 github.com/anacrolix/sync v0.3.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g=
-github.com/anacrolix/sync v0.4.1-0.20230926072150-b8cd7cfb92d0 h1:M2HtYrYz6CVwo88TfVrGNlc+mSe59KXCBe3gFuEsEto=
-github.com/anacrolix/sync v0.4.1-0.20230926072150-b8cd7cfb92d0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g=
+github.com/anacrolix/sync v0.5.1 h1:FbGju6GqSjzVoTgcXTUKkF041lnZkG5P0C3T5RL3SGc=
+github.com/anacrolix/sync v0.5.1/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g=
 github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
 github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
 github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8=
-github.com/anacrolix/torrent v1.52.6-0.20230929044811-45c91b322ad1 h1:KzIKTajeqBXWeLjHv2KHjlwigyR19TkdvU5uLGPGQAI=
-github.com/anacrolix/torrent v1.52.6-0.20230929044811-45c91b322ad1/go.mod h1:q4utKicrzW80odcXiy3J8sObJELsGGFI1FxhFt/2qA0=
+github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
+github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
 github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 h1:QAVZ3pN/J4/UziniAhJR2OZ9Ox5kOY2053tBbbqUPYA=
 github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96/go.mod h1:Wa6n8cYIdaG35x15aH3Zy6d03f7P728QfdcDeD/IEOs=
 github.com/anacrolix/utp v0.1.0 h1:FOpQOmIwYsnENnz7tAGohA+r6iXpRjrq8ssKSre2Cp4=
 github.com/anacrolix/utp v0.1.0/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk=
-github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
-github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
+github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
 github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
 github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
 github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
@@ -105,6 +119,7 @@ github.com/benbjohnson/immutable v0.4.1-0.20221220213129-8932b999621d h1:2qVb9bs
 github.com/benbjohnson/immutable v0.4.1-0.20221220213129-8932b999621d/go.mod h1:iAr8OjJGLnLmVUr9MZ/rz4PWUy6Ouc2JLYuMArmvAJM=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/billziss-gh/cgofuse v1.5.0 h1:kH516I/s+Ab4diL/Y/ayFeUjjA8ey+JK12xDfBf4HEs=
 github.com/billziss-gh/cgofuse v1.5.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
@@ -113,8 +128,8 @@ github.com/bits-and-blooms/bitset v1.2.2 h1:J5gbX05GpMdBjCvQ9MteIg2KKDExr7DrgK+Y
 github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU=
 github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs=
-github.com/bodgit/sevenzip v1.4.3 h1:46Rb9vCYdpceC1U+GIR0bS3hP2/Xv8coKFDeLJySV/A=
-github.com/bodgit/sevenzip v1.4.3/go.mod h1:F8n3+0CwbdxqmNy3wFeOAtanza02Ur66AGfs/hbYblI=
+github.com/bodgit/sevenzip v1.4.5 h1:HFJQ+nbjppfyf2xbQEJBbmVo+o2kTg1FXV4i7YOx87s=
+github.com/bodgit/sevenzip v1.4.5/go.mod h1:LAcAg/UQzyjzCQSGBPZFYzoiHMfT6Gk+3tMSjUk3foY=
 github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
 github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
 github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
@@ -124,7 +139,11 @@ github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67
 github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
 github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
 github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -135,10 +154,13 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -148,6 +170,8 @@ github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWa
 github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
@@ -157,7 +181,9 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
 github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
 github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
@@ -170,8 +196,11 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
 github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
 github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
+github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazzvg=
+github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
 github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
 github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
 github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
 github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
@@ -180,6 +209,9 @@ github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod
 github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
 github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
 github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -191,30 +223,37 @@ github.com/go-llsqlite/crawshaw v0.4.0/go.mod h1:/YJdV7uBQaYDE0fwe4z3wwJIZBJxdYz
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
 github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
 github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
 github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
 github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
 github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
 github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
 github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
 github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M=
+github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
+github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
@@ -232,52 +271,64 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
 github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
 github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw=
-github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
+github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw=
 github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
 github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -295,12 +346,13 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
-github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
+github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
 github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
@@ -320,6 +372,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -327,14 +380,19 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
 github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -352,6 +410,7 @@ github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE
 github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
 github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk=
 github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
@@ -365,13 +424,16 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
+github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
 github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
 github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
 github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
 github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
-github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.19 h1:tYLzDnjDXh9qIxSTKHwXwOYmm9d887Y7Y1ZkyXYHAN4=
+github.com/pierrec/lz4/v4 v4.1.19/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pion/datachannel v1.5.2 h1:piB93s8LGmbECrpO84DnkIVWasRMk3IimbcXkTQLE6E=
 github.com/pion/datachannel v1.5.2/go.mod h1:FTGQWaHrdCwIJ1rw6xBIfZVkslikjShim5yr05XFuCQ=
 github.com/pion/dtls/v2 v2.1.3/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus=
@@ -422,23 +484,44 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/polydawn/go-timeless-api v0.0.0-20201121022836-7399661094a6/go.mod h1:z2fMUifgtqrZiNLgzF4ZR8pX+YFLCmAp1jJTSTvyDMM=
+github.com/polydawn/go-timeless-api v0.0.0-20220821201550-b93919e12c56 h1:LQ103HjiN76aqIxnQNgdZ+7NveuKd45+Q+TYGJVVsyw=
+github.com/polydawn/go-timeless-api v0.0.0-20220821201550-b93919e12c56/go.mod h1:OAK6p/pJUakz6jQ+HlSw16gVMnuohxqJFGoypUYyr4w=
+github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
+github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls=
+github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
+github.com/polydawn/rio v0.0.0-20201122020833-6192319df581/go.mod h1:mwZtAu36D3fSNzVLN1we6PFdRU4VeE+RXLTZiOiQlJ0=
+github.com/polydawn/rio v0.0.0-20220823181337-7c31ad9831a4 h1:SNhgcsCNGEqz7Tp46YHEvcjF1s5x+ZGWcVzFoghkuMA=
+github.com/polydawn/rio v0.0.0-20220823181337-7c31ad9831a4/go.mod h1:fZ8OGW5CVjZHyQeNs8QH3X3tUxrPcx1jxHSl2z6Xv00=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
 github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
+github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 h1:UVArwN/wkKjMVhh2EQGC0tEc1+FqiLlvYXY5mQ2f8Wg=
+github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o=
+github.com/ravilushqa/otelgqlgen v0.15.0 h1:U85nrlweMXTGaMChUViYM39/MXBZVeVVlpuHq+6eECQ=
+github.com/ravilushqa/otelgqlgen v0.15.0/go.mod h1:o+1Eju0VySmgq2BP8Vupz2YrN21Bj7D7imBqu3m2uB8=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
@@ -446,27 +529,46 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/royalcat/kv v0.0.0-20240327213417-8cf5696b2389 h1:7XbHzr1TOaxs5Y/i9GtTEOOSTzfQ4ESYqF38DVfPkFY=
+github.com/royalcat/kv v0.0.0-20240327213417-8cf5696b2389/go.mod h1:Ff0Z/r1H3ojacpEe8SashMKJx6YCIhWrYtpdV8Y/k3A=
 github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs=
 github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA=
 github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A=
-github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
+github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0=
+github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
 github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
 github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
+github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM=
+github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
+github.com/samber/slog-multi v1.0.2 h1:6BVH9uHGAsiGkbbtQgAOQJMpKgV8unMrHhhJaw+X1EQ=
+github.com/samber/slog-multi v1.0.2/go.mod h1:uLAvHpGqbYgX4FSL0p1ZwoLuveIAJvBECtE07XmYvFo=
+github.com/samber/slog-zerolog v1.0.0 h1:YpRy0xux1uJr0Ng3wrEjv9nyvb4RAoNqkS611UjzeG8=
+github.com/samber/slog-zerolog v1.0.0/go.mod h1:N2/g/mNGRY1zqsydIYE0uKipSSFsPDjytoVkRnZ0Jp0=
 github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
+github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
 github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
 github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
+github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
+github.com/sosodev/duration v1.2.0 h1:pqK/FLSjsAADWY74SyWDCjOcd5l7H8GSnnOGEB9A1Us=
+github.com/sosodev/duration v1.2.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -492,14 +594,26 @@ github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDW
 github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
 github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
 github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
+github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
 github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
 github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
 github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
 github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
-github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
+github.com/urfave/cli/v2 v2.27.0 h1:uNs1K8JwTFL84X68j5Fjny6hfANh9nTlJ6dRtZAFAHY=
+github.com/urfave/cli/v2 v2.27.0/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
+github.com/vektah/gqlparser/v2 v2.5.11 h1:JJxLtXIoN7+3x6MBdtIP59TP1RANnY7pXOaDnADQSf8=
+github.com/vektah/gqlparser/v2 v2.5.11/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc=
+github.com/warpfork/go-errcat v0.0.0-20180917083543-335044ffc86e h1:FIB2fi7XJGHIdf5rWNsfFQqatIKxutT45G+wNuMQNgs=
+github.com/warpfork/go-errcat v0.0.0-20180917083543-335044ffc86e/go.mod h1:/qe02xr3jvTUz8u/PV0FHGpP8t96OQNP7U9BJMwMLEw=
+github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w=
+github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
 github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 h1:U0DnHRZFzoIV1oFEZczg5XyPut9yxk9jjtax/9Bxr/o=
+github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00/go.mod h1:Tq++Lr/FgiS3X48q5FETemXiSLGuYMQT2sPjYNPJSwA=
+github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e h1:1eHCP4w7tMmpfFBdrd5ff+vYU9THtrtA1yM9f0TLlJw=
+github.com/willscott/memphis v0.0.0-20210922141505-529d4987ab7e/go.mod h1:59vHBW4EpjiL5oiqgCrBp1Tc9JXRzKCNMEOaGmNfSHo=
 github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
 github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -513,12 +627,32 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg=
-go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM=
-go.opentelemetry.io/otel/trace v1.8.0 h1:cSy0DF9eGI5WIfNwZ1q2iUyGj00tGzP24dE1lOlHrfY=
-go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/contrib v1.21.1 h1:/U05KZ31iqMqAowhtW10cDPAViNY0tnpAacUgYBmuj8=
+go.opentelemetry.io/contrib v1.21.1/go.mod h1:usW9bPlrjHiJFbK0a6yK/M5wNHs3nLmtrT3vzhoD3co=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM=
+go.opentelemetry.io/otel/exporters/prometheus v0.46.0 h1:I8WIFXR351FoLJYuloU4EgXbtNX2URfU/85pUPheIEQ=
+go.opentelemetry.io/otel/exporters/prometheus v0.46.0/go.mod h1:ztwVUHe5DTR/1v7PeuGRnU5Bbd4QKYwApWmuutKsJSs=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8=
+go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
+go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
+go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
+go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU=
 go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg=
 golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
@@ -530,13 +664,14 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
-golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
-golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -545,8 +680,8 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE
 golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME=
-golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
+golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4=
+golang.org/x/exp v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -566,6 +701,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -587,6 +724,7 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -601,8 +739,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
 golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
 golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos=
-golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -616,8 +754,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -641,6 +779,7 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -649,6 +788,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -663,8 +804,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -680,12 +821,12 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
-golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -693,6 +834,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -715,8 +857,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
+golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -748,41 +890,58 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx
 google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
 google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
 google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos=
+google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM=
+google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY=
+google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
 google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
 google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
-google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
-gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/graphql/mutation.graphql b/graphql/mutation.graphql
new file mode 100644
index 0000000..0f45036
--- /dev/null
+++ b/graphql/mutation.graphql
@@ -0,0 +1,25 @@
+type Mutation {
+  validateTorrents(filter: TorrentFilter!): Boolean!
+  cleanupTorrents(files: Boolean, dryRun: Boolean!): CleanupResponse!
+  downloadTorrent(infohash: String!, file: String): DownloadTorrentResponse
+  dedupeStorage: Int!
+}
+
+input TorrentFilter @oneOf {
+  everything: Boolean
+  infohash: String
+  # pathGlob: String!
+}
+
+type DownloadTorrentResponse {
+  task: Task
+}
+
+type CleanupResponse {
+  count: Int!
+  list: [String!]!
+}
+
+type Task {
+  id: ID!
+}
diff --git a/graphql/query.graphql b/graphql/query.graphql
new file mode 100644
index 0000000..8161a9f
--- /dev/null
+++ b/graphql/query.graphql
@@ -0,0 +1,49 @@
+type Query {
+  torrents(filter: TorrentsFilter, pagination: Pagination): [Torrent!]!
+  fsListDir(path: String!): ListDirResponse!
+}
+
+input TorrentsFilter {
+  name: StringFilter
+  bytesCompleted: IntFilter
+  bytesMissing: IntFilter
+
+  peersCount: IntFilter
+}
+
+type ListDirResponse {
+  root: DirEntry!
+  entries: [DirEntry!]!
+}
+
+input Pagination {
+  offset: Int!
+  limit: Int!
+}
+
+input StringFilter @oneOf {
+  eq: String
+  substr: String
+  in: [String!]
+}
+
+input IntFilter @oneOf {
+  eq: Int
+  gt: Int
+  lt: Int
+  gte: Int
+  lte: Int
+  in: [Int!]
+}
+
+input DateTimeFilter @oneOf {
+  eq: DateTime
+  gt: DateTime
+  lt: DateTime
+  gte: DateTime
+  lte: DateTime
+}
+
+input BooleanFilter @oneOf {
+  eq: Boolean
+}
diff --git a/graphql/schema.graphql b/graphql/schema.graphql
new file mode 100644
index 0000000..9342a90
--- /dev/null
+++ b/graphql/schema.graphql
@@ -0,0 +1,9 @@
+directive @oneOf on INPUT_OBJECT | FIELD_DEFINITION
+directive @stream on FIELD_DEFINITION
+
+scalar DateTime
+
+type Schema {
+  query: Query
+  mutation: Mutation
+}
diff --git a/graphql/subscription.graphql b/graphql/subscription.graphql
new file mode 100644
index 0000000..9df09ee
--- /dev/null
+++ b/graphql/subscription.graphql
@@ -0,0 +1,16 @@
+type Subscription {
+    taskProgress(taskID: ID!): Progress
+    torrentDownloadUpdates: TorrentProgress
+}
+
+
+type TorrentProgress implements Progress {
+    torrent: Torrent!
+    current: Int!
+    total: Int!
+}
+
+interface Progress {
+    current: Int!
+    total: Int!
+}
\ No newline at end of file
diff --git a/graphql/types/fs.graphql b/graphql/types/fs.graphql
new file mode 100644
index 0000000..73ca119
--- /dev/null
+++ b/graphql/types/fs.graphql
@@ -0,0 +1,26 @@
+interface DirEntry {
+  name: String!
+}
+
+type Dir implements DirEntry {
+  name: String!
+}
+
+type File implements DirEntry {
+  name: String!
+  size: Int!
+}
+
+type ResolverFS implements DirEntry {
+  name: String!
+}
+
+type TorrentFS implements DirEntry {
+  name: String!
+  torrent: Torrent!
+}
+
+type ArchiveFS implements DirEntry {
+  name: String!
+  size: Int!
+}
diff --git a/graphql/types/torrent.graphql b/graphql/types/torrent.graphql
new file mode 100644
index 0000000..10e95f6
--- /dev/null
+++ b/graphql/types/torrent.graphql
@@ -0,0 +1,24 @@
+type Torrent {
+  name: String!
+  infohash: String!
+  bytesCompleted: Int!
+  torrentFilePath: String!
+  bytesMissing: Int!
+  files: [TorrentFile!]!
+  excludedFiles: [TorrentFile!]!
+  peers: [TorrentPeer!]!
+}
+
+type TorrentFile {
+  filename: String!
+  size: Int!
+  bytesCompleted: Int!
+}
+
+type TorrentPeer {
+  ip: String!
+  downloadRate: Float!
+  discovery: String!
+  port: Int!
+  clientName: String!
+}
\ No newline at end of file
diff --git a/pkg/ctxbilly/change.go b/pkg/ctxbilly/change.go
new file mode 100644
index 0000000..e9a5105
--- /dev/null
+++ b/pkg/ctxbilly/change.go
@@ -0,0 +1,27 @@
+package ctxbilly
+
+import (
+	"context"
+	"os"
+	"time"
+)
+
+// Change abstract the FileInfo change related operations in a storage-agnostic
+// interface as an extension to the Basic interface
+type Change interface {
+	// Chmod changes the mode of the named file to mode. If the file is a
+	// symbolic link, it changes the mode of the link's target.
+	Chmod(ctx context.Context, name string, mode os.FileMode) error
+	// Lchown changes the numeric uid and gid of the named file. If the file is
+	// a symbolic link, it changes the uid and gid of the link itself.
+	Lchown(ctx context.Context, name string, uid, gid int) error
+	// Chown changes the numeric uid and gid of the named file. If the file is a
+	// symbolic link, it changes the uid and gid of the link's target.
+	Chown(ctx context.Context, name string, uid, gid int) error
+	// Chtimes changes the access and modification times of the named file,
+	// similar to the Unix utime() or utimes() functions.
+	//
+	// The underlying filesystem may truncate or round the values to a less
+	// precise time unit.
+	Chtimes(ctx context.Context, name string, atime time.Time, mtime time.Time) error
+}
diff --git a/pkg/ctxbilly/fs.go b/pkg/ctxbilly/fs.go
new file mode 100644
index 0000000..e14835b
--- /dev/null
+++ b/pkg/ctxbilly/fs.go
@@ -0,0 +1,92 @@
+package ctxbilly
+
+import (
+	"context"
+	"io"
+	"os"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
+)
+
+type Filesystem interface {
+	// Create creates the named file with mode 0666 (before umask), truncating
+	// it if it already exists. If successful, methods on the returned File can
+	// be used for I/O; the associated file descriptor has mode O_RDWR.
+	Create(ctx context.Context, filename string) (File, error)
+	// Open opens the named file for reading. If successful, methods on the
+	// returned file can be used for reading; the associated file descriptor has
+	// mode O_RDONLY.
+	Open(ctx context.Context, filename string) (File, error)
+	// OpenFile is the generalized open call; most users will use Open or Create
+	// instead. It opens the named file with specified flag (O_RDONLY etc.) and
+	// perm, (0666 etc.) if applicable. If successful, methods on the returned
+	// File can be used for I/O.
+	OpenFile(ctx context.Context, filename string, flag int, perm os.FileMode) (File, error)
+	// Stat returns a FileInfo describing the named file.
+	Stat(ctx context.Context, filename string) (os.FileInfo, error)
+	// Rename renames (moves) oldpath to newpath. If newpath already exists and
+	// is not a directory, Rename replaces it. OS-specific restrictions may
+	// apply when oldpath and newpath are in different directories.
+	Rename(ctx context.Context, oldpath, newpath string) error
+	// Remove removes the named file or directory.
+	Remove(ctx context.Context, filename string) error
+	// Join joins any number of path elements into a single path, adding a
+	// Separator if necessary. Join calls filepath.Clean on the result; in
+	// particular, all empty strings are ignored. On Windows, the result is a
+	// UNC path if and only if the first path element is a UNC path.
+	Join(elem ...string) string
+
+	// TempFile creates a new temporary file in the directory dir with a name
+	// beginning with prefix, opens the file for reading and writing, and
+	// returns the resulting *os.File. If dir is the empty string, TempFile
+	// uses the default directory for temporary files (see os.TempDir).
+	// Multiple programs calling TempFile simultaneously will not choose the
+	// same file. The caller can use f.Name() to find the pathname of the file.
+	// It is the caller's responsibility to remove the file when no longer
+	// needed.
+	TempFile(ctx context.Context, dir, prefix string) (File, error)
+
+	// ReadDir reads the directory named by d(irname and returns a list of
+	// directory entries sorted by filename.
+	ReadDir(ctx context.Context, path string) ([]os.FileInfo, error)
+	// MkdirAll creates a directory named path, along with any necessary
+	// parents, and returns nil, or else returns an error. The permission bits
+	// perm are used for all directories that MkdirAll creates. If path is/
+	// already a directory, MkdirAll does nothing and returns nil.
+	MkdirAll(ctx context.Context, filename string, perm os.FileMode) error
+
+	// Lstat returns a FileInfo describing the named file. If the file is a
+	// symbolic link, the returned FileInfo describes the symbolic link. Lstat
+	// makes no attempt to follow the link.
+	Lstat(ctx context.Context, filename string) (os.FileInfo, error)
+	// Symlink creates a symbolic-link from link to target. target may be an
+	// absolute or relative path, and need not refer to an existing node.
+	// Parent directories of link are created as necessary.
+	Symlink(ctx context.Context, target, link string) error
+	// Readlink returns the target path of link.
+	Readlink(ctx context.Context, link string) (string, error)
+
+	// // Chroot returns a new filesystem from the same type where the new root is
+	// // the given path. Files outside of the designated directory tree cannot be
+	// // accessed.
+	// Chroot(path string) (Filesystem, error)
+	// // Root returns the root path of the filesystem.
+	// Root() string
+}
+
+type File interface {
+	// Name returns the name of the file as presented to Open.
+	Name() string
+	ctxio.Writer
+	ctxio.Reader
+	ctxio.ReaderAt
+	io.Seeker
+	ctxio.Closer
+	// Lock locks the file like e.g. flock. It protects against access from
+	// other processes.
+	Lock() error
+	// Unlock unlocks the file.
+	Unlock() error
+	// Truncate the file.
+	Truncate(ctx context.Context, size int64) error
+}
diff --git a/pkg/ctxbilly/mem.go b/pkg/ctxbilly/mem.go
new file mode 100644
index 0000000..934e18a
--- /dev/null
+++ b/pkg/ctxbilly/mem.go
@@ -0,0 +1,166 @@
+package ctxbilly
+
+import (
+	"context"
+	"io/fs"
+
+	"github.com/go-git/go-billy/v5"
+)
+
+func WrapFileSystem(bf billy.Filesystem) Filesystem {
+	return &wrapFS{
+		Filesystem: bf,
+	}
+}
+
+type wrapFS struct {
+	billy.Filesystem
+}
+
+var _ Filesystem = (*wrapFS)(nil)
+
+// Create implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Create of MemFS.Filesystem.
+func (m *wrapFS) Create(ctx context.Context, filename string) (File, error) {
+	bf, err := m.Filesystem.Create(filename)
+	if err != nil {
+		return nil, err
+	}
+	return &wrapFile{bf}, nil
+}
+
+// Lstat implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Lstat of MemFS.Filesystem.
+func (m *wrapFS) Lstat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	return m.Filesystem.Lstat(filename)
+}
+
+// MkdirAll implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).MkdirAll of MemFS.Filesystem.
+func (m *wrapFS) MkdirAll(ctx context.Context, filename string, perm fs.FileMode) error {
+	return m.Filesystem.MkdirAll(filename, perm)
+}
+
+// Open implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Open of MemFS.Filesystem.
+func (m *wrapFS) Open(ctx context.Context, filename string) (File, error) {
+	bf, err := m.Filesystem.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	return WrapFile(bf), nil
+}
+
+// OpenFile implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).OpenFile of MemFS.Filesystem.
+func (m *wrapFS) OpenFile(ctx context.Context, filename string, flag int, perm fs.FileMode) (File, error) {
+	bf, err := m.Filesystem.OpenFile(filename, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	return WrapFile(bf), nil
+}
+
+// ReadDir implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).ReadDir of MemFS.Filesystem.
+func (m *wrapFS) ReadDir(ctx context.Context, path string) ([]fs.FileInfo, error) {
+	return m.Filesystem.ReadDir(path)
+}
+
+// Readlink implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Readlink of MemFS.Filesystem.
+func (m *wrapFS) Readlink(ctx context.Context, link string) (string, error) {
+	return m.Filesystem.Readlink(link)
+}
+
+// Remove implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Remove of MemFS.Filesystem.
+func (m *wrapFS) Remove(ctx context.Context, filename string) error {
+	return m.Filesystem.Remove(filename)
+}
+
+// Rename implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Rename of MemFS.Filesystem.
+func (m *wrapFS) Rename(ctx context.Context, oldpath string, newpath string) error {
+	return m.Filesystem.Rename(oldpath, newpath)
+}
+
+// Stat implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Stat of MemFS.Filesystem.
+func (m *wrapFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	return m.Filesystem.Stat(filename)
+}
+
+// Symlink implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Symlink of MemFS.Filesystem.
+func (m *wrapFS) Symlink(ctx context.Context, target string, link string) error {
+	return m.Filesystem.Symlink(target, link)
+}
+
+// TempFile implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).TempFile of MemFS.Filesystem.
+func (m *wrapFS) TempFile(ctx context.Context, dir string, prefix string) (File, error) {
+	file, err := m.Filesystem.TempFile(dir, prefix)
+	if err != nil {
+		return nil, err
+	}
+	return WrapFile(file), nil
+}
+
+func WrapFile(bf billy.File) File {
+	return &wrapFile{File: bf}
+}
+
+type wrapFile struct {
+	billy.File
+}
+
+var _ File = (*wrapFile)(nil)
+
+// Close implements File.
+// Subtle: this method shadows the method (File).Close of MemFile.File.
+func (m *wrapFile) Close(ctx context.Context) error {
+	return m.File.Close()
+}
+
+// Lock implements File.
+// Subtle: this method shadows the method (File).Lock of MemFile.File.
+func (m *wrapFile) Lock() error {
+	return m.File.Lock()
+}
+
+// Name implements File.
+// Subtle: this method shadows the method (File).Name of MemFile.File.
+func (m *wrapFile) Name() string {
+	return m.File.Name()
+}
+
+// Truncate implements File.
+// Subtle: this method shadows the method (File).Truncate of memFile.File.
+func (m *wrapFile) Truncate(ctx context.Context, size int64) error {
+	return m.File.Truncate(size)
+}
+
+// Read implements File.
+// Subtle: this method shadows the method (File).Read of MemFile.File.
+func (m *wrapFile) Read(ctx context.Context, p []byte) (n int, err error) {
+	return m.File.Read(p)
+}
+
+// ReadAt implements File.
+// Subtle: this method shadows the method (File).ReadAt of MemFile.File.
+func (m *wrapFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	return m.File.ReadAt(p, off)
+}
+
+// Unlock implements File.
+// Subtle: this method shadows the method (File).Unlock of MemFile.File.
+func (m *wrapFile) Unlock() error {
+	return m.File.Unlock()
+}
+
+// Write implements File.
+// Subtle: this method shadows the method (File).Write of MemFile.File.
+func (m *wrapFile) Write(ctx context.Context, p []byte) (n int, err error) {
+	return m.File.Write(p)
+}
diff --git a/pkg/ctxio/cachereader.go b/pkg/ctxio/cachereader.go
new file mode 100644
index 0000000..14e70f9
--- /dev/null
+++ b/pkg/ctxio/cachereader.go
@@ -0,0 +1,63 @@
+package ctxio
+
+import (
+	"context"
+	"errors"
+	"io"
+	"sync"
+)
+
+type CacheReader struct {
+	m sync.Mutex
+
+	fo int64
+	fr *FileBuffer
+	to int64
+	tr Reader
+}
+
+var _ FileReader = (*CacheReader)(nil)
+
+func NewCacheReader(r Reader) (FileReader, error) {
+	fr := NewFileBuffer(nil)
+	tr := TeeReader(r, fr)
+	return &CacheReader{fr: fr, tr: tr}, nil
+}
+
+func (dtr *CacheReader) ReadAt(ctx context.Context, p []byte, off int64) (int, error) {
+	dtr.m.Lock()
+	defer dtr.m.Unlock()
+	tb := off + int64(len(p))
+
+	if tb > dtr.fo {
+		w, err := CopyN(ctx, Discard, dtr.tr, tb-dtr.fo)
+		dtr.to += w
+		if err != nil && err != io.EOF {
+			return 0, err
+		}
+	}
+
+	n, err := dtr.fr.ReadAt(ctx, p, off)
+	dtr.fo += int64(n)
+	return n, err
+}
+
+func (dtr *CacheReader) Read(ctx context.Context, p []byte) (n int, err error) {
+	dtr.m.Lock()
+	defer dtr.m.Unlock()
+	// use directly tee reader here
+	n, err = dtr.tr.Read(ctx, p)
+	dtr.to += int64(n)
+	return
+}
+
+func (dtr *CacheReader) Close(ctx context.Context) error {
+	frcloser := dtr.fr.Close(ctx)
+
+	var closeerr error
+	if rc, ok := dtr.tr.(ReadCloser); ok {
+		closeerr = rc.Close(ctx)
+	}
+
+	return errors.Join(frcloser, closeerr)
+}
diff --git a/pkg/ctxio/copy.go b/pkg/ctxio/copy.go
new file mode 100644
index 0000000..4c98a8e
--- /dev/null
+++ b/pkg/ctxio/copy.go
@@ -0,0 +1,89 @@
+package ctxio
+
+// // CopyN copies n bytes (or until an error) from src to dst.
+// // It returns the number of bytes copied and the earliest
+// // error encountered while copying.
+// // On return, written == n if and only if err == nil.
+// //
+// // If dst implements [ReaderFrom], the copy is implemented using it.
+// func CopyN(ctx context.Context, dst Writer, src Reader, n int64) (written int64, err error) {
+// 	written, err = Copy(ctx, dst, LimitReader(src, n))
+// 	if written == n {
+// 		return n, nil
+// 	}
+// 	if written < n && err == nil {
+// 		// src stopped early; must have been EOF.
+// 		err = io.EOF
+// 	}
+
+// 	return
+// }
+
+// // Copy copies from src to dst until either EOF is reached
+// // on src or an error occurs. It returns the number of bytes
+// // copied and the first error encountered while copying, if any.
+// //
+// // A successful Copy returns err == nil, not err == EOF.
+// // Because Copy is defined to read from src until EOF, it does
+// // not treat an EOF from Read as an error to be reported.
+// //
+// // If src implements [WriterTo],
+// // the copy is implemented by calling src.WriteTo(dst).
+// // Otherwise, if dst implements [ReaderFrom],
+// // the copy is implemented by calling dst.ReadFrom(src).
+// func Copy(ctx context.Context, dst Writer, src Reader) (written int64, err error) {
+// 	return copyBuffer(ctx, dst, src, nil)
+// }
+
+// // copyBuffer is the actual implementation of Copy and CopyBuffer.
+// // if buf is nil, one is allocated.
+// func copyBuffer(ctx context.Context, dst Writer, src Reader, buf []byte) (written int64, err error) {
+// 	// If the reader has a WriteTo method, use it to do the copy.
+// 	// Avoids an allocation and a copy.
+// 	if wt, ok := src.(WriterTo); ok {
+// 		return wt.WriteTo(dst)
+// 	}
+// 	// Similarly, if the writer has a ReadFrom method, use it to do the copy.
+// 	if rt, ok := dst.(ReaderFrom); ok {
+// 		return rt.ReadFrom(src)
+// 	}
+// 	if buf == nil {
+// 		size := 32 * 1024
+// 		if l, ok := src.(*LimitedReader); ok && int64(size) > l.N {
+// 			if l.N < 1 {
+// 				size = 1
+// 			} else {
+// 				size = int(l.N)
+// 			}
+// 		}
+// 		buf = make([]byte, size)
+// 	}
+// 	for {
+// 		nr, er := src.Read(ctx, buf)
+// 		if nr > 0 {
+// 			nw, ew := dst.Write(ctx, buf[0:nr])
+// 			if nw < 0 || nr < nw {
+// 				nw = 0
+// 				if ew == nil {
+// 					ew = errInvalidWrite
+// 				}
+// 			}
+// 			written += int64(nw)
+// 			if ew != nil {
+// 				err = ew
+// 				break
+// 			}
+// 			if nr != nw {
+// 				err = io.ErrShortWrite
+// 				break
+// 			}
+// 		}
+// 		if er != nil {
+// 			if er != io.EOF {
+// 				err = er
+// 			}
+// 			break
+// 		}
+// 	}
+// 	return written, err
+// }
diff --git a/pkg/ctxio/filebuffer.go b/pkg/ctxio/filebuffer.go
new file mode 100644
index 0000000..23cb88e
--- /dev/null
+++ b/pkg/ctxio/filebuffer.go
@@ -0,0 +1,180 @@
+package ctxio
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io"
+	"os"
+)
+
+// FileBuffer implements interfaces implemented by files.
+// The main purpose of this type is to have an in memory replacement for a
+// file.
+type FileBuffer struct {
+	// buff is the backing buffer
+	buff *bytes.Buffer
+	// index indicates where in the buffer we are at
+	index    int64
+	// isClosed reports whether Close was called; most methods return
+	// os.ErrClosed once it is set.
+	isClosed bool
+}
+
+var _ FileReader = (*FileBuffer)(nil)
+var _ Writer = (*FileBuffer)(nil)
+
+// NewFileBuffer returns a new populated Buffer
+func NewFileBuffer(b []byte) *FileBuffer {
+	return &FileBuffer{buff: bytes.NewBuffer(b)}
+}
+
+// NewFileBufferFromReader is a convenience method that returns a new populated Buffer
+// whose contents are sourced from a supplied reader by loading it entirely
+// into memory.
+func NewFileBufferFromReader(ctx context.Context, reader Reader) (*FileBuffer, error) {
+	data, err := ReadAll(ctx, reader)
+	if err != nil {
+		return nil, err
+	}
+	return NewFileBuffer(data), nil
+}
+
+// NewFileBufferFromIoReader is a convenience method that returns a new populated Buffer
+// whose contents are sourced from a supplied io.Reader by loading it entirely
+// into memory.
+func NewFileBufferFromIoReader(reader io.Reader) (*FileBuffer, error) {
+	data, err := io.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+	return NewFileBuffer(data), nil
+}
+
+// Bytes returns the bytes available until the end of the buffer.
+// It returns an empty slice once the buffer is closed or the read
+// index has reached the end; otherwise the returned slice aliases the
+// underlying storage (callers must not assume an independent copy).
+func (f *FileBuffer) Bytes() []byte {
+	if f.isClosed || f.index >= int64(f.buff.Len()) {
+		return []byte{}
+	}
+	return f.buff.Bytes()[f.index:]
+}
+
+// String implements the Stringer interface, returning the unread
+// portion of the buffer (from index to the end) as a string.
+// It applies the same guards as Bytes: after Close (buff is nil then)
+// or when index is at/past the end it returns "" instead of panicking
+// on a nil dereference or out-of-range slice.
+func (f *FileBuffer) String() string {
+	if f.isClosed || f.index >= int64(f.buff.Len()) {
+		return ""
+	}
+	return string(f.buff.Bytes()[f.index:])
+}
+
+// Read implements the package Reader contract (mirroring io.Reader,
+// https://golang.org/pkg/io/#Reader): it reads up to len(b) bytes from
+// the current index into b and advances the index by the number of
+// bytes read.
+//
+// Returns os.ErrClosed after Close, (0, nil) for an empty b, and
+// io.EOF once the index has reached the end of the buffer. ctx is
+// accepted for interface symmetry but not consulted.
+func (f *FileBuffer) Read(ctx context.Context, b []byte) (n int, err error) {
+	if f.isClosed {
+		return 0, os.ErrClosed
+	}
+	if len(b) == 0 {
+		return 0, nil
+	}
+	if f.index >= int64(f.buff.Len()) {
+		return 0, io.EOF
+	}
+	// Copy straight out of the backing slice instead of allocating a
+	// throwaway bytes.Buffer per call; with data available and len(b)
+	// non-zero the previous Buffer.Read also returned (n, nil), so the
+	// behavior is identical.
+	n = copy(b, f.buff.Bytes()[f.index:])
+	f.index += int64(n)
+
+	return n, nil
+}
+
+// ReadAt implements io.ReaderAt https://golang.org/pkg/io/#ReaderAt
+// ReadAt reads len(p) bytes into p starting at offset off in the underlying input source.
+// It returns the number of bytes read (0 <= n <= len(p)) and any error encountered.
+//
+// When ReadAt returns n < len(p), it returns a non-nil error explaining why more bytes were not returned.
+// In this respect, ReadAt is stricter than Read.
+//
+// Even if ReadAt returns n < len(p), it may use all of p as scratch space during the call.
+// If some data is available but not len(p) bytes, ReadAt blocks until either all the data is available or an error occurs.
+// In this respect ReadAt is different from Read.
+//
+// If the n = len(p) bytes returned by ReadAt are at the end of the input source,
+// ReadAt may return either err == EOF or err == nil.
+//
+// If ReadAt is reading from an input source with a seek offset,
+// ReadAt should not affect nor be affected by the underlying seek offset.
+// Clients of ReadAt can execute parallel ReadAt calls on the same input source.
+//
+// ReadAt does not move the buffer's index. ctx is accepted for
+// interface symmetry but not consulted.
+func (f *FileBuffer) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	if f.isClosed {
+		return 0, os.ErrClosed
+	}
+	if off < 0 {
+		return 0, errors.New("filebuffer.ReadAt: negative offset")
+	}
+	reqLen := len(p)
+	buffLen := int64(f.buff.Len())
+	if off >= buffLen {
+		return 0, io.EOF
+	}
+
+	// A short copy means the request ran past the end of the buffer.
+	n = copy(p, f.buff.Bytes()[off:])
+	if n < reqLen {
+		err = io.EOF
+	}
+	return n, err
+}
+
+// Write implements io.Writer https://golang.org/pkg/io/#Writer
+// by appending the passed bytes to the buffer unless the buffer is closed or index negative.
+// If the index was rewound (via Seek) below the end of the buffer, the
+// buffer is first truncated to its first index bytes so the write
+// replaces the tail rather than appending after stale data.
+func (f *FileBuffer) Write(ctx context.Context, p []byte) (n int, err error) {
+	if f.isClosed {
+		return 0, os.ErrClosed
+	}
+	if f.index < 0 {
+		return 0, io.EOF
+	}
+	// we might have rewinded, let's reset the buffer before appending to it
+	idx := int(f.index)
+	buffLen := f.buff.Len()
+	if idx != buffLen && idx <= buffLen {
+		// Fix: keep the prefix buff[:index]. The previous code sliced
+		// f.Bytes()[:f.index], but Bytes() already starts at index, so
+		// it kept buff[index:2*index] instead — wrong data, and a panic
+		// whenever index exceeded the remaining length.
+		f.buff = bytes.NewBuffer(f.buff.Bytes()[:idx])
+	}
+	n, err = f.buff.Write(p)
+
+	f.index += int64(n)
+	return n, err
+}
+
+// Seek implements io.Seeker https://golang.org/pkg/io/#Seeker
+// whence 0 is relative to the start, 1 to the current index, and 2 to
+// the end of the buffer. Seeking past the end is not rejected; only a
+// negative resulting position is an error.
+func (f *FileBuffer) Seek(offset int64, whence int) (idx int64, err error) {
+	if f.isClosed {
+		return 0, os.ErrClosed
+	}
+
+	var abs int64
+	switch whence {
+	case 0:
+		abs = offset
+	case 1:
+		abs = int64(f.index) + offset
+	case 2:
+		abs = int64(f.buff.Len()) + offset
+	default:
+		return 0, errors.New("filebuffer.Seek: invalid whence")
+	}
+	if abs < 0 {
+		return 0, errors.New("filebuffer.Seek: negative position")
+	}
+	f.index = abs
+	return abs, nil
+}
+
+// Close implements io.Closer https://golang.org/pkg/io/#Closer
+// It closes the buffer, rendering it unusable for I/O. It returns an error, if any.
+// The backing storage is released; subsequent Read/Write/ReadAt calls
+// return os.ErrClosed. Close is idempotent and always returns nil.
+func (f *FileBuffer) Close(ctx context.Context) error {
+	f.isClosed = true
+	f.buff = nil
+	return nil
+}
diff --git a/pkg/ctxio/io.go b/pkg/ctxio/io.go
new file mode 100644
index 0000000..fa0c561
--- /dev/null
+++ b/pkg/ctxio/io.go
@@ -0,0 +1,663 @@
+package ctxio
+
+import (
+	"context"
+	"errors"
+	"io"
+	"sync"
+)
+
+// Seek whence values.
+const (
+	SeekStart   = 0 // seek relative to the origin of the file
+	SeekCurrent = 1 // seek relative to the current offset
+	SeekEnd     = 2 // seek relative to the end
+)
+
+// ErrShortWrite means that a write accepted fewer bytes than requested
+// but failed to return an explicit error.
+// It aliases io.ErrShortWrite so == and errors.Is comparisons
+// interoperate with the standard library.
+var ErrShortWrite = io.ErrShortWrite
+
+// errInvalidWrite means that a write returned an impossible count.
+var errInvalidWrite = errors.New("invalid write result")
+
+// ErrShortBuffer means that a read required a longer buffer than was provided.
+// It aliases io.ErrShortBuffer.
+var ErrShortBuffer = io.ErrShortBuffer
+
+// EOF is the error returned by Read when no more input is available.
+// (Read must return EOF itself, not an error wrapping EOF,
+// because callers will test for EOF using ==.)
+// Functions should return EOF only to signal a graceful end of input.
+// If the EOF occurs unexpectedly in a structured data stream,
+// the appropriate error is either [ErrUnexpectedEOF] or some other error
+// giving more detail.
+// It aliases io.EOF, so == comparisons against io.EOF also succeed.
+var EOF = io.EOF
+
+// ErrUnexpectedEOF means that EOF was encountered in the
+// middle of reading a fixed-size block or data structure.
+var ErrUnexpectedEOF = io.ErrUnexpectedEOF
+
+// ErrNoProgress is returned by some clients of a [Reader] when
+// many calls to Read have failed to return any data or error,
+// usually the sign of a broken [Reader] implementation.
+var ErrNoProgress = io.ErrNoProgress
+
+// Reader is the interface that wraps the basic Read method.
+//
+// Read reads up to len(p) bytes into p. It returns the number of bytes
+// read (0 <= n <= len(p)) and any error encountered. Even if Read
+// returns n < len(p), it may use all of p as scratch space during the call.
+// If some data is available but not len(p) bytes, Read conventionally
+// returns what is available instead of waiting for more.
+//
+// When Read encounters an error or end-of-file condition after
+// successfully reading n > 0 bytes, it returns the number of
+// bytes read. It may return the (non-nil) error from the same call
+// or return the error (and n == 0) from a subsequent call.
+// An instance of this general case is that a Reader returning
+// a non-zero number of bytes at the end of the input stream may
+// return either err == EOF or err == nil. The next Read should
+// return 0, EOF.
+//
+// Callers should always process the n > 0 bytes returned before
+// considering the error err. Doing so correctly handles I/O errors
+// that happen after reading some bytes and also both of the
+// allowed EOF behaviors.
+//
+// If len(p) == 0, Read should always return n == 0. It may return a
+// non-nil error if some error condition is known, such as EOF.
+//
+// Implementations of Read are discouraged from returning a
+// zero byte count with a nil error, except when len(p) == 0.
+// Callers should treat a return of 0 and nil as indicating that
+// nothing happened; in particular it does not indicate EOF.
+//
+// Unlike io.Reader, Read also takes a context carrying cancellation;
+// whether and when an implementation checks it is implementation-defined.
+//
+// Implementations must not retain p.
+type Reader interface {
+	Read(ctx context.Context, p []byte) (n int, err error)
+}
+
+// Writer is the interface that wraps the basic Write method.
+//
+// Write writes len(p) bytes from p to the underlying data stream.
+// It returns the number of bytes written from p (0 <= n <= len(p))
+// and any error encountered that caused the write to stop early.
+// Write must return a non-nil error if it returns n < len(p).
+// Write must not modify the slice data, even temporarily.
+//
+// Unlike io.Writer, Write also takes a context carrying cancellation.
+//
+// Implementations must not retain p.
+type Writer interface {
+	Write(ctx context.Context, p []byte) (n int, err error)
+}
+
+// Closer is the interface that wraps the basic Close method.
+//
+// The behavior of Close after the first call is undefined.
+// Specific implementations may document their own behavior.
+type Closer interface {
+	Close(ctx context.Context) error
+}
+
+// Seeker is the interface that wraps the basic Seek method.
+//
+// Seek sets the offset for the next Read or Write to offset,
+// interpreted according to whence:
+// [SeekStart] means relative to the start of the file,
+// [SeekCurrent] means relative to the current offset, and
+// [SeekEnd] means relative to the end
+// (for example, offset = -2 specifies the penultimate byte of the file).
+// Seek returns the new offset relative to the start of the
+// file or an error, if any.
+//
+// Seeking to an offset before the start of the file is an error.
+// Seeking to any positive offset may be allowed, but if the new offset exceeds
+// the size of the underlying object the behavior of subsequent I/O operations
+// is implementation-dependent.
+type Seeker interface {
+	Seek(offset int64, whence int) (int64, error)
+}
+
+// ReadWriter is the interface that groups the basic Read and Write methods.
+// (The groupings below are context-aware analogues of the io package's.)
+type ReadWriter interface {
+	Reader
+	Writer
+}
+
+// ReadCloser is the interface that groups the basic Read and Close methods.
+type ReadCloser interface {
+	Reader
+	Closer
+}
+
+// WriteCloser is the interface that groups the basic Write and Close methods.
+type WriteCloser interface {
+	Writer
+	Closer
+}
+
+// ReadWriteCloser is the interface that groups the basic Read, Write and Close methods.
+type ReadWriteCloser interface {
+	Reader
+	Writer
+	Closer
+}
+
+// ReadSeeker is the interface that groups the basic Read and Seek methods.
+type ReadSeeker interface {
+	Reader
+	Seeker
+}
+
+// ReadSeekCloser is the interface that groups the basic Read, Seek and Close
+// methods.
+type ReadSeekCloser interface {
+	Reader
+	Seeker
+	Closer
+}
+
+// WriteSeeker is the interface that groups the basic Write and Seek methods.
+type WriteSeeker interface {
+	Writer
+	Seeker
+}
+
+// ReadWriteSeeker is the interface that groups the basic Read, Write and Seek methods.
+type ReadWriteSeeker interface {
+	Reader
+	Writer
+	Seeker
+}
+
+// ReaderFrom is the interface that wraps the ReadFrom method.
+//
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except EOF encountered during the read is also returned.
+//
+// The [Copy] function uses [ReaderFrom] if available.
+type ReaderFrom interface {
+	ReadFrom(ctx context.Context, r Reader) (n int64, err error)
+}
+
+// WriterTo is the interface that wraps the WriteTo method.
+//
+// WriteTo writes data to w until there's no more data to write or
+// when an error occurs. The return value n is the number of bytes
+// written. Any error encountered during the write is also returned.
+//
+// The Copy function uses WriterTo if available.
+type WriterTo interface {
+	WriteTo(ctx context.Context, w Writer) (n int64, err error)
+}
+
+// ReaderAt is the interface that wraps the basic ReadAt method.
+//
+// ReadAt reads len(p) bytes into p starting at offset off in the
+// underlying input source. It returns the number of bytes
+// read (0 <= n <= len(p)) and any error encountered.
+//
+// When ReadAt returns n < len(p), it returns a non-nil error
+// explaining why more bytes were not returned. In this respect,
+// ReadAt is stricter than Read.
+//
+// Even if ReadAt returns n < len(p), it may use all of p as scratch
+// space during the call. If some data is available but not len(p) bytes,
+// ReadAt blocks until either all the data is available or an error occurs.
+// In this respect ReadAt is different from Read.
+//
+// If the n = len(p) bytes returned by ReadAt are at the end of the
+// input source, ReadAt may return either err == EOF or err == nil.
+//
+// If ReadAt is reading from an input source with a seek offset,
+// ReadAt should not affect nor be affected by the underlying
+// seek offset.
+//
+// Clients of ReadAt can execute parallel ReadAt calls on the
+// same input source.
+//
+// Implementations must not retain p.
+type ReaderAt interface {
+	ReadAt(ctx context.Context, p []byte, off int64) (n int, err error)
+}
+
+// WriterAt is the interface that wraps the basic WriteAt method.
+//
+// WriteAt writes len(p) bytes from p to the underlying data stream
+// at offset off. It returns the number of bytes written from p (0 <= n <= len(p))
+// and any error encountered that caused the write to stop early.
+// WriteAt must return a non-nil error if it returns n < len(p).
+//
+// If WriteAt is writing to a destination with a seek offset,
+// WriteAt should not affect nor be affected by the underlying
+// seek offset.
+//
+// Clients of WriteAt can execute parallel WriteAt calls on the same
+// destination if the ranges do not overlap.
+//
+// Implementations must not retain p.
+type WriterAt interface {
+	WriteAt(ctx context.Context, p []byte, off int64) (n int, err error)
+}
+
+// StringWriter is the interface that wraps the WriteString method.
+//
+// Note: unlike the other interfaces in this package, WriteString takes
+// no context, so a context-aware type whose WriteString accepts a ctx
+// does not satisfy it (and misses the WriteString helper's fast path).
+type StringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+// WriteString writes the contents of the string s to w, which accepts
+// a slice of bytes. If w provides a WriteString method ([StringWriter]),
+// that method is invoked directly; otherwise [Writer.Write] is called
+// exactly once with []byte(s).
+func WriteString(ctx context.Context, w Writer, s string) (n int, err error) {
+	sw, ok := w.(StringWriter)
+	if !ok {
+		return w.Write(ctx, []byte(s))
+	}
+	return sw.WriteString(s)
+}
+
+// ReadAtLeast reads from r into buf until it has read at least min
+// bytes. It returns the number of bytes copied and an error if fewer
+// bytes were read. The error is EOF only if no bytes were read; an EOF
+// after reading fewer than min bytes becomes [ErrUnexpectedEOF]. If
+// min exceeds len(buf), [ErrShortBuffer] is returned. On return,
+// n >= min if and only if err == nil; an error from r after at least
+// min bytes is dropped.
+func ReadAtLeast(ctx context.Context, r Reader, buf []byte, min int) (n int, err error) {
+	if len(buf) < min {
+		return 0, ErrShortBuffer
+	}
+	for n < min && err == nil {
+		var k int
+		k, err = r.Read(ctx, buf[n:])
+		n += k
+	}
+	switch {
+	case n >= min:
+		// Enough bytes arrived; any trailing error is dropped.
+		err = nil
+	case n > 0 && err == EOF:
+		err = ErrUnexpectedEOF
+	}
+	return n, err
+}
+
+// ReadFull reads exactly len(buf) bytes from r into buf.
+// It returns the number of bytes copied and an error if fewer bytes were read.
+// The error is EOF only if no bytes were read.
+// If an EOF happens after reading some but not all the bytes,
+// ReadFull returns [ErrUnexpectedEOF].
+// On return, n == len(buf) if and only if err == nil.
+// If r returns an error having read at least len(buf) bytes, the error is dropped.
+func ReadFull(ctx context.Context, r Reader, buf []byte) (n int, err error) {
+	return ReadAtLeast(ctx, r, buf, len(buf))
+}
+
+// CopyN copies n bytes (or until an error) from src to dst.
+// It returns the number of bytes copied and the earliest
+// error encountered while copying.
+// On return, written == n if and only if err == nil.
+//
+// If dst implements [ReaderFrom], the copy is implemented using it.
+func CopyN(ctx context.Context, dst Writer, src Reader, n int64) (written int64, err error) {
+	written, err = Copy(ctx, dst, LimitReader(src, n))
+	if written == n {
+		// The limit was reached; any error belongs to bytes past it.
+		return n, nil
+	}
+	if written < n && err == nil {
+		// src stopped early; must have been EOF.
+		err = EOF
+	}
+	return
+}
+
+// Copy copies from src to dst until either EOF is reached
+// on src or an error occurs. It returns the number of bytes
+// copied and the first error encountered while copying, if any.
+//
+// A successful Copy returns err == nil, not err == EOF.
+// Because Copy is defined to read from src until EOF, it does
+// not treat an EOF from Read as an error to be reported.
+//
+// If src implements [WriterTo],
+// the copy is implemented by calling src.WriteTo(dst).
+// Otherwise, if dst implements [ReaderFrom],
+// the copy is implemented by calling dst.ReadFrom(src).
+func Copy(ctx context.Context, dst Writer, src Reader) (written int64, err error) {
+	return copyBuffer(ctx, dst, src, nil)
+}
+
+// CopyBuffer is identical to Copy except that it stages through the
+// provided buffer (if one is required) rather than allocating a
+// temporary one. If buf is nil, one is allocated; otherwise if it has
+// zero length, CopyBuffer panics.
+//
+// If either src implements [WriterTo] or dst implements [ReaderFrom],
+// buf will not be used to perform the copy.
+func CopyBuffer(ctx context.Context, dst Writer, src Reader, buf []byte) (written int64, err error) {
+	if buf != nil && len(buf) == 0 {
+		// nil means "allocate for me"; a non-nil empty buffer is a
+		// caller bug.
+		panic("empty buffer in CopyBuffer")
+	}
+	return copyBuffer(ctx, dst, src, buf)
+}
+
+// copyBuffer is the actual implementation of Copy and CopyBuffer.
+// if buf is nil, one is allocated.
+func copyBuffer(ctx context.Context, dst Writer, src Reader, buf []byte) (written int64, err error) {
+	// If the reader has a WriteTo method, use it to do the copy.
+	// Avoids an allocation and a copy.
+	if wt, ok := src.(WriterTo); ok {
+		return wt.WriteTo(ctx, dst)
+	}
+	// Similarly, if the writer has a ReadFrom method, use it to do the copy.
+	if rt, ok := dst.(ReaderFrom); ok {
+		return rt.ReadFrom(ctx, src)
+	}
+	if buf == nil {
+		size := 32 * 1024
+		// Don't allocate more than a LimitedReader can ever yield, but
+		// keep at least 1 byte so Read can report EOF.
+		if l, ok := src.(*LimitedReader); ok && int64(size) > l.N {
+			if l.N < 1 {
+				size = 1
+			} else {
+				size = int(l.N)
+			}
+		}
+		buf = make([]byte, size)
+	}
+	for {
+		nr, er := src.Read(ctx, buf)
+		if nr > 0 {
+			nw, ew := dst.Write(ctx, buf[0:nr])
+			// Guard against writers reporting a negative count or more
+			// bytes than they were given.
+			if nw < 0 || nr < nw {
+				nw = 0
+				if ew == nil {
+					ew = errInvalidWrite
+				}
+			}
+			written += int64(nw)
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = ErrShortWrite
+				break
+			}
+		}
+		if er != nil {
+			// EOF ends the copy successfully and is not reported.
+			if er != EOF {
+				err = er
+			}
+			break
+		}
+	}
+	return written, err
+}
+
+// LimitReader returns a Reader that reads from r
+// but stops with EOF after n bytes.
+// The underlying implementation is a *LimitedReader.
+func LimitReader(r Reader, n int64) Reader { return &LimitedReader{r, n} }
+
+// A LimitedReader reads from R but limits the amount of
+// data returned to just N bytes. Each call to Read
+// updates N to reflect the new amount remaining.
+// Read returns EOF when N <= 0 or when the underlying R returns EOF.
+type LimitedReader struct {
+	R Reader // underlying reader
+	N int64  // max bytes remaining
+}
+
+// Read implements Reader: it shrinks p so no more than N bytes can be
+// consumed from R, then decrements N by what was actually read.
+func (l *LimitedReader) Read(ctx context.Context, p []byte) (n int, err error) {
+	if l.N <= 0 {
+		return 0, EOF
+	}
+	if int64(len(p)) > l.N {
+		p = p[0:l.N]
+	}
+	n, err = l.R.Read(ctx, p)
+	l.N -= int64(n)
+	return
+}
+
+// NewSectionReader returns a [SectionReader] that reads from r
+// starting at offset off and stops with EOF after n bytes.
+func NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader {
+	var remaining int64
+	const maxint64 = 1<<63 - 1
+	if off <= maxint64-n {
+		remaining = n + off
+	} else {
+		// Overflow, with no way to return error.
+		// Assume we can read up to an offset of 1<<63 - 1.
+		remaining = maxint64
+	}
+	return &SectionReader{r, off, off, remaining, n}
+}
+
+// SectionReader implements Read, Seek, and ReadAt on a section
+// of an underlying [ReaderAt].
+type SectionReader struct {
+	r     ReaderAt // constant after creation
+	base  int64    // constant after creation
+	off   int64
+	limit int64 // constant after creation
+	n     int64 // constant after creation
+}
+
+// Read implements Reader: sequential reads are served by delegating to
+// the underlying ReadAt at the current section offset.
+func (s *SectionReader) Read(ctx context.Context, p []byte) (n int, err error) {
+	if s.off >= s.limit {
+		return 0, EOF
+	}
+	if max := s.limit - s.off; int64(len(p)) > max {
+		p = p[0:max]
+	}
+	n, err = s.r.ReadAt(ctx, p, s.off)
+	s.off += int64(n)
+	return
+}
+
+var errWhence = errors.New("Seek: invalid whence")
+var errOffset = errors.New("Seek: invalid offset")
+
+// Seek implements Seeker. The returned position is relative to the
+// start of the section; seeking before the section start is an error.
+func (s *SectionReader) Seek(offset int64, whence int) (int64, error) {
+	switch whence {
+	default:
+		return 0, errWhence
+	case SeekStart:
+		offset += s.base
+	case SeekCurrent:
+		offset += s.off
+	case SeekEnd:
+		offset += s.limit
+	}
+	if offset < s.base {
+		return 0, errOffset
+	}
+	s.off = offset
+	return offset - s.base, nil
+}
+
+// ReadAt implements ReaderAt within the section: off is relative to
+// the section start, and a read that would cross the section limit is
+// clamped (a clamped-but-successful read reports EOF).
+func (s *SectionReader) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	if off < 0 || off >= s.Size() {
+		return 0, EOF
+	}
+	off += s.base
+	if max := s.limit - off; int64(len(p)) > max {
+		p = p[0:max]
+		n, err = s.r.ReadAt(ctx, p, off)
+		if err == nil {
+			err = EOF
+		}
+		return n, err
+	}
+	return s.r.ReadAt(ctx, p, off)
+}
+
+// Size returns the size of the section in bytes.
+func (s *SectionReader) Size() int64 { return s.limit - s.base }
+
+// Outer returns the underlying [ReaderAt] and offsets for the section.
+//
+// The returned values are the same that were passed to [NewSectionReader]
+// when the [SectionReader] was created.
+func (s *SectionReader) Outer() (r ReaderAt, off int64, n int64) {
+	return s.r, s.base, s.n
+}
+
+// An OffsetWriter maps writes at offset base to offset base+off in the underlying writer.
+type OffsetWriter struct {
+	w    WriterAt
+	base int64 // the original offset
+	off  int64 // the current offset
+}
+
+// NewOffsetWriter returns an [OffsetWriter] that writes to w
+// starting at offset off.
+func NewOffsetWriter(w WriterAt, off int64) *OffsetWriter {
+	return &OffsetWriter{w, off, off}
+}
+
+// Write implements Writer, advancing the current offset by the number
+// of bytes written.
+func (o *OffsetWriter) Write(ctx context.Context, p []byte) (n int, err error) {
+	n, err = o.w.WriteAt(ctx, p, o.off)
+	o.off += int64(n)
+	return
+}
+
+// WriteAt implements WriterAt relative to base. It does not move the
+// current offset used by Write.
+func (o *OffsetWriter) WriteAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	if off < 0 {
+		return 0, errOffset
+	}
+
+	off += o.base
+	return o.w.WriteAt(ctx, p, off)
+}
+
+// Seek implements Seeker. SeekEnd is intentionally unsupported (the
+// underlying size is unknown) and is rejected via the default case.
+func (o *OffsetWriter) Seek(offset int64, whence int) (int64, error) {
+	switch whence {
+	default:
+		return 0, errWhence
+	case SeekStart:
+		offset += o.base
+	case SeekCurrent:
+		offset += o.off
+	}
+	if offset < o.base {
+		return 0, errOffset
+	}
+	o.off = offset
+	return offset - o.base, nil
+}
+
+// TeeReader returns a [Reader] that writes to w what it reads from r.
+// All reads from r performed through it are matched with
+// corresponding writes to w. There is no internal buffering -
+// the write must complete before the read completes.
+// Any error encountered while writing is reported as a read error.
+func TeeReader(r Reader, w Writer) Reader {
+	return &teeReader{r, w}
+}
+
+// teeReader is the implementation behind TeeReader.
+type teeReader struct {
+	r Reader
+	w Writer
+}
+
+// Read reads from r and mirrors every byte read into w before
+// returning.
+func (t *teeReader) Read(ctx context.Context, p []byte) (n int, err error) {
+	n, err = t.r.Read(ctx, p)
+	if n > 0 {
+		// The inner n/err deliberately shadow the outer ones: on a
+		// write failure the write count and write error are returned.
+		if n, err := t.w.Write(ctx, p[:n]); err != nil {
+			return n, err
+		}
+	}
+	return
+}
+
+// Discard is a [Writer] on which all Write calls succeed
+// without doing anything.
+var Discard Writer = discard{}
+
+type discard struct{}
+
+// discard implements ReaderFrom as an optimization so Copy to
+// io.Discard can avoid doing unnecessary work.
+var _ ReaderFrom = discard{}
+
+// Write reports the full length of p as written without storing it.
+func (discard) Write(ctx context.Context, p []byte) (int, error) {
+	return len(p), nil
+}
+
+// WriteString discards s. Its signature matches [StringWriter] (no
+// context parameter) so the WriteString helper's fast path recognizes
+// Discard; the previous ctx-taking form did not satisfy the interface
+// and silently disabled that path.
+func (discard) WriteString(s string) (int, error) {
+	return len(s), nil
+}
+
+// blackHolePool recycles scratch buffers for ReadFrom so repeated
+// discards do not allocate.
+var blackHolePool = sync.Pool{
+	New: func() any {
+		b := make([]byte, 8192)
+		return &b
+	},
+}
+
+// ReadFrom drains r until EOF, counting the bytes. EOF is reported as
+// success (n, nil); any other read error is returned as-is.
+func (discard) ReadFrom(ctx context.Context, r Reader) (n int64, err error) {
+	bufp := blackHolePool.Get().(*[]byte)
+	readSize := 0
+	for {
+		readSize, err = r.Read(ctx, *bufp)
+		n += int64(readSize)
+		if err != nil {
+			blackHolePool.Put(bufp)
+			if err == EOF {
+				return n, nil
+			}
+			return
+		}
+	}
+}
+
+// NopCloser returns a [ReadCloser] with a no-op Close method wrapping
+// the provided [Reader] r.
+// If r implements [WriterTo], the returned [ReadCloser] will implement [WriterTo]
+// by forwarding calls to r.
+func NopCloser(r Reader) ReadCloser {
+	if _, ok := r.(WriterTo); ok {
+		return nopCloserWriterTo{r}
+	}
+	return nopCloser{r}
+}
+
+// nopCloser embeds the Reader and adds a Close that does nothing.
+type nopCloser struct {
+	Reader
+}
+
+func (nopCloser) Close(ctx context.Context) error { return nil }
+
+// nopCloserWriterTo additionally preserves the wrapped reader's
+// WriterTo fast path.
+type nopCloserWriterTo struct {
+	Reader
+}
+
+func (nopCloserWriterTo) Close(ctx context.Context) error { return nil }
+
+// WriteTo forwards to the wrapped Reader's WriterTo implementation
+// (guaranteed to exist by NopCloser's type check).
+func (c nopCloserWriterTo) WriteTo(ctx context.Context, w Writer) (n int64, err error) {
+	return c.Reader.(WriterTo).WriteTo(ctx, w)
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadAll(ctx context.Context, r Reader) ([]byte, error) {
+	b := make([]byte, 0, 512)
+	for {
+		// Read into b's spare capacity; len(b) only grows by what was
+		// actually read.
+		n, err := r.Read(ctx, b[len(b):cap(b)])
+		b = b[:len(b)+n]
+		if err != nil {
+			if err == EOF {
+				err = nil
+			}
+			return b, err
+		}
+
+		if len(b) == cap(b) {
+			// Add more capacity (let append pick how much).
+			b = append(b, 0)[:len(b)]
+		}
+	}
+}
diff --git a/pkg/ctxio/reader.go b/pkg/ctxio/reader.go
new file mode 100644
index 0000000..8d2e4c7
--- /dev/null
+++ b/pkg/ctxio/reader.go
@@ -0,0 +1,64 @@
+package ctxio
+
+import (
+	"context"
+	"io"
+)
+
+// FileReader is a context-aware file-like source: sequential Read,
+// random-access ReadAt, and Close.
+type FileReader interface {
+	Reader
+	ReaderAt
+	Closer
+}
+
+// contextReader adapts a ctxio Reader to io.Reader by capturing a
+// context at construction time (see IoReader).
+type contextReader struct {
+	ctx context.Context
+	r   Reader
+}
+
+// Read implements io.Reader, failing fast with the context's error if
+// the captured context is already done.
+func (r *contextReader) Read(p []byte) (n int, err error) {
+	if r.ctx.Err() != nil {
+		return 0, r.ctx.Err()
+	}
+
+	return r.r.Read(r.ctx, p)
+}
+
+// IoReaderAt adapts a ctxio ReaderAt to io.ReaderAt; ctx is captured
+// and used for every subsequent ReadAt call.
+func IoReaderAt(ctx context.Context, r ReaderAt) io.ReaderAt {
+	return &contextReaderAt{ctx: ctx, r: r}
+}
+
+// contextReaderAt pairs a captured context with a ctxio ReaderAt.
+type contextReaderAt struct {
+	ctx context.Context
+	r   ReaderAt
+}
+
+// ReadAt implements io.ReaderAt, failing fast once the captured
+// context is done.
+func (c *contextReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+	if c.ctx.Err() != nil {
+		return 0, c.ctx.Err()
+	}
+
+	return c.r.ReadAt(c.ctx, p, off)
+}
+
+// IoReader adapts a ctxio Reader to io.Reader; ctx is captured and
+// used for every subsequent Read call.
+func IoReader(ctx context.Context, r Reader) io.Reader {
+	return &contextReader{ctx: ctx, r: r}
+}
+
+// WrapIoReader adapts a plain io.Reader to the context-aware Reader
+// interface.
+func WrapIoReader(r io.Reader) Reader {
+	return &wrapReader{r: r}
+}
+
+// wrapReader is the io.Reader -> Reader adapter behind WrapIoReader.
+type wrapReader struct {
+	r io.Reader
+}
+
+var _ Reader = (*wrapReader)(nil)
+
+// Read implements Reader.
+// The wrapped reader cannot observe ctx, so cancellation is only
+// honored between calls, not during a blocking Read.
+func (c *wrapReader) Read(ctx context.Context, p []byte) (n int, err error) {
+	if ctx.Err() != nil {
+		return 0, ctx.Err()
+	}
+	return c.r.Read(p)
+}
diff --git a/pkg/ctxio/seeker.go b/pkg/ctxio/seeker.go
new file mode 100644
index 0000000..a482477
--- /dev/null
+++ b/pkg/ctxio/seeker.go
@@ -0,0 +1,102 @@
+package ctxio
+
+import (
+	"context"
+	"io"
+	"sync"
+)
+
+// ioSeekerWrapper presents a context-aware ReaderAt of known size as an
+// io.ReadSeeker; mu serializes access to the current offset.
+type ioSeekerWrapper struct {
+	ctx context.Context
+
+	mu   sync.Mutex
+	pos  int64
+	size int64
+
+	r ReaderAt
+}
+
+// IoReadSeekerWrapper wraps r as an io.ReadSeeker bound to ctx. size is the
+// total stream length, used only to resolve io.SeekEnd offsets.
+func IoReadSeekerWrapper(ctx context.Context, r ReaderAt, size int64) io.ReadSeeker {
+	return &ioSeekerWrapper{
+		ctx:  ctx,
+		r:    r,
+		size: size,
+	}
+}
+
+// Seek implements io.Seeker, resolving offset against the start of the
+// stream, the current position, or the size supplied at construction.
+// NOTE(review): an unknown whence value silently leaves pos unchanged and a
+// negative resulting offset is not rejected; the io.Seeker contract treats
+// both as errors — confirm callers never depend on this leniency.
+func (r *ioSeekerWrapper) Seek(offset int64, whence int) (int64, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	switch whence {
+	case io.SeekStart:
+		r.pos = offset
+	case io.SeekCurrent:
+		r.pos = r.pos + offset
+	case io.SeekEnd:
+		r.pos = r.size + offset
+	}
+
+	return r.pos, nil
+}
+
+// Read implements io.Reader by issuing a positional read at the current
+// offset under the lock and advancing the offset by the bytes actually read.
+func (r *ioSeekerWrapper) Read(p []byte) (int, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	n, err := r.r.ReadAt(r.ctx, p, r.pos)
+	r.pos += int64(n)
+
+	return n, err
+}
+
+// Compile-time check that ioSeekerCloserWrapper satisfies io.ReadSeekCloser.
+var _ io.ReadSeekCloser = (*ioSeekerCloserWrapper)(nil)
+
+// ioSeekerCloserWrapper is ioSeekerWrapper plus Close, built over a
+// FileReader; mu serializes access to the current offset.
+type ioSeekerCloserWrapper struct {
+	ctx context.Context
+
+	mu   sync.Mutex
+	pos  int64
+	size int64
+
+	r FileReader
+}
+
+// IoReadSeekCloserWrapper wraps r as an io.ReadSeekCloser bound to ctx.
+// size is the total stream length, used only to resolve io.SeekEnd offsets.
+func IoReadSeekCloserWrapper(ctx context.Context, r FileReader, size int64) io.ReadSeekCloser {
+	return &ioSeekerCloserWrapper{
+		ctx:  ctx,
+		r:    r,
+		size: size,
+	}
+}
+
+// Seek implements io.Seeker, resolving offset against the start of the
+// stream, the current position, or the size supplied at construction.
+// NOTE(review): as with ioSeekerWrapper.Seek, an unknown whence value is
+// silently ignored and a negative resulting offset is not rejected —
+// confirm callers never depend on this leniency.
+func (r *ioSeekerCloserWrapper) Seek(offset int64, whence int) (int64, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	switch whence {
+	case io.SeekStart:
+		r.pos = offset
+	case io.SeekCurrent:
+		r.pos = r.pos + offset
+	case io.SeekEnd:
+		r.pos = r.size + offset
+	}
+
+	return r.pos, nil
+}
+
+// Read implements io.Reader by issuing a positional read at the current
+// offset under the lock and advancing the offset by the bytes actually read.
+func (r *ioSeekerCloserWrapper) Read(p []byte) (int, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	n, err := r.r.ReadAt(r.ctx, p, r.pos)
+	r.pos += int64(n)
+
+	return n, err
+}
+
+// Close implements io.ReadSeekCloser by closing the underlying FileReader
+// with the captured context.
+func (r *ioSeekerCloserWrapper) Close() error {
+	return r.r.Close(r.ctx)
+}
diff --git a/pkg/ctxio/teereader.go b/pkg/ctxio/teereader.go
new file mode 100644
index 0000000..999a670
--- /dev/null
+++ b/pkg/ctxio/teereader.go
@@ -0,0 +1,20 @@
+package ctxio
+
+// func TeeReader(r Reader, w Writer) Reader {
+// 	return &teeReader{r, w}
+// }
+
+// type teeReader struct {
+// 	r Reader
+// 	w Writer
+// }
+
+// func (t *teeReader) Read(ctx context.Context, p []byte) (n int, err error) {
+// 	n, err = t.r.Read(ctx, p)
+// 	if n > 0 {
+// 		if n, err := t.w.Write(ctx, p[:n]); err != nil {
+// 			return n, err
+// 		}
+// 	}
+// 	return
+// }
diff --git a/pkg/go-nfs/.github/dependabot.yml b/pkg/go-nfs/.github/dependabot.yml
new file mode 100644
index 0000000..12389a7
--- /dev/null
+++ b/pkg/go-nfs/.github/dependabot.yml
@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+  - package-ecosystem: "gomod"
+    directory: "/" # Location of package manifests
+    schedule:
+      interval: "daily"
diff --git a/pkg/go-nfs/.github/workflows/codeql-analysis.yml b/pkg/go-nfs/.github/workflows/codeql-analysis.yml
new file mode 100644
index 0000000..ca22cc4
--- /dev/null
+++ b/pkg/go-nfs/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,51 @@
+name: "Code scanning - action"
+
+on:
+  push:
+  pull_request:
+  schedule:
+    - cron: '0 18 * * 3'
+
+jobs:
+  CodeQL-Build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+      with:
+        # We must fetch at least the immediate parents so that if this is
+        # a pull request then we can checkout the head.
+        fetch-depth: 2
+
+    # If this run was triggered by a pull request event, then checkout
+    # the head of the pull request instead of the merge commit.
+    - run: git checkout HEAD^2
+      if: ${{ github.event_name == 'pull_request' }}
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v2
+      # Override language selection by uncommenting this and choosing your languages
+      # with:
+      #   languages: go, javascript, csharp, python, cpp, java
+
+    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v2
+
+    # â„šī¸ Command-line programs to run using the OS shell.
+    # 📚 https://git.io/JvXDl
+
+    # âœī¸ If the Autobuild fails above, remove it and uncomment the following three lines
+    #    and modify them (or add more) to build your code if your project
+    #    uses a compiled language
+
+    #- run: |
+    #   make bootstrap
+    #   make release
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v2
diff --git a/pkg/go-nfs/.github/workflows/go.yml b/pkg/go-nfs/.github/workflows/go.yml
new file mode 100644
index 0000000..df1e4fe
--- /dev/null
+++ b/pkg/go-nfs/.github/workflows/go.yml
@@ -0,0 +1,36 @@
+name: Go
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+    steps:
+    - name: Set up Go 1.x
+      uses: actions/setup-go@v3
+      with:
+        go-version: ^1.19
+      id: go
+
+    - name: Check out code into the Go module directory
+      uses: actions/checkout@v3
+
+    - name: Get dependencies
+      run: go mod download
+
+    - name: Build
+      run: go build -v ./...
+
+    - name: golangci-lint
+      uses: golangci/golangci-lint-action@v3
+
+    - name: Test
+      run: go test -v .
diff --git a/pkg/go-nfs/CONTRIBUTING.md b/pkg/go-nfs/CONTRIBUTING.md
new file mode 100644
index 0000000..f1f3f11
--- /dev/null
+++ b/pkg/go-nfs/CONTRIBUTING.md
@@ -0,0 +1,11 @@
+# Contributing Guidelines
+
+We appreciate your interest in improving go-nfs!
+
+## Looking for ways to contribute?
+
+There are several ways you can contribute:
+- Start contributing immediately via the [opened](https://github.com/willscott/go-nfs/issues) issues on GitHub.
+  Defined issues provide an excellent starting point.
+- Reporting issues, bugs, mistakes, or inconsistencies.
+  Like many open source projects, we are short-staffed; we thus kindly ask you to be open to contributing a fix for discovered issues.
diff --git a/pkg/go-nfs/LICENSE b/pkg/go-nfs/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/pkg/go-nfs/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/pkg/go-nfs/README.md b/pkg/go-nfs/README.md
new file mode 100644
index 0000000..a92f706
--- /dev/null
+++ b/pkg/go-nfs/README.md
@@ -0,0 +1,96 @@
+Golang Network File Server
+===
+
+NFSv3 protocol implementation in pure Golang.
+
+Current Status:
+* Minimally tested
+* Mounts, read-only and read-write support
+
+Usage
+===
+
+The most interesting demo is currently in `example/osview`. 
+
+Start the server
+`go run ./example/osview .`.
+
+The local folder at `.` will be the initial view in the mount. Mutations to metadata or contents
+will be stored purely in memory and not written back to the OS. When run, this
+demo will print the port it is listening on.
+
+The mount can be accessed using a command similar to 
+`mount -o port=<n>,mountport=<n> -t nfs localhost:/mount <mountpoint>` (For Mac users)
+
+or
+
+`mount -o port=<n>,mountport=<n>,nfsvers=3,noacl,tcp -t nfs localhost:/mount <mountpoint>` (For Linux users)
+
+API
+===
+
+The NFS server runs on a `net.Listener` to export a file system to NFS clients.
+Usage is structured similarly to many other golang network servers.
+
+```golang
+package main
+
+import (
+	"fmt"
+	"log"
+	"net"
+
+	"github.com/go-git/go-billy/v5/memfs"
+	nfs "github.com/willscott/go-nfs"
+	nfshelper "github.com/willscott/go-nfs/helpers"
+)
+
+func main() {
+	listener, err := net.Listen("tcp", ":0")
+	panicOnErr(err, "starting TCP listener")
+	fmt.Printf("Server running at %s\n", listener.Addr())
+	mem := memfs.New()
+	f, err := mem.Create("hello.txt")
+	panicOnErr(err, "creating file")
+	_, err = f.Write([]byte("hello world"))
+	panicOnErr(err, "writing data")
+	f.Close()
+	handler := nfshelper.NewNullAuthHandler(mem)
+	cacheHelper := nfshelper.NewCachingHandler(handler, 1)
+	panicOnErr(nfs.Serve(listener, cacheHelper), "serving nfs")
+}
+
+func panicOnErr(err error, desc ...interface{}) {
+	if err == nil {
+		return
+	}
+	log.Println(desc...)
+	log.Panicln(err)
+}
+```
+
+Notes
+---
+
+* Ports are typically determined through portmap. The need for running portmap 
+(which is the only part that needs a privileged listening port) can be avoided
+through specific mount options. e.g. 
+`mount -o port=n,mountport=n -t nfs host:/mount /localmount`
+
+* This server currently uses [billy](https://github.com/go-git/go-billy/) to
+provide a file system abstraction layer. There are some edges of the NFS protocol
+which do not translate to this abstraction.
+  * NFS expects access to an `inode` or equivalent unique identifier to reference
+  files in a file system. These are considered opaque identifiers here, which
+  means they will not work as expected in cases of hard linking.
+  * The billy abstraction layer does not extend to exposing `uid` and `gid`
+  ownership of files. If ownership is important to your file system, you
+  will need to ensure that the `os.FileInfo` meets additional constraints.
+  In particular, the `Sys()` escape hatch is queried by this library, and
+  if your file system populates a [`syscall.Stat_t`](https://golang.org/pkg/syscall/#Stat_t)
+  concrete struct, the ownership specified in that object will be used.
+
+* Relevant RFCS:
+[5531 - RPC protocol](https://tools.ietf.org/html/rfc5531),
+[1813 - NFSv3](https://tools.ietf.org/html/rfc1813),
+[1094 - NFS](https://tools.ietf.org/html/rfc1094)
diff --git a/pkg/go-nfs/SECURITY.md b/pkg/go-nfs/SECURITY.md
new file mode 100644
index 0000000..5f079c1
--- /dev/null
+++ b/pkg/go-nfs/SECURITY.md
@@ -0,0 +1,11 @@
+# Security Policy
+
+## Supported Versions
+
+The latest release reflects the current best recommendation / supported version at this time.
+
+## Reporting a Vulnerability
+
+Please email Will (the git commit author) if you need to report issues privately.
+I will endeavor to respond within a day, but if I am offline, responses may be delayed longer than that.
+If you need a stronger SLA to have confidence in using this code, feel free to reach out.
diff --git a/pkg/go-nfs/capability_check.go b/pkg/go-nfs/capability_check.go
new file mode 100644
index 0000000..5ad6596
--- /dev/null
+++ b/pkg/go-nfs/capability_check.go
@@ -0,0 +1,9 @@
+package nfs
+
+import (
+	billy "github.com/go-git/go-billy/v5"
+)
+
+// CapabilityCheck reports whether fs supports the given billy capability.
+// The current implementation is a stub that accepts every capability; it
+// exists so call sites can gate features once real detection is added.
+// The parameter is named c (not cap) to avoid shadowing the builtin cap.
+func CapabilityCheck(fs Filesystem, c billy.Capability) bool {
+	// NOTE(review): both arguments are intentionally unused for now.
+	_, _ = fs, c
+	return true
+}
diff --git a/pkg/go-nfs/conn.go b/pkg/go-nfs/conn.go
new file mode 100644
index 0000000..ec9cf76
--- /dev/null
+++ b/pkg/go-nfs/conn.go
@@ -0,0 +1,335 @@
+package nfs
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+
+	xdr2 "github.com/rasky/go-xdr/xdr2"
+	"github.com/willscott/go-nfs-client/nfs/rpc"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+	"go.opentelemetry.io/otel"
+)
+
+var (
+	// ErrInputInvalid is returned when input cannot be parsed
+	ErrInputInvalid = errors.New("invalid input")
+	// ErrAlreadySent is returned when writing a header/status multiple times
+	ErrAlreadySent = errors.New("response already started")
+)
+
+// ResponseCode is a combination of accept_stat and reject_stat.
+type ResponseCode uint32
+
+// ResponseCode Codes
+const (
+	ResponseCodeSuccess ResponseCode = iota
+	ResponseCodeProgUnavailable
+	ResponseCodeProcUnavailable
+	ResponseCodeGarbageArgs
+	ResponseCodeSystemErr
+	ResponseCodeRPCMismatch
+	ResponseCodeAuthError
+)
+
+// conn is the per-connection state: it embeds the owning Server for
+// configuration and handlers, and the accepted net.Conn for I/O. Replies
+// are funnelled through writeSerializer so responses cannot interleave
+// bytes on the wire.
+type conn struct {
+	*Server
+	writeSerializer chan []byte
+	net.Conn
+}
+
+// tracer emits OpenTelemetry spans for each handled procedure.
+var tracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/pkg/go-nfs")
+
+// serve runs the connection's read loop: it decodes one RPC request frame
+// at a time, dispatches it to handle, and queues the buffered response for
+// the writer goroutine. It owns the connection and closes it on errors.
+func (c *conn) serve() {
+	ctx := context.Background() // TODO implement correct timeout on serve side
+
+	c.writeSerializer = make(chan []byte, 1)
+	go c.serializeWrites(ctx)
+
+	bio := bufio.NewReader(c.Conn)
+	for {
+		w, err := c.readRequestHeader(ctx, bio)
+		if err != nil {
+			if err == io.EOF {
+				// Clean close.
+				c.Close()
+				return
+			}
+			// NOTE(review): on non-EOF read errors the connection is not
+			// explicitly closed here — confirm cleanup happens elsewhere.
+			return
+		}
+		Log.Tracef("request: %v", w.req)
+		err = c.handle(ctx, w)
+		// finish queues whatever reply was buffered, even if handling failed.
+		respErr := w.finish(ctx)
+		if err != nil {
+			Log.Errorf("error handling req: %v", err)
+			// failure to handle at a level needing to close the connection.
+			c.Close()
+			return
+		}
+		if respErr != nil {
+			Log.Errorf("error sending response: %v", respErr)
+			c.Close()
+			return
+		}
+	}
+}
+
+// serializeWrites is the single writer goroutine for the connection. Each
+// queued message is prefixed with the RPC record-marking header (the length
+// with the high "last fragment" bit set) and flushed to the socket. A write
+// error ends the goroutine; the read side notices the broken connection on
+// its next operation.
+func (c *conn) serializeWrites(ctx context.Context) {
+	// todo: maybe don't need the extra buffer
+	writer := bufio.NewWriter(c.Conn)
+	var fragmentBuf [4]byte
+	var fragmentInt uint32
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case msg, ok := <-c.writeSerializer:
+			if !ok {
+				return
+			}
+			// prepend the fragmentation header
+			fragmentInt = uint32(len(msg))
+			fragmentInt |= (1 << 31) // mark as the final (only) fragment
+			binary.BigEndian.PutUint32(fragmentBuf[:], fragmentInt)
+			n, err := writer.Write(fragmentBuf[:])
+			if n < 4 || err != nil {
+				return
+			}
+			n, err = writer.Write(msg)
+			if err != nil {
+				return
+			}
+			if n < len(msg) {
+				panic("todo: ensure writes complete fully.")
+			}
+			if err = writer.Flush(); err != nil {
+				return
+			}
+		}
+	}
+}
+
+// Handle a request. errors from this method indicate a failure to read or
+// write on the network stream, and trigger a disconnection of the connection.
+// Application-level handler errors are instead reported to the client via
+// c.err and do not tear down the connection.
+func (c *conn) handle(ctx context.Context, w *response) error {
+	ctx, span := tracer.Start(ctx, fmt.Sprintf("nfs.handle.%s", NFSProcedure(w.req.Header.Proc).String()))
+	defer span.End()
+
+	handler := c.Server.handlerFor(w.req.Header.Prog, w.req.Header.Proc)
+	if handler == nil {
+		Log.Errorf("No handler for %d.%d", w.req.Header.Prog, w.req.Header.Proc)
+		// Consume the rest of the request frame before replying.
+		if err := w.drain(ctx); err != nil {
+			return err
+		}
+		return c.err(ctx, w, &ResponseCodeProcUnavailableError{})
+	}
+	appError := handler(ctx, w, c.Server.Handler)
+	if drainErr := w.drain(ctx); drainErr != nil {
+		return drainErr
+	}
+	if appError != nil && !w.responded {
+		Log.Errorf("call to %+v failed: %v", handler, appError)
+		if err := c.err(ctx, w, appError); err != nil {
+			return err
+		}
+	}
+	if !w.responded {
+		Log.Errorf("Handler did not indicate response status via writing or erroring")
+		if err := c.err(ctx, w, &ResponseCodeSystemError{}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// err records err on the response (the first error wins) and, if nothing
+// has been sent yet, writes the RPC error header and body produced by the
+// response's error formatter. It is a no-op once the response has started
+// or the context is done.
+func (c *conn) err(ctx context.Context, w *response, err error) error {
+	select {
+	case <-ctx.Done():
+		return nil
+	default:
+	}
+
+	if w.err == nil {
+		w.err = err
+	}
+
+	if w.responded {
+		return nil
+	}
+
+	rpcErr := w.errorFmt(err)
+	if writeErr := w.writeHeader(rpcErr.Code()); writeErr != nil {
+		return writeErr
+	}
+
+	// A marshalling failure is deliberately ignored: an empty body is
+	// still a valid error reply.
+	body, _ := rpcErr.MarshalBinary()
+	return w.Write(body)
+}
+
+// request is one decoded RPC call: its transaction id, the parsed RPC call
+// header, and a reader over the remaining (unparsed) bytes of the frame.
+type request struct {
+	xid uint32
+	rpc.Header
+	Body io.Reader
+}
+
+// String renders the request for logging, naming the procedure when the
+// program id matches one of the two services this server implements.
+func (r *request) String() string {
+	if r.Header.Prog == nfsServiceID {
+		return fmt.Sprintf("RPC #%d (nfs.%s)", r.xid, NFSProcedure(r.Header.Proc))
+	} else if r.Header.Prog == mountServiceID {
+		return fmt.Sprintf("RPC #%d (mount.%s)", r.xid, MountProcedure(r.Header.Proc))
+	}
+	return fmt.Sprintf("RPC #%d (%d.%d)", r.xid, r.Header.Prog, r.Header.Proc)
+}
+
+// response accumulates the reply for one request in an in-memory buffer;
+// finish hands the buffer to the connection's writer goroutine. responded
+// records whether the RPC reply header has been written yet.
+type response struct {
+	*conn
+	writer    *bytes.Buffer
+	responded bool
+	err       error
+	errorFmt  func(error) RPCError
+	req       *request
+}
+
+// writeXdrHeader writes the common XDR reply prologue: the request's xid
+// followed by the message type (1 = reply).
+func (w *response) writeXdrHeader() error {
+	err := xdr.Write(w.writer, &w.req.xid)
+	if err != nil {
+		return err
+	}
+	respType := uint32(1)
+	err = xdr.Write(w.writer, &respType)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// writeHeader writes the full RPC reply header for the given status code.
+// Auth and RPC-mismatch codes are sent as MsgDenied; everything else as
+// MsgAccepted with a null opaque_auth. It may be called at most once per
+// response; later calls return ErrAlreadySent.
+func (w *response) writeHeader(code ResponseCode) error {
+	if w.responded {
+		return ErrAlreadySent
+	}
+	w.responded = true
+	if err := w.writeXdrHeader(); err != nil {
+		return err
+	}
+
+	status := rpc.MsgAccepted
+	if code == ResponseCodeAuthError || code == ResponseCodeRPCMismatch {
+		status = rpc.MsgDenied
+	}
+
+	err := xdr.Write(w.writer, &status)
+	if err != nil {
+		return err
+	}
+
+	if status == rpc.MsgAccepted {
+		// Write opaque_auth header.
+		err = xdr.Write(w.writer, &rpc.AuthNull)
+		if err != nil {
+			return err
+		}
+	}
+
+	return xdr.Write(w.writer, &code)
+}
+
+// Write a response to an xdr message. The first Write implicitly emits a
+// success header; all writes append to the in-memory reply buffer.
+func (w *response) Write(dat []byte) error {
+	if !w.responded {
+		if err := w.writeHeader(ResponseCodeSuccess); err != nil {
+			return err
+		}
+	}
+
+	// Loop defensively in case of a short write to the buffer.
+	acc := 0
+	for acc < len(dat) {
+		n, err := w.writer.Write(dat[acc:])
+		if err != nil {
+			return err
+		}
+		acc += n
+	}
+	return nil
+}
+
+// drain reads the rest of the request frame if not consumed by the handler.
+// The body is expected to be the *io.LimitedReader installed by
+// readRequestHeader; any other reader type is treated as a framing error.
+func (w *response) drain(ctx context.Context) error {
+	if reader, ok := w.req.Body.(*io.LimitedReader); ok {
+		if reader.N == 0 {
+			return nil
+		}
+		// todo: wrap body in a context reader.
+		_, err := io.CopyN(io.Discard, w.req.Body, reader.N)
+		if err == nil || err == io.EOF {
+			return nil
+		}
+		return err
+	}
+	return io.ErrUnexpectedEOF
+}
+
+// finish queues the buffered reply bytes for the connection's writer
+// goroutine, or gives up with the context's error if ctx is done first.
+func (w *response) finish(ctx context.Context) error {
+	select {
+	case w.conn.writeSerializer <- w.writer.Bytes():
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// readRequestHeader reads one record-marked RPC request from the stream:
+// the fragment header, then xid, message type, and the RPC call header.
+// The returned response wraps the request with its Body limited to the
+// remaining bytes of this frame. io.EOF is returned as-is so serve can
+// distinguish a clean shutdown from a protocol error.
+func (c *conn) readRequestHeader(ctx context.Context, reader *bufio.Reader) (w *response, err error) {
+	fragment, err := xdr.ReadUint32(reader)
+	if err != nil {
+		// Unwrap the xdr library's error to surface a plain io.EOF.
+		if xdrErr, ok := err.(*xdr2.UnmarshalError); ok {
+			if xdrErr.Err == io.EOF {
+				return nil, io.EOF
+			}
+		}
+		return nil, err
+	}
+	// The high bit marks the last fragment; multi-fragment requests are
+	// not supported.
+	if fragment&(1<<31) == 0 {
+		Log.Warnf("Warning: haven't implemented fragment reconstruction.\n")
+		return nil, ErrInputInvalid
+	}
+	reqLen := fragment - uint32(1<<31)
+	// Reject frames too small to hold a minimal RPC call header.
+	if reqLen < 40 {
+		return nil, ErrInputInvalid
+	}
+
+	r := io.LimitedReader{R: reader, N: int64(reqLen)}
+
+	xid, err := xdr.ReadUint32(&r)
+	if err != nil {
+		return nil, err
+	}
+	reqType, err := xdr.ReadUint32(&r)
+	if err != nil {
+		return nil, err
+	}
+	if reqType != 0 { // 0 = request, 1 = response
+		return nil, ErrInputInvalid
+	}
+
+	req := request{
+		xid,
+		rpc.Header{},
+		&r,
+	}
+	if err = xdr.Read(&r, &req.Header); err != nil {
+		return nil, err
+	}
+
+	w = &response{
+		conn:     c,
+		req:      &req,
+		errorFmt: basicErrorFormatter,
+		// TODO: use a pool for these.
+		writer: bytes.NewBuffer([]byte{}),
+	}
+	return w, nil
+}
diff --git a/pkg/go-nfs/errors.go b/pkg/go-nfs/errors.go
new file mode 100644
index 0000000..af08be6
--- /dev/null
+++ b/pkg/go-nfs/errors.go
@@ -0,0 +1,230 @@
+package nfs
+
+import (
+	"encoding"
+	"encoding/binary"
+	"errors"
+	"fmt"
+)
+
+// RPCError provides the error interface for errors thrown by
+// procedures to be transmitted over the XDR RPC channel
+type RPCError interface {
+	// An RPCError is an `error` with this method
+	Error() string
+	// Code is the RPC Response code to send
+	Code() ResponseCode
+	// BinaryMarshaler is the on-wire representation of this error
+	encoding.BinaryMarshaler
+}
+
+// AuthStat is an enumeration of why authentication has failed
+type AuthStat uint32
+
+// AuthStat Codes, in RPC wire order (AuthStatOK == 0).
+const (
+	AuthStatOK AuthStat = iota
+	AuthStatBadCred
+	AuthStatRejectedCred
+	AuthStatBadVerifier
+	AuthStatRejectedVerfier
+	AuthStatTooWeak
+	AuthStatInvalidResponse
+	AuthStatFailed
+	AuthStatKerbGeneric
+	AuthStatTimeExpire
+	AuthStatTktFile
+	AuthStatDecode
+	AuthStatNetAddr
+	AuthStatRPCGSSCredProblem
+	AuthStatRPCGSSCTXProblem
+)
+
+// AuthError is an RPCError reporting an authentication failure with a
+// specific AuthStat cause.
+type AuthError struct {
+	AuthStat
+}
+
+// Code for AuthErrors is ResponseCodeAuthError
+func (a *AuthError) Code() ResponseCode {
+	return ResponseCodeAuthError
+}
+
+// Error is a textual representation of the auth error. From the RFC
+func (a *AuthError) Error() string {
+	switch a.AuthStat {
+	case AuthStatOK:
+		return "Auth Status: OK"
+	case AuthStatBadCred:
+		return "Auth Status: bad credential"
+	case AuthStatRejectedCred:
+		return "Auth Status: client must begin new session"
+	case AuthStatBadVerifier:
+		return "Auth Status: bad verifier"
+	case AuthStatRejectedVerfier:
+		return "Auth Status: verifier expired or replayed"
+	case AuthStatTooWeak:
+		return "Auth Status: rejected for security reasons"
+	case AuthStatInvalidResponse:
+		return "Auth Status: bogus response verifier"
+	case AuthStatFailed:
+		return "Auth Status: reason unknown"
+	case AuthStatKerbGeneric:
+		return "Auth Status: kerberos generic error"
+	case AuthStatTimeExpire:
+		return "Auth Status: time of credential expired"
+	case AuthStatTktFile:
+		return "Auth Status: problem with ticket file"
+	case AuthStatDecode:
+		return "Auth Status: can't decode authenticator"
+	case AuthStatNetAddr:
+		return "Auth Status: wrong net address in ticket"
+	case AuthStatRPCGSSCredProblem:
+		return "Auth Status: no credentials for user"
+	case AuthStatRPCGSSCTXProblem:
+		return "Auth Status: problem with context"
+	}
+	return "Auth Status: Unknown"
+}
+
+// MarshalBinary sends the specific auth status.
+// XDR (RFC 4506) mandates big-endian integer encoding; the original used
+// LittleEndian here, emitting reversed bytes and disagreeing with
+// NFSStatusError.MarshalBinary in this same file.
+func (a *AuthError) MarshalBinary() (data []byte, err error) {
+	var resp [4]byte
+	binary.BigEndian.PutUint32(resp[:], uint32(a.AuthStat))
+	return resp[:], nil
+}
+
+// RPCMismatchError is an RPCError rejecting an unsupported RPC protocol
+// version; Low and High bound the versions this server accepts.
+type RPCMismatchError struct {
+	Low  uint32
+	High uint32
+}
+
+// Code for RPCMismatchError is ResponseCodeRPCMismatch
+func (r *RPCMismatchError) Code() ResponseCode {
+	return ResponseCodeRPCMismatch
+}
+
+// Error describes the accepted version range for logging.
+func (r *RPCMismatchError) Error() string {
+	return fmt.Sprintf("RPC Mismatch: Expected version between %d and %d.", r.Low, r.High)
+}
+
+// MarshalBinary sends the specific rpc mismatch range.
+// Encoded big-endian per XDR (RFC 4506); the original used LittleEndian,
+// unlike NFSStatusError.MarshalBinary. The fields are already uint32, so
+// no conversion is needed.
+func (r *RPCMismatchError) MarshalBinary() (data []byte, err error) {
+	var resp [8]byte
+	binary.BigEndian.PutUint32(resp[0:4], r.Low)
+	binary.BigEndian.PutUint32(resp[4:8], r.High)
+	return resp[:], nil
+}
+
+// ResponseCodeProcUnavailableError is an RPCError returned when the
+// requested procedure number is not exported by this server.
+type ResponseCodeProcUnavailableError struct {
+}
+
+// Code for ResponseCodeProcUnavailableError
+func (r *ResponseCodeProcUnavailableError) Code() ResponseCode {
+	return ResponseCodeProcUnavailable
+}
+
+// Error gives the human-readable reason for the rejection.
+func (r *ResponseCodeProcUnavailableError) Error() string {
+	return "The requested procedure is unexported"
+}
+
+// MarshalBinary - this error has no associated body
+func (r *ResponseCodeProcUnavailableError) MarshalBinary() (data []byte, err error) {
+	return []byte{}, nil
+}
+
+// ResponseCodeSystemError is an RPCError used as the generic fallback
+// when a handler error carries no RPC-level formatting (see
+// basicErrorFormatter).
+type ResponseCodeSystemError struct {
+}
+
+// Code for ResponseCodeSystemError
+func (r *ResponseCodeSystemError) Code() ResponseCode {
+	return ResponseCodeSystemErr
+}
+
+// Error gives the standard RPC wording for a system-level failure.
+func (r *ResponseCodeSystemError) Error() string {
+	return "memory allocation failure"
+}
+
+// MarshalBinary - this error has no associated body
+func (r *ResponseCodeSystemError) MarshalBinary() (data []byte, err error) {
+	return []byte{}, nil
+}
+
+// basicErrorFormatter is the default error handler for response errors.
+// An error that already carries RPC formatting (anywhere in its wrap
+// chain) is returned as-is; anything else is reported to the client as a
+// generic ResponseCodeSystemError.
+func basicErrorFormatter(err error) RPCError {
+	var formatted RPCError
+	if !errors.As(err, &formatted) {
+		return &ResponseCodeSystemError{}
+	}
+	return formatted
+}
+
+// NFSStatusError represents an error at the NFS level.
+type NFSStatusError struct {
+	NFSStatus
+	WrappedErr error // underlying cause, may be nil
+}
+
+// Error is The wrapped error
+func (s *NFSStatusError) Error() string {
+	message := s.NFSStatus.String()
+	if s.WrappedErr != nil {
+		message = fmt.Sprintf("%s: %v", message, s.WrappedErr)
+	}
+	return message
+}
+
+// Code for NFS issues are successful RPC responses
+func (s *NFSStatusError) Code() ResponseCode {
+	return ResponseCodeSuccess
+}
+
+// MarshalBinary - The binary form of the code.
+// Big-endian, matching XDR integer encoding.
+func (s *NFSStatusError) MarshalBinary() (data []byte, err error) {
+	var resp [4]byte
+	binary.BigEndian.PutUint32(resp[0:4], uint32(s.NFSStatus))
+	return resp[:], nil
+}
+
+// Unwrap unpacks wrapped errors
+func (s *NFSStatusError) Unwrap() error {
+	return s.WrappedErr
+}
+
+// StatusErrorWithBody is an NFS error with a payload.
+type StatusErrorWithBody struct {
+	NFSStatusError
+	Body []byte // appended verbatim after the status code
+}
+
+// MarshalBinary provides the wire format of the error response
+func (s *StatusErrorWithBody) MarshalBinary() (data []byte, err error) {
+	head, err := s.NFSStatusError.MarshalBinary()
+	return append(head, s.Body...), err
+}
+
+// errFormatterWithBody appends a provided body to errors.
+// NFS-level status errors carry the extra payload; other RPCErrors pass
+// through unchanged, and anything else degrades to a generic system
+// error.
+func errFormatterWithBody(body []byte) func(err error) RPCError {
+	return func(err error) RPCError {
+		// Use errors.As (not a bare type assertion) so wrapped
+		// *NFSStatusError values are recognized, matching the RPCError
+		// branch below.
+		var nerr *NFSStatusError
+		if errors.As(err, &nerr) {
+			return &StatusErrorWithBody{*nerr, body}
+		}
+		var rErr RPCError
+		if errors.As(err, &rErr) {
+			return rErr
+		}
+		return &ResponseCodeSystemError{}
+	}
+}
+
+// Shared zeroed error payloads: the formatters append these bytes after
+// the NFS status so error replies still carry the expected empty
+// structures (presumably the "not present" discriminants for
+// post_op_attr and wcc_data — confirm against WritePostOpAttrs/WriteWcc).
+var (
+	opAttrErrorBody       = [4]byte{}
+	opAttrErrorFormatter  = errFormatterWithBody(opAttrErrorBody[:])
+	wccDataErrorBody      = [8]byte{}
+	wccDataErrorFormatter = errFormatterWithBody(wccDataErrorBody[:])
+)
diff --git a/pkg/go-nfs/example/helloworld/main.go b/pkg/go-nfs/example/helloworld/main.go
new file mode 100644
index 0000000..87f2d98
--- /dev/null
+++ b/pkg/go-nfs/example/helloworld/main.go
@@ -0,0 +1,52 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"net"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/go-git/go-billy/v5/memfs"
+
+	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+	"git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
+	nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
+)
+
+// ROFS is an interceptor for the filesystem indicating it should
+// be read only. The underlying billy.Memfs indicates it supports
+// writing, but does not implement billy.Change to support
+// modification of permissions / modTimes, and as such cannot be
+// used as RW system.
+type ROFS struct {
+	nfs.Filesystem
+}
+
+// Capabilities exports the filesystem as readonly
+func (ROFS) Capabilities() billy.Capability {
+	return billy.ReadCapability | billy.SeekCapability
+}
+
+// main serves a read-only in-memory filesystem containing a single
+// "hello.txt" file over NFS on an OS-assigned TCP port.
+func main() {
+	ctx := context.Background()
+
+	// Port 0 lets the OS pick a free port; the chosen address is printed.
+	listener, err := net.Listen("tcp", ":0")
+	if err != nil {
+		fmt.Printf("Failed to listen: %v\n", err)
+		return
+	}
+	fmt.Printf("Server running at %s\n", listener.Addr())
+
+	// Seed the in-memory filesystem with one file to serve.
+	mem := helpers.WrapBillyFS(memfs.New())
+	f, err := mem.Create(ctx, "hello.txt")
+	if err != nil {
+		fmt.Printf("Failed to create file: %v\n", err)
+		return
+	}
+	_, _ = f.Write(ctx, []byte("hello world"))
+	_ = f.Close(ctx)
+
+	// Null auth accepts every client; the caching handler maintains up
+	// to 1024 opaque file handles for the protocol's stateless handles.
+	handler := nfshelper.NewNullAuthHandler(ROFS{mem})
+	cacheHelper := nfshelper.NewCachingHandler(handler, 1024)
+	// Serve blocks until the listener fails or is closed.
+	fmt.Printf("%v", nfs.Serve(listener, cacheHelper))
+}
diff --git a/pkg/go-nfs/example/osnfs/changeos.go b/pkg/go-nfs/example/osnfs/changeos.go
new file mode 100644
index 0000000..38ca4d3
--- /dev/null
+++ b/pkg/go-nfs/example/osnfs/changeos.go
@@ -0,0 +1,38 @@
+package main
+
+import (
+	"os"
+	"time"
+
+	"github.com/go-git/go-billy/v5"
+)
+
+// NewChangeOSFS wraps billy osfs to add the change interface
+func NewChangeOSFS(fs billy.Filesystem) billy.Filesystem {
+	return COS{fs}
+}
+
+// COS or OSFS + Change wraps a billy.FS to not fail the `Change` interface.
+//
+// All methods resolve paths against fs.Root() and call straight into the
+// os package — this assumes the wrapped fs is rooted at a real OS
+// directory (e.g. osfs) — TODO confirm for other billy backends.
+type COS struct {
+	billy.Filesystem
+}
+
+// Chmod changes mode
+func (fs COS) Chmod(name string, mode os.FileMode) error {
+	return os.Chmod(fs.Join(fs.Root(), name), mode)
+}
+
+// Lchown changes ownership of the named file; symlinks are not followed.
+func (fs COS) Lchown(name string, uid, gid int) error {
+	return os.Lchown(fs.Join(fs.Root(), name), uid, gid)
+}
+
+// Chown changes ownership
+func (fs COS) Chown(name string, uid, gid int) error {
+	return os.Chown(fs.Join(fs.Root(), name), uid, gid)
+}
+
+// Chtimes changes access and modification times
+func (fs COS) Chtimes(name string, atime time.Time, mtime time.Time) error {
+	return os.Chtimes(fs.Join(fs.Root(), name), atime, mtime)
+}
diff --git a/pkg/go-nfs/example/osnfs/changeos_unix.go b/pkg/go-nfs/example/osnfs/changeos_unix.go
new file mode 100644
index 0000000..2fe93c6
--- /dev/null
+++ b/pkg/go-nfs/example/osnfs/changeos_unix.go
@@ -0,0 +1,28 @@
+//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris
+
+package main
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// Mknod creates a device node at path with the given mode and device
+// major/minor numbers.
+func (fs COS) Mknod(path string, mode uint32, major uint32, minor uint32) error {
+	dev := unix.Mkdev(major, minor)
+	return unix.Mknod(fs.Join(fs.Root(), path), mode, int(dev))
+}
+
+// Mkfifo creates a named pipe at path.
+func (fs COS) Mkfifo(path string, mode uint32) error {
+	return unix.Mkfifo(fs.Join(fs.Root(), path), mode)
+}
+
+// Link creates a hard link named link pointing at the existing path.
+func (fs COS) Link(path string, link string) error {
+	return unix.Link(fs.Join(fs.Root(), path), link)
+}
+
+// Socket creates a unix-domain socket file at path.
+func (fs COS) Socket(path string) error {
+	fd, err := unix.Socket(unix.AF_UNIX, unix.SOCK_STREAM, 0)
+	if err != nil {
+		return err
+	}
+	// Close the descriptor once the socket file exists on disk; the
+	// original leaked one fd per call.
+	defer unix.Close(fd)
+	return unix.Bind(fd, &unix.SockaddrUnix{Name: fs.Join(fs.Root(), path)})
+}
diff --git a/pkg/go-nfs/example/osnfs/main.go b/pkg/go-nfs/example/osnfs/main.go
new file mode 100644
index 0000000..feadccc
--- /dev/null
+++ b/pkg/go-nfs/example/osnfs/main.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"fmt"
+	"net"
+	"os"
+
+	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+	"git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
+	nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
+	osfs "github.com/go-git/go-billy/v5/osfs"
+)
+
+// main serves the directory given as the first argument over NFS,
+// optionally on the TCP port given as the second argument.
+func main() {
+	port := ""
+	if len(os.Args) < 2 {
+		fmt.Printf("Usage: osnfs </path/to/folder> [port]\n")
+		return
+	} else if len(os.Args) == 3 {
+		port = os.Args[2]
+	}
+
+	// An empty port makes net.Listen choose an arbitrary free port.
+	listener, err := net.Listen("tcp", ":"+port)
+	if err != nil {
+		fmt.Printf("Failed to listen: %v\n", err)
+		return
+	}
+	fmt.Printf("osnfs server running at %s\n", listener.Addr())
+
+	// Wrap osfs with the Change interface so attribute-setting RPCs work.
+	bfs := osfs.New(os.Args[1])
+	bfsPlusChange := helpers.WrapBillyFS(NewChangeOSFS(bfs))
+
+	handler := nfshelper.NewNullAuthHandler(bfsPlusChange)
+	cacheHelper := nfshelper.NewCachingHandler(handler, 1024)
+	// Serve blocks until the listener fails or is closed.
+	fmt.Printf("%v", nfs.Serve(listener, cacheHelper))
+}
diff --git a/pkg/go-nfs/example/osview/main.go b/pkg/go-nfs/example/osview/main.go
new file mode 100644
index 0000000..355df17
--- /dev/null
+++ b/pkg/go-nfs/example/osview/main.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+	"fmt"
+	"net"
+	"os"
+
+	"github.com/willscott/memphis"
+
+	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+	"git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
+	nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
+)
+
+// main serves a memphis snapshot view of the directory given as the
+// first argument over NFS, optionally on the given TCP port.
+func main() {
+	port := ""
+	if len(os.Args) < 2 {
+		fmt.Printf("Usage: osview </path/to/folder> [port]\n")
+		return
+	} else if len(os.Args) == 3 {
+		port = os.Args[2]
+	}
+
+	// An empty port makes net.Listen choose an arbitrary free port.
+	listener, err := net.Listen("tcp", ":"+port)
+	if err != nil {
+		fmt.Printf("Failed to listen: %v\n", err)
+		return
+	}
+	fmt.Printf("Server running at %s\n", listener.Addr())
+
+	// memphis mirrors the OS directory; AsBillyFS(0, 0) exposes it as a
+	// billy filesystem owned by uid/gid 0.
+	fs := memphis.FromOS(os.Args[1])
+	bfs := helpers.WrapBillyFS(fs.AsBillyFS(0, 0))
+
+	handler := nfshelper.NewNullAuthHandler(bfs)
+	cacheHelper := nfshelper.NewCachingHandler(handler, 1024)
+	// Serve blocks until the listener fails or is closed.
+	fmt.Printf("%v", nfs.Serve(listener, cacheHelper))
+}
diff --git a/pkg/go-nfs/file.go b/pkg/go-nfs/file.go
new file mode 100644
index 0000000..2a108b1
--- /dev/null
+++ b/pkg/go-nfs/file.go
@@ -0,0 +1,377 @@
+package nfs
+
+import (
+	"context"
+	"errors"
+	"hash/fnv"
+	"io"
+	"math"
+	"os"
+	"time"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/go-nfs/file"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+// FileAttribute holds metadata about a filesystem object, mirroring the
+// NFSv3 fattr3 structure.
+type FileAttribute struct {
+	Type                FileType
+	FileMode            uint32
+	Nlink               uint32
+	UID                 uint32
+	GID                 uint32
+	Filesize            uint64
+	Used                uint64
+	SpecData            [2]uint32 // device major/minor numbers
+	FSID                uint64
+	Fileid              uint64
+	Atime, Mtime, Ctime FileTime
+}
+
+// FileType represents a NFS File Type
+type FileType uint32
+
+// Enumeration of NFS FileTypes (1-based, per the NFS wire enumeration)
+const (
+	FileTypeRegular FileType = iota + 1
+	FileTypeDirectory
+	FileTypeBlock
+	FileTypeCharacter
+	FileTypeLink
+	FileTypeSocket
+	FileTypeFIFO
+)
+
+// String returns the human-readable name of the NFS file type;
+// unrecognized values report as "Unknown".
+func (f FileType) String() string {
+	names := map[FileType]string{
+		FileTypeRegular:   "Regular",
+		FileTypeDirectory: "Directory",
+		FileTypeBlock:     "Block Device",
+		FileTypeCharacter: "Character Device",
+		FileTypeLink:      "Symbolic Link",
+		FileTypeSocket:    "Socket",
+		FileTypeFIFO:      "FIFO",
+	}
+	if name, ok := names[f]; ok {
+		return name
+	}
+	return "Unknown"
+}
+
+// Mode provides the OS interpreted mode of the file attributes
+func (f *FileAttribute) Mode() os.FileMode {
+	return os.FileMode(f.FileMode)
+}
+
+// FileCacheAttribute is the subset of FileAttribute used by
+// wcc_attr
+type FileCacheAttribute struct {
+	Filesize     uint64
+	Mtime, Ctime FileTime
+}
+
+// AsCache provides the wcc view of the file attributes
+func (f FileAttribute) AsCache() *FileCacheAttribute {
+	wcc := FileCacheAttribute{
+		Filesize: f.Filesize,
+		Mtime:    f.Mtime,
+		Ctime:    f.Ctime,
+	}
+	return &wcc
+}
+
+// ToFileAttribute creates an NFS fattr3 struct from an OS.FileInfo
+func ToFileAttribute(info os.FileInfo, filePath string) *FileAttribute {
+	f := FileAttribute{}
+
+	m := info.Mode()
+	f.FileMode = uint32(m)
+	// Map Go mode bits onto the NFS file-type enumeration; checked in
+	// priority order since several mode bits can be set at once.
+	if info.IsDir() {
+		f.Type = FileTypeDirectory
+	} else if m&os.ModeSymlink != 0 {
+		f.Type = FileTypeLink
+	} else if m&os.ModeCharDevice != 0 {
+		f.Type = FileTypeCharacter
+	} else if m&os.ModeDevice != 0 {
+		f.Type = FileTypeBlock
+	} else if m&os.ModeSocket != 0 {
+		f.Type = FileTypeSocket
+	} else if m&os.ModeNamedPipe != 0 {
+		f.Type = FileTypeFIFO
+	} else {
+		f.Type = FileTypeRegular
+	}
+	// The number of hard links to the file.
+	f.Nlink = 1
+
+	// Prefer real stat data (link count, owner, device, inode) when the
+	// platform exposes it; otherwise derive a stable fileid by hashing
+	// the path.
+	if a := file.GetInfo(info); a != nil {
+		f.Nlink = a.Nlink
+		f.UID = a.UID
+		f.GID = a.GID
+		f.SpecData = [2]uint32{a.Major, a.Minor}
+		f.Fileid = a.Fileid
+	} else {
+		hasher := fnv.New64()
+		_, _ = hasher.Write([]byte(filePath))
+		f.Fileid = hasher.Sum64()
+	}
+
+	f.Filesize = uint64(info.Size())
+	f.Used = uint64(info.Size())
+	// os.FileInfo only exposes the modification time, so atime and ctime
+	// are approximated with mtime.
+	f.Atime = ToNFSTime(info.ModTime())
+	f.Mtime = f.Atime
+	f.Ctime = f.Atime
+	return &f
+}
+
+// tryStat attempts to create a FileAttribute from a path.
+// Failures are logged and reported as nil so callers can emit the
+// "attributes not present" form instead of failing the request.
+func tryStat(ctx context.Context, fs Filesystem, path []string) *FileAttribute {
+	fullPath := fs.Join(path...)
+	attrs, err := fs.Lstat(ctx, fullPath)
+	if err != nil || attrs == nil {
+		Log.Errorf("err loading attrs for %s: %v", fs.Join(path...), err)
+		return nil
+	}
+	return ToFileAttribute(attrs, fullPath)
+}
+
+// WriteWcc writes the `wcc_data` representation of an object.
+// Each of pre/post is prefixed with a 1/0 "attributes follow" boolean
+// discriminant, per XDR optional-data encoding; nil means "not present".
+func WriteWcc(writer io.Writer, pre *FileCacheAttribute, post *FileAttribute) error {
+	if pre == nil {
+		if err := xdr.Write(writer, uint32(0)); err != nil {
+			return err
+		}
+	} else {
+		if err := xdr.Write(writer, uint32(1)); err != nil {
+			return err
+		}
+		if err := xdr.Write(writer, *pre); err != nil {
+			return err
+		}
+	}
+	if post == nil {
+		if err := xdr.Write(writer, uint32(0)); err != nil {
+			return err
+		}
+	} else {
+		if err := xdr.Write(writer, uint32(1)); err != nil {
+			return err
+		}
+		if err := xdr.Write(writer, *post); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// WritePostOpAttrs writes the `post_op_attr` representation of a files attributes
+// using the same 1/0 "attributes follow" discriminant as WriteWcc.
+func WritePostOpAttrs(writer io.Writer, post *FileAttribute) error {
+	if post == nil {
+		if err := xdr.Write(writer, uint32(0)); err != nil {
+			return err
+		}
+	} else {
+		if err := xdr.Write(writer, uint32(1)); err != nil {
+			return err
+		}
+		if err := xdr.Write(writer, *post); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SetFileAttributes represents a command to update some metadata
+// about a file. Nil fields are left unchanged (sattr3 semantics).
+type SetFileAttributes struct {
+	SetMode  *uint32
+	SetUID   *uint32
+	SetGID   *uint32
+	SetSize  *uint64
+	SetAtime *time.Time
+	SetMtime *time.Time
+}
+
+// Apply uses a `Change` implementation to set defined attributes on a
+// provided file.
+//
+// Each non-nil field of s is applied in turn: mode, ownership, size (via
+// truncate), then atime/mtime. Operations that need a Change
+// implementation fail with NFSStatusNotSupp when changer is nil, and
+// known failure causes are mapped onto NFS status errors.
+func (s *SetFileAttributes) Apply(ctx context.Context, changer Change, fs Filesystem, file string) error {
+	curOS, err := fs.Lstat(ctx, file)
+	if errors.Is(err, os.ErrNotExist) {
+		return &NFSStatusError{NFSStatusNoEnt, os.ErrNotExist}
+	} else if errors.Is(err, os.ErrPermission) {
+		return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
+	} else if err != nil {
+		// Propagate unexpected stat failures. The original returned nil
+		// here, silently reporting success without applying anything.
+		return err
+	}
+	curr := ToFileAttribute(curOS, file)
+
+	if s.SetMode != nil {
+		// Only permission bits are honored; file-type bits are masked off.
+		mode := os.FileMode(*s.SetMode) & os.ModePerm
+		if mode != curr.Mode().Perm() {
+			if changer == nil {
+				return &NFSStatusError{NFSStatusNotSupp, os.ErrPermission}
+			}
+			if err := changer.Chmod(ctx, file, mode); err != nil {
+				if errors.Is(err, os.ErrPermission) {
+					return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
+				}
+				return err
+			}
+		}
+	}
+	if s.SetUID != nil || s.SetGID != nil {
+		// Fill the unspecified half of (uid, gid) from current values.
+		euid := curr.UID
+		if s.SetUID != nil {
+			euid = *s.SetUID
+		}
+		egid := curr.GID
+		if s.SetGID != nil {
+			egid = *s.SetGID
+		}
+		if euid != curr.UID || egid != curr.GID {
+			if changer == nil {
+				return &NFSStatusError{NFSStatusNotSupp, os.ErrPermission}
+			}
+			if err := changer.Lchown(ctx, file, int(euid), int(egid)); err != nil {
+				if errors.Is(err, os.ErrPermission) {
+					return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
+				}
+				return err
+			}
+		}
+	}
+	if s.SetSize != nil {
+		if curr.Mode()&os.ModeSymlink != 0 {
+			return &NFSStatusError{NFSStatusNotSupp, os.ErrInvalid}
+		}
+		// Validate the requested size before opening the file so an
+		// invalid request avoids the open/close round trip (the original
+		// checked after opening, leaking the handle on that path too).
+		if *s.SetSize > math.MaxInt64 {
+			return &NFSStatusError{NFSStatusInval, os.ErrInvalid}
+		}
+		fp, err := fs.OpenFile(ctx, file, os.O_WRONLY|os.O_EXCL, 0)
+		if errors.Is(err, os.ErrPermission) {
+			return &NFSStatusError{NFSStatusAccess, err}
+		} else if err != nil {
+			return err
+		}
+		if err := fp.Truncate(ctx, int64(*s.SetSize)); err != nil {
+			// Don't leak the handle when truncation fails.
+			_ = fp.Close(ctx)
+			return err
+		}
+		if err := fp.Close(ctx); err != nil {
+			return err
+		}
+	}
+
+	if s.SetAtime != nil || s.SetMtime != nil {
+		atime := curr.Atime.Native()
+		if s.SetAtime != nil {
+			atime = s.SetAtime
+		}
+		mtime := curr.Mtime.Native()
+		if s.SetMtime != nil {
+			mtime = s.SetMtime
+		}
+		// NOTE(review): this compares *time.Time pointers, not times;
+		// if FileTime.Native() allocates per call it is effectively
+		// "was either field requested" — confirm against FileTime.
+		if atime != curr.Atime.Native() || mtime != curr.Mtime.Native() {
+			if changer == nil {
+				return &NFSStatusError{NFSStatusNotSupp, os.ErrPermission}
+			}
+			if err := changer.Chtimes(ctx, file, *atime, *mtime); err != nil {
+				if errors.Is(err, os.ErrPermission) {
+					return &NFSStatusError{NFSStatusAccess, err}
+				}
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Mode returns the requested permission bits when a mode change was
+// specified, or the provided default mode otherwise.
+func (s *SetFileAttributes) Mode(def os.FileMode) os.FileMode {
+	if s.SetMode == nil {
+		return def
+	}
+	return os.FileMode(*s.SetMode) & os.ModePerm
+}
+
+// ReadSetFileAttributes reads an sattr3 xdr stream into a go struct.
+// Each field is prefixed by a discriminant: mode/uid/gid/size use a 0/1
+// "value follows" boolean; the two time fields use 1 = set to server
+// time (now) and 2 = set to a client-supplied time.
+func ReadSetFileAttributes(r io.Reader) (*SetFileAttributes, error) {
+	attrs := SetFileAttributes{}
+	hasMode, err := xdr.ReadUint32(r)
+	if err != nil {
+		return nil, err
+	}
+	if hasMode != 0 {
+		mode, err := xdr.ReadUint32(r)
+		if err != nil {
+			return nil, err
+		}
+		attrs.SetMode = &mode
+	}
+	hasUID, err := xdr.ReadUint32(r)
+	if err != nil {
+		return nil, err
+	}
+	if hasUID != 0 {
+		uid, err := xdr.ReadUint32(r)
+		if err != nil {
+			return nil, err
+		}
+		attrs.SetUID = &uid
+	}
+	hasGID, err := xdr.ReadUint32(r)
+	if err != nil {
+		return nil, err
+	}
+	if hasGID != 0 {
+		gid, err := xdr.ReadUint32(r)
+		if err != nil {
+			return nil, err
+		}
+		attrs.SetGID = &gid
+	}
+	hasSize, err := xdr.ReadUint32(r)
+	if err != nil {
+		return nil, err
+	}
+	if hasSize != 0 {
+		var size uint64
+		attrs.SetSize = &size
+		if err := xdr.Read(r, &size); err != nil {
+			return nil, err
+		}
+	}
+	aTime, err := xdr.ReadUint32(r)
+	if err != nil {
+		return nil, err
+	}
+	if aTime == 1 {
+		// 1: server chooses the time — use "now".
+		now := time.Now()
+		attrs.SetAtime = &now
+	} else if aTime == 2 {
+		// 2: client supplies an explicit timestamp.
+		t := FileTime{}
+		if err := xdr.Read(r, &t); err != nil {
+			return nil, err
+		}
+		attrs.SetAtime = t.Native()
+	}
+	mTime, err := xdr.ReadUint32(r)
+	if err != nil {
+		return nil, err
+	}
+	if mTime == 1 {
+		now := time.Now()
+		attrs.SetMtime = &now
+	} else if mTime == 2 {
+		t := FileTime{}
+		if err := xdr.Read(r, &t); err != nil {
+			return nil, err
+		}
+		attrs.SetMtime = t.Native()
+	}
+	return &attrs, nil
+}
diff --git a/pkg/go-nfs/file/file.go b/pkg/go-nfs/file/file.go
new file mode 100644
index 0000000..2853868
--- /dev/null
+++ b/pkg/go-nfs/file/file.go
@@ -0,0 +1,17 @@
+package file
+
+import "os"
+
+// FileInfo carries platform-specific stat fields that os.FileInfo does
+// not expose directly.
+type FileInfo struct {
+	Nlink  uint32 // hard-link count
+	UID    uint32
+	GID    uint32
+	Major  uint32 // device major number
+	Minor  uint32 // device minor number
+	Fileid uint64 // inode number
+}
+
+// GetInfo extracts some non-standardized items from the result of a Stat call.
+// Returns nil when the platform provides no extra data (see the
+// per-platform getInfo implementations).
+func GetInfo(fi os.FileInfo) *FileInfo {
+	return getInfo(fi)
+}
diff --git a/pkg/go-nfs/file/file_unix.go b/pkg/go-nfs/file/file_unix.go
new file mode 100644
index 0000000..6658c20
--- /dev/null
+++ b/pkg/go-nfs/file/file_unix.go
@@ -0,0 +1,24 @@
+//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris
+
+package file
+
+import (
+	"os"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// getInfo extracts unix-specific stat fields (link count, ownership,
+// device numbers, inode) from the FileInfo's underlying syscall.Stat_t,
+// or returns nil when Sys() is not a *syscall.Stat_t.
+func getInfo(info os.FileInfo) *FileInfo {
+	fi := &FileInfo{}
+	if s, ok := info.Sys().(*syscall.Stat_t); ok {
+		fi.Nlink = uint32(s.Nlink)
+		fi.UID = s.Uid
+		fi.GID = s.Gid
+		fi.Major = unix.Major(uint64(s.Rdev))
+		fi.Minor = unix.Minor(uint64(s.Rdev))
+		fi.Fileid = s.Ino
+		return fi
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/file/file_windows.go b/pkg/go-nfs/file/file_windows.go
new file mode 100644
index 0000000..ef173d5
--- /dev/null
+++ b/pkg/go-nfs/file/file_windows.go
@@ -0,0 +1,12 @@
+//go:build windows
+
+package file
+
+import "os"
+
+// getInfo returns nil on Windows: no extra stat fields are extracted
+// here, so callers fall back to their defaults.
+func getInfo(info os.FileInfo) *FileInfo {
+	// https://godoc.org/golang.org/x/sys/windows#GetFileInformationByHandle
+	// can be potentially used to populate Nlink
+
+	return nil
+}
diff --git a/pkg/go-nfs/filesystem.go b/pkg/go-nfs/filesystem.go
new file mode 100644
index 0000000..3a06cc8
--- /dev/null
+++ b/pkg/go-nfs/filesystem.go
@@ -0,0 +1,101 @@
+package nfs
+
+import (
+	"context"
+	"io"
+	"os"
+	"time"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
+)
+
+// FSStat returns metadata about a file system
+type FSStat struct {
+	TotalSize      uint64
+	FreeSize       uint64
+	AvailableSize  uint64
+	TotalFiles     uint64
+	FreeFiles      uint64
+	AvailableFiles uint64
+	// CacheHint is called "invarsec" in the nfs standard
+	CacheHint time.Duration
+}
+
+// Filesystem is the context-aware filesystem contract the NFS handlers
+// operate against.
+type Filesystem interface {
+	// Create creates the named file with mode 0666 (before umask), truncating
+	// it if it already exists. If successful, methods on the returned File can
+	// be used for I/O; the associated file descriptor has mode O_RDWR.
+	Create(ctx context.Context, filename string) (File, error)
+	// Open opens the named file for reading. If successful, methods on the
+	// returned file can be used for reading; the associated file descriptor has
+	// mode O_RDONLY.
+	Open(ctx context.Context, filename string) (File, error)
+	// OpenFile is the generalized open call; most users will use Open or Create
+	// instead. It opens the named file with specified flag (O_RDONLY etc.) and
+	// perm, (0666 etc.) if applicable. If successful, methods on the returned
+	// File can be used for I/O.
+	OpenFile(ctx context.Context, filename string, flag int, perm os.FileMode) (File, error)
+	// Stat returns a FileInfo describing the named file.
+	Stat(ctx context.Context, filename string) (os.FileInfo, error)
+	// Rename renames (moves) oldpath to newpath. If newpath already exists and
+	// is not a directory, Rename replaces it. OS-specific restrictions may
+	// apply when oldpath and newpath are in different directories.
+	Rename(ctx context.Context, oldpath, newpath string) error
+	// Remove removes the named file or directory.
+	Remove(ctx context.Context, filename string) error
+	// Join joins any number of path elements into a single path, adding a
+	// Separator if necessary. Join calls filepath.Clean on the result; in
+	// particular, all empty strings are ignored. On Windows, the result is a
+	// UNC path if and only if the first path element is a UNC path.
+	Join(elem ...string) string
+
+	// ReadDir reads the directory named by dirname and returns a list of
+	// directory entries sorted by filename.
+	ReadDir(ctx context.Context, path string) ([]os.FileInfo, error)
+	// MkdirAll creates a directory named path, along with any necessary
+	// parents, and returns nil, or else returns an error. The permission bits
+	// perm are used for all directories that MkdirAll creates. If path is
+	// already a directory, MkdirAll does nothing and returns nil.
+	MkdirAll(ctx context.Context, filename string, perm os.FileMode) error
+
+	// Lstat returns a FileInfo describing the named file. If the file is a
+	// symbolic link, the returned FileInfo describes the symbolic link. Lstat
+	// makes no attempt to follow the link.
+	Lstat(ctx context.Context, filename string) (os.FileInfo, error)
+	// Symlink creates a symbolic-link from link to target. target may be an
+	// absolute or relative path, and need not refer to an existing node.
+	// Parent directories of link are created as necessary.
+	Symlink(ctx context.Context, target, link string) error
+	// Readlink returns the target path of link.
+	Readlink(ctx context.Context, link string) (string, error)
+}
+
+// File is the context-aware handle returned by Filesystem open calls.
+type File interface {
+	// Name returns the name of the file as presented to Open.
+	Name() string
+	ctxio.Writer
+	ctxio.Reader
+	ctxio.ReaderAt
+	io.Seeker
+	ctxio.Closer
+
+	// Truncate the file.
+	Truncate(ctx context.Context, size int64) error
+}
+
+// Change abstract the FileInfo change related operations in a storage-agnostic
+// interface as an extension to the Basic interface
+type Change interface {
+	// Chmod changes the mode of the named file to mode. If the file is a
+	// symbolic link, it changes the mode of the link's target.
+	Chmod(ctx context.Context, name string, mode os.FileMode) error
+	// Lchown changes the numeric uid and gid of the named file. If the file is
+	// a symbolic link, it changes the uid and gid of the link itself.
+	Lchown(ctx context.Context, name string, uid, gid int) error
+	// Chtimes changes the access and modification times of the named file,
+	// similar to the Unix utime() or utimes() functions.
+	//
+	// The underlying filesystem may truncate or round the values to a less
+	// precise time unit.
+	Chtimes(ctx context.Context, name string, atime time.Time, mtime time.Time) error
+}
diff --git a/pkg/go-nfs/handler.go b/pkg/go-nfs/handler.go
new file mode 100644
index 0000000..cac4d72
--- /dev/null
+++ b/pkg/go-nfs/handler.go
@@ -0,0 +1,52 @@
+package nfs
+
+import (
+	"context"
+	"io/fs"
+	"net"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxbilly"
+)
+
+// Handler represents the interface of the file system / vfs being exposed over NFS
+type Handler interface {
+	// Required methods
+
+	// Mount authenticates/authorizes a client mount request and returns
+	// the filesystem to expose plus the accepted auth flavors.
+	Mount(context.Context, net.Conn, MountRequest) (MountStatus, Filesystem, []AuthFlavor)
+
+	// Change can return 'nil' if filesystem is read-only
+	// If the returned value can be cast to `UnixChange`, mknod and link RPCs will be available.
+	Change(Filesystem) Change
+
+	// Optional methods - generic helpers or trivial implementations can be sufficient depending on use case.
+
+	// Fill in information about a file system's free space.
+	FSStat(context.Context, Filesystem, *FSStat) error
+
+	// represent file objects as opaque references
+	// Can be safely implemented via helpers/cachinghandler.
+	ToHandle(fs Filesystem, path []string) []byte
+	FromHandle(fh []byte) (Filesystem, []string, error)
+	InvalidateHandle(Filesystem, []byte) error
+
+	// How many handles can be safely maintained by the handler.
+	HandleLimit() int
+}
+
+// UnixChange extends the billy `Change` interface with support for special files.
+type UnixChange interface {
+	ctxbilly.Change
+	Mknod(ctx context.Context, path string, mode uint32, major uint32, minor uint32) error
+	Mkfifo(ctx context.Context, path string, mode uint32) error
+	Socket(ctx context.Context, path string) error
+	Link(ctx context.Context, path string, link string) error
+}
+
+// CachingHandler represents the optional caching work that a user may wish to over-ride with
+// their own implementations, but which can be otherwise provided through defaults.
+type CachingHandler interface {
+	// VerifierFor returns a cookie verifier for a directory listing.
+	VerifierFor(path string, contents []fs.FileInfo) uint64
+
+	// fs.FileInfo needs to be sorted by Name(), nil in case of a cache-miss
+	DataForVerifier(path string, verifier uint64) []fs.FileInfo
+}
diff --git a/pkg/go-nfs/helpers/billlyfs.go b/pkg/go-nfs/helpers/billlyfs.go
new file mode 100644
index 0000000..3dd3c2c
--- /dev/null
+++ b/pkg/go-nfs/helpers/billlyfs.go
@@ -0,0 +1,157 @@
+package helpers
+
+import (
+	"context"
+	"io/fs"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+	"github.com/go-git/go-billy/v5"
+)
+
+func WrapBillyFS(bf billy.Filesystem) nfs.Filesystem {
+	return &wrapFS{
+		Filesystem: bf,
+	}
+}
+
+type wrapFS struct {
+	billy.Filesystem
+}
+
+var _ nfs.Filesystem = (*wrapFS)(nil)
+
+// Create implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Create of MemFS.Filesystem.
+func (m *wrapFS) Create(ctx context.Context, filename string) (nfs.File, error) {
+	bf, err := m.Filesystem.Create(filename)
+	if err != nil {
+		return nil, err
+	}
+	return &wrapFile{bf}, nil
+}
+
+// Lstat implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Lstat of MemFS.Filesystem.
+func (m *wrapFS) Lstat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	return m.Filesystem.Lstat(filename)
+}
+
+// MkdirAll implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).MkdirAll of MemFS.Filesystem.
+func (m *wrapFS) MkdirAll(ctx context.Context, filename string, perm fs.FileMode) error {
+	return m.Filesystem.MkdirAll(filename, perm)
+}
+
+// Open implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Open of MemFS.Filesystem.
+func (m *wrapFS) Open(ctx context.Context, filename string) (nfs.File, error) {
+	bf, err := m.Filesystem.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	return WrapFile(bf), nil
+}
+
+// OpenFile implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).OpenFile of MemFS.Filesystem.
+func (m *wrapFS) OpenFile(ctx context.Context, filename string, flag int, perm fs.FileMode) (nfs.File, error) {
+	bf, err := m.Filesystem.OpenFile(filename, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	return WrapFile(bf), nil
+}
+
+// ReadDir implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).ReadDir of MemFS.Filesystem.
+func (m *wrapFS) ReadDir(ctx context.Context, path string) ([]fs.FileInfo, error) {
+	return m.Filesystem.ReadDir(path)
+}
+
+// Readlink implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Readlink of MemFS.Filesystem.
+func (m *wrapFS) Readlink(ctx context.Context, link string) (string, error) {
+	return m.Filesystem.Readlink(link)
+}
+
+// Remove implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Remove of MemFS.Filesystem.
+func (m *wrapFS) Remove(ctx context.Context, filename string) error {
+	return m.Filesystem.Remove(filename)
+}
+
+// Rename implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Rename of MemFS.Filesystem.
+func (m *wrapFS) Rename(ctx context.Context, oldpath string, newpath string) error {
+	return m.Filesystem.Rename(oldpath, newpath)
+}
+
+// Stat implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Stat of MemFS.Filesystem.
+func (m *wrapFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	return m.Filesystem.Stat(filename)
+}
+
+// Symlink implements Filesystem.
+// Subtle: this method shadows the method (Filesystem).Symlink of MemFS.Filesystem.
+func (m *wrapFS) Symlink(ctx context.Context, target string, link string) error {
+	return m.Filesystem.Symlink(target, link)
+}
+
+func WrapFile(bf billy.File) nfs.File {
+	return &wrapFile{File: bf}
+}
+
+type wrapFile struct {
+	billy.File
+}
+
+var _ nfs.File = (*wrapFile)(nil)
+
+// Close implements File.
+// Subtle: this method shadows the method (File).Close of MemFile.File.
+func (m *wrapFile) Close(ctx context.Context) error {
+	return m.File.Close()
+}
+
+// Lock implements File.
+// Subtle: this method shadows the method (File).Lock of MemFile.File.
+func (m *wrapFile) Lock() error {
+	return m.File.Lock()
+}
+
+// Name implements File.
+// Subtle: this method shadows the method (File).Name of MemFile.File.
+func (m *wrapFile) Name() string {
+	return m.File.Name()
+}
+
+// Truncate implements File.
+// Subtle: this method shadows the method (File).Truncate of memFile.File.
+func (m *wrapFile) Truncate(ctx context.Context, size int64) error {
+	return m.File.Truncate(size)
+}
+
+// Read implements File.
+// Subtle: this method shadows the method (File).Read of MemFile.File.
+func (m *wrapFile) Read(ctx context.Context, p []byte) (n int, err error) {
+	return m.File.Read(p)
+}
+
+// ReadAt implements File.
+// Subtle: this method shadows the method (File).ReadAt of MemFile.File.
+func (m *wrapFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	return m.File.ReadAt(p, off)
+}
+
+// Unlock implements File.
+// Subtle: this method shadows the method (File).Unlock of MemFile.File.
+func (m *wrapFile) Unlock() error {
+	return m.File.Unlock()
+}
+
+// Write implements File.
+// Subtle: this method shadows the method (File).Write of MemFile.File.
+func (m *wrapFile) Write(ctx context.Context, p []byte) (n int, err error) {
+	return m.File.Write(p)
+}
diff --git a/pkg/go-nfs/helpers/cachinghandler.go b/pkg/go-nfs/helpers/cachinghandler.go
new file mode 100644
index 0000000..06d5934
--- /dev/null
+++ b/pkg/go-nfs/helpers/cachinghandler.go
@@ -0,0 +1,198 @@
+package helpers
+
+import (
+	"crypto/sha256"
+	"encoding/binary"
+	"io/fs"
+	"reflect"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+
+	"github.com/google/uuid"
+	lru "github.com/hashicorp/golang-lru/v2"
+)
+
+// NewCachingHandler wraps a handler to provide a basic to/from-file handle cache.
+func NewCachingHandler(h nfs.Handler, limit int) nfs.Handler {
+	return NewCachingHandlerWithVerifierLimit(h, limit, limit)
+}
+
+// NewCachingHandlerWithVerifierLimit provides a basic to/from-file handle cache that can be tuned with a smaller cache of active directory listings.
+func NewCachingHandlerWithVerifierLimit(h nfs.Handler, limit int, verifierLimit int) nfs.Handler {
+	if limit < 2 || verifierLimit < 2 {
+		nfs.Log.Warnf("Caching handler created with insufficient cache to support directory listing", "size", limit, "verifiers", verifierLimit)
+	}
+	cache, _ := lru.New[uuid.UUID, entry](limit)
+	reverseCache := make(map[string][]uuid.UUID)
+	verifiers, _ := lru.New[uint64, verifier](verifierLimit)
+	return &CachingHandler{
+		Handler:         h,
+		activeHandles:   cache,
+		reverseHandles:  reverseCache,
+		activeVerifiers: verifiers,
+		cacheLimit:      limit,
+	}
+}
+
+// CachingHandler implements to/from handle via an LRU cache.
+type CachingHandler struct {
+	nfs.Handler
+	activeHandles   *lru.Cache[uuid.UUID, entry]
+	reverseHandles  map[string][]uuid.UUID
+	activeVerifiers *lru.Cache[uint64, verifier]
+	cacheLimit      int
+}
+
+type entry struct {
+	f nfs.Filesystem
+	p []string
+}
+
+// ToHandle takes a file and represents it with an opaque handle to reference it.
+// In stateless nfs (when it's serving a unix fs) this can be the device + inode
+// but we can generalize with a stateful local cache of handed out IDs.
+func (c *CachingHandler) ToHandle(f nfs.Filesystem, path []string) []byte {
+	joinedPath := f.Join(path...)
+
+	if handle := c.searchReverseCache(f, joinedPath); handle != nil {
+		return handle
+	}
+
+	id := uuid.New()
+
+	newPath := make([]string, len(path))
+
+	copy(newPath, path)
+	evictedKey, evictedPath, ok := c.activeHandles.GetOldest()
+	if evicted := c.activeHandles.Add(id, entry{f, newPath}); evicted && ok {
+		rk := evictedPath.f.Join(evictedPath.p...)
+		c.evictReverseCache(rk, evictedKey)
+	}
+
+	if _, ok := c.reverseHandles[joinedPath]; !ok {
+		c.reverseHandles[joinedPath] = []uuid.UUID{}
+	}
+	c.reverseHandles[joinedPath] = append(c.reverseHandles[joinedPath], id)
+	b, _ := id.MarshalBinary()
+
+	return b
+}
+
+// FromHandle converts from an opaque handle to the file it represents
+func (c *CachingHandler) FromHandle(fh []byte) (nfs.Filesystem, []string, error) {
+	id, err := uuid.FromBytes(fh)
+	if err != nil {
+		return nil, []string{}, err
+	}
+
+	if f, ok := c.activeHandles.Get(id); ok {
+		for _, k := range c.activeHandles.Keys() {
+			candidate, _ := c.activeHandles.Peek(k)
+			if hasPrefix(f.p, candidate.p) {
+				_, _ = c.activeHandles.Get(k)
+			}
+		}
+		if ok {
+			newP := make([]string, len(f.p))
+			copy(newP, f.p)
+			return f.f, newP, nil
+		}
+	}
+	return nil, []string{}, &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
+}
+
+func (c *CachingHandler) searchReverseCache(f nfs.Filesystem, path string) []byte {
+	uuids, exists := c.reverseHandles[path]
+
+	if !exists {
+		return nil
+	}
+
+	for _, id := range uuids {
+		if candidate, ok := c.activeHandles.Get(id); ok {
+			if reflect.DeepEqual(candidate.f, f) {
+				return id[:]
+			}
+		}
+	}
+
+	return nil
+}
+
+func (c *CachingHandler) evictReverseCache(path string, handle uuid.UUID) {
+	uuids, exists := c.reverseHandles[path]
+
+	if !exists {
+		return
+	}
+	for i, u := range uuids {
+		if u == handle {
+			uuids = append(uuids[:i], uuids[i+1:]...)
+			c.reverseHandles[path] = uuids
+			return
+		}
+	}
+}
+
+func (c *CachingHandler) InvalidateHandle(fs nfs.Filesystem, handle []byte) error {
+	// Remove from cache
+	id, _ := uuid.FromBytes(handle)
+	entry, ok := c.activeHandles.Get(id)
+	if ok {
+		rk := entry.f.Join(entry.p...)
+		c.evictReverseCache(rk, id)
+	}
+	c.activeHandles.Remove(id)
+	return nil
+}
+
+// HandleLimit exports how many file handles can be safely stored by this cache.
+func (c *CachingHandler) HandleLimit() int {
+	return c.cacheLimit
+}
+
+func hasPrefix(path, prefix []string) bool {
+	if len(prefix) > len(path) {
+		return false
+	}
+	for i, e := range prefix {
+		if path[i] != e {
+			return false
+		}
+	}
+	return true
+}
+
+type verifier struct {
+	path     string
+	contents []fs.FileInfo
+}
+
+func hashPathAndContents(path string, contents []fs.FileInfo) uint64 {
+	//calculate a cookie-verifier.
+	vHash := sha256.New()
+
+	// Add the path to avoid collisions of directories with the same content
+	vHash.Write(binary.BigEndian.AppendUint64([]byte{}, uint64(len(path))))
+	vHash.Write([]byte(path))
+
+	for _, c := range contents {
+		vHash.Write([]byte(c.Name())) // Never fails according to the docs
+	}
+
+	verify := vHash.Sum(nil)[0:8]
+	return binary.BigEndian.Uint64(verify)
+}
+
+func (c *CachingHandler) VerifierFor(path string, contents []fs.FileInfo) uint64 {
+	id := hashPathAndContents(path, contents)
+	c.activeVerifiers.Add(id, verifier{path, contents})
+	return id
+}
+
+func (c *CachingHandler) DataForVerifier(path string, id uint64) []fs.FileInfo {
+	if cache, ok := c.activeVerifiers.Get(id); ok {
+		return cache.contents
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/helpers/memfs/memfs.go b/pkg/go-nfs/helpers/memfs/memfs.go
new file mode 100644
index 0000000..5e1822a
--- /dev/null
+++ b/pkg/go-nfs/helpers/memfs/memfs.go
@@ -0,0 +1,414 @@
+// Package memfs is a variant of "github.com/go-git/go-billy/v5/memfs" with
+// stable mtimes for items.
+package memfs
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/go-git/go-billy/v5/helper/chroot"
+	"github.com/go-git/go-billy/v5/util"
+)
+
+const separator = filepath.Separator
+
+// Memory a very convenient filesystem based on memory files
+type Memory struct {
+	s *storage
+}
+
+// New returns a new Memory filesystem.
+func New() billy.Filesystem {
+	fs := &Memory{s: newStorage()}
+	return chroot.New(fs, string(separator))
+}
+
+func (fs *Memory) Create(filename string) (billy.File, error) {
+	return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+}
+
+func (fs *Memory) Open(filename string) (billy.File, error) {
+	return fs.OpenFile(filename, os.O_RDONLY, 0)
+}
+
+func (fs *Memory) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
+	f, has := fs.s.Get(filename)
+	if !has {
+		if !isCreate(flag) {
+			return nil, os.ErrNotExist
+		}
+
+		var err error
+		f, err = fs.s.New(filename, perm, flag)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		if isExclusive(flag) {
+			return nil, os.ErrExist
+		}
+
+		if target, isLink := fs.resolveLink(filename, f); isLink {
+			return fs.OpenFile(target, flag, perm)
+		}
+	}
+
+	if f.mode.IsDir() {
+		return nil, fmt.Errorf("cannot open directory: %s", filename)
+	}
+
+	return f.Duplicate(filename, perm, flag), nil
+}
+
+func (fs *Memory) resolveLink(fullpath string, f *file) (target string, isLink bool) {
+	if !isSymlink(f.mode) {
+		return fullpath, false
+	}
+
+	target = string(f.content.bytes)
+	if !isAbs(target) {
+		target = fs.Join(filepath.Dir(fullpath), target)
+	}
+
+	return target, true
+}
+
+// On Windows OS, IsAbs validates if a path is valid based on if it starts with a
+// unit (eg.: `C:\`)  to assert that is absolute, but in this mem implementation
+// any path starting by `separator` is also considered absolute.
+func isAbs(path string) bool {
+	return filepath.IsAbs(path) || strings.HasPrefix(path, string(separator))
+}
+
+func (fs *Memory) Stat(filename string) (os.FileInfo, error) {
+	f, has := fs.s.Get(filename)
+	if !has {
+		return nil, os.ErrNotExist
+	}
+
+	fi, _ := f.Stat()
+
+	var err error
+	if target, isLink := fs.resolveLink(filename, f); isLink {
+		fi, err = fs.Stat(target)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// the name of the file should always be the name of the stated file, so we
+	// overwrite the Stat returned from the storage with it, since the
+	// filename may belong to a link.
+	fi.(*fileInfo).name = filepath.Base(filename)
+	return fi, nil
+}
+
+func (fs *Memory) Lstat(filename string) (os.FileInfo, error) {
+	f, has := fs.s.Get(filename)
+	if !has {
+		return nil, os.ErrNotExist
+	}
+
+	return f.Stat()
+}
+
+type ByName []os.FileInfo
+
+func (a ByName) Len() int           { return len(a) }
+func (a ByName) Less(i, j int) bool { return a[i].Name() < a[j].Name() }
+func (a ByName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+func (fs *Memory) ReadDir(path string) ([]os.FileInfo, error) {
+	if f, has := fs.s.Get(path); has {
+		if target, isLink := fs.resolveLink(path, f); isLink {
+			return fs.ReadDir(target)
+		}
+	} else {
+		return nil, &os.PathError{Op: "open", Path: path, Err: syscall.ENOENT}
+	}
+
+	var entries []os.FileInfo
+	for _, f := range fs.s.Children(path) {
+		fi, _ := f.Stat()
+		entries = append(entries, fi)
+	}
+
+	sort.Sort(ByName(entries))
+
+	return entries, nil
+}
+
+func (fs *Memory) MkdirAll(path string, perm os.FileMode) error {
+	_, err := fs.s.New(path, perm|os.ModeDir, 0)
+	return err
+}
+
+func (fs *Memory) TempFile(dir, prefix string) (billy.File, error) {
+	return util.TempFile(fs, dir, prefix)
+}
+
+func (fs *Memory) Rename(from, to string) error {
+	return fs.s.Rename(from, to)
+}
+
+func (fs *Memory) Remove(filename string) error {
+	return fs.s.Remove(filename)
+}
+
+func (fs *Memory) Join(elem ...string) string {
+	return filepath.Join(elem...)
+}
+
+func (fs *Memory) Symlink(target, link string) error {
+	_, err := fs.Stat(link)
+	if err == nil {
+		return os.ErrExist
+	}
+
+	if !os.IsNotExist(err) {
+		return err
+	}
+
+	return util.WriteFile(fs, link, []byte(target), 0777|os.ModeSymlink)
+}
+
+func (fs *Memory) Readlink(link string) (string, error) {
+	f, has := fs.s.Get(link)
+	if !has {
+		return "", os.ErrNotExist
+	}
+
+	if !isSymlink(f.mode) {
+		return "", &os.PathError{
+			Op:   "readlink",
+			Path: link,
+			Err:  fmt.Errorf("not a symlink"),
+		}
+	}
+
+	return string(f.content.bytes), nil
+}
+
+// Capabilities implements the Capable interface.
+func (fs *Memory) Capabilities() billy.Capability {
+	return billy.WriteCapability |
+		billy.ReadCapability |
+		billy.ReadAndWriteCapability |
+		billy.SeekCapability |
+		billy.TruncateCapability
+}
+
+type file struct {
+	name     string
+	content  *content
+	position int64
+	flag     int
+	mode     os.FileMode
+	mtime    time.Time
+
+	isClosed bool
+}
+
+func (f *file) Name() string {
+	return f.name
+}
+
+func (f *file) Read(b []byte) (int, error) {
+	n, err := f.ReadAt(b, f.position)
+	f.position += int64(n)
+
+	if err == io.EOF && n != 0 {
+		err = nil
+	}
+
+	return n, err
+}
+
+func (f *file) ReadAt(b []byte, off int64) (int, error) {
+	if f.isClosed {
+		return 0, os.ErrClosed
+	}
+
+	if !isReadAndWrite(f.flag) && !isReadOnly(f.flag) {
+		return 0, errors.New("read not supported")
+	}
+
+	n, err := f.content.ReadAt(b, off)
+
+	return n, err
+}
+
+func (f *file) Seek(offset int64, whence int) (int64, error) {
+	if f.isClosed {
+		return 0, os.ErrClosed
+	}
+
+	switch whence {
+	case io.SeekCurrent:
+		f.position += offset
+	case io.SeekStart:
+		f.position = offset
+	case io.SeekEnd:
+		f.position = int64(f.content.Len()) + offset
+	}
+
+	return f.position, nil
+}
+
+func (f *file) Write(p []byte) (int, error) {
+	return f.WriteAt(p, f.position)
+}
+
+func (f *file) WriteAt(p []byte, off int64) (int, error) {
+	if f.isClosed {
+		return 0, os.ErrClosed
+	}
+
+	if !isReadAndWrite(f.flag) && !isWriteOnly(f.flag) {
+		return 0, errors.New("write not supported")
+	}
+
+	n, err := f.content.WriteAt(p, off)
+	f.position = off + int64(n)
+	f.mtime = time.Now()
+
+	return n, err
+}
+
+func (f *file) Close() error {
+	if f.isClosed {
+		return os.ErrClosed
+	}
+
+	f.isClosed = true
+	return nil
+}
+
+func (f *file) Truncate(size int64) error {
+	if size < int64(len(f.content.bytes)) {
+		f.content.bytes = f.content.bytes[:size]
+	} else if more := int(size) - len(f.content.bytes); more > 0 {
+		f.content.bytes = append(f.content.bytes, make([]byte, more)...)
+	}
+	f.mtime = time.Now()
+
+	return nil
+}
+
+func (f *file) Duplicate(filename string, mode os.FileMode, flag int) billy.File {
+	new := &file{
+		name:    filename,
+		content: f.content,
+		mode:    mode,
+		flag:    flag,
+		mtime:   time.Now(),
+	}
+
+	if isTruncate(flag) {
+		new.content.Truncate()
+	}
+
+	if isAppend(flag) {
+		new.position = int64(new.content.Len())
+	}
+
+	return new
+}
+
+func (f *file) Stat() (os.FileInfo, error) {
+	return &fileInfo{
+		name:  f.Name(),
+		mode:  f.mode,
+		size:  f.content.Len(),
+		mtime: f.mtime,
+	}, nil
+}
+
+// Lock is a no-op in memfs.
+func (f *file) Lock() error {
+	return nil
+}
+
+// Unlock is a no-op in memfs.
+func (f *file) Unlock() error {
+	return nil
+}
+
+type fileInfo struct {
+	name  string
+	size  int
+	mode  os.FileMode
+	mtime time.Time
+}
+
+func (fi *fileInfo) Name() string {
+	return fi.name
+}
+
+func (fi *fileInfo) Size() int64 {
+	return int64(fi.size)
+}
+
+func (fi *fileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+
+func (fi *fileInfo) ModTime() time.Time {
+	return fi.mtime
+}
+
+func (fi *fileInfo) IsDir() bool {
+	return fi.mode.IsDir()
+}
+
+func (*fileInfo) Sys() interface{} {
+	return nil
+}
+
+func (c *content) Truncate() {
+	c.bytes = make([]byte, 0)
+}
+
+func (c *content) Len() int {
+	return len(c.bytes)
+}
+
+func isCreate(flag int) bool {
+	return flag&os.O_CREATE != 0
+}
+
+func isExclusive(flag int) bool {
+	return flag&os.O_EXCL != 0
+}
+
+func isAppend(flag int) bool {
+	return flag&os.O_APPEND != 0
+}
+
+func isTruncate(flag int) bool {
+	return flag&os.O_TRUNC != 0
+}
+
+func isReadAndWrite(flag int) bool {
+	return flag&os.O_RDWR != 0
+}
+
+func isReadOnly(flag int) bool {
+	return flag == os.O_RDONLY
+}
+
+func isWriteOnly(flag int) bool {
+	return flag&os.O_WRONLY != 0
+}
+
+func isSymlink(m os.FileMode) bool {
+	return m&os.ModeSymlink != 0
+}
diff --git a/pkg/go-nfs/helpers/memfs/storage.go b/pkg/go-nfs/helpers/memfs/storage.go
new file mode 100644
index 0000000..5d73331
--- /dev/null
+++ b/pkg/go-nfs/helpers/memfs/storage.go
@@ -0,0 +1,243 @@
+package memfs
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+)
+
+type storage struct {
+	files    map[string]*file
+	children map[string]map[string]*file
+}
+
+func newStorage() *storage {
+	return &storage{
+		files:    make(map[string]*file, 0),
+		children: make(map[string]map[string]*file, 0),
+	}
+}
+
+func (s *storage) Has(path string) bool {
+	path = clean(path)
+
+	_, ok := s.files[path]
+	return ok
+}
+
+func (s *storage) New(path string, mode os.FileMode, flag int) (*file, error) {
+	path = clean(path)
+	if s.Has(path) {
+		if !s.MustGet(path).mode.IsDir() {
+			return nil, fmt.Errorf("file already exists %q", path)
+		}
+
+		return nil, nil
+	}
+
+	name := filepath.Base(path)
+
+	f := &file{
+		name:    name,
+		content: &content{name: name},
+		mode:    mode,
+		flag:    flag,
+		mtime:   time.Now(),
+	}
+
+	s.files[path] = f
+	if err := s.createParent(path, mode, f); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+func (s *storage) createParent(path string, mode os.FileMode, f *file) error {
+	base := filepath.Dir(path)
+	base = clean(base)
+	if f.Name() == string(separator) {
+		return nil
+	}
+
+	if _, err := s.New(base, mode.Perm()|os.ModeDir, 0); err != nil {
+		return err
+	}
+
+	if _, ok := s.children[base]; !ok {
+		s.children[base] = make(map[string]*file, 0)
+	}
+
+	s.children[base][f.Name()] = f
+	return nil
+}
+
+func (s *storage) Children(path string) []*file {
+	path = clean(path)
+
+	l := make([]*file, 0)
+	for _, f := range s.children[path] {
+		l = append(l, f)
+	}
+
+	return l
+}
+
+func (s *storage) MustGet(path string) *file {
+	f, ok := s.Get(path)
+	if !ok {
+		panic(fmt.Errorf("couldn't find %q", path))
+	}
+
+	return f
+}
+
+func (s *storage) Get(path string) (*file, bool) {
+	path = clean(path)
+	if !s.Has(path) {
+		return nil, false
+	}
+
+	file, ok := s.files[path]
+	return file, ok
+}
+
+func (s *storage) Rename(from, to string) error {
+	from = clean(from)
+	to = clean(to)
+
+	if !s.Has(from) {
+		return os.ErrNotExist
+	}
+
+	move := [][2]string{{from, to}}
+
+	for pathFrom := range s.files {
+		if pathFrom == from || !strings.HasPrefix(pathFrom, from) {
+			continue
+		}
+
+		rel, _ := filepath.Rel(from, pathFrom)
+		pathTo := filepath.Join(to, rel)
+
+		move = append(move, [2]string{pathFrom, pathTo})
+	}
+
+	for _, ops := range move {
+		from := ops[0]
+		to := ops[1]
+
+		if err := s.move(from, to); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *storage) move(from, to string) error {
+	s.files[to] = s.files[from]
+	s.files[to].name = filepath.Base(to)
+	s.children[to] = s.children[from]
+
+	defer func() {
+		delete(s.children, from)
+		delete(s.files, from)
+		delete(s.children[filepath.Dir(from)], filepath.Base(from))
+	}()
+
+	return s.createParent(to, 0644, s.files[to])
+}
+
+func (s *storage) Remove(path string) error {
+	path = clean(path)
+
+	f, has := s.Get(path)
+	if !has {
+		return os.ErrNotExist
+	}
+
+	if f.mode.IsDir() && len(s.children[path]) != 0 {
+		return fmt.Errorf("dir: %s contains files", path)
+	}
+
+	base, file := filepath.Split(path)
+	base = filepath.Clean(base)
+
+	delete(s.children[base], file)
+	delete(s.files, path)
+	return nil
+}
+
+func clean(path string) string {
+	return filepath.Clean(filepath.FromSlash(path))
+}
+
+type content struct {
+	name  string
+	bytes []byte
+
+	m sync.RWMutex
+}
+
+func (c *content) WriteAt(p []byte, off int64) (int, error) {
+	if off < 0 {
+		return 0, &os.PathError{
+			Op:   "writeat",
+			Path: c.name,
+			Err:  errors.New("negative offset"),
+		}
+	}
+
+	c.m.Lock()
+	prev := len(c.bytes)
+
+	diff := int(off) - prev
+	if diff > 0 {
+		c.bytes = append(c.bytes, make([]byte, diff)...)
+	}
+
+	c.bytes = append(c.bytes[:off], p...)
+	if len(c.bytes) < prev {
+		c.bytes = c.bytes[:prev]
+	}
+	c.m.Unlock()
+
+	return len(p), nil
+}
+
+func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
+	if off < 0 {
+		return 0, &os.PathError{
+			Op:   "readat",
+			Path: c.name,
+			Err:  errors.New("negative offset"),
+		}
+	}
+
+	c.m.RLock()
+	size := int64(len(c.bytes))
+	if off >= size {
+		c.m.RUnlock()
+		return 0, io.EOF
+	}
+
+	l := int64(len(b))
+	if off+l > size {
+		l = size - off
+	}
+
+	btr := c.bytes[off : off+l]
+	n = copy(b, btr)
+
+	if len(btr) < len(b) {
+		err = io.EOF
+	}
+	c.m.RUnlock()
+
+	return
+}
diff --git a/pkg/go-nfs/helpers/nullauthhandler.go b/pkg/go-nfs/helpers/nullauthhandler.go
new file mode 100644
index 0000000..87e4658
--- /dev/null
+++ b/pkg/go-nfs/helpers/nullauthhandler.go
@@ -0,0 +1,59 @@
+package helpers
+
+import (
+	"context"
+	"net"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxbilly"
+	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+)
+
+// NewNullAuthHandler creates a handler for the provided filesystem
+func NewNullAuthHandler(fs nfs.Filesystem) nfs.Handler {
+	return &NullAuthHandler{fs}
+}
+
+// NullAuthHandler returns a NFS backing that exposes a given file system in response to all mount requests.
+type NullAuthHandler struct {
+	fs nfs.Filesystem
+}
+
+// Mount backs Mount RPC Requests, allowing for access control policies.
+func (h *NullAuthHandler) Mount(ctx context.Context, conn net.Conn, req nfs.MountRequest) (status nfs.MountStatus, hndl nfs.Filesystem, auths []nfs.AuthFlavor) {
+	status = nfs.MountStatusOk
+	hndl = h.fs
+	auths = []nfs.AuthFlavor{nfs.AuthFlavorNull}
+	return
+}
+
+// Change provides an interface for updating file attributes.
+func (h *NullAuthHandler) Change(fs nfs.Filesystem) nfs.Change {
+	if c, ok := h.fs.(ctxbilly.Change); ok {
+		return c
+	}
+	return nil
+}
+
+// FSStat provides information about a filesystem.
+func (h *NullAuthHandler) FSStat(ctx context.Context, f nfs.Filesystem, s *nfs.FSStat) error {
+	return nil
+}
+
+// ToHandle handled by CachingHandler
+func (h *NullAuthHandler) ToHandle(f nfs.Filesystem, s []string) []byte {
+	return []byte{}
+}
+
+// FromHandle handled by CachingHandler
+func (h *NullAuthHandler) FromHandle([]byte) (nfs.Filesystem, []string, error) {
+	return nil, []string{}, nil
+}
+
+func (c *NullAuthHandler) InvalidateHandle(nfs.Filesystem, []byte) error {
+	return nil
+}
+
+// HandleLimit handled by CachingHandler
+func (h *NullAuthHandler) HandleLimit() int {
+	return -1
+}
diff --git a/pkg/go-nfs/log.go b/pkg/go-nfs/log.go
new file mode 100644
index 0000000..db594d3
--- /dev/null
+++ b/pkg/go-nfs/log.go
@@ -0,0 +1,216 @@
+package nfs
+
+import (
+	"fmt"
+	"log"
+	"os"
+)
+
+var (
+	Log Logger = &DefaultLogger{}
+)
+
+type LogLevel int
+
+const (
+	PanicLevel LogLevel = iota
+	FatalLevel
+	ErrorLevel
+	WarnLevel
+	InfoLevel
+	DebugLevel
+	TraceLevel
+
+	panicLevelStr string = "[PANIC] "
+	fatalLevelStr string = "[FATAL] "
+	errorLevelStr string = "[ERROR] "
+	warnLevelStr  string = "[WARN] "
+	infoLevelStr  string = "[INFO] "
+	debugLevelStr string = "[DEBUG] "
+	traceLevelStr string = "[TRACE] "
+)
+
+type Logger interface {
+	SetLevel(level LogLevel)
+	GetLevel() LogLevel
+	ParseLevel(level string) (LogLevel, error)
+
+	Panic(args ...interface{})
+	Fatal(args ...interface{})
+	Error(args ...interface{})
+	Warn(args ...interface{})
+	Info(args ...interface{})
+	Debug(args ...interface{})
+	Trace(args ...interface{})
+	Print(args ...interface{})
+
+	Panicf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Debugf(format string, args ...interface{})
+	Tracef(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+}
+
+type DefaultLogger struct {
+	Level LogLevel
+}
+
+func SetLogger(logger Logger) {
+	Log = logger
+}
+
+func init() {
+	if os.Getenv("LOG_LEVEL") != "" {
+		if level, err := Log.ParseLevel(os.Getenv("LOG_LEVEL")); err == nil {
+			Log.SetLevel(level)
+		}
+	} else {
+		// set default log level to info
+		Log.SetLevel(InfoLevel)
+	}
+}
+
+func (l *DefaultLogger) GetLevel() LogLevel {
+	return l.Level
+}
+
+func (l *DefaultLogger) SetLevel(level LogLevel) {
+	l.Level = level
+}
+
+func (l *DefaultLogger) ParseLevel(level string) (LogLevel, error) {
+	switch level {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	case "trace":
+		return TraceLevel, nil
+	}
+	var ll LogLevel
+	return ll, fmt.Errorf("invalid log level %q", level)
+}
+
+func (l *DefaultLogger) Panic(args ...interface{}) {
+	if l.Level < PanicLevel {
+		return
+	}
+	args = append([]interface{}{panicLevelStr}, args...)
+	log.Print(args...)
+}
+
+func (l *DefaultLogger) Panicf(format string, args ...interface{}) {
+	if l.Level < PanicLevel {
+		return
+	}
+	log.Printf(panicLevelStr+format, args...)
+}
+
+func (l *DefaultLogger) Fatal(args ...interface{}) {
+	if l.Level < FatalLevel {
+		return
+	}
+	args = append([]interface{}{fatalLevelStr}, args...)
+	log.Print(args...)
+}
+
+func (l *DefaultLogger) Fatalf(format string, args ...interface{}) {
+	if l.Level < FatalLevel {
+		return
+	}
+	log.Printf(fatalLevelStr+format, args...)
+}
+
+func (l *DefaultLogger) Error(args ...interface{}) {
+	if l.Level < ErrorLevel {
+		return
+	}
+	args = append([]interface{}{errorLevelStr}, args...)
+	log.Print(args...)
+}
+
+func (l *DefaultLogger) Errorf(format string, args ...interface{}) {
+	if l.Level < ErrorLevel {
+		return
+	}
+	log.Printf(errorLevelStr+format, args...)
+}
+
+func (l *DefaultLogger) Warn(args ...interface{}) {
+	if l.Level < WarnLevel {
+		return
+	}
+	args = append([]interface{}{warnLevelStr}, args...)
+	log.Print(args...)
+}
+
+func (l *DefaultLogger) Warnf(format string, args ...interface{}) {
+	if l.Level < WarnLevel {
+		return
+	}
+	log.Printf(warnLevelStr+format, args...)
+}
+
+func (l *DefaultLogger) Info(args ...interface{}) {
+	if l.Level < InfoLevel {
+		return
+	}
+	args = append([]interface{}{infoLevelStr}, args...)
+	log.Print(args...)
+}
+
+func (l *DefaultLogger) Infof(format string, args ...interface{}) {
+	if l.Level < InfoLevel {
+		return
+	}
+	log.Printf(infoLevelStr+format, args...)
+}
+
+func (l *DefaultLogger) Debug(args ...interface{}) {
+	if l.Level < DebugLevel {
+		return
+	}
+	args = append([]interface{}{debugLevelStr}, args...)
+	log.Print(args...)
+}
+
+func (l *DefaultLogger) Debugf(format string, args ...interface{}) {
+	if l.Level < DebugLevel {
+		return
+	}
+	log.Printf(debugLevelStr+format, args...)
+}
+
+func (l *DefaultLogger) Trace(args ...interface{}) {
+	if l.Level < TraceLevel {
+		return
+	}
+	args = append([]interface{}{traceLevelStr}, args...)
+	log.Print(args...)
+}
+
+func (l *DefaultLogger) Tracef(format string, args ...interface{}) {
+	if l.Level < TraceLevel {
+		return
+	}
+	log.Printf(traceLevelStr+format, args...)
+}
+
+func (l *DefaultLogger) Print(args ...interface{}) {
+	log.Print(args...)
+}
+
+func (l *DefaultLogger) Printf(format string, args ...interface{}) {
+	log.Printf(format, args...)
+}
diff --git a/pkg/go-nfs/mount.go b/pkg/go-nfs/mount.go
new file mode 100644
index 0000000..e95d098
--- /dev/null
+++ b/pkg/go-nfs/mount.go
@@ -0,0 +1,58 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+const (
+	mountServiceID = 100005
+)
+
+func init() {
+	_ = RegisterMessageHandler(mountServiceID, uint32(MountProcNull), onMountNull)
+	_ = RegisterMessageHandler(mountServiceID, uint32(MountProcMount), onMount)
+	_ = RegisterMessageHandler(mountServiceID, uint32(MountProcUmnt), onUMount)
+}
+
+func onMountNull(ctx context.Context, w *response, userHandle Handler) error {
+	return w.writeHeader(ResponseCodeSuccess)
+}
+
+func onMount(ctx context.Context, w *response, userHandle Handler) error {
+	// TODO: auth check.
+	dirpath, err := xdr.ReadOpaque(w.req.Body)
+	if err != nil {
+		return err
+	}
+	mountReq := MountRequest{Header: w.req.Header, Dirpath: dirpath}
+	status, handle, flavors := userHandle.Mount(ctx, w.conn, mountReq)
+
+	if err := w.writeHeader(ResponseCodeSuccess); err != nil {
+		return err
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(status)); err != nil {
+		return err
+	}
+
+	rootHndl := userHandle.ToHandle(handle, []string{})
+
+	if status == MountStatusOk {
+		_ = xdr.Write(writer, rootHndl)
+		_ = xdr.Write(writer, flavors)
+	}
+	return w.Write(writer.Bytes())
+}
+
+func onUMount(ctx context.Context, w *response, userHandle Handler) error {
+	_, err := xdr.ReadOpaque(w.req.Body)
+	if err != nil {
+		return err
+	}
+
+	return w.writeHeader(ResponseCodeSuccess)
+}
diff --git a/pkg/go-nfs/mountinterface.go b/pkg/go-nfs/mountinterface.go
new file mode 100644
index 0000000..1dc39ee
--- /dev/null
+++ b/pkg/go-nfs/mountinterface.go
@@ -0,0 +1,90 @@
+package nfs
+
+import (
+	"github.com/willscott/go-nfs-client/nfs/rpc"
+)
+
+// FHSize is the maximum size of a FileHandle
+const FHSize = 64
+
+// MNTNameLen is the maximum size of a mount name
+const MNTNameLen = 255
+
+// MntPathLen is the maximum size of a mount path
+const MntPathLen = 1024
+
+// FileHandle maps to a fhandle3
+type FileHandle []byte
+
+// MountStatus defines the response to the Mount Procedure
+type MountStatus uint32
+
+// MountStatus Codes
+const (
+	MountStatusOk             MountStatus = 0
+	MountStatusErrPerm        MountStatus = 1
+	MountStatusErrNoEnt       MountStatus = 2
+	MountStatusErrIO          MountStatus = 5
+	MountStatusErrAcces       MountStatus = 13
+	MountStatusErrNotDir      MountStatus = 20
+	MountStatusErrInval       MountStatus = 22
+	MountStatusErrNameTooLong MountStatus = 63
+	MountStatusErrNotSupp     MountStatus = 10004
+	MountStatusErrServerFault MountStatus = 10006
+)
+
+// MountProcedure is the valid RPC calls for the mount service.
+type MountProcedure uint32
+
+// MountProcedure Codes
+const (
+	MountProcNull MountProcedure = iota
+	MountProcMount
+	MountProcDump
+	MountProcUmnt
+	MountProcUmntAll
+	MountProcExport
+)
+
+// String returns the human-readable name of the mount procedure, or
+// "Unknown" for values outside the defined range.
+func (m MountProcedure) String() string {
+	names := [...]string{
+		MountProcNull:    "Null",
+		MountProcMount:   "Mount",
+		MountProcDump:    "Dump",
+		MountProcUmnt:    "Umnt",
+		MountProcUmntAll: "UmntAll",
+		MountProcExport:  "Export",
+	}
+	if int(m) < len(names) {
+		return names[m]
+	}
+	return "Unknown"
+}
+
+// AuthFlavor is a form of authentication, per rfc1057 section 7.2
+type AuthFlavor uint32
+
+// AuthFlavor Codes
+const (
+	AuthFlavorNull  AuthFlavor = 0
+	AuthFlavorUnix  AuthFlavor = 1
+	AuthFlavorShort AuthFlavor = 2
+	AuthFlavorDES   AuthFlavor = 3
+)
+
+// MountRequest contains the format of a client request to open a mount.
+type MountRequest struct {
+	rpc.Header
+	Dirpath []byte
+}
+
+// MountResponse is the server's response with status `MountStatusOk`
+type MountResponse struct {
+	rpc.Header
+	FileHandle
+	AuthFlavors []int
+}
diff --git a/pkg/go-nfs/nfs.go b/pkg/go-nfs/nfs.go
new file mode 100644
index 0000000..bf85e0a
--- /dev/null
+++ b/pkg/go-nfs/nfs.go
@@ -0,0 +1,38 @@
+package nfs
+
+import (
+	"context"
+)
+
+const (
+	nfsServiceID = 100003
+)
+
+// init registers a handler for each of the 22 NFSv3 procedures with the
+// shared RPC dispatch table under the NFS service program number.
+// NOTE(review): registration errors are discarded — presumably
+// RegisterMessageHandler can only fail on duplicate registration;
+// confirm.
+func init() {
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureNull), onNull)               // 0
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureGetAttr), onGetAttr)         // 1
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureSetAttr), onSetAttr)         // 2
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureLookup), onLookup)           // 3
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureAccess), onAccess)           // 4
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureReadlink), onReadLink)       // 5
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureRead), onRead)               // 6
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureWrite), onWrite)             // 7
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureCreate), onCreate)           // 8
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureMkDir), onMkdir)             // 9
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureSymlink), onSymlink)         // 10
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureMkNod), onMknod)             // 11
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureRemove), onRemove)           // 12
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureRmDir), onRmDir)             // 13
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureRename), onRename)           // 14
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureLink), onLink)               // 15
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureReadDir), onReadDir)         // 16
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureReadDirPlus), onReadDirPlus) // 17
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureFSStat), onFSStat)           // 18
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureFSInfo), onFSInfo)           // 19
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedurePathConf), onPathConf)       // 20
+	_ = RegisterMessageHandler(nfsServiceID, uint32(NFSProcedureCommit), onCommit)           // 21
+}
+
+// onNull handles the NFSv3 NULL procedure: a no-op ping answered with
+// an empty reply body.
+func onNull(ctx context.Context, w *response, userHandle Handler) error {
+	return w.Write([]byte{})
+}
diff --git a/pkg/go-nfs/nfs_onaccess.go b/pkg/go-nfs/nfs_onaccess.go
new file mode 100644
index 0000000..6674734
--- /dev/null
+++ b/pkg/go-nfs/nfs_onaccess.go
@@ -0,0 +1,45 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+
+	billy "github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+// onAccess handles the NFSv3 ACCESS procedure: it reports which of the
+// client-requested access bits the server is willing to grant for the
+// object identified by the file handle. The reply carries post-op
+// attributes followed by the granted mask.
+func onAccess(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = opAttrErrorFormatter
+	roothandle, err := xdr.ReadOpaque(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+	fs, path, err := userHandle.FromHandle(roothandle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+	mask, err := xdr.ReadUint32(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	// On read-only filesystems, keep only the read-oriented bits of the
+	// requested mask: ACCESS3_READ (0x1), ACCESS3_LOOKUP (0x2) and
+	// ACCESS3_EXECUTE (0x20) per RFC 1813.
+	if !CapabilityCheck(fs, billy.WriteCapability) {
+		mask = mask & (1 | 2 | 0x20)
+	}
+
+	if err := xdr.Write(writer, mask); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_oncommit.go b/pkg/go-nfs/nfs_oncommit.go
new file mode 100644
index 0000000..e2616d6
--- /dev/null
+++ b/pkg/go-nfs/nfs_oncommit.go
@@ -0,0 +1,51 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"os"
+
+	billy "github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+// onCommit handles the NFSv3 COMMIT procedure. It is a no-op, as we
+// always push writes through to the backing store; the reply still
+// carries empty pre-op data, post-op attributes, and the server's
+// 8-byte write verifier so clients can detect server restarts.
+func onCommit(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = wccDataErrorFormatter
+	handle, err := xdr.ReadOpaque(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+	// The conn will drain the unread offset and count arguments.
+
+	fs, path, err := userHandle.FromHandle(handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+
+	if !CapabilityCheck(fs, billy.WriteCapability) {
+		return &NFSStatusError{NFSStatusServerFault, os.ErrPermission}
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	// FIX: previously returned the raw error here; wrap it like every
+	// other serialization failure so clients get a proper NFS status.
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	// no pre-op cache data.
+	if err := xdr.Write(writer, uint32(0)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	// write the 8 bytes of write verification.
+	if err := xdr.Write(writer, w.Server.ID); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_oncreate.go b/pkg/go-nfs/nfs_oncreate.go
new file mode 100644
index 0000000..3a181d0
--- /dev/null
+++ b/pkg/go-nfs/nfs_oncreate.go
@@ -0,0 +1,125 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"os"
+
+	billy "github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+const (
+	createModeUnchecked = 0
+	createModeGuarded   = 1
+	createModeExclusive = 2
+)
+
+// onCreate handles the NFSv3 CREATE procedure for UNCHECKED and GUARDED
+// modes. EXCLUSIVE mode is parsed (to drain the verifier bytes) but
+// rejected as unsupported. On success the reply carries the new file's
+// handle, its post-op attributes, and the directory's wcc data.
+func onCreate(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = wccDataErrorFormatter
+	obj := DirOpArg{}
+	err := xdr.Read(w.req.Body, &obj)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+	how, err := xdr.ReadUint32(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+	var attrs *SetFileAttributes
+	if how == createModeUnchecked || how == createModeGuarded {
+		sattr, err := ReadSetFileAttributes(w.req.Body)
+		if err != nil {
+			return &NFSStatusError{NFSStatusInval, err}
+		}
+		attrs = sattr
+	} else if how == createModeExclusive {
+		// read createverf3
+		var verf [8]byte
+		if err := xdr.Read(w.req.Body, &verf); err != nil {
+			return &NFSStatusError{NFSStatusInval, err}
+		}
+		Log.Errorf("failing create to indicate lack of support for 'exclusive' mode.")
+		// TODO: support 'exclusive' mode.
+		return &NFSStatusError{NFSStatusNotSupp, os.ErrPermission}
+	} else {
+		// invalid
+		return &NFSStatusError{NFSStatusNotSupp, os.ErrInvalid}
+	}
+
+	fs, path, err := userHandle.FromHandle(obj.Handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+
+	if !CapabilityCheck(fs, billy.WriteCapability) {
+		return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
+	}
+
+	if len(string(obj.Filename)) > PathNameMax {
+		return &NFSStatusError{NFSStatusNameTooLong, nil}
+	}
+
+	newFile := append(path, string(obj.Filename))
+	newFilePath := fs.Join(newFile...)
+	if s, err := fs.Stat(ctx, newFilePath); err == nil {
+		// Existing directory can never be overwritten; an existing file
+		// only blocks GUARDED creates (UNCHECKED truncates via Create).
+		if s.IsDir() {
+			return &NFSStatusError{NFSStatusExist, nil}
+		}
+		if how == createModeGuarded {
+			return &NFSStatusError{NFSStatusExist, os.ErrPermission}
+		}
+	} else {
+		// Target doesn't exist: verify the parent is a real directory.
+		if s, err := fs.Stat(ctx, fs.Join(path...)); err != nil {
+			return &NFSStatusError{NFSStatusAccess, err}
+		} else if !s.IsDir() {
+			return &NFSStatusError{NFSStatusNotDir, nil}
+		}
+	}
+
+	file, err := fs.Create(ctx, newFilePath)
+	if err != nil {
+		Log.Errorf("Error Creating: %v", err)
+		return &NFSStatusError{NFSStatusAccess, err}
+	}
+	if err := file.Close(ctx); err != nil {
+		Log.Errorf("Error Creating: %v", err)
+		return &NFSStatusError{NFSStatusAccess, err}
+	}
+
+	fp := userHandle.ToHandle(fs, newFile)
+	changer := userHandle.Change(fs)
+	// NOTE(review): unlike onMkdir, a nil changer is not checked before
+	// Apply — presumably attrs.Apply tolerates a nil changer; confirm.
+	if err := attrs.Apply(ctx, changer, fs, newFilePath); err != nil {
+		Log.Errorf("Error applying attributes: %v\n", err)
+		return &NFSStatusError{NFSStatusIO, err}
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	// "handle follows"
+	if err := xdr.Write(writer, uint32(1)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := xdr.Write(writer, fp); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, []string{file.Name()})); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	// dir_wcc (we don't include pre_op_attr)
+	if err := xdr.Write(writer, uint32(0)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onfsinfo.go b/pkg/go-nfs/nfs_onfsinfo.go
new file mode 100644
index 0000000..152e366
--- /dev/null
+++ b/pkg/go-nfs/nfs_onfsinfo.go
@@ -0,0 +1,89 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+const (
+	// FSInfoPropertyLink does the FS support hard links?
+	FSInfoPropertyLink = 0x0001
+	// FSInfoPropertySymlink does the FS support soft links?
+	FSInfoPropertySymlink = 0x0002
+	// FSInfoPropertyHomogeneous does the FS need PATHCONF calls for each file
+	FSInfoPropertyHomogeneous = 0x0008
+	// FSInfoPropertyCanSetTime can the FS support setting access/mod times?
+	FSInfoPropertyCanSetTime = 0x0010
+)
+
+// onFSInfo handles the NFSv3 FSINFO procedure, advertising static
+// transfer-size limits and capability flags for the export behind the
+// supplied handle.
+func onFSInfo(ctx context.Context, w *response, userHandle Handler) error {
+	roothandle, err := xdr.ReadOpaque(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+	fs, path, err := userHandle.FromHandle(roothandle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	// Mirrors the FSINFO3resok wire layout (minus the post-op attrs
+	// already written above).
+	type fsinfores struct {
+		Rtmax       uint32
+		Rtpref      uint32
+		Rtmult      uint32
+		Wtmax       uint32
+		Wtpref      uint32
+		Wtmult      uint32
+		Dtpref      uint32
+		Maxfilesize uint64
+		TimeDelta   uint64
+		Properties  uint32
+	}
+
+	res := fsinfores{
+		Rtmax:       1 << 30,
+		Rtpref:      1 << 30,
+		Rtmult:      4096,
+		Wtmax:       1 << 30,
+		Wtpref:      1 << 30,
+		Wtmult:      4096,
+		Dtpref:      8192,
+		Maxfilesize: 1 << 62, // wild guess. this seems big.
+		TimeDelta:   1,       // nanosecond precision.
+		Properties:  0,
+	}
+
+	// TODO: these aren't great indications of support, really.
+	// if _, ok := fs.(billy.Symlink); ok {
+	// 	res.Properties |= FSInfoPropertyLink
+	// 	res.Properties |= FSInfoPropertySymlink
+	// }
+	// TODO: if the nfs share spans multiple virtual mounts, may need
+	// to support granular PATHINFO responses.
+	res.Properties |= FSInfoPropertyHomogeneous
+	// TODO: not a perfect indicator
+
+	if CapabilityCheck(fs, billy.WriteCapability) {
+		res.Properties |= FSInfoPropertyCanSetTime
+	}
+
+	// TODO: this whole struct should be specifiable by the userhandler.
+
+	if err := xdr.Write(writer, res); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onfsstat.go b/pkg/go-nfs/nfs_onfsstat.go
new file mode 100644
index 0000000..325a106
--- /dev/null
+++ b/pkg/go-nfs/nfs_onfsstat.go
@@ -0,0 +1,59 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+
+	billy "github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+// onFSStat handles the NFSv3 FSSTAT procedure. It starts from generous
+// "effectively unlimited" defaults, zeroes the available counters for
+// read-only filesystems, then gives the user handler a chance to
+// override the figures via its FSStat hook.
+func onFSStat(ctx context.Context, w *response, userHandle Handler) error {
+	roothandle, err := xdr.ReadOpaque(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+	fs, path, err := userHandle.FromHandle(roothandle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+
+	defaults := FSStat{
+		TotalSize:      1 << 62,
+		FreeSize:       1 << 62,
+		AvailableSize:  1 << 62,
+		TotalFiles:     1 << 62,
+		FreeFiles:      1 << 62,
+		AvailableFiles: 1 << 62,
+		CacheHint:      0,
+	}
+
+	// Read-only exports have no space "available" for new data.
+	if !CapabilityCheck(fs, billy.WriteCapability) {
+		defaults.AvailableFiles = 0
+		defaults.AvailableSize = 0
+	}
+
+	err = userHandle.FSStat(ctx, fs, &defaults)
+	if err != nil {
+		// Pass through handler-provided NFS statuses untouched.
+		if _, ok := err.(*NFSStatusError); ok {
+			return err
+		}
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := xdr.Write(writer, defaults); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_ongetattr.go b/pkg/go-nfs/nfs_ongetattr.go
new file mode 100644
index 0000000..40c4148
--- /dev/null
+++ b/pkg/go-nfs/nfs_ongetattr.go
@@ -0,0 +1,48 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"os"
+
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+// onGetAttr handles the NFSv3 GETATTR procedure: it resolves the file
+// handle to a path, lstat()s it, and replies with the file attributes.
+func onGetAttr(ctx context.Context, w *response, userHandle Handler) error {
+	handle, err := xdr.ReadOpaque(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	fs, path, err := userHandle.FromHandle(handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+
+	fullPath := fs.Join(path...)
+	info, err := fs.Lstat(ctx, fullPath)
+	switch {
+	case err == nil:
+		// stat succeeded; build the reply below.
+	case os.IsNotExist(err):
+		return &NFSStatusError{NFSStatusNoEnt, err}
+	case errors.Is(err, context.DeadlineExceeded):
+		// Slow backing store: tell the client to retry later.
+		return &NFSStatusError{NFSStatusJukebox, err}
+	default:
+		return &NFSStatusError{NFSStatusIO, err}
+	}
+
+	buf := new(bytes.Buffer)
+	if err := xdr.Write(buf, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := xdr.Write(buf, ToFileAttribute(info, fullPath)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := w.Write(buf.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onlink.go b/pkg/go-nfs/nfs_onlink.go
new file mode 100644
index 0000000..460a969
--- /dev/null
+++ b/pkg/go-nfs/nfs_onlink.go
@@ -0,0 +1,94 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"os"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+// onLink handles the NFSv3 LINK procedure through the handler's
+// UnixChange extension, since the backing billy.FS doesn't support hard
+// links natively.
+//
+// NOTE(review): the argument parse here (diropargs, sattr3, opaque
+// target) mirrors the SYMLINK layout rather than RFC 1813's LINK3args
+// ({file fh3, link diropargs3}) — confirm against real client traffic.
+func onLink(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = wccDataErrorFormatter
+	obj := DirOpArg{}
+	err := xdr.Read(w.req.Body, &obj)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+	attrs, err := ReadSetFileAttributes(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	target, err := xdr.ReadOpaque(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	fs, path, err := userHandle.FromHandle(obj.Handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+	if !CapabilityCheck(fs, billy.WriteCapability) {
+		return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
+	}
+
+	if len(string(obj.Filename)) > PathNameMax {
+		return &NFSStatusError{NFSStatusNameTooLong, os.ErrInvalid}
+	}
+
+	newFilePath := fs.Join(append(path, string(obj.Filename))...)
+	if _, err := fs.Stat(ctx, newFilePath); err == nil {
+		return &NFSStatusError{NFSStatusExist, os.ErrExist}
+	}
+	if s, err := fs.Stat(ctx, fs.Join(path...)); err != nil {
+		return &NFSStatusError{NFSStatusAccess, err}
+	} else if !s.IsDir() {
+		return &NFSStatusError{NFSStatusNotDir, nil}
+	}
+
+	fp := userHandle.ToHandle(fs, append(path, string(obj.Filename)))
+	changer := userHandle.Change(fs)
+	if changer == nil {
+		// FIX: previously wrapped `err`, which is provably nil here (the
+		// preceding Stat succeeded); report the real cause instead.
+		return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
+	}
+	cos, ok := changer.(UnixChange)
+	if !ok {
+		// FIX: same nil-error wrapping as above.
+		return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
+	}
+
+	err = cos.Link(ctx, string(target), newFilePath)
+	if err != nil {
+		return &NFSStatusError{NFSStatusAccess, err}
+	}
+	if err := attrs.Apply(ctx, changer, fs, newFilePath); err != nil {
+		return &NFSStatusError{NFSStatusIO, err}
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	// "handle follows"
+	if err := xdr.Write(writer, uint32(1)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := xdr.Write(writer, fp); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, append(path, string(obj.Filename)))); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := WriteWcc(writer, nil, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onlookup.go b/pkg/go-nfs/nfs_onlookup.go
new file mode 100644
index 0000000..6507d03
--- /dev/null
+++ b/pkg/go-nfs/nfs_onlookup.go
@@ -0,0 +1,86 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"os"
+
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+// lookupSuccessResponse serializes a successful LOOKUP reply body:
+// status, the entry's file handle, the entry's post-op attributes, and
+// finally the containing directory's post-op attributes.
+func lookupSuccessResponse(ctx context.Context, handle []byte, entPath, dirPath []string, fs Filesystem) ([]byte, error) {
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return nil, err
+	}
+	if err := xdr.Write(writer, handle); err != nil {
+		return nil, err
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, entPath)); err != nil {
+		return nil, err
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, dirPath)); err != nil {
+		return nil, err
+	}
+	return writer.Bytes(), nil
+}
+
+// onLookup handles the NFSv3 LOOKUP procedure: it resolves a filename
+// within the directory identified by the handle and returns the entry's
+// handle and attributes. "." and ".." receive special handling, and
+// ".." at the export root is refused rather than escaping the mount.
+func onLookup(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = opAttrErrorFormatter
+	obj := DirOpArg{}
+	err := xdr.Read(w.req.Body, &obj)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	fs, p, err := userHandle.FromHandle(obj.Handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+	dirInfo, err := fs.Lstat(ctx, fs.Join(p...))
+	if err != nil || !dirInfo.IsDir() {
+		return &NFSStatusError{NFSStatusNotDir, err}
+	}
+
+	// Special cases for "." and ".."
+	if bytes.Equal(obj.Filename, []byte(".")) {
+		// "." resolves to the directory itself; reuse the request handle.
+		resp, err := lookupSuccessResponse(ctx, obj.Handle, p, p, fs)
+		if err != nil {
+			return &NFSStatusError{NFSStatusServerFault, err}
+		}
+		if err := w.Write(resp); err != nil {
+			return &NFSStatusError{NFSStatusServerFault, err}
+		}
+		return nil
+	}
+	if bytes.Equal(obj.Filename, []byte("..")) {
+		// Don't allow walking above the export root.
+		if len(p) == 0 {
+			return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
+		}
+		pPath := p[0 : len(p)-1]
+		pHandle := userHandle.ToHandle(fs, pPath)
+		resp, err := lookupSuccessResponse(ctx, pHandle, pPath, p, fs)
+		if err != nil {
+			return &NFSStatusError{NFSStatusServerFault, err}
+		}
+		if err := w.Write(resp); err != nil {
+			return &NFSStatusError{NFSStatusServerFault, err}
+		}
+		return nil
+	}
+
+	reqPath := append(p, string(obj.Filename))
+	if _, err = fs.Lstat(ctx, fs.Join(reqPath...)); err != nil {
+		return &NFSStatusError{NFSStatusNoEnt, os.ErrNotExist}
+	}
+
+	newHandle := userHandle.ToHandle(fs, reqPath)
+	resp, err := lookupSuccessResponse(ctx, newHandle, reqPath, p, fs)
+	if err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := w.Write(resp); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onmkdir.go b/pkg/go-nfs/nfs_onmkdir.go
new file mode 100644
index 0000000..d96bab1
--- /dev/null
+++ b/pkg/go-nfs/nfs_onmkdir.go
@@ -0,0 +1,94 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"os"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+const (
+	// mkdirDefaultMode is the fallback permission mode when the client
+	// supplies none. NOTE(review): this is decimal 755 (= 0o1363), not
+	// octal 0755 — looks like a latent bug; confirm the intended value.
+	mkdirDefaultMode = 755
+)
+
+// onMkdir handles the NFSv3 MKDIR procedure: it creates the named
+// directory under the handle's path, applies any client-supplied
+// attributes, and replies with the new handle, post-op attributes, and
+// the parent's wcc data.
+func onMkdir(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = wccDataErrorFormatter
+	obj := DirOpArg{}
+	err := xdr.Read(w.req.Body, &obj)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	attrs, err := ReadSetFileAttributes(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	fs, path, err := userHandle.FromHandle(obj.Handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+	if !CapabilityCheck(fs, billy.WriteCapability) {
+		return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
+	}
+
+	if len(string(obj.Filename)) > PathNameMax {
+		return &NFSStatusError{NFSStatusNameTooLong, os.ErrInvalid}
+	}
+	// "." and ".." always exist and can never be created.
+	if string(obj.Filename) == "." || string(obj.Filename) == ".." {
+		return &NFSStatusError{NFSStatusExist, os.ErrExist}
+	}
+
+	newFolder := append(path, string(obj.Filename))
+	newFolderPath := fs.Join(newFolder...)
+	if s, err := fs.Stat(ctx, newFolderPath); err == nil {
+		if s.IsDir() {
+			return &NFSStatusError{NFSStatusExist, nil}
+		}
+	} else {
+		// Target doesn't exist: verify the parent is a real directory.
+		if s, err := fs.Stat(ctx, fs.Join(path...)); err != nil {
+			return &NFSStatusError{NFSStatusAccess, err}
+		} else if !s.IsDir() {
+			return &NFSStatusError{NFSStatusNotDir, nil}
+		}
+	}
+
+	if err := fs.MkdirAll(ctx, newFolderPath, attrs.Mode(mkdirDefaultMode)); err != nil {
+		return &NFSStatusError{NFSStatusAccess, err}
+	}
+
+	fp := userHandle.ToHandle(fs, newFolder)
+	changer := userHandle.Change(fs)
+	if changer != nil {
+		if err := attrs.Apply(ctx, changer, fs, newFolderPath); err != nil {
+			return &NFSStatusError{NFSStatusIO, err}
+		}
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	// "handle follows"
+	if err := xdr.Write(writer, uint32(1)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := xdr.Write(writer, fp); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, newFolder)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := WriteWcc(writer, nil, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onmknod.go b/pkg/go-nfs/nfs_onmknod.go
new file mode 100644
index 0000000..81ca2fa
--- /dev/null
+++ b/pkg/go-nfs/nfs_onmknod.go
@@ -0,0 +1,158 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"os"
+
+	billy "github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+type nfs_ftype int32
+
+const (
+	FTYPE_NF3REG  nfs_ftype = 1
+	FTYPE_NF3DIR  nfs_ftype = 2
+	FTYPE_NF3BLK  nfs_ftype = 3
+	FTYPE_NF3CHR  nfs_ftype = 4
+	FTYPE_NF3LNK  nfs_ftype = 5
+	FTYPE_NF3SOCK nfs_ftype = 6
+	FTYPE_NF3FIFO nfs_ftype = 7
+)
+
+// onMknod handles the NFSv3 MKNOD procedure. Block/char devices,
+// sockets and FIFOs are created through the handler's UnixChange
+// extension, since the backing billy.FS doesn't support creating these
+// node types directly; any other type yields NFS3ERR_BADTYPE.
+func onMknod(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = wccDataErrorFormatter
+	obj := DirOpArg{}
+	err := xdr.Read(w.req.Body, &obj)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	ftype, err := xdr.ReadUint32(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	// see if the filesystem supports mknod
+	fs, path, err := userHandle.FromHandle(obj.Handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+
+	if !CapabilityCheck(fs, billy.WriteCapability) {
+		return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
+	}
+
+	c := userHandle.Change(fs)
+	if c == nil {
+		return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
+	}
+	cu, ok := c.(UnixChange)
+	if !ok {
+		return &NFSStatusError{NFSStatusAccess, os.ErrPermission}
+	}
+
+	if len(string(obj.Filename)) > PathNameMax {
+		return &NFSStatusError{NFSStatusNameTooLong, os.ErrInvalid}
+	}
+
+	newFilePath := fs.Join(append(path, string(obj.Filename))...)
+	if _, err := fs.Stat(ctx, newFilePath); err == nil {
+		return &NFSStatusError{NFSStatusExist, os.ErrExist}
+	}
+	parent, err := fs.Stat(ctx, fs.Join(path...))
+	if err != nil {
+		return &NFSStatusError{NFSStatusAccess, err}
+	} else if !parent.IsDir() {
+		return &NFSStatusError{NFSStatusNotDir, nil}
+	}
+	fp := userHandle.ToHandle(fs, append(path, string(obj.Filename)))
+
+	switch nfs_ftype(ftype) {
+	// BUG FIX: FTYPE_NF3CHR previously had its own empty case body; Go
+	// cases do not fall through, so char-device requests created nothing
+	// yet still returned a success reply with a handle to a nonexistent
+	// node. CHR and BLK share the devicedata3 argument layout, so they
+	// are handled together.
+	case FTYPE_NF3CHR, FTYPE_NF3BLK:
+		// read devicedata3 = {sattr3, specdata3}
+		attrs, err := ReadSetFileAttributes(w.req.Body)
+		if err != nil {
+			return &NFSStatusError{NFSStatusInval, err}
+		}
+		specData1, err := xdr.ReadUint32(w.req.Body)
+		if err != nil {
+			return &NFSStatusError{NFSStatusInval, err}
+		}
+		specData2, err := xdr.ReadUint32(w.req.Body)
+		if err != nil {
+			return &NFSStatusError{NFSStatusInval, err}
+		}
+
+		err = cu.Mknod(ctx, newFilePath, uint32(attrs.Mode(parent.Mode())), specData1, specData2)
+		if err != nil {
+			return &NFSStatusError{NFSStatusAccess, err}
+		}
+		if err = attrs.Apply(ctx, cu, fs, newFilePath); err != nil {
+			return &NFSStatusError{NFSStatusServerFault, err}
+		}
+
+	case FTYPE_NF3SOCK:
+		// read sattr3
+		attrs, err := ReadSetFileAttributes(w.req.Body)
+		if err != nil {
+			return &NFSStatusError{NFSStatusInval, err}
+		}
+		if err := cu.Socket(ctx, newFilePath); err != nil {
+			return &NFSStatusError{NFSStatusAccess, err}
+		}
+		if err = attrs.Apply(ctx, cu, fs, newFilePath); err != nil {
+			return &NFSStatusError{NFSStatusServerFault, err}
+		}
+
+	case FTYPE_NF3FIFO:
+		// read sattr3
+		attrs, err := ReadSetFileAttributes(w.req.Body)
+		if err != nil {
+			return &NFSStatusError{NFSStatusInval, err}
+		}
+		err = cu.Mkfifo(ctx, newFilePath, uint32(attrs.Mode(parent.Mode())))
+		if err != nil {
+			return &NFSStatusError{NFSStatusAccess, err}
+		}
+		if err = attrs.Apply(ctx, cu, fs, newFilePath); err != nil {
+			return &NFSStatusError{NFSStatusServerFault, err}
+		}
+
+	default:
+		// Regular files, directories, and symlinks have dedicated
+		// procedures; MKNOD rejects them.
+		return &NFSStatusError{NFSStatusBadType, os.ErrInvalid}
+		// end of input.
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	// "handle follows"
+	if err := xdr.Write(writer, uint32(1)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	// fh3
+	if err := xdr.Write(writer, fp); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	// attr
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, append(path, string(obj.Filename)))); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	// wcc
+	if err := WriteWcc(writer, nil, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onpathconf.go b/pkg/go-nfs/nfs_onpathconf.go
new file mode 100644
index 0000000..1771b60
--- /dev/null
+++ b/pkg/go-nfs/nfs_onpathconf.go
@@ -0,0 +1,55 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+// PathNameMax is the maximum length for a file name
+const PathNameMax = 255
+
+// onPathConf handles the NFSv3 PATHCONF procedure, returning static
+// POSIX pathconf information for the filesystem behind the handle.
+func onPathConf(ctx context.Context, w *response, userHandle Handler) error {
+	roothandle, err := xdr.ReadOpaque(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+	fs, path, err := userHandle.FromHandle(roothandle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+
+	// Field order follows the PATHCONF3resok wire layout (minus the
+	// post-op attrs written separately below).
+	type pathConf struct {
+		LinkMax         uint32
+		NameMax         uint32
+		NoTrunc         uint32
+		ChownRestricted uint32
+		CaseInsensitive uint32
+		CasePreserving  uint32
+	}
+
+	buf := new(bytes.Buffer)
+	if err := xdr.Write(buf, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(buf, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := xdr.Write(buf, pathConf{
+		LinkMax:         1,
+		NameMax:         PathNameMax,
+		NoTrunc:         1,
+		ChownRestricted: 0,
+		CaseInsensitive: 0,
+		CasePreserving:  1,
+	}); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := w.Write(buf.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onread.go b/pkg/go-nfs/nfs_onread.go
new file mode 100644
index 0000000..8616acd
--- /dev/null
+++ b/pkg/go-nfs/nfs_onread.go
@@ -0,0 +1,97 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io"
+	"os"
+
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
// nfsReadArgs is the decoded READ3args: the file handle plus the byte
// range (Offset/Count) the client wants to read.
type nfsReadArgs struct {
	Handle []byte
	Offset uint64
	Count  uint32
}

// nfsReadResponse is the READ3resok payload: the number of bytes actually
// read, an EOF flag (1 when the read hit end of file, 0 otherwise), and
// the data itself.
type nfsReadResponse struct {
	Count uint32
	EOF   uint32
	Data  []byte
}

// MaxRead is the advertised largest buffer the server is willing to read
const MaxRead = 1 << 24

// CheckRead is a size where - if a request to read is larger than this,
// the server will stat the file to learn it's actual size before allocating
// a buffer to read into.
const CheckRead = 1 << 15
+
+func onRead(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = opAttrErrorFormatter
+	var obj nfsReadArgs
+	err := xdr.Read(w.req.Body, &obj)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+	fs, path, err := userHandle.FromHandle(obj.Handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+
+	fh, err := fs.Open(ctx, fs.Join(path...))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return &NFSStatusError{NFSStatusNoEnt, err}
+		}
+		if errors.Is(err, context.DeadlineExceeded) {
+			return &NFSStatusError{NFSStatusJukebox, err}
+		}
+		return &NFSStatusError{NFSStatusAccess, err}
+	}
+
+	resp := nfsReadResponse{}
+
+	if obj.Count > CheckRead {
+		info, err := fs.Stat(ctx, fs.Join(path...))
+		if err != nil {
+			return &NFSStatusError{NFSStatusAccess, err}
+		}
+		if info.Size()-int64(obj.Offset) < int64(obj.Count) {
+			obj.Count = uint32(uint64(info.Size()) - obj.Offset)
+		}
+	}
+	if obj.Count > MaxRead {
+		obj.Count = MaxRead
+	}
+	resp.Data = make([]byte, obj.Count)
+	// todo: multiple reads if size isn't full
+	cnt, err := fh.ReadAt(ctx, resp.Data, int64(obj.Offset))
+	if err != nil && !errors.Is(err, io.EOF) {
+		return &NFSStatusError{NFSStatusIO, err}
+	}
+	resp.Count = uint32(cnt)
+	resp.Data = resp.Data[:resp.Count]
+	if errors.Is(err, io.EOF) {
+		resp.EOF = 1
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := xdr.Write(writer, resp); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onreaddir.go b/pkg/go-nfs/nfs_onreaddir.go
new file mode 100644
index 0000000..2769d02
--- /dev/null
+++ b/pkg/go-nfs/nfs_onreaddir.go
@@ -0,0 +1,195 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"encoding/binary"
+	"errors"
+	"io"
+	"io/fs"
+	"os"
+	"path"
+	"sort"
+
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
// readDirArgs is the decoded READDIR3args: the directory handle, a resume
// cookie (0 = start of directory), the cookie verifier from a previous
// reply, and the client's maximum reply size in bytes.
type readDirArgs struct {
	Handle      []byte
	Cookie      uint64
	CookieVerif uint64
	Count       uint32
}

// readDirEntity is a single entry in the READDIR reply stream. Next marks
// whether another entry follows in the XDR-encoded list.
type readDirEntity struct {
	FileID uint64
	Name   []byte
	Cookie uint64
	Next   bool
}
+
// onReadDir handles NFSPROC3_READDIR: it streams name/fileid/cookie
// entries for a directory, resuming after obj.Cookie and bounded by the
// client's reply-size budget (obj.Count) and the handler's handle limit.
func onReadDir(ctx context.Context, w *response, userHandle Handler) error {
	w.errorFmt = opAttrErrorFormatter
	obj := readDirArgs{}
	err := xdr.Read(w.req.Body, &obj)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}

	// Refuse reply budgets too small to hold even a minimal listing.
	if obj.Count < 1024 {
		return &NFSStatusError{NFSStatusTooSmall, io.ErrShortBuffer}
	}

	fs, p, err := userHandle.FromHandle(obj.Handle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}

	contents, verifier, err := getDirListingWithVerifier(ctx, userHandle, obj.Handle, obj.CookieVerif)
	if err != nil {
		return err
	}
	// A resumed listing must carry the verifier it started with; a
	// mismatch means the directory changed and the cookie is invalid.
	if obj.Cookie > 0 && obj.CookieVerif > 0 && verifier != obj.CookieVerif {
		return &NFSStatusError{NFSStatusBadCookie, nil}
	}

	entities := make([]readDirEntity, 0)
	maxBytes := uint32(100) // conservative overhead measure

	// Cookie 0 means "start of directory": emit '.' and '..' first.
	started := obj.Cookie == 0
	if started {
		// add '.' and '..' to entities
		dotdotFileID := uint64(0)
		if len(p) > 0 {
			// Parent directory attributes (path minus last element).
			dda := tryStat(ctx, fs, p[0:len(p)-1])
			if dda != nil {
				dotdotFileID = dda.Fileid
			}
		}
		dotFileID := uint64(0)
		da := tryStat(ctx, fs, p)
		if da != nil {
			dotFileID = da.Fileid
		}
		entities = append(entities,
			readDirEntity{Name: []byte("."), Cookie: 0, Next: true, FileID: dotFileID},
			readDirEntity{Name: []byte(".."), Cookie: 1, Next: true, FileID: dotdotFileID},
		)
	}

	eof := true
	maxEntities := userHandle.HandleLimit() / 2
	for i, c := range contents {
		// cookie equates to index within contents + 2 (for '.' and '..')
		cookie := uint64(i + 2)
		if started {
			maxBytes += 512 // TODO: better estimation.
			if maxBytes > obj.Count || len(entities) > maxEntities {
				// Budget exhausted: stop and tell the client to resume.
				eof = false
				break
			}

			attrs := ToFileAttribute(c, path.Join(append(p, c.Name())...))
			entities = append(entities, readDirEntity{
				FileID: attrs.Fileid,
				Name:   []byte(c.Name()),
				Cookie: cookie,
				Next:   true,
			})
		} else if cookie == obj.Cookie {
			// Found the client's last-seen entry; resume with the next one.
			started = true
		}
	}

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, p)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := xdr.Write(writer, verifier); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := xdr.Write(writer, len(entities) > 0); err != nil { // next
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if len(entities) > 0 {
		entities[len(entities)-1].Next = false
		// no next for last entity

		for _, e := range entities {
			if err := xdr.Write(writer, e); err != nil {
				return &NFSStatusError{NFSStatusServerFault, err}
			}
		}
	}
	if err := xdr.Write(writer, eof); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	// TODO: track writer size at this point to validate maxcount estimation and stop early if needed.

	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
+
// getDirListingWithVerifier resolves fsHandle to a directory and returns
// its entries, sorted by name, with a cookie-verifier for the listing.
// When the handler implements CachingHandler, a cached listing matching a
// non-zero verifier is reused and the handler may also mint the verifier;
// otherwise the verifier is a hash of the path and entry names.
func getDirListingWithVerifier(ctx context.Context, userHandle Handler, fsHandle []byte, verifier uint64) ([]fs.FileInfo, uint64, error) {
	// figure out what directory it is.
	fs, p, err := userHandle.FromHandle(fsHandle)
	if err != nil {
		return nil, 0, &NFSStatusError{NFSStatusStale, err}
	}

	path := fs.Join(p...)
	// see if the verifier has this dir cached:
	if vh, ok := userHandle.(CachingHandler); verifier != 0 && ok {
		entries := vh.DataForVerifier(path, verifier)
		if entries != nil {
			return entries, verifier, nil
		}
	}
	// load the entries.
	contents, err := fs.ReadDir(ctx, path)
	if err != nil {
		if os.IsPermission(err) {
			return nil, 0, &NFSStatusError{NFSStatusAccess, err}
		}
		if errors.Is(err, context.DeadlineExceeded) {
			return nil, 0, &NFSStatusError{NFSStatusJukebox, err}
		}
		return nil, 0, &NFSStatusError{NFSStatusIO, err}
	}

	// Stable name order: readdir cookies are indexes into this slice.
	sort.Slice(contents, func(i, j int) bool {
		return contents[i].Name() < contents[j].Name()
	})

	if vh, ok := userHandle.(CachingHandler); ok {
		// let the user handler make a verifier if it can.
		v := vh.VerifierFor(path, contents)
		return contents, v, nil
	}

	id := hashPathAndContents(path, contents)
	return contents, id, nil
}
+
// hashPathAndContents derives a directory cookie-verifier by hashing the
// directory path together with its ordered entry names. Mixing in the path
// avoids collisions between different directories with identical contents.
func hashPathAndContents(path string, contents []fs.FileInfo) uint64 {
	h := sha256.New()
	_, _ = h.Write([]byte(path)) // hash.Hash.Write never returns an error
	for _, entry := range contents {
		_, _ = h.Write([]byte(entry.Name()))
	}
	// Fold the first 8 bytes of the digest into the verifier value.
	digest := h.Sum(nil)
	return binary.BigEndian.Uint64(digest[:8])
}
diff --git a/pkg/go-nfs/nfs_onreaddirplus.go b/pkg/go-nfs/nfs_onreaddirplus.go
new file mode 100644
index 0000000..9fa49cb
--- /dev/null
+++ b/pkg/go-nfs/nfs_onreaddirplus.go
@@ -0,0 +1,153 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"path"
+
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
// readDirPlusArgs is the decoded READDIRPLUS3args: directory handle,
// resume cookie and verifier, plus two reply budgets — DirCount bounds the
// name/cookie portion, MaxCount bounds the whole reply.
type readDirPlusArgs struct {
	Handle      []byte
	Cookie      uint64
	CookieVerif uint64
	DirCount    uint32
	MaxCount    uint32
}

// readDirPlusEntity is a single entry in the READDIRPLUS reply stream.
// Attributes and Handle are XDR-optional; Next marks list continuation.
type readDirPlusEntity struct {
	FileID     uint64
	Name       []byte
	Cookie     uint64
	Attributes *FileAttribute `xdr:"optional"`
	Handle     *[]byte        `xdr:"optional"`
	Next       bool
}
+
+func joinPath(parent []string, elements ...string) []string {
+	joinedPath := make([]string, 0, len(parent)+len(elements))
+	joinedPath = append(joinedPath, parent...)
+	joinedPath = append(joinedPath, elements...)
+	return joinedPath
+}
+
+func onReadDirPlus(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = opAttrErrorFormatter
+	obj := readDirPlusArgs{}
+	if err := xdr.Read(w.req.Body, &obj); err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	// in case of test, nfs-client send:
+	// DirCount = 512
+	// MaxCount = 4096
+	if obj.DirCount < 512 || obj.MaxCount < 4096 {
+		return &NFSStatusError{NFSStatusTooSmall, nil}
+	}
+
+	fs, p, err := userHandle.FromHandle(obj.Handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+
+	contents, verifier, err := getDirListingWithVerifier(ctx, userHandle, obj.Handle, obj.CookieVerif)
+	if err != nil {
+		return err
+	}
+	if obj.Cookie > 0 && obj.CookieVerif > 0 && verifier != obj.CookieVerif {
+		return &NFSStatusError{NFSStatusBadCookie, nil}
+	}
+
+	entities := make([]readDirPlusEntity, 0)
+	dirBytes := uint32(0)
+	maxBytes := uint32(100) // conservative overhead measure
+
+	started := obj.Cookie == 0
+	if started {
+		// add '.' and '..' to entities
+		dotdotFileID := uint64(0)
+		if len(p) > 0 {
+			dda := tryStat(ctx, fs, p[0:len(p)-1])
+			if dda != nil {
+				dotdotFileID = dda.Fileid
+			}
+		}
+		dotFileID := uint64(0)
+		da := tryStat(ctx, fs, p)
+		if da != nil {
+			dotFileID = da.Fileid
+		}
+		entities = append(entities,
+			readDirPlusEntity{Name: []byte("."), Cookie: 0, Next: true, FileID: dotFileID, Attributes: da},
+			readDirPlusEntity{Name: []byte(".."), Cookie: 1, Next: true, FileID: dotdotFileID},
+		)
+	}
+
+	eof := true
+	maxEntities := userHandle.HandleLimit() / 2
+	fb := 0
+	fss := 0
+	for i, c := range contents {
+		// cookie equates to index within contents + 2 (for '.' and '..')
+		cookie := uint64(i + 2)
+		fb++
+		if started {
+			fss++
+			dirBytes += uint32(len(c.Name()) + 20)
+			maxBytes += 512 // TODO: better estimation.
+			if dirBytes > obj.DirCount || maxBytes > obj.MaxCount || len(entities) > maxEntities {
+				eof = false
+				break
+			}
+
+			filePath := joinPath(p, c.Name())
+			handle := userHandle.ToHandle(fs, filePath)
+			attrs := ToFileAttribute(c, path.Join(filePath...))
+			entities = append(entities, readDirPlusEntity{
+				FileID:     attrs.Fileid,
+				Name:       []byte(c.Name()),
+				Cookie:     cookie,
+				Attributes: attrs,
+				Handle:     &handle,
+				Next:       true,
+			})
+		} else if cookie == obj.Cookie {
+			started = true
+		}
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, p)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := xdr.Write(writer, verifier); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := xdr.Write(writer, len(entities) > 0); err != nil { // next
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if len(entities) > 0 {
+		entities[len(entities)-1].Next = false
+		// no next for last entity
+
+		for _, e := range entities {
+			if err := xdr.Write(writer, e); err != nil {
+				return &NFSStatusError{NFSStatusServerFault, err}
+			}
+		}
+	}
+	if err := xdr.Write(writer, eof); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	// TODO: track writer size at this point to validate maxcount estimation and stop early if needed.
+
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onreadlink.go b/pkg/go-nfs/nfs_onreadlink.go
new file mode 100644
index 0000000..887047a
--- /dev/null
+++ b/pkg/go-nfs/nfs_onreadlink.go
@@ -0,0 +1,55 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"os"
+
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
+func onReadLink(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = opAttrErrorFormatter
+	handle, err := xdr.ReadOpaque(w.req.Body)
+	if err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+	fs, path, err := userHandle.FromHandle(handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+
+	out, err := fs.Readlink(ctx, fs.Join(path...))
+	if err != nil {
+		if info, err := fs.Stat(ctx, fs.Join(path...)); err == nil {
+			if info.Mode()&os.ModeSymlink == 0 {
+				return &NFSStatusError{NFSStatusInval, err}
+			}
+		}
+		if os.IsNotExist(err) {
+			return &NFSStatusError{NFSStatusNoEnt, err}
+		}
+		if errors.Is(err, context.DeadlineExceeded) {
+			return &NFSStatusError{NFSStatusJukebox, err}
+		}
+
+		return &NFSStatusError{NFSStatusAccess, err}
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := xdr.Write(writer, out); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_onremove.go b/pkg/go-nfs/nfs_onremove.go
new file mode 100644
index 0000000..bffb446
--- /dev/null
+++ b/pkg/go-nfs/nfs_onremove.go
@@ -0,0 +1,85 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"os"
+
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
// onRemove handles NFSPROC3_REMOVE (and RMDIR via onRmDir): it deletes the
// entry obj.Filename from the directory behind obj.Handle, invalidates the
// deleted entry's handle, and replies with wcc data for the parent.
func onRemove(ctx context.Context, w *response, userHandle Handler) error {
	w.errorFmt = wccDataErrorFormatter
	obj := DirOpArg{}
	if err := xdr.Read(w.req.Body, &obj); err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}
	fs, path, err := userHandle.FromHandle(obj.Handle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}

	// TODO
	// if !CapabilityCheck(fs, billy.WriteCapability) {
	// 	return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
	// }

	if len(string(obj.Filename)) > PathNameMax {
		return &NFSStatusError{NFSStatusNameTooLong, nil}
	}

	// Stat the parent first: it must exist and be a directory, and its
	// pre-op attributes feed the wcc data in the reply.
	fullPath := fs.Join(path...)
	dirInfo, err := fs.Stat(ctx, fullPath)
	if err != nil {
		if os.IsNotExist(err) {
			return &NFSStatusError{NFSStatusNoEnt, err}
		}
		if os.IsPermission(err) {
			return &NFSStatusError{NFSStatusAccess, err}
		}
		if errors.Is(err, context.DeadlineExceeded) {
			return &NFSStatusError{NFSStatusJukebox, err}
		}
		return &NFSStatusError{NFSStatusIO, err}
	}
	if !dirInfo.IsDir() {
		return &NFSStatusError{NFSStatusNotDir, nil}
	}
	preCacheData := ToFileAttribute(dirInfo, fullPath).AsCache()

	toDelete := fs.Join(append(path, string(obj.Filename))...)
	// Capture the handle before deletion so it can be invalidated after.
	toDeleteHandle := userHandle.ToHandle(fs, append(path, string(obj.Filename)))

	err = fs.Remove(ctx, toDelete)
	if err != nil {
		if os.IsNotExist(err) {
			return &NFSStatusError{NFSStatusNoEnt, err}
		}
		if os.IsPermission(err) {
			return &NFSStatusError{NFSStatusAccess, err}
		}
		if errors.Is(err, context.DeadlineExceeded) {
			return &NFSStatusError{NFSStatusJukebox, err}
		}
		return &NFSStatusError{NFSStatusIO, err}
	}

	// Drop the handle of the deleted entry so stale handles stop resolving.
	if err := userHandle.InvalidateHandle(fs, toDeleteHandle); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := WriteWcc(writer, preCacheData, tryStat(ctx, fs, path)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
diff --git a/pkg/go-nfs/nfs_onrename.go b/pkg/go-nfs/nfs_onrename.go
new file mode 100644
index 0000000..dc0d463
--- /dev/null
+++ b/pkg/go-nfs/nfs_onrename.go
@@ -0,0 +1,120 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"os"
+	"reflect"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
// doubleWccErrorBody is the zero-valued payload used by the RENAME error
// formatter; its 16 bytes presumably cover the two wcc blocks (source and
// destination directories) a RENAME error reply carries — confirm against
// errFormatterWithBody.
var doubleWccErrorBody = [16]byte{}

// onRename handles NFSPROC3_RENAME: it moves from.Filename in the source
// directory to to.Filename in the destination directory. Both handles
// must resolve to the same filesystem, and the reply carries wcc data for
// both parent directories.
func onRename(ctx context.Context, w *response, userHandle Handler) error {
	w.errorFmt = errFormatterWithBody(doubleWccErrorBody[:])
	from := DirOpArg{}
	err := xdr.Read(w.req.Body, &from)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}
	fs, fromPath, err := userHandle.FromHandle(from.Handle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}

	to := DirOpArg{}
	if err = xdr.Read(w.req.Body, &to); err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}
	fs2, toPath, err := userHandle.FromHandle(to.Handle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}
	// check the two fs are the same
	if !reflect.DeepEqual(fs, fs2) {
		return &NFSStatusError{NFSStatusNotSupp, os.ErrPermission}
	}

	if !CapabilityCheck(fs, billy.WriteCapability) {
		return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
	}

	if len(string(from.Filename)) > PathNameMax || len(string(to.Filename)) > PathNameMax {
		return &NFSStatusError{NFSStatusNameTooLong, os.ErrInvalid}
	}

	// Stat both parents: each must exist and be a directory, and their
	// pre-op attributes feed the two wcc blocks in the reply.
	fromDirPath := fs.Join(fromPath...)
	fromDirInfo, err := fs.Stat(ctx, fromDirPath)
	if err != nil {
		if os.IsNotExist(err) {
			return &NFSStatusError{NFSStatusNoEnt, err}
		}
		if errors.Is(err, context.DeadlineExceeded) {
			return &NFSStatusError{NFSStatusJukebox, err}
		}
		return &NFSStatusError{NFSStatusIO, err}
	}
	if !fromDirInfo.IsDir() {
		return &NFSStatusError{NFSStatusNotDir, nil}
	}
	preCacheData := ToFileAttribute(fromDirInfo, fromDirPath).AsCache()

	toDirPath := fs.Join(toPath...)
	toDirInfo, err := fs.Stat(ctx, toDirPath)
	if err != nil {
		if os.IsNotExist(err) {
			return &NFSStatusError{NFSStatusNoEnt, err}
		}
		if errors.Is(err, context.DeadlineExceeded) {
			return &NFSStatusError{NFSStatusJukebox, err}
		}
		return &NFSStatusError{NFSStatusIO, err}
	}
	if !toDirInfo.IsDir() {
		return &NFSStatusError{NFSStatusNotDir, nil}
	}
	preDestData := ToFileAttribute(toDirInfo, toDirPath).AsCache()

	// Capture the source handle before renaming so it can be invalidated.
	oldHandle := userHandle.ToHandle(fs, append(fromPath, string(from.Filename)))

	fromLoc := fs.Join(append(fromPath, string(from.Filename))...)
	toLoc := fs.Join(append(toPath, string(to.Filename))...)

	err = fs.Rename(ctx, fromLoc, toLoc)
	if err != nil {
		if os.IsNotExist(err) {
			return &NFSStatusError{NFSStatusNoEnt, err}
		}
		if os.IsPermission(err) {
			return &NFSStatusError{NFSStatusAccess, err}
		}
		if errors.Is(err, context.DeadlineExceeded) {
			return &NFSStatusError{NFSStatusJukebox, err}
		}
		return &NFSStatusError{NFSStatusIO, err}
	}

	if err := userHandle.InvalidateHandle(fs, oldHandle); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := WriteWcc(writer, preCacheData, tryStat(ctx, fs, fromPath)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := WriteWcc(writer, preDestData, tryStat(ctx, fs, toPath)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
diff --git a/pkg/go-nfs/nfs_onrmdir.go b/pkg/go-nfs/nfs_onrmdir.go
new file mode 100644
index 0000000..12e3571
--- /dev/null
+++ b/pkg/go-nfs/nfs_onrmdir.go
@@ -0,0 +1,9 @@
+package nfs
+
+import (
+	"context"
+)
+
// onRmDir handles NFSPROC3_RMDIR by delegating to onRemove, which handles
// both file and directory deletion through the filesystem's Remove.
func onRmDir(ctx context.Context, w *response, userHandle Handler) error {
	return onRemove(ctx, w, userHandle)
}
diff --git a/pkg/go-nfs/nfs_onsetattr.go b/pkg/go-nfs/nfs_onsetattr.go
new file mode 100644
index 0000000..cc2c202
--- /dev/null
+++ b/pkg/go-nfs/nfs_onsetattr.go
@@ -0,0 +1,80 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"os"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
// onSetAttr handles NFSPROC3_SETATTR: it applies the requested attribute
// changes to the object behind the handle, honoring the optional ctime
// guard, and replies with wcc data.
func onSetAttr(ctx context.Context, w *response, userHandle Handler) error {
	w.errorFmt = wccDataErrorFormatter
	handle, err := xdr.ReadOpaque(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}

	fs, path, err := userHandle.FromHandle(handle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}
	attrs, err := ReadSetFileAttributes(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}

	// Lstat (not Stat): a symlink itself is inspected, not its target.
	fullPath := fs.Join(path...)
	info, err := fs.Lstat(ctx, fullPath)
	if err != nil {
		if os.IsNotExist(err) {
			return &NFSStatusError{NFSStatusNoEnt, err}
		}
		if errors.Is(err, context.DeadlineExceeded) {
			return &NFSStatusError{NFSStatusJukebox, err}
		}
		return &NFSStatusError{NFSStatusAccess, err}
	}

	// see if there's a "guard"
	if guard, err := xdr.ReadUint32(w.req.Body); err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	} else if guard != 0 {
		// read the ctime.
		t := FileTime{}
		if err := xdr.Read(w.req.Body, &t); err != nil {
			return &NFSStatusError{NFSStatusInval, err}
		}
		attr := ToFileAttribute(info, fullPath)
		// Guard mismatch: the object changed since the client read ctime.
		if t != attr.Ctime {
			return &NFSStatusError{NFSStatusNotSync, nil}
		}
	}

	if !CapabilityCheck(fs, billy.WriteCapability) {
		return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
	}

	changer := userHandle.Change(fs)
	if err := attrs.Apply(ctx, changer, fs, fs.Join(path...)); err != nil {
		// Already an nfsstatuserror
		return err
	}

	// Pre-op attributes come from the Lstat taken before Apply ran.
	preAttr := ToFileAttribute(info, fullPath).AsCache()

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := WriteWcc(writer, preAttr, tryStat(ctx, fs, path)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
diff --git a/pkg/go-nfs/nfs_onsymlink.go b/pkg/go-nfs/nfs_onsymlink.go
new file mode 100644
index 0000000..4b728c6
--- /dev/null
+++ b/pkg/go-nfs/nfs_onsymlink.go
@@ -0,0 +1,88 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"os"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
// onSymlink handles NFSPROC3_SYMLINK: it creates a symbolic link named
// obj.Filename inside the directory behind obj.Handle, pointing at the
// client-supplied target, then replies with the new handle, its post-op
// attributes, and wcc data for the parent directory.
func onSymlink(ctx context.Context, w *response, userHandle Handler) error {
	w.errorFmt = wccDataErrorFormatter
	obj := DirOpArg{}
	err := xdr.Read(w.req.Body, &obj)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}
	attrs, err := ReadSetFileAttributes(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}

	target, err := xdr.ReadOpaque(w.req.Body)
	if err != nil {
		return &NFSStatusError{NFSStatusInval, err}
	}

	fs, path, err := userHandle.FromHandle(obj.Handle)
	if err != nil {
		return &NFSStatusError{NFSStatusStale, err}
	}
	if !CapabilityCheck(fs, billy.WriteCapability) {
		return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
	}

	if len(string(obj.Filename)) > PathNameMax {
		return &NFSStatusError{NFSStatusNameTooLong, os.ErrInvalid}
	}

	// The link must not already exist, and the parent must be a directory.
	newFilePath := fs.Join(append(path, string(obj.Filename))...)
	if _, err := fs.Stat(ctx, newFilePath); err == nil {
		return &NFSStatusError{NFSStatusExist, os.ErrExist}
	}
	if s, err := fs.Stat(ctx, fs.Join(path...)); err != nil {
		return &NFSStatusError{NFSStatusAccess, err}
	} else if !s.IsDir() {
		return &NFSStatusError{NFSStatusNotDir, nil}
	}

	err = fs.Symlink(ctx, string(target), newFilePath)
	if err != nil {
		return &NFSStatusError{NFSStatusAccess, err}
	}

	fp := userHandle.ToHandle(fs, append(path, string(obj.Filename)))
	changer := userHandle.Change(fs)
	if changer != nil {
		// Apply any client-requested attributes to the new link.
		if err := attrs.Apply(ctx, changer, fs, newFilePath); err != nil {
			return &NFSStatusError{NFSStatusIO, err}
		}
	}

	writer := bytes.NewBuffer([]byte{})
	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	// "handle follows"
	if err := xdr.Write(writer, uint32(1)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := xdr.Write(writer, fp); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	if err := WritePostOpAttrs(writer, tryStat(ctx, fs, append(path, string(obj.Filename)))); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := WriteWcc(writer, nil, tryStat(ctx, fs, path)); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}

	if err := w.Write(writer.Bytes()); err != nil {
		return &NFSStatusError{NFSStatusServerFault, err}
	}
	return nil
}
diff --git a/pkg/go-nfs/nfs_onwrite.go b/pkg/go-nfs/nfs_onwrite.go
new file mode 100644
index 0000000..cfe2eb4
--- /dev/null
+++ b/pkg/go-nfs/nfs_onwrite.go
@@ -0,0 +1,116 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io"
+	"math"
+	"os"
+
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
// writeStability is the level of durability requested with the write
type writeStability uint32

const (
	unstable writeStability = 0 // server may cache; client must COMMIT
	dataSync writeStability = 1 // data stable before replying
	fileSync writeStability = 2 // data and metadata stable before replying
)

// writeArgs is the decoded WRITE3args: target handle, byte offset, count,
// requested stability level (How), and the payload to write.
type writeArgs struct {
	Handle []byte
	Offset uint64
	Count  uint32
	How    uint32
	Data   []byte
}
+
+func onWrite(ctx context.Context, w *response, userHandle Handler) error {
+	w.errorFmt = wccDataErrorFormatter
+	var req writeArgs
+	if err := xdr.Read(w.req.Body, &req); err != nil {
+		return &NFSStatusError{NFSStatusInval, err}
+	}
+
+	fs, path, err := userHandle.FromHandle(req.Handle)
+	if err != nil {
+		return &NFSStatusError{NFSStatusStale, err}
+	}
+	// TODO
+	// if !CapabilityCheck(fs, billy.WriteCapability) {
+	// 	return &NFSStatusError{NFSStatusROFS, os.ErrPermission}
+	// }
+	if len(req.Data) > math.MaxInt32 || req.Count > math.MaxInt32 {
+		return &NFSStatusError{NFSStatusFBig, os.ErrInvalid}
+	}
+	if req.How != uint32(unstable) && req.How != uint32(dataSync) && req.How != uint32(fileSync) {
+		return &NFSStatusError{NFSStatusInval, os.ErrInvalid}
+	}
+
+	// stat first for pre-op wcc.
+	fullPath := fs.Join(path...)
+	info, err := fs.Stat(ctx, fullPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return &NFSStatusError{NFSStatusNoEnt, err}
+		}
+		if errors.Is(err, context.DeadlineExceeded) {
+			return &NFSStatusError{NFSStatusJukebox, err}
+		}
+		return &NFSStatusError{NFSStatusAccess, err}
+	}
+	if !info.Mode().IsRegular() {
+		return &NFSStatusError{NFSStatusInval, os.ErrInvalid}
+	}
+	preOpCache := ToFileAttribute(info, fullPath).AsCache()
+
+	// now the actual op.
+	file, err := fs.OpenFile(ctx, fs.Join(path...), os.O_RDWR, info.Mode().Perm())
+	if err != nil {
+		return &NFSStatusError{NFSStatusAccess, err}
+	}
+	if req.Offset > 0 {
+		if _, err := file.Seek(int64(req.Offset), io.SeekStart); err != nil {
+			return &NFSStatusError{NFSStatusIO, err}
+		}
+	}
+	end := req.Count
+	if len(req.Data) < int(end) {
+		end = uint32(len(req.Data))
+	}
+	writtenCount, err := file.Write(ctx, req.Data[:end])
+	if err != nil {
+		Log.Errorf("Error writing: %v", err)
+		return &NFSStatusError{NFSStatusIO, err}
+	}
+	if err := file.Close(ctx); err != nil {
+		Log.Errorf("error closing: %v", err)
+		return &NFSStatusError{NFSStatusIO, err}
+	}
+
+	writer := bytes.NewBuffer([]byte{})
+	if err := xdr.Write(writer, uint32(NFSStatusOk)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := WriteWcc(writer, preOpCache, tryStat(ctx, fs, path)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := xdr.Write(writer, uint32(writtenCount)); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := xdr.Write(writer, fileSync); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	if err := xdr.Write(writer, w.Server.ID); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+
+	if err := w.Write(writer.Bytes()); err != nil {
+		return &NFSStatusError{NFSStatusServerFault, err}
+	}
+	return nil
+}
diff --git a/pkg/go-nfs/nfs_test.go b/pkg/go-nfs/nfs_test.go
new file mode 100644
index 0000000..8a79752
--- /dev/null
+++ b/pkg/go-nfs/nfs_test.go
@@ -0,0 +1,293 @@
+package nfs_test
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net"
+	"reflect"
+	"sort"
+	"testing"
+
+	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+	"git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
+	"git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers/memfs"
+
+	nfsc "github.com/willscott/go-nfs-client/nfs"
+	rpc "github.com/willscott/go-nfs-client/nfs/rpc"
+	"github.com/willscott/go-nfs-client/nfs/util"
+	"github.com/willscott/go-nfs-client/nfs/xdr"
+)
+
// TestNFS exercises the server end-to-end: it serves an in-memory billy
// filesystem over a local TCP listener and drives it with go-nfs-client,
// covering mount, FSInfo, create, write, ReadDir/ReadDirPlus over large and
// empty directories, and rename.
func TestNFS(t *testing.T) {
	ctx := context.Background()

	if testing.Verbose() {
		util.DefaultLogger.SetDebug(true)
	}

	// make an empty in-memory server.
	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}

	mem := helpers.WrapBillyFS(memfs.New())
	// File needs to exist in the root for memfs to acknowledge the root exists.
	_, _ = mem.Create(ctx, "/test")

	handler := helpers.NewNullAuthHandler(mem)
	cacheHelper := helpers.NewCachingHandler(handler, 1024)
	go func() {
		_ = nfs.Serve(listener, cacheHelper)
	}()

	c, err := rpc.DialTCP(listener.Addr().Network(), listener.Addr().(*net.TCPAddr).String(), false)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	var mounter nfsc.Mount
	mounter.Client = c
	target, err := mounter.Mount("/", rpc.AuthNull)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		_ = mounter.Unmount()
	}()

	_, err = target.FSInfo()
	if err != nil {
		t.Fatal(err)
	}

	// Validate sample file creation
	_, err = target.Create("/helloworld.txt", 0666)
	if err != nil {
		t.Fatal(err)
	}
	if info, err := mem.Stat(ctx, "/helloworld.txt"); err != nil {
		t.Fatal(err)
	} else {
		if info.Size() != 0 || info.Mode().Perm() != 0666 {
			t.Fatal("incorrect creation.")
		}
	}

	// Validate writing to a file.
	f, err := target.OpenFile("/helloworld.txt", 0666)
	if err != nil {
		t.Fatal(err)
	}
	b := []byte("hello world")
	_, err = f.Write(b)
	if err != nil {
		t.Fatal(err)
	}
	// Confirm the bytes written over NFS are visible through the backing fs.
	mf, _ := mem.Open(ctx, "/helloworld.txt")
	buf := make([]byte, len(b))
	if _, err = mf.Read(ctx, buf[:]); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(buf, b) {
		t.Fatal("written does not match expected")
	}

	// for test nfs.ReadDirPlus in case of many files
	dirF1, err := mem.ReadDir(ctx, "/")
	if err != nil {
		t.Fatal(err)
	}
	shouldBeNames := []string{}
	for _, f := range dirF1 {
		shouldBeNames = append(shouldBeNames, f.Name())
	}
	// 2000 entries forces the server to paginate the directory listing.
	for i := 0; i < 2000; i++ {
		fName := fmt.Sprintf("f-%04d.txt", i)
		shouldBeNames = append(shouldBeNames, fName)
		f, err := mem.Create(ctx, fName)
		if err != nil {
			t.Fatal(err)
		}
		f.Close(ctx)
	}

	manyEntitiesPlus, err := target.ReadDirPlus("/")
	if err != nil {
		t.Fatal(err)
	}
	actualBeNamesPlus := []string{}
	for _, e := range manyEntitiesPlus {
		actualBeNamesPlus = append(actualBeNamesPlus, e.Name())
	}

	// Compare order-insensitively: listing order is not specified.
	as := sort.StringSlice(shouldBeNames)
	bs := sort.StringSlice(actualBeNamesPlus)
	as.Sort()
	bs.Sort()
	if !reflect.DeepEqual(as, bs) {
		t.Fatal("nfs.ReadDirPlus error")
	}

	// for test nfs.ReadDir in case of many files
	manyEntities, err := readDir(target, "/")
	if err != nil {
		t.Fatal(err)
	}
	actualBeNames := []string{}
	for _, e := range manyEntities {
		actualBeNames = append(actualBeNames, e.FileName)
	}

	as2 := sort.StringSlice(shouldBeNames)
	bs2 := sort.StringSlice(actualBeNames)
	as2.Sort()
	bs2.Sort()
	if !reflect.DeepEqual(as2, bs2) {
		fmt.Printf("should be %v\n", as2)
		fmt.Printf("actual be %v\n", bs2)
		t.Fatal("nfs.ReadDir error")
	}

	// confirm rename works as expected
	oldFA, _, err := target.Lookup("/f-0010.txt", false)
	if err != nil {
		t.Fatal(err)
	}

	if err := target.Rename("/f-0010.txt", "/g-0010.txt"); err != nil {
		t.Fatal(err)
	}
	new, _, err := target.Lookup("/g-0010.txt", false)
	if err != nil {
		t.Fatal(err)
	}
	if new.Sys() != oldFA.Sys() {
		t.Fatal("rename failed to update")
	}
	// The old name must no longer resolve after the rename.
	_, _, err = target.Lookup("/f-0010.txt", false)
	if err == nil {
		t.Fatal("old handle should be invalid")
	}

	// for test nfs.ReadDirPlus in case of empty directory
	_, err = target.Mkdir("/empty", 0755)
	if err != nil {
		t.Fatal(err)
	}

	emptyEntitiesPlus, err := target.ReadDirPlus("/empty")
	if err != nil {
		t.Fatal(err)
	}
	if len(emptyEntitiesPlus) != 0 {
		t.Fatal("nfs.ReadDirPlus error reading empty dir")
	}

	// for test nfs.ReadDir in case of empty directory
	emptyEntities, err := readDir(target, "/empty")
	if err != nil {
		t.Fatal(err)
	}
	if len(emptyEntities) != 0 {
		t.Fatal("nfs.ReadDir error reading empty dir")
	}
}
+
// readDirEntry mirrors the XDR layout of a single READDIR response entry.
type readDirEntry struct {
	FileId   uint64 // server-assigned file id for the entry
	FileName string // entry name within the directory
	Cookie   uint64 // opaque position marker used to resume the listing
}
+
// readDir implementation "appropriated" from go-nfs-client implementation of READDIRPLUS
//
// It issues raw READDIR calls against target, following cookies until the
// server reports EOF, and returns every entry except "." and "..".
func readDir(target *nfsc.Target, dir string) ([]*readDirEntry, error) {
	_, fh, err := target.Lookup(dir)
	if err != nil {
		return nil, err
	}

	// readDirArgs mirrors the XDR request layout of READDIR.
	type readDirArgs struct {
		rpc.Header
		Handle      []byte
		Cookie      uint64
		CookieVerif uint64
		Count       uint32
	}

	// readDirList is the optional-entry XDR union in the response stream.
	type readDirList struct {
		IsSet bool         `xdr:"union"`
		Entry readDirEntry `xdr:"unioncase=1"`
	}

	// readDirListOK is the fixed prefix of a successful response body.
	type readDirListOK struct {
		DirAttrs   nfsc.PostOpAttr
		CookieVerf uint64
	}

	cookie := uint64(0)
	cookieVerf := uint64(0)
	eof := false

	var entries []*readDirEntry
	for !eof {
		res, err := target.Call(&readDirArgs{
			Header: rpc.Header{
				Rpcvers: 2,
				Vers:    nfsc.Nfs3Vers,
				Prog:    nfsc.Nfs3Prog,
				Proc:    uint32(nfs.NFSProcedureReadDir),
				Cred:    rpc.AuthNull,
				Verf:    rpc.AuthNull,
			},
			Handle:      fh,
			Cookie:      cookie,
			CookieVerif: cookieVerf,
			Count:       4096,
		})
		if err != nil {
			return nil, err
		}

		status, err := xdr.ReadUint32(res)
		if err != nil {
			return nil, err
		}

		if err = nfsc.NFS3Error(status); err != nil {
			return nil, err
		}

		dirListOK := new(readDirListOK)
		if err = xdr.Read(res, dirListOK); err != nil {
			return nil, err
		}

		// Decode entries until the union discriminant signals end-of-list.
		for {
			var item readDirList
			if err = xdr.Read(res, &item); err != nil {
				return nil, err
			}

			if !item.IsSet {
				break
			}

			// Remember the last cookie so the next call resumes after it.
			cookie = item.Entry.Cookie
			if item.Entry.FileName == "." || item.Entry.FileName == ".." {
				continue
			}
			entries = append(entries, &item.Entry)
		}

		if err = xdr.Read(res, &eof); err != nil {
			return nil, err
		}

		cookieVerf = dirListOK.CookieVerf
	}

	return entries, nil
}
diff --git a/pkg/go-nfs/nfsinterface.go b/pkg/go-nfs/nfsinterface.go
new file mode 100644
index 0000000..cf439af
--- /dev/null
+++ b/pkg/go-nfs/nfsinterface.go
@@ -0,0 +1,188 @@
+package nfs
+
// NFSProcedure is the valid RPC calls for the nfs service.
type NFSProcedure uint32

// NfsProcedure Codes
const (
	NFSProcedureNull NFSProcedure = iota
	NFSProcedureGetAttr
	NFSProcedureSetAttr
	NFSProcedureLookup
	NFSProcedureAccess
	NFSProcedureReadlink
	NFSProcedureRead
	NFSProcedureWrite
	NFSProcedureCreate
	NFSProcedureMkDir
	NFSProcedureSymlink
	NFSProcedureMkNod
	NFSProcedureRemove
	NFSProcedureRmDir
	NFSProcedureRename
	NFSProcedureLink
	NFSProcedureReadDir
	NFSProcedureReadDirPlus
	NFSProcedureFSStat
	NFSProcedureFSInfo
	NFSProcedurePathConf
	NFSProcedureCommit
)

// nfsProcedureNames holds the human-readable name of every known procedure.
var nfsProcedureNames = map[NFSProcedure]string{
	NFSProcedureNull:        "Null",
	NFSProcedureGetAttr:     "GetAttr",
	NFSProcedureSetAttr:     "SetAttr",
	NFSProcedureLookup:      "Lookup",
	NFSProcedureAccess:      "Access",
	NFSProcedureReadlink:    "ReadLink",
	NFSProcedureRead:        "Read",
	NFSProcedureWrite:       "Write",
	NFSProcedureCreate:      "Create",
	NFSProcedureMkDir:       "Mkdir",
	NFSProcedureSymlink:     "Symlink",
	NFSProcedureMkNod:       "Mknod",
	NFSProcedureRemove:      "Remove",
	NFSProcedureRmDir:       "Rmdir",
	NFSProcedureRename:      "Rename",
	NFSProcedureLink:        "Link",
	NFSProcedureReadDir:     "ReadDir",
	NFSProcedureReadDirPlus: "ReadDirPlus",
	NFSProcedureFSStat:      "FSStat",
	NFSProcedureFSInfo:      "FSInfo",
	NFSProcedurePathConf:    "PathConf",
	NFSProcedureCommit:      "Commit",
}

// String returns the human-readable name of the procedure, or "Unknown"
// for values outside the defined set.
func (n NFSProcedure) String() string {
	if name, ok := nfsProcedureNames[n]; ok {
		return name
	}
	return "Unknown"
}
+
// NFSStatus (nfsstat3) is a result code for nfs rpc calls
type NFSStatus uint32

// NFSStatus codes (values follow the nfsstat3 enumeration of RFC 1813).
const (
	NFSStatusOk          NFSStatus = 0
	NFSStatusPerm        NFSStatus = 1
	NFSStatusNoEnt       NFSStatus = 2
	NFSStatusIO          NFSStatus = 5
	NFSStatusNXIO        NFSStatus = 6
	NFSStatusAccess      NFSStatus = 13
	NFSStatusExist       NFSStatus = 17
	NFSStatusXDev        NFSStatus = 18
	NFSStatusNoDev       NFSStatus = 19
	NFSStatusNotDir      NFSStatus = 20
	NFSStatusIsDir       NFSStatus = 21
	NFSStatusInval       NFSStatus = 22
	NFSStatusFBig        NFSStatus = 27
	NFSStatusNoSPC       NFSStatus = 28
	NFSStatusROFS        NFSStatus = 30
	NFSStatusMlink       NFSStatus = 31
	NFSStatusNameTooLong NFSStatus = 63
	NFSStatusNotEmpty    NFSStatus = 66
	NFSStatusDQuot       NFSStatus = 69
	NFSStatusStale       NFSStatus = 70
	NFSStatusRemote      NFSStatus = 71
	NFSStatusBadHandle   NFSStatus = 10001
	NFSStatusNotSync     NFSStatus = 10002
	NFSStatusBadCookie   NFSStatus = 10003
	NFSStatusNotSupp     NFSStatus = 10004
	NFSStatusTooSmall    NFSStatus = 10005
	NFSStatusServerFault NFSStatus = 10006
	NFSStatusBadType     NFSStatus = 10007
	NFSStatusJukebox     NFSStatus = 10008
)

// String returns a human-readable description of the status code, or
// "unknown" for values outside the defined set.
func (s NFSStatus) String() string {
	switch s {
	case NFSStatusOk:
		// Fixed typo: was "Call Completed Successfull".
		return "Call Completed Successfully"
	case NFSStatusPerm:
		return "Not Owner"
	case NFSStatusNoEnt:
		return "No such file or directory"
	case NFSStatusIO:
		return "I/O error"
	case NFSStatusNXIO:
		return "I/O error: No such device"
	case NFSStatusAccess:
		return "Permission denied"
	case NFSStatusExist:
		return "File exists"
	case NFSStatusXDev:
		return "Attempt to do a cross device hard link"
	case NFSStatusNoDev:
		return "No such device"
	case NFSStatusNotDir:
		return "Not a directory"
	case NFSStatusIsDir:
		return "Is a directory"
	case NFSStatusInval:
		return "Invalid argument"
	case NFSStatusFBig:
		return "File too large"
	case NFSStatusNoSPC:
		return "No space left on device"
	case NFSStatusROFS:
		return "Read only file system"
	case NFSStatusMlink:
		return "Too many hard links"
	case NFSStatusNameTooLong:
		return "Name too long"
	case NFSStatusNotEmpty:
		return "Not empty"
	case NFSStatusDQuot:
		return "Resource quota exceeded"
	case NFSStatusStale:
		return "Invalid file handle"
	case NFSStatusRemote:
		return "Too many levels of remote in path"
	case NFSStatusBadHandle:
		return "Illegal NFS file handle"
	case NFSStatusNotSync:
		return "Synchronization mismatch"
	case NFSStatusBadCookie:
		return "Cookie is Stale"
	case NFSStatusNotSupp:
		return "Operation not supported"
	case NFSStatusTooSmall:
		return "Buffer or request too small"
	case NFSStatusServerFault:
		return "Unmapped error (EIO)"
	case NFSStatusBadType:
		return "Type not supported"
	case NFSStatusJukebox:
		return "Initiated, but too slow. Try again with new txn"
	default:
		return "unknown"
	}
}
+
// DirOpArg is a common serialization used for referencing an object in a directory
type DirOpArg struct {
	Handle   []byte // opaque NFS file handle of the containing directory
	Filename []byte // name of the entry within that directory, as raw bytes
}
diff --git a/pkg/go-nfs/server.go b/pkg/go-nfs/server.go
new file mode 100644
index 0000000..e9d2085
--- /dev/null
+++ b/pkg/go-nfs/server.go
@@ -0,0 +1,102 @@
+package nfs
+
+import (
+	"bytes"
+	"context"
+	"crypto/rand"
+	"errors"
+	"net"
+	"time"
+)
+
// Server is a handle to the listening NFS server.
type Server struct {
	Handler // user-supplied handler all RPC requests are dispatched to
	// ID identifies this server instance; Serve fills it with random
	// bytes if it is still all-zero, and it is echoed to clients as a
	// verifier in responses.
	ID [8]byte
}
+
+// RegisterMessageHandler registers a handler for a specific
+// XDR procedure.
+func RegisterMessageHandler(protocol uint32, proc uint32, handler HandleFunc) error {
+	if registeredHandlers == nil {
+		registeredHandlers = make(map[registeredHandlerID]HandleFunc)
+	}
+	for k := range registeredHandlers {
+		if k.protocol == protocol && k.proc == proc {
+			return errors.New("already registered")
+		}
+	}
+	id := registeredHandlerID{protocol, proc}
+	registeredHandlers[id] = handler
+	return nil
+}
+
// HandleFunc represents a handler for a specific protocol message.
type HandleFunc func(ctx context.Context, w *response, userHandler Handler) error

// registeredHandlerID keys the handler registry by protocol and procedure.
// TODO: store directly as a uint64 for more efficient lookups
type registeredHandlerID struct {
	protocol uint32
	proc     uint32
}

// registeredHandlers is the process-wide registry populated by
// RegisterMessageHandler and consulted by Server.handlerFor.
// NOTE(review): not guarded by a lock — confirm registration happens
// before serving starts.
var registeredHandlers map[registeredHandlerID]HandleFunc
+
+// Serve listens on the provided listener port for incoming client requests.
+func (s *Server) Serve(l net.Listener) error {
+	defer l.Close()
+	if bytes.Equal(s.ID[:], []byte{0, 0, 0, 0, 0, 0, 0, 0}) {
+		if _, err := rand.Reader.Read(s.ID[:]); err != nil {
+			return err
+		}
+	}
+
+	var tempDelay time.Duration
+
+	for {
+		conn, err := l.Accept()
+		if err != nil {
+			if ne, ok := err.(net.Error); ok && ne.Timeout() {
+				if tempDelay == 0 {
+					tempDelay = 5 * time.Millisecond
+				} else {
+					tempDelay *= 2
+				}
+				if max := 1 * time.Second; tempDelay > max {
+					tempDelay = max
+				}
+				time.Sleep(tempDelay)
+				continue
+			}
+			return err
+		}
+		tempDelay = 0
+		c := s.newConn(conn)
+		go c.serve()
+	}
+}
+
// newConn wraps an accepted network connection in per-connection state;
// the caller is expected to run the returned conn's serve loop.
func (s *Server) newConn(nc net.Conn) *conn {
	c := &conn{
		Server: s,
		Conn:   nc,
	}
	return c
}
+
+// TODO: keep an immutable map for each server instance to have less
+// chance of races.
+func (s *Server) handlerFor(prog uint32, proc uint32) HandleFunc {
+	for k, v := range registeredHandlers {
+		if k.protocol == prog && k.proc == proc {
+			return v
+		}
+	}
+	return nil
+}
+
// Serve is a singleton listener paralleling http.Serve
// It constructs a Server around handler and blocks serving requests on l,
// returning the first non-timeout accept error.
func Serve(l net.Listener, handler Handler) error {
	srv := &Server{Handler: handler}
	return srv.Serve(l)
}
diff --git a/pkg/go-nfs/time.go b/pkg/go-nfs/time.go
new file mode 100644
index 0000000..266dd27
--- /dev/null
+++ b/pkg/go-nfs/time.go
@@ -0,0 +1,32 @@
+package nfs
+
+import (
+	"time"
+)
+
+// FileTime is the NFS wire time format
+// This is equivalent to go-nfs-client/nfs.NFS3Time
+type FileTime struct {
+	Seconds  uint32
+	Nseconds uint32
+}
+
+// ToNFSTime generates the nfs 64bit time format from a golang time.
+func ToNFSTime(t time.Time) FileTime {
+	return FileTime{
+		Seconds:  uint32(t.Unix()),
+		Nseconds: uint32(t.UnixNano() % int64(time.Second)),
+	}
+}
+
+// Native generates a golang time from an nfs time spec
+func (t FileTime) Native() *time.Time {
+	ts := time.Unix(int64(t.Seconds), int64(t.Nseconds))
+	return &ts
+}
+
+// EqualTimespec returns if this time is equal to a local time spec
+func (t FileTime) EqualTimespec(sec int64, nsec int64) bool {
+	// TODO: bounds check on sec/nsec overflow
+	return t.Nseconds == uint32(nsec) && t.Seconds == uint32(sec)
+}
diff --git a/pkg/kvtrace/kvmetrics.go b/pkg/kvtrace/kvmetrics.go
new file mode 100644
index 0000000..869c37a
--- /dev/null
+++ b/pkg/kvtrace/kvmetrics.go
@@ -0,0 +1,90 @@
+package kvtrace
+
+import (
+	"context"
+
+	"github.com/royalcat/kv"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
// tracer is the package-level OpenTelemetry tracer used by wrapped stores.
var tracer = otel.Tracer("github.com/royalcat/kv/tracer")

// traceSrtore wraps a kv.Store and emits an OpenTelemetry span around every
// operation. NOTE(review): name looks like a typo of "traceStore"; kept
// as-is since the identifier is used throughout this file.
type traceSrtore[K, V any] struct {
	kv    kv.Store[K, V]       // wrapped store every call is delegated to
	attrs []attribute.KeyValue // attributes attached to every emitted span
}

// WrapTracing wraps kv with tracing instrumentation; attrs are attached to
// every span the returned store emits.
func WrapTracing[K, V any](kv kv.Store[K, V], attrs ...attribute.KeyValue) kv.Store[K, V] {
	return &traceSrtore[K, V]{
		kv:    kv,
		attrs: attrs,
	}
}
+
+// Close implements kv.Store.
+func (m *traceSrtore[K, V]) Close(ctx context.Context) error {
+	ctx, span := tracer.Start(ctx, "Close", trace.WithAttributes(m.attrs...))
+	defer span.End()
+
+	return m.kv.Close(ctx)
+}
+
+// Delete implements kv.Store.
+func (m *traceSrtore[K, V]) Delete(ctx context.Context, k K) error {
+	ctx, span := tracer.Start(ctx, "Delete", trace.WithAttributes(m.attrs...))
+	defer span.End()
+
+	return m.kv.Delete(ctx, k)
+}
+
+// Get implements kv.Store.
+func (m *traceSrtore[K, V]) Get(ctx context.Context, k K) (v V, found bool, err error) {
+	ctx, span := tracer.Start(ctx, "Get", trace.WithAttributes(m.attrs...))
+	defer span.End()
+
+	return m.kv.Get(ctx, k)
+}
+
+// Range implements kv.Store.
+func (m *traceSrtore[K, V]) Range(ctx context.Context, iter kv.Iter[K, V]) error {
+	ctx, span := tracer.Start(ctx, "Range", trace.WithAttributes(m.attrs...))
+	defer span.End()
+
+	count := 0
+	iterCount := func(k K, v V) bool {
+		count++
+		return iter(k, v)
+	}
+
+	err := m.kv.Range(ctx, iterCount)
+	span.SetAttributes(attribute.Int("count", count))
+	return err
+}
+
+// RangeWithPrefix implements kv.Store.
+func (m *traceSrtore[K, V]) RangeWithPrefix(ctx context.Context, k K, iter kv.Iter[K, V]) error {
+	ctx, span := tracer.Start(ctx, "RangeWithPrefix", trace.WithAttributes(m.attrs...))
+	defer span.End()
+
+	count := 0
+	iterCount := func(k K, v V) bool {
+		count++
+		return iter(k, v)
+	}
+
+	err := m.kv.Range(ctx, iterCount)
+	span.SetAttributes(attribute.Int("count", count))
+	return err
+}
+
+// Set implements kv.Store.
+func (m *traceSrtore[K, V]) Set(ctx context.Context, k K, v V) error {
+	ctx, span := tracer.Start(ctx, "Set", trace.WithAttributes(m.attrs...))
+	defer span.End()
+
+	return m.kv.Set(ctx, k, v)
+}
+
+var _ kv.Store[any, any] = (*traceSrtore[any, any])(nil)
diff --git a/pkg/rlog/rlog.go b/pkg/rlog/rlog.go
new file mode 100644
index 0000000..c8e1136
--- /dev/null
+++ b/pkg/rlog/rlog.go
@@ -0,0 +1,70 @@
+package rlog
+
+import (
+	"log/slog"
+	"os"
+
+	"github.com/rs/zerolog"
+	slogmulti "github.com/samber/slog-multi"
+	slogzerolog "github.com/samber/slog-zerolog"
+)
+
// errKey is the attribute key used by Err for wrapped errors.
const errKey = "error"

// labelGroupKey is the group key used by Label for free-form labels.
const labelGroupKey = "labelGroup"

// zl is the zerolog backend writing console-formatted output to stderr.
var zl = zerolog.New(&zerolog.ConsoleWriter{Out: os.Stderr})

// handlers is the fan-out list of slog handlers; it starts with the
// zerolog-backed handler and grows via AddHandler.
var handlers = []slog.Handler{
	slogzerolog.Option{Logger: &zl}.NewZerologHandler(),
}

// defaultLogger multiplexes every record to all registered handlers.
var defaultLogger = slog.New(slogmulti.Fanout(handlers...))

// init installs defaultLogger as the process-wide slog default.
func init() {
	slog.SetDefault(defaultLogger)
}
+
// AddHandler appends nh to the fan-out list and reinstalls the combined
// logger as both the package default and the process-wide slog default.
// NOTE(review): not synchronized — appears intended for startup-time use
// only; confirm there is no concurrent caller.
func AddHandler(nh slog.Handler) {
	handlers = append(handlers, nh)
	defaultLogger = slog.New(slogmulti.Fanout(handlers...))
	slog.SetDefault(defaultLogger)
}
+
// ComponentLog returns a logger tagged with a "component" attribute.
func ComponentLog(name string) *slog.Logger {
	return defaultLogger.With(slog.String("component", name))
}

// ServiceLog returns a component logger under the "service/" namespace.
func ServiceLog(name string) *slog.Logger {
	return ComponentLog("service/" + name)
}

// FunctionLog tags log with a "function" attribute.
func FunctionLog(log *slog.Logger, name string) *slog.Logger {
	return log.With(slog.String("function", name))
}

// EndpointLog tags log with an "endpoint" attribute.
func EndpointLog(log *slog.Logger, name string) *slog.Logger {
	return log.With(slog.String("endpoint", name))
}

// Err wraps err as a slog attribute under the "error" key.
func Err(err error) slog.Attr {
	return slog.Attr{Key: errKey, Value: fmtErr(err)}
}

// Label groups arbitrary key/value args under the "labelGroup" key.
func Label(args ...any) slog.Attr {
	return slog.Group(labelGroupKey, args...)
}
+
// fmtErr formats err as a slog group value containing a "msg" key holding
// err.Error(). A nil error yields a nil slog value.
//
// NOTE(review): an earlier comment promised a "trace" key for errors that
// expose a stack trace; this implementation never emits one.
func fmtErr(err error) slog.Value {
	if err == nil {
		return slog.AnyValue(nil)
	}

	var groupValues []slog.Attr

	groupValues = append(groupValues, slog.String("msg", err.Error()))

	return slog.GroupValue(groupValues...)
}
diff --git a/pkg/uuid/uuid.go b/pkg/uuid/uuid.go
new file mode 100644
index 0000000..923ef0e
--- /dev/null
+++ b/pkg/uuid/uuid.go
@@ -0,0 +1,102 @@
+package uuid
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"time"
+
+	fuuid "github.com/gofrs/uuid/v5"
+)
+
// Nil is the zero UUID (all bytes zero).
var Nil = UUID{}

// UUIDList is a convenience alias for a slice of UUIDs.
type UUIDList = []UUID

// UUID wraps gofrs/uuid to add JSON and GraphQL (un)marshaling helpers.
type UUID struct {
	fuuid.UUID
}

// New returns a new version 7 (time-ordered) UUID; panics if generation fails.
func New() UUID {
	return UUID{fuuid.Must(fuuid.NewV7())}
}

// NewFromTime returns a version 7 UUID whose timestamp component is derived
// from t rather than the current time.
func NewFromTime(t time.Time) UUID {
	gen := fuuid.NewGenWithOptions(
		fuuid.WithEpochFunc(func() time.Time { return t }),
	)
	return UUID{fuuid.Must(gen.NewV7())}
}

// NewP is like New but returns a pointer to the generated UUID.
func NewP() *UUID {
	return &UUID{fuuid.Must(fuuid.NewV7())}
}

// FromString parses text as a UUID, returning Nil plus the parse error on
// failure.
func FromString(text string) (UUID, error) {
	u, err := fuuid.FromString(text)
	if err != nil {
		return Nil, err
	}

	return UUID{u}, nil
}
+
+func MustFromString(text string) UUID {
+	u, err := fuuid.FromString(text)
+	if err != nil {
+		panic(err)
+	}
+
+	return UUID{u}
+}
+
// FromBytes parses a binary UUID representation, returning Nil plus the
// error on failure.
func FromBytes(input []byte) (UUID, error) {
	u, err := fuuid.FromBytes(input)
	if err != nil {
		return Nil, err
	}

	return UUID{u}, nil
}

// UnmarshalJSON decodes a JSON string into the UUID. The empty string is
// accepted and maps to the Nil UUID.
func (a *UUID) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}

	if s == "" {
		a.UUID = fuuid.Nil
		return nil
	}

	return a.UUID.Parse(s)
}

// MarshalJSON encodes the UUID as a JSON string; the Nil UUID encodes as
// the empty string (mirroring UnmarshalJSON).
func (a UUID) MarshalJSON() ([]byte, error) {
	if a.IsNil() {
		return json.Marshal("")
	}

	return json.Marshal(a.UUID)
}

// UnmarshalGQL implements the graphql.Unmarshaler interface
func (u *UUID) UnmarshalGQL(v interface{}) error {
	id, ok := v.(string)
	if !ok {
		return fmt.Errorf("uuid must be a string")
	}

	return u.Parse(id)
}

// MarshalGQL implements the graphql.Marshaler interface
// It writes the UUID as a double-quoted string and panics if the write fails.
func (u UUID) MarshalGQL(w io.Writer) {
	b := []byte(strconv.Quote(u.String()))
	_, err := w.Write(b)
	if err != nil {
		panic(err)
	}
}
diff --git a/src/config/default.go b/src/config/default.go
index 488d633..5ace89f 100644
--- a/src/config/default.go
+++ b/src/config/default.go
@@ -1,7 +1,7 @@
 package config
 
-var defaultConfig = Config{
-	DataFolder: "./data",
+var defaultConfig = Settings{
+	SourceDir: "./data",
 	WebUi: WebUi{
 		Port: 4444,
 		IP:   "0.0.0.0",
@@ -20,6 +20,11 @@ var defaultConfig = Config{
 		Fuse: Fuse{
 			Enabled: false,
 		},
+		NFS: NFS{
+			Enabled:   false,
+			Port:      8122,
+			CachePath: "./nfs-cache",
+		},
 	},
 
 	TorrentClient: TorrentClient{
@@ -29,8 +34,8 @@ var defaultConfig = Config{
 
 		// GlobalCacheSize: 2048,
 
-		AddTimeout:  60,
-		ReadTimeout: 120,
+		// AddTimeout:  60,
+		// ReadTimeout: 120,
 	},
 
 	Log: Log{
diff --git a/src/config/load.go b/src/config/load.go
index 5ccb1d9..53f1156 100644
--- a/src/config/load.go
+++ b/src/config/load.go
@@ -13,15 +13,25 @@ import (
 
 var k = koanf.New(".")
 
-func Load(path string) (*Config, error) {
+var Config = defaultConfig
 
+func Load(path string) (*Settings, error) {
 	err := k.Load(structs.Provider(defaultConfig, "koanf"), nil)
 	if err != nil {
 		return nil, err
 	}
 
 	if path != "" {
-		_ = k.Load(file.Provider(path), yaml.Parser()) // its ok if file doesnt exist
+		_, err := os.Stat(path)
+		if err != nil && !os.IsNotExist(err) { // its ok if file doesnt exist
+			return nil, err
+
+		}
+
+		err = k.Load(file.Provider(path), yaml.Parser())
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	err = k.Load(env.Provider("TSTOR_", ".", func(s string) string {
@@ -41,8 +51,11 @@ func Load(path string) (*Config, error) {
 		return nil, err
 	}
 
-	conf := Config{}
-	k.Unmarshal("", &conf)
+	conf := Settings{}
+	err = k.Unmarshal("", &conf)
+	if err != nil {
+		return nil, err
+	}
 
 	return &conf, nil
 }
diff --git a/src/config/model.go b/src/config/model.go
index f7c9fd9..8dd744e 100644
--- a/src/config/model.go
+++ b/src/config/model.go
@@ -1,13 +1,15 @@
 package config
 
 // Config is the main config object
-type Config struct {
+type Settings struct {
 	WebUi         WebUi         `koanf:"webUi"`
 	TorrentClient TorrentClient `koanf:"torrent"`
 	Mounts        Mounts        `koanf:"mounts"`
 	Log           Log           `koanf:"log"`
 
-	DataFolder string `koanf:"dataFolder"`
+	SourceDir string `koanf:"source_dir"`
+
+	OtelHttp string `koanf:"otel_http"`
 }
 
 type WebUi struct {
@@ -24,8 +26,8 @@ type Log struct {
 }
 
 type TorrentClient struct {
-	ReadTimeout int `koanf:"read_timeout,omitempty"`
-	AddTimeout  int `koanf:"add_timeout,omitempty"`
+	// ReadTimeout int `koanf:"read_timeout,omitempty"`
+	// AddTimeout  int `koanf:"add_timeout,omitempty"`
 
 	DHTNodes    []string `koanf:"dhtnodes,omitempty"`
 	DisableIPv6 bool     `koanf:"disable_ipv6,omitempty"`
@@ -61,6 +63,13 @@ type Mounts struct {
 	WebDAV WebDAV `koanf:"webdav"`
 	HttpFs HttpFs `koanf:"httpfs"`
 	Fuse   Fuse   `koanf:"fuse"`
+	NFS    NFS    `koanf:"nfs"`
+}
+
+type NFS struct {
+	Enabled   bool   `koanf:"enabled"`
+	Port      int    `koanf:"port"`
+	CachePath string `koanf:"cache_path"`
 }
 
 type HttpFs struct {
diff --git a/src/http/api.go b/src/delivery/api.go
similarity index 86%
rename from src/http/api.go
rename to src/delivery/api.go
index c227c2e..b9b032b 100644
--- a/src/http/api.go
+++ b/src/delivery/api.go
@@ -1,4 +1,4 @@
-package http
+package delivery
 
 import (
 	"bytes"
@@ -7,12 +7,12 @@ import (
 	"net/http"
 	"os"
 
-	"git.kmsign.ru/royalcat/tstor/src/host/torrent"
+	"git.kmsign.ru/royalcat/tstor/src/host/service"
 	"github.com/anacrolix/missinggo/v2/filecache"
 	"github.com/gin-gonic/gin"
 )
 
-var apiStatusHandler = func(fc *filecache.Cache, ss *torrent.Stats) gin.HandlerFunc {
+var apiStatusHandler = func(fc *filecache.Cache, ss *service.Stats) gin.HandlerFunc {
 	return func(ctx *gin.Context) {
 		stat := gin.H{
 			"torrentStats": ss.GlobalStats(),
@@ -29,7 +29,7 @@ var apiStatusHandler = func(fc *filecache.Cache, ss *torrent.Stats) gin.HandlerF
 	}
 }
 
-// var apiServersHandler = func(ss []*torrent.Server) gin.HandlerFunc {
+// var apiServersHandler = func(ss []*service.Server) gin.HandlerFunc {
 // 	return func(ctx *gin.Context) {
 // 		var infos []*torrent.ServerInfo
 // 		for _, s := range ss {
@@ -39,7 +39,7 @@ var apiStatusHandler = func(fc *filecache.Cache, ss *torrent.Stats) gin.HandlerF
 // 	}
 // }
 
-// var apiRoutesHandler = func(ss *torrent.Stats) gin.HandlerFunc {
+// var apiRoutesHandler = func(ss *service.Stats) gin.HandlerFunc {
 // 	return func(ctx *gin.Context) {
 // 		s := ss.RoutesStats()
 // 		sort.Sort(torrent.ByName(s))
@@ -47,7 +47,7 @@ var apiStatusHandler = func(fc *filecache.Cache, ss *torrent.Stats) gin.HandlerF
 // 	}
 // }
 
-// var apiAddTorrentHandler = func(s *torrent.Service) gin.HandlerFunc {
+// var apiAddTorrentHandler = func(s *service.Service) gin.HandlerFunc {
 // 	return func(ctx *gin.Context) {
 // 		route := ctx.Param("route")
 
@@ -66,7 +66,7 @@ var apiStatusHandler = func(fc *filecache.Cache, ss *torrent.Stats) gin.HandlerF
 // 	}
 // }
 
-// var apiDelTorrentHandler = func(s *torrent.Service) gin.HandlerFunc {
+// var apiDelTorrentHandler = func(s *service.Service) gin.HandlerFunc {
 // 	return func(ctx *gin.Context) {
 // 		route := ctx.Param("route")
 // 		hash := ctx.Param("torrent_hash")
diff --git a/src/delivery/graphql/generated.go b/src/delivery/graphql/generated.go
new file mode 100644
index 0000000..3253c0c
--- /dev/null
+++ b/src/delivery/graphql/generated.go
@@ -0,0 +1,7961 @@
+// Code generated by github.com/99designs/gqlgen, DO NOT EDIT.
+
+package graph
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model"
+	"github.com/99designs/gqlgen/graphql"
+	"github.com/99designs/gqlgen/graphql/introspection"
+	gqlparser "github.com/vektah/gqlparser/v2"
+	"github.com/vektah/gqlparser/v2/ast"
+)
+
+// region    ************************** generated!.gotpl **************************
+
+// NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface.
+func NewExecutableSchema(cfg Config) graphql.ExecutableSchema {
+	return &executableSchema{
+		schema:     cfg.Schema,
+		resolvers:  cfg.Resolvers,
+		directives: cfg.Directives,
+		complexity: cfg.Complexity,
+	}
+}
+
+type Config struct {
+	Schema     *ast.Schema
+	Resolvers  ResolverRoot
+	Directives DirectiveRoot
+	Complexity ComplexityRoot
+}
+
+type ResolverRoot interface {
+	Mutation() MutationResolver
+	Query() QueryResolver
+	Subscription() SubscriptionResolver
+	Torrent() TorrentResolver
+}
+
+type DirectiveRoot struct {
+	OneOf  func(ctx context.Context, obj interface{}, next graphql.Resolver) (res interface{}, err error)
+	Stream func(ctx context.Context, obj interface{}, next graphql.Resolver) (res interface{}, err error)
+}
+
+type ComplexityRoot struct {
+	ArchiveFS struct {
+		Name func(childComplexity int) int
+		Size func(childComplexity int) int
+	}
+
+	CleanupResponse struct {
+		Count func(childComplexity int) int
+		List  func(childComplexity int) int
+	}
+
+	Dir struct {
+		Name func(childComplexity int) int
+	}
+
+	DownloadTorrentResponse struct {
+		Task func(childComplexity int) int
+	}
+
+	File struct {
+		Name func(childComplexity int) int
+		Size func(childComplexity int) int
+	}
+
+	ListDirResponse struct {
+		Entries func(childComplexity int) int
+		Root    func(childComplexity int) int
+	}
+
+	Mutation struct {
+		CleanupTorrents  func(childComplexity int, files *bool, dryRun bool) int
+		DedupeStorage    func(childComplexity int) int
+		DownloadTorrent  func(childComplexity int, infohash string, file *string) int
+		ValidateTorrents func(childComplexity int, filter model.TorrentFilter) int
+	}
+
+	Query struct {
+		FsListDir func(childComplexity int, path string) int
+		Torrents  func(childComplexity int, filter *model.TorrentsFilter, pagination *model.Pagination) int
+	}
+
+	ResolverFS struct {
+		Name func(childComplexity int) int
+	}
+
+	Schema struct {
+		Mutation func(childComplexity int) int
+		Query    func(childComplexity int) int
+	}
+
+	Subscription struct {
+		TaskProgress           func(childComplexity int, taskID string) int
+		TorrentDownloadUpdates func(childComplexity int) int
+	}
+
+	Task struct {
+		ID func(childComplexity int) int
+	}
+
+	Torrent struct {
+		BytesCompleted  func(childComplexity int) int
+		BytesMissing    func(childComplexity int) int
+		ExcludedFiles   func(childComplexity int) int
+		Files           func(childComplexity int) int
+		Infohash        func(childComplexity int) int
+		Name            func(childComplexity int) int
+		Peers           func(childComplexity int) int
+		TorrentFilePath func(childComplexity int) int
+	}
+
+	TorrentFS struct {
+		Name    func(childComplexity int) int
+		Torrent func(childComplexity int) int
+	}
+
+	TorrentFile struct {
+		BytesCompleted func(childComplexity int) int
+		Filename       func(childComplexity int) int
+		Size           func(childComplexity int) int
+	}
+
+	TorrentPeer struct {
+		ClientName   func(childComplexity int) int
+		Discovery    func(childComplexity int) int
+		DownloadRate func(childComplexity int) int
+		IP           func(childComplexity int) int
+		Port         func(childComplexity int) int
+	}
+
+	TorrentProgress struct {
+		Current func(childComplexity int) int
+		Torrent func(childComplexity int) int
+		Total   func(childComplexity int) int
+	}
+}
+
+type MutationResolver interface {
+	ValidateTorrents(ctx context.Context, filter model.TorrentFilter) (bool, error)
+	CleanupTorrents(ctx context.Context, files *bool, dryRun bool) (*model.CleanupResponse, error)
+	DownloadTorrent(ctx context.Context, infohash string, file *string) (*model.DownloadTorrentResponse, error)
+	DedupeStorage(ctx context.Context) (int64, error)
+}
+type QueryResolver interface {
+	Torrents(ctx context.Context, filter *model.TorrentsFilter, pagination *model.Pagination) ([]*model.Torrent, error)
+	FsListDir(ctx context.Context, path string) (*model.ListDirResponse, error)
+}
+type SubscriptionResolver interface {
+	TaskProgress(ctx context.Context, taskID string) (<-chan model.Progress, error)
+	TorrentDownloadUpdates(ctx context.Context) (<-chan *model.TorrentProgress, error)
+}
+type TorrentResolver interface {
+	Name(ctx context.Context, obj *model.Torrent) (string, error)
+
+	Files(ctx context.Context, obj *model.Torrent) ([]*model.TorrentFile, error)
+	ExcludedFiles(ctx context.Context, obj *model.Torrent) ([]*model.TorrentFile, error)
+	Peers(ctx context.Context, obj *model.Torrent) ([]*model.TorrentPeer, error)
+}
+
+type executableSchema struct {
+	schema     *ast.Schema
+	resolvers  ResolverRoot
+	directives DirectiveRoot
+	complexity ComplexityRoot
+}
+
+func (e *executableSchema) Schema() *ast.Schema {
+	if e.schema != nil {
+		return e.schema
+	}
+	return parsedSchema
+}
+
+func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) {
+	ec := executionContext{nil, e, 0, 0, nil}
+	_ = ec
+	switch typeName + "." + field {
+
+	case "ArchiveFS.name":
+		if e.complexity.ArchiveFS.Name == nil {
+			break
+		}
+
+		return e.complexity.ArchiveFS.Name(childComplexity), true
+
+	case "ArchiveFS.size":
+		if e.complexity.ArchiveFS.Size == nil {
+			break
+		}
+
+		return e.complexity.ArchiveFS.Size(childComplexity), true
+
+	case "CleanupResponse.count":
+		if e.complexity.CleanupResponse.Count == nil {
+			break
+		}
+
+		return e.complexity.CleanupResponse.Count(childComplexity), true
+
+	case "CleanupResponse.list":
+		if e.complexity.CleanupResponse.List == nil {
+			break
+		}
+
+		return e.complexity.CleanupResponse.List(childComplexity), true
+
+	case "Dir.name":
+		if e.complexity.Dir.Name == nil {
+			break
+		}
+
+		return e.complexity.Dir.Name(childComplexity), true
+
+	case "DownloadTorrentResponse.task":
+		if e.complexity.DownloadTorrentResponse.Task == nil {
+			break
+		}
+
+		return e.complexity.DownloadTorrentResponse.Task(childComplexity), true
+
+	case "File.name":
+		if e.complexity.File.Name == nil {
+			break
+		}
+
+		return e.complexity.File.Name(childComplexity), true
+
+	case "File.size":
+		if e.complexity.File.Size == nil {
+			break
+		}
+
+		return e.complexity.File.Size(childComplexity), true
+
+	case "ListDirResponse.entries":
+		if e.complexity.ListDirResponse.Entries == nil {
+			break
+		}
+
+		return e.complexity.ListDirResponse.Entries(childComplexity), true
+
+	case "ListDirResponse.root":
+		if e.complexity.ListDirResponse.Root == nil {
+			break
+		}
+
+		return e.complexity.ListDirResponse.Root(childComplexity), true
+
+	case "Mutation.cleanupTorrents":
+		if e.complexity.Mutation.CleanupTorrents == nil {
+			break
+		}
+
+		args, err := ec.field_Mutation_cleanupTorrents_args(context.TODO(), rawArgs)
+		if err != nil {
+			return 0, false
+		}
+
+		return e.complexity.Mutation.CleanupTorrents(childComplexity, args["files"].(*bool), args["dryRun"].(bool)), true
+
+	case "Mutation.dedupeStorage":
+		if e.complexity.Mutation.DedupeStorage == nil {
+			break
+		}
+
+		return e.complexity.Mutation.DedupeStorage(childComplexity), true
+
+	case "Mutation.downloadTorrent":
+		if e.complexity.Mutation.DownloadTorrent == nil {
+			break
+		}
+
+		args, err := ec.field_Mutation_downloadTorrent_args(context.TODO(), rawArgs)
+		if err != nil {
+			return 0, false
+		}
+
+		return e.complexity.Mutation.DownloadTorrent(childComplexity, args["infohash"].(string), args["file"].(*string)), true
+
+	case "Mutation.validateTorrents":
+		if e.complexity.Mutation.ValidateTorrents == nil {
+			break
+		}
+
+		args, err := ec.field_Mutation_validateTorrents_args(context.TODO(), rawArgs)
+		if err != nil {
+			return 0, false
+		}
+
+		return e.complexity.Mutation.ValidateTorrents(childComplexity, args["filter"].(model.TorrentFilter)), true
+
+	case "Query.fsListDir":
+		if e.complexity.Query.FsListDir == nil {
+			break
+		}
+
+		args, err := ec.field_Query_fsListDir_args(context.TODO(), rawArgs)
+		if err != nil {
+			return 0, false
+		}
+
+		return e.complexity.Query.FsListDir(childComplexity, args["path"].(string)), true
+
+	case "Query.torrents":
+		if e.complexity.Query.Torrents == nil {
+			break
+		}
+
+		args, err := ec.field_Query_torrents_args(context.TODO(), rawArgs)
+		if err != nil {
+			return 0, false
+		}
+
+		return e.complexity.Query.Torrents(childComplexity, args["filter"].(*model.TorrentsFilter), args["pagination"].(*model.Pagination)), true
+
+	case "ResolverFS.name":
+		if e.complexity.ResolverFS.Name == nil {
+			break
+		}
+
+		return e.complexity.ResolverFS.Name(childComplexity), true
+
+	case "Schema.mutation":
+		if e.complexity.Schema.Mutation == nil {
+			break
+		}
+
+		return e.complexity.Schema.Mutation(childComplexity), true
+
+	case "Schema.query":
+		if e.complexity.Schema.Query == nil {
+			break
+		}
+
+		return e.complexity.Schema.Query(childComplexity), true
+
+	case "Subscription.taskProgress":
+		if e.complexity.Subscription.TaskProgress == nil {
+			break
+		}
+
+		args, err := ec.field_Subscription_taskProgress_args(context.TODO(), rawArgs)
+		if err != nil {
+			return 0, false
+		}
+
+		return e.complexity.Subscription.TaskProgress(childComplexity, args["taskID"].(string)), true
+
+	case "Subscription.torrentDownloadUpdates":
+		if e.complexity.Subscription.TorrentDownloadUpdates == nil {
+			break
+		}
+
+		return e.complexity.Subscription.TorrentDownloadUpdates(childComplexity), true
+
+	case "Task.id":
+		if e.complexity.Task.ID == nil {
+			break
+		}
+
+		return e.complexity.Task.ID(childComplexity), true
+
+	case "Torrent.bytesCompleted":
+		if e.complexity.Torrent.BytesCompleted == nil {
+			break
+		}
+
+		return e.complexity.Torrent.BytesCompleted(childComplexity), true
+
+	case "Torrent.bytesMissing":
+		if e.complexity.Torrent.BytesMissing == nil {
+			break
+		}
+
+		return e.complexity.Torrent.BytesMissing(childComplexity), true
+
+	case "Torrent.excludedFiles":
+		if e.complexity.Torrent.ExcludedFiles == nil {
+			break
+		}
+
+		return e.complexity.Torrent.ExcludedFiles(childComplexity), true
+
+	case "Torrent.files":
+		if e.complexity.Torrent.Files == nil {
+			break
+		}
+
+		return e.complexity.Torrent.Files(childComplexity), true
+
+	case "Torrent.infohash":
+		if e.complexity.Torrent.Infohash == nil {
+			break
+		}
+
+		return e.complexity.Torrent.Infohash(childComplexity), true
+
+	case "Torrent.name":
+		if e.complexity.Torrent.Name == nil {
+			break
+		}
+
+		return e.complexity.Torrent.Name(childComplexity), true
+
+	case "Torrent.peers":
+		if e.complexity.Torrent.Peers == nil {
+			break
+		}
+
+		return e.complexity.Torrent.Peers(childComplexity), true
+
+	case "Torrent.torrentFilePath":
+		if e.complexity.Torrent.TorrentFilePath == nil {
+			break
+		}
+
+		return e.complexity.Torrent.TorrentFilePath(childComplexity), true
+
+	case "TorrentFS.name":
+		if e.complexity.TorrentFS.Name == nil {
+			break
+		}
+
+		return e.complexity.TorrentFS.Name(childComplexity), true
+
+	case "TorrentFS.torrent":
+		if e.complexity.TorrentFS.Torrent == nil {
+			break
+		}
+
+		return e.complexity.TorrentFS.Torrent(childComplexity), true
+
+	case "TorrentFile.bytesCompleted":
+		if e.complexity.TorrentFile.BytesCompleted == nil {
+			break
+		}
+
+		return e.complexity.TorrentFile.BytesCompleted(childComplexity), true
+
+	case "TorrentFile.filename":
+		if e.complexity.TorrentFile.Filename == nil {
+			break
+		}
+
+		return e.complexity.TorrentFile.Filename(childComplexity), true
+
+	case "TorrentFile.size":
+		if e.complexity.TorrentFile.Size == nil {
+			break
+		}
+
+		return e.complexity.TorrentFile.Size(childComplexity), true
+
+	case "TorrentPeer.clientName":
+		if e.complexity.TorrentPeer.ClientName == nil {
+			break
+		}
+
+		return e.complexity.TorrentPeer.ClientName(childComplexity), true
+
+	case "TorrentPeer.discovery":
+		if e.complexity.TorrentPeer.Discovery == nil {
+			break
+		}
+
+		return e.complexity.TorrentPeer.Discovery(childComplexity), true
+
+	case "TorrentPeer.downloadRate":
+		if e.complexity.TorrentPeer.DownloadRate == nil {
+			break
+		}
+
+		return e.complexity.TorrentPeer.DownloadRate(childComplexity), true
+
+	case "TorrentPeer.ip":
+		if e.complexity.TorrentPeer.IP == nil {
+			break
+		}
+
+		return e.complexity.TorrentPeer.IP(childComplexity), true
+
+	case "TorrentPeer.port":
+		if e.complexity.TorrentPeer.Port == nil {
+			break
+		}
+
+		return e.complexity.TorrentPeer.Port(childComplexity), true
+
+	case "TorrentProgress.current":
+		if e.complexity.TorrentProgress.Current == nil {
+			break
+		}
+
+		return e.complexity.TorrentProgress.Current(childComplexity), true
+
+	case "TorrentProgress.torrent":
+		if e.complexity.TorrentProgress.Torrent == nil {
+			break
+		}
+
+		return e.complexity.TorrentProgress.Torrent(childComplexity), true
+
+	case "TorrentProgress.total":
+		if e.complexity.TorrentProgress.Total == nil {
+			break
+		}
+
+		return e.complexity.TorrentProgress.Total(childComplexity), true
+
+	}
+	return 0, false
+}
+
+func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
+	rc := graphql.GetOperationContext(ctx)
+	ec := executionContext{rc, e, 0, 0, make(chan graphql.DeferredResult)}
+	inputUnmarshalMap := graphql.BuildUnmarshalerMap(
+		ec.unmarshalInputBooleanFilter,
+		ec.unmarshalInputDateTimeFilter,
+		ec.unmarshalInputIntFilter,
+		ec.unmarshalInputPagination,
+		ec.unmarshalInputStringFilter,
+		ec.unmarshalInputTorrentFilter,
+		ec.unmarshalInputTorrentsFilter,
+	)
+	first := true
+
+	switch rc.Operation.Operation {
+	case ast.Query:
+		return func(ctx context.Context) *graphql.Response {
+			var response graphql.Response
+			var data graphql.Marshaler
+			if first {
+				first = false
+				ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap)
+				data = ec._Query(ctx, rc.Operation.SelectionSet)
+			} else {
+				if atomic.LoadInt32(&ec.pendingDeferred) > 0 {
+					result := <-ec.deferredResults
+					atomic.AddInt32(&ec.pendingDeferred, -1)
+					data = result.Result
+					response.Path = result.Path
+					response.Label = result.Label
+					response.Errors = result.Errors
+				} else {
+					return nil
+				}
+			}
+			var buf bytes.Buffer
+			data.MarshalGQL(&buf)
+			response.Data = buf.Bytes()
+			if atomic.LoadInt32(&ec.deferred) > 0 {
+				hasNext := atomic.LoadInt32(&ec.pendingDeferred) > 0
+				response.HasNext = &hasNext
+			}
+
+			return &response
+		}
+	case ast.Mutation:
+		return func(ctx context.Context) *graphql.Response {
+			if !first {
+				return nil
+			}
+			first = false
+			ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap)
+			data := ec._Mutation(ctx, rc.Operation.SelectionSet)
+			var buf bytes.Buffer
+			data.MarshalGQL(&buf)
+
+			return &graphql.Response{
+				Data: buf.Bytes(),
+			}
+		}
+	case ast.Subscription:
+		next := ec._Subscription(ctx, rc.Operation.SelectionSet)
+
+		var buf bytes.Buffer
+		return func(ctx context.Context) *graphql.Response {
+			buf.Reset()
+			data := next(ctx)
+
+			if data == nil {
+				return nil
+			}
+			data.MarshalGQL(&buf)
+
+			return &graphql.Response{
+				Data: buf.Bytes(),
+			}
+		}
+
+	default:
+		return graphql.OneShot(graphql.ErrorResponse(ctx, "unsupported GraphQL operation"))
+	}
+}
+
+type executionContext struct {
+	*graphql.OperationContext
+	*executableSchema
+	deferred        int32
+	pendingDeferred int32
+	deferredResults chan graphql.DeferredResult
+}
+
+func (ec *executionContext) processDeferredGroup(dg graphql.DeferredGroup) {
+	atomic.AddInt32(&ec.pendingDeferred, 1)
+	go func() {
+		ctx := graphql.WithFreshResponseContext(dg.Context)
+		dg.FieldSet.Dispatch(ctx)
+		ds := graphql.DeferredResult{
+			Path:   dg.Path,
+			Label:  dg.Label,
+			Result: dg.FieldSet,
+			Errors: graphql.GetErrors(ctx),
+		}
+		// null fields should bubble up
+		if dg.FieldSet.Invalids > 0 {
+			ds.Result = graphql.Null
+		}
+		ec.deferredResults <- ds
+	}()
+}
+
+func (ec *executionContext) introspectSchema() (*introspection.Schema, error) {
+	if ec.DisableIntrospection {
+		return nil, errors.New("introspection disabled")
+	}
+	return introspection.WrapSchema(ec.Schema()), nil
+}
+
+func (ec *executionContext) introspectType(name string) (*introspection.Type, error) {
+	if ec.DisableIntrospection {
+		return nil, errors.New("introspection disabled")
+	}
+	return introspection.WrapTypeFromDef(ec.Schema(), ec.Schema().Types[name]), nil
+}
+
+var sources = []*ast.Source{
+	{Name: "../../../graphql/mutation.graphql", Input: `type Mutation {
+  validateTorrents(filter: TorrentFilter!): Boolean!
+  cleanupTorrents(files: Boolean, dryRun: Boolean!): CleanupResponse!
+  downloadTorrent(infohash: String!, file: String): DownloadTorrentResponse
+  dedupeStorage: Int!
+}
+
+input TorrentFilter @oneOf {
+  everything: Boolean
+  infohash: String
+  # pathGlob: String!
+}
+
+type DownloadTorrentResponse {
+  task: Task
+}
+
+type CleanupResponse {
+  count: Int!
+  list: [String!]!
+}
+
+type Task {
+  id: ID!
+}
+`, BuiltIn: false},
+	{Name: "../../../graphql/query.graphql", Input: `type Query {
+  torrents(filter: TorrentsFilter, pagination: Pagination): [Torrent!]!
+  fsListDir(path: String!): ListDirResponse!
+}
+
+input TorrentsFilter {
+  name: StringFilter
+  bytesCompleted: IntFilter
+  bytesMissing: IntFilter
+
+  peersCount: IntFilter
+}
+
+type ListDirResponse {
+  root: DirEntry!
+  entries: [DirEntry!]!
+}
+
+input Pagination {
+  offset: Int!
+  limit: Int!
+}
+
+input StringFilter @oneOf {
+  eq: String
+  substr: String
+  in: [String!]
+}
+
+input IntFilter @oneOf {
+  eq: Int
+  gt: Int
+  lt: Int
+  gte: Int
+  lte: Int
+  in: [Int!]
+}
+
+input DateTimeFilter @oneOf {
+  eq: DateTime
+  gt: DateTime
+  lt: DateTime
+  gte: DateTime
+  lte: DateTime
+}
+
+input BooleanFilter @oneOf {
+  eq: Boolean
+}
+`, BuiltIn: false},
+	{Name: "../../../graphql/schema.graphql", Input: `directive @oneOf on INPUT_OBJECT | FIELD_DEFINITION
+directive @stream on FIELD_DEFINITION
+
+scalar DateTime
+
+type Schema {
+  query: Query
+  mutation: Mutation
+}
+`, BuiltIn: false},
+	{Name: "../../../graphql/subscription.graphql", Input: `type Subscription {
+    taskProgress(taskID: ID!): Progress
+    torrentDownloadUpdates: TorrentProgress
+}
+
+
+type TorrentProgress implements Progress {
+    torrent: Torrent!
+    current: Int!
+    total: Int!
+}
+
+interface Progress {
+    current: Int!
+    total: Int!
+}`, BuiltIn: false},
+	{Name: "../../../graphql/types/fs.graphql", Input: `interface DirEntry {
+  name: String!
+}
+
+type Dir implements DirEntry {
+  name: String!
+}
+
+type File implements DirEntry {
+  name: String!
+  size: Int!
+}
+
+type ResolverFS implements DirEntry {
+  name: String!
+}
+
+type TorrentFS implements DirEntry {
+  name: String!
+  torrent: Torrent!
+}
+
+type ArchiveFS implements DirEntry {
+  name: String!
+  size: Int!
+}
+`, BuiltIn: false},
+	{Name: "../../../graphql/types/torrent.graphql", Input: `type Torrent {
+  name: String!
+  infohash: String!
+  bytesCompleted: Int!
+  torrentFilePath: String!
+  bytesMissing: Int!
+  files: [TorrentFile!]!
+  excludedFiles: [TorrentFile!]!
+  peers: [TorrentPeer!]!
+}
+
+type TorrentFile {
+  filename: String!
+  size: Int!
+  bytesCompleted: Int!
+}
+
+type TorrentPeer {
+  ip: String!
+  downloadRate: Float!
+  discovery: String!
+  port: Int!
+  clientName: String!
+}`, BuiltIn: false},
+}
+var parsedSchema = gqlparser.MustLoadSchema(sources...)
+
+// endregion ************************** generated!.gotpl **************************
+
+// region    ***************************** args.gotpl *****************************
+
+func (ec *executionContext) field_Mutation_cleanupTorrents_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+	var err error
+	args := map[string]interface{}{}
+	var arg0 *bool
+	if tmp, ok := rawArgs["files"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("files"))
+		arg0, err = ec.unmarshalOBoolean2ᚖbool(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["files"] = arg0
+	var arg1 bool
+	if tmp, ok := rawArgs["dryRun"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("dryRun"))
+		arg1, err = ec.unmarshalNBoolean2bool(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["dryRun"] = arg1
+	return args, nil
+}
+
+func (ec *executionContext) field_Mutation_downloadTorrent_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+	var err error
+	args := map[string]interface{}{}
+	var arg0 string
+	if tmp, ok := rawArgs["infohash"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("infohash"))
+		arg0, err = ec.unmarshalNString2string(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["infohash"] = arg0
+	var arg1 *string
+	if tmp, ok := rawArgs["file"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("file"))
+		arg1, err = ec.unmarshalOString2ᚖstring(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["file"] = arg1
+	return args, nil
+}
+
+func (ec *executionContext) field_Mutation_validateTorrents_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+	var err error
+	args := map[string]interface{}{}
+	var arg0 model.TorrentFilter
+	if tmp, ok := rawArgs["filter"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("filter"))
+		arg0, err = ec.unmarshalNTorrentFilter2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentFilter(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["filter"] = arg0
+	return args, nil
+}
+
+func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+	var err error
+	args := map[string]interface{}{}
+	var arg0 string
+	if tmp, ok := rawArgs["name"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
+		arg0, err = ec.unmarshalNString2string(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["name"] = arg0
+	return args, nil
+}
+
+func (ec *executionContext) field_Query_fsListDir_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+	var err error
+	args := map[string]interface{}{}
+	var arg0 string
+	if tmp, ok := rawArgs["path"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("path"))
+		arg0, err = ec.unmarshalNString2string(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["path"] = arg0
+	return args, nil
+}
+
+func (ec *executionContext) field_Query_torrents_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+	var err error
+	args := map[string]interface{}{}
+	var arg0 *model.TorrentsFilter
+	if tmp, ok := rawArgs["filter"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("filter"))
+		arg0, err = ec.unmarshalOTorrentsFilter2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentsFilter(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["filter"] = arg0
+	var arg1 *model.Pagination
+	if tmp, ok := rawArgs["pagination"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("pagination"))
+		arg1, err = ec.unmarshalOPagination2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐPagination(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["pagination"] = arg1
+	return args, nil
+}
+
+func (ec *executionContext) field_Subscription_taskProgress_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+	var err error
+	args := map[string]interface{}{}
+	var arg0 string
+	if tmp, ok := rawArgs["taskID"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("taskID"))
+		arg0, err = ec.unmarshalNID2string(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["taskID"] = arg0
+	return args, nil
+}
+
+func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+	var err error
+	args := map[string]interface{}{}
+	var arg0 bool
+	if tmp, ok := rawArgs["includeDeprecated"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated"))
+		arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["includeDeprecated"] = arg0
+	return args, nil
+}
+
+func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+	var err error
+	args := map[string]interface{}{}
+	var arg0 bool
+	if tmp, ok := rawArgs["includeDeprecated"]; ok {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated"))
+		arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["includeDeprecated"] = arg0
+	return args, nil
+}
+
+// endregion ***************************** args.gotpl *****************************
+
+// region    ************************** directives.gotpl **************************
+
+// endregion ************************** directives.gotpl **************************
+
+// region    **************************** field.gotpl *****************************
+
+func (ec *executionContext) _ArchiveFS_name(ctx context.Context, field graphql.CollectedField, obj *model.ArchiveFs) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_ArchiveFS_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Name, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ArchiveFS_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "ArchiveFS",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _ArchiveFS_size(ctx context.Context, field graphql.CollectedField, obj *model.ArchiveFs) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_ArchiveFS_size(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Size, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ArchiveFS_size(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "ArchiveFS",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _CleanupResponse_count(ctx context.Context, field graphql.CollectedField, obj *model.CleanupResponse) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_CleanupResponse_count(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Count, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_CleanupResponse_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "CleanupResponse",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _CleanupResponse_list(ctx context.Context, field graphql.CollectedField, obj *model.CleanupResponse) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_CleanupResponse_list(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.List, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]string)
+	fc.Result = res
+	return ec.marshalNString2ᚕstringᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_CleanupResponse_list(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "CleanupResponse",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// _Dir_name resolves the non-null Dir.name field (String!) from obj.Name via the
+// resolver middleware chain; panics are recovered and reported through ec.Error.
+func (ec *executionContext) _Dir_name(ctx context.Context, field graphql.CollectedField, obj *model.Dir) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Dir_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Name, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+// fieldContext_Dir_name builds field metadata for Dir.name; scalar leaf, no children.
+func (ec *executionContext) fieldContext_Dir_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Dir",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// _DownloadTorrentResponse_task resolves the nullable DownloadTorrentResponse.task
+// field from obj.Task; a nil result marshals silently to graphql.Null (no error).
+func (ec *executionContext) _DownloadTorrentResponse_task(ctx context.Context, field graphql.CollectedField, obj *model.DownloadTorrentResponse) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_DownloadTorrentResponse_task(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Task, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*model.Task)
+	fc.Result = res
+	return ec.marshalOTask2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTask(ctx, field.Selections, res)
+}
+
+// fieldContext_DownloadTorrentResponse_task builds field metadata for
+// DownloadTorrentResponse.task; Child dispatches to Task's known subfields.
+func (ec *executionContext) fieldContext_DownloadTorrentResponse_task(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "DownloadTorrentResponse",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "id":
+				return ec.fieldContext_Task_id(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type Task", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+// _File_name resolves the non-null File.name field (String!) from obj.Name via the
+// resolver middleware chain; panics are recovered and reported through ec.Error.
+func (ec *executionContext) _File_name(ctx context.Context, field graphql.CollectedField, obj *model.File) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_File_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Name, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+// fieldContext_File_name builds field metadata for File.name; scalar leaf, no children.
+func (ec *executionContext) fieldContext_File_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "File",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// _File_size resolves the non-null File.size field (Int!, Go int64) from obj.Size
+// via the resolver middleware chain, recovering from panics and reporting errors.
+func (ec *executionContext) _File_size(ctx context.Context, field graphql.CollectedField, obj *model.File) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_File_size(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Size, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+// fieldContext_File_size builds field metadata for File.size; scalar leaf, no children.
+func (ec *executionContext) fieldContext_File_size(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "File",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// _ListDirResponse_root resolves the non-null ListDirResponse.root field
+// (DirEntry! interface) from obj.Root, recovering from panics and reporting errors.
+func (ec *executionContext) _ListDirResponse_root(ctx context.Context, field graphql.CollectedField, obj *model.ListDirResponse) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_ListDirResponse_root(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Root, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(model.DirEntry)
+	fc.Result = res
+	return ec.marshalNDirEntry2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDirEntry(ctx, field.Selections, res)
+}
+
+// fieldContext_ListDirResponse_root builds field metadata for ListDirResponse.root;
+// DirEntry is an interface, so child resolution happens on the concrete type instead.
+func (ec *executionContext) fieldContext_ListDirResponse_root(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "ListDirResponse",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE")
+		},
+	}
+	return fc, nil
+}
+
+// _ListDirResponse_entries resolves the non-null ListDirResponse.entries field
+// ([DirEntry!]!) from obj.Entries, recovering from panics and reporting errors.
+func (ec *executionContext) _ListDirResponse_entries(ctx context.Context, field graphql.CollectedField, obj *model.ListDirResponse) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_ListDirResponse_entries(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Entries, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]model.DirEntry)
+	fc.Result = res
+	return ec.marshalNDirEntry2ᚕgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDirEntryᚄ(ctx, field.Selections, res)
+}
+
+// fieldContext_ListDirResponse_entries builds field metadata for
+// ListDirResponse.entries; DirEntry is an interface, so Child always errors here.
+func (ec *executionContext) fieldContext_ListDirResponse_entries(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "ListDirResponse",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE")
+		},
+	}
+	return fc, nil
+}
+
+// _Mutation_validateTorrents executes the validateTorrents mutation by calling the
+// user-provided Mutation resolver with the pre-parsed "filter" argument from fc.Args;
+// the Boolean! result must be non-nil.
+func (ec *executionContext) _Mutation_validateTorrents(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Mutation_validateTorrents(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Mutation().ValidateTorrents(rctx, fc.Args["filter"].(model.TorrentFilter))
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(bool)
+	fc.Result = res
+	return ec.marshalNBoolean2bool(ctx, field.Selections, res)
+}
+
+// fieldContext_Mutation_validateTorrents builds field metadata for
+// Mutation.validateTorrents and decodes its GraphQL arguments into fc.Args;
+// argument-parsing panics are recovered and surfaced as err.
+func (ec *executionContext) fieldContext_Mutation_validateTorrents(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Mutation",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Boolean does not have child fields")
+		},
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = ec.Recover(ctx, r)
+			ec.Error(ctx, err)
+		}
+	}()
+	ctx = graphql.WithFieldContext(ctx, fc)
+	if fc.Args, err = ec.field_Mutation_validateTorrents_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+		ec.Error(ctx, err)
+		return fc, err
+	}
+	return fc, nil
+}
+
+// _Mutation_cleanupTorrents executes the cleanupTorrents mutation with the
+// pre-parsed "files" (*bool) and "dryRun" (bool) arguments; the CleanupResponse!
+// result must be non-nil.
+func (ec *executionContext) _Mutation_cleanupTorrents(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Mutation_cleanupTorrents(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Mutation().CleanupTorrents(rctx, fc.Args["files"].(*bool), fc.Args["dryRun"].(bool))
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(*model.CleanupResponse)
+	fc.Result = res
+	return ec.marshalNCleanupResponse2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐCleanupResponse(ctx, field.Selections, res)
+}
+
+// fieldContext_Mutation_cleanupTorrents builds field metadata for
+// Mutation.cleanupTorrents (Child dispatches into CleanupResponse subfields)
+// and decodes its GraphQL arguments into fc.Args.
+func (ec *executionContext) fieldContext_Mutation_cleanupTorrents(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Mutation",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "count":
+				return ec.fieldContext_CleanupResponse_count(ctx, field)
+			case "list":
+				return ec.fieldContext_CleanupResponse_list(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type CleanupResponse", field.Name)
+		},
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = ec.Recover(ctx, r)
+			ec.Error(ctx, err)
+		}
+	}()
+	ctx = graphql.WithFieldContext(ctx, fc)
+	if fc.Args, err = ec.field_Mutation_cleanupTorrents_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+		ec.Error(ctx, err)
+		return fc, err
+	}
+	return fc, nil
+}
+
+// _Mutation_downloadTorrent executes the downloadTorrent mutation with the
+// pre-parsed "infohash" (string) and "file" (*string) arguments; the response
+// is nullable, so a nil result marshals to graphql.Null without error.
+func (ec *executionContext) _Mutation_downloadTorrent(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Mutation_downloadTorrent(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Mutation().DownloadTorrent(rctx, fc.Args["infohash"].(string), fc.Args["file"].(*string))
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*model.DownloadTorrentResponse)
+	fc.Result = res
+	return ec.marshalODownloadTorrentResponse2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDownloadTorrentResponse(ctx, field.Selections, res)
+}
+
+// fieldContext_Mutation_downloadTorrent builds field metadata for
+// Mutation.downloadTorrent (Child dispatches into DownloadTorrentResponse)
+// and decodes its GraphQL arguments into fc.Args.
+func (ec *executionContext) fieldContext_Mutation_downloadTorrent(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Mutation",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "task":
+				return ec.fieldContext_DownloadTorrentResponse_task(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type DownloadTorrentResponse", field.Name)
+		},
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = ec.Recover(ctx, r)
+			ec.Error(ctx, err)
+		}
+	}()
+	ctx = graphql.WithFieldContext(ctx, fc)
+	if fc.Args, err = ec.field_Mutation_downloadTorrent_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+		ec.Error(ctx, err)
+		return fc, err
+	}
+	return fc, nil
+}
+
+// _Mutation_dedupeStorage executes the argument-less dedupeStorage mutation;
+// the Int! result (int64) must be non-nil.
+func (ec *executionContext) _Mutation_dedupeStorage(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Mutation_dedupeStorage(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Mutation().DedupeStorage(rctx)
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+// fieldContext_Mutation_dedupeStorage builds field metadata for
+// Mutation.dedupeStorage; Int scalar result, so no child fields (and no args).
+func (ec *executionContext) fieldContext_Mutation_dedupeStorage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Mutation",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// _Query_torrents executes the torrents query with the pre-parsed optional
+// "filter" and "pagination" arguments; the [Torrent!]! result must be non-nil.
+func (ec *executionContext) _Query_torrents(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Query_torrents(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Query().Torrents(rctx, fc.Args["filter"].(*model.TorrentsFilter), fc.Args["pagination"].(*model.Pagination))
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]*model.Torrent)
+	fc.Result = res
+	return ec.marshalNTorrent2ᚕᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentᚄ(ctx, field.Selections, res)
+}
+
+// fieldContext_Query_torrents builds field metadata for Query.torrents
+// (Child dispatches into Torrent subfields) and decodes its arguments into fc.Args.
+func (ec *executionContext) fieldContext_Query_torrents(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Query",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "name":
+				return ec.fieldContext_Torrent_name(ctx, field)
+			case "infohash":
+				return ec.fieldContext_Torrent_infohash(ctx, field)
+			case "bytesCompleted":
+				return ec.fieldContext_Torrent_bytesCompleted(ctx, field)
+			case "torrentFilePath":
+				return ec.fieldContext_Torrent_torrentFilePath(ctx, field)
+			case "bytesMissing":
+				return ec.fieldContext_Torrent_bytesMissing(ctx, field)
+			case "files":
+				return ec.fieldContext_Torrent_files(ctx, field)
+			case "excludedFiles":
+				return ec.fieldContext_Torrent_excludedFiles(ctx, field)
+			case "peers":
+				return ec.fieldContext_Torrent_peers(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type Torrent", field.Name)
+		},
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = ec.Recover(ctx, r)
+			ec.Error(ctx, err)
+		}
+	}()
+	ctx = graphql.WithFieldContext(ctx, fc)
+	if fc.Args, err = ec.field_Query_torrents_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+		ec.Error(ctx, err)
+		return fc, err
+	}
+	return fc, nil
+}
+
+// _Query_fsListDir executes the fsListDir query with the pre-parsed "path"
+// argument; the ListDirResponse! result must be non-nil.
+func (ec *executionContext) _Query_fsListDir(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Query_fsListDir(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Query().FsListDir(rctx, fc.Args["path"].(string))
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(*model.ListDirResponse)
+	fc.Result = res
+	return ec.marshalNListDirResponse2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐListDirResponse(ctx, field.Selections, res)
+}
+
+// fieldContext_Query_fsListDir builds field metadata for Query.fsListDir
+// (Child dispatches into ListDirResponse) and decodes its arguments into fc.Args.
+func (ec *executionContext) fieldContext_Query_fsListDir(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Query",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "root":
+				return ec.fieldContext_ListDirResponse_root(ctx, field)
+			case "entries":
+				return ec.fieldContext_ListDirResponse_entries(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type ListDirResponse", field.Name)
+		},
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = ec.Recover(ctx, r)
+			ec.Error(ctx, err)
+		}
+	}()
+	ctx = graphql.WithFieldContext(ctx, fc)
+	if fc.Args, err = ec.field_Query_fsListDir_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+		ec.Error(ctx, err)
+		return fc, err
+	}
+	return fc, nil
+}
+
+// _Query___type serves the built-in __type(name:) introspection query via
+// ec.introspectType; the result is nullable.
+func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Query___type(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.introspectType(fc.Args["name"].(string))
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*introspection.Type)
+	fc.Result = res
+	return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+// fieldContext_Query___type builds field metadata for the __type introspection
+// field (Child dispatches into __Type subfields) and decodes the "name" argument.
+func (ec *executionContext) fieldContext_Query___type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Query",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "kind":
+				return ec.fieldContext___Type_kind(ctx, field)
+			case "name":
+				return ec.fieldContext___Type_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Type_description(ctx, field)
+			case "fields":
+				return ec.fieldContext___Type_fields(ctx, field)
+			case "interfaces":
+				return ec.fieldContext___Type_interfaces(ctx, field)
+			case "possibleTypes":
+				return ec.fieldContext___Type_possibleTypes(ctx, field)
+			case "enumValues":
+				return ec.fieldContext___Type_enumValues(ctx, field)
+			case "inputFields":
+				return ec.fieldContext___Type_inputFields(ctx, field)
+			case "ofType":
+				return ec.fieldContext___Type_ofType(ctx, field)
+			case "specifiedByURL":
+				return ec.fieldContext___Type_specifiedByURL(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+		},
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = ec.Recover(ctx, r)
+			ec.Error(ctx, err)
+		}
+	}()
+	ctx = graphql.WithFieldContext(ctx, fc)
+	if fc.Args, err = ec.field_Query___type_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+		ec.Error(ctx, err)
+		return fc, err
+	}
+	return fc, nil
+}
+
+// _Query___schema serves the built-in __schema introspection query via
+// ec.introspectSchema; the result is nullable.
+func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Query___schema(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.introspectSchema()
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*introspection.Schema)
+	fc.Result = res
+	return ec.marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res)
+}
+
+// fieldContext_Query___schema builds field metadata for the __schema
+// introspection field; Child dispatches into __Schema subfields (no args).
+func (ec *executionContext) fieldContext_Query___schema(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Query",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "description":
+				return ec.fieldContext___Schema_description(ctx, field)
+			case "types":
+				return ec.fieldContext___Schema_types(ctx, field)
+			case "queryType":
+				return ec.fieldContext___Schema_queryType(ctx, field)
+			case "mutationType":
+				return ec.fieldContext___Schema_mutationType(ctx, field)
+			case "subscriptionType":
+				return ec.fieldContext___Schema_subscriptionType(ctx, field)
+			case "directives":
+				return ec.fieldContext___Schema_directives(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Schema", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+// _ResolverFS_name resolves the non-null ResolverFS.name field (String!) from
+// obj.Name via the resolver middleware chain, with panic recovery.
+func (ec *executionContext) _ResolverFS_name(ctx context.Context, field graphql.CollectedField, obj *model.ResolverFs) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_ResolverFS_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Name, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+// fieldContext_ResolverFS_name builds field metadata for ResolverFS.name;
+// scalar leaf, no children.
+func (ec *executionContext) fieldContext_ResolverFS_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "ResolverFS",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// _Schema_query resolves Schema.query by marshaling a fresh empty model.Query
+// wrapper (no resolver middleware is invoked; the selected subfields carry the
+// actual data). Panics while building the field context are recovered.
+func (ec *executionContext) _Schema_query(ctx context.Context, field graphql.CollectedField, obj *model.Schema) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Schema_query(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	res := &model.Query{}
+	fc.Result = res
+	return ec.marshalOQuery2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐQuery(ctx, field.Selections, res)
+}
+
+// fieldContext_Schema_query builds field metadata for Schema.query; Child
+// dispatches into the root Query fields, including introspection.
+func (ec *executionContext) fieldContext_Schema_query(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Schema",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "torrents":
+				return ec.fieldContext_Query_torrents(ctx, field)
+			case "fsListDir":
+				return ec.fieldContext_Query_fsListDir(ctx, field)
+			case "__schema":
+				return ec.fieldContext_Query___schema(ctx, field)
+			case "__type":
+				return ec.fieldContext_Query___type(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type Query", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+// _Schema_mutation resolves Schema.mutation by marshaling a fresh empty
+// model.Mutation wrapper (no resolver middleware; subfields carry the data).
+func (ec *executionContext) _Schema_mutation(ctx context.Context, field graphql.CollectedField, obj *model.Schema) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Schema_mutation(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	res := &model.Mutation{}
+	fc.Result = res
+	return ec.marshalOMutation2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐMutation(ctx, field.Selections, res)
+}
+
+// fieldContext_Schema_mutation builds field metadata for Schema.mutation;
+// Child dispatches into the root Mutation fields.
+func (ec *executionContext) fieldContext_Schema_mutation(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Schema",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "validateTorrents":
+				return ec.fieldContext_Mutation_validateTorrents(ctx, field)
+			case "cleanupTorrents":
+				return ec.fieldContext_Mutation_cleanupTorrents(ctx, field)
+			case "downloadTorrent":
+				return ec.fieldContext_Mutation_downloadTorrent(ctx, field)
+			case "dedupeStorage":
+				return ec.fieldContext_Mutation_dedupeStorage(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type Mutation", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+// _Subscription_taskProgress starts the taskProgress subscription: the resolver
+// returns a receive-only channel of model.Progress, and the returned closure
+// emits one JSON-framed payload per channel event ({"<alias>": <progress>}),
+// stopping when the channel closes or ctx is cancelled.
+func (ec *executionContext) _Subscription_taskProgress(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) {
+	fc, err := ec.fieldContext_Subscription_taskProgress(ctx, field)
+	if err != nil {
+		return nil
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = nil
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Subscription().TaskProgress(rctx, fc.Args["taskID"].(string))
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return nil
+	}
+	if resTmp == nil {
+		return nil
+	}
+	return func(ctx context.Context) graphql.Marshaler {
+		select {
+		case res, ok := <-resTmp.(<-chan model.Progress):
+			if !ok {
+				// channel closed: subscription is finished
+				return nil
+			}
+			return graphql.WriterFunc(func(w io.Writer) {
+				w.Write([]byte{'{'})
+				graphql.MarshalString(field.Alias).MarshalGQL(w)
+				w.Write([]byte{':'})
+				ec.marshalOProgress2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐProgress(ctx, field.Selections, res).MarshalGQL(w)
+				w.Write([]byte{'}'})
+			})
+		case <-ctx.Done():
+			return nil
+		}
+	}
+}
+
+// fieldContext_Subscription_taskProgress builds field metadata for
+// Subscription.taskProgress and decodes the "taskID" argument into fc.Args;
+// Progress is an interface, so Child always errors here.
+func (ec *executionContext) fieldContext_Subscription_taskProgress(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Subscription",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE")
+		},
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = ec.Recover(ctx, r)
+			ec.Error(ctx, err)
+		}
+	}()
+	ctx = graphql.WithFieldContext(ctx, fc)
+	if fc.Args, err = ec.field_Subscription_taskProgress_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+		ec.Error(ctx, err)
+		return fc, err
+	}
+	return fc, nil
+}
+
+// _Subscription_torrentDownloadUpdates starts the torrentDownloadUpdates
+// subscription: the resolver returns a receive-only channel of
+// *model.TorrentProgress and the returned closure emits one JSON-framed
+// payload per event, stopping on channel close or ctx cancellation.
+func (ec *executionContext) _Subscription_torrentDownloadUpdates(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) {
+	fc, err := ec.fieldContext_Subscription_torrentDownloadUpdates(ctx, field)
+	if err != nil {
+		return nil
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = nil
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Subscription().TorrentDownloadUpdates(rctx)
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return nil
+	}
+	if resTmp == nil {
+		return nil
+	}
+	return func(ctx context.Context) graphql.Marshaler {
+		select {
+		case res, ok := <-resTmp.(<-chan *model.TorrentProgress):
+			if !ok {
+				// channel closed: subscription is finished
+				return nil
+			}
+			return graphql.WriterFunc(func(w io.Writer) {
+				w.Write([]byte{'{'})
+				graphql.MarshalString(field.Alias).MarshalGQL(w)
+				w.Write([]byte{':'})
+				ec.marshalOTorrentProgress2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentProgress(ctx, field.Selections, res).MarshalGQL(w)
+				w.Write([]byte{'}'})
+			})
+		case <-ctx.Done():
+			return nil
+		}
+	}
+}
+
+// fieldContext_Subscription_torrentDownloadUpdates builds field metadata for
+// Subscription.torrentDownloadUpdates; Child dispatches into TorrentProgress
+// subfields (no arguments to decode).
+func (ec *executionContext) fieldContext_Subscription_torrentDownloadUpdates(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Subscription",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "torrent":
+				return ec.fieldContext_TorrentProgress_torrent(ctx, field)
+			case "current":
+				return ec.fieldContext_TorrentProgress_current(ctx, field)
+			case "total":
+				return ec.fieldContext_TorrentProgress_total(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type TorrentProgress", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _Task_id(ctx context.Context, field graphql.CollectedField, obj *model.Task) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Task_id(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.ID, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNID2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Task_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Task",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type ID does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _Torrent_name(ctx context.Context, field graphql.CollectedField, obj *model.Torrent) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Torrent_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Torrent().Name(rctx, obj)
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Torrent_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Torrent",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _Torrent_infohash(ctx context.Context, field graphql.CollectedField, obj *model.Torrent) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Torrent_infohash(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Infohash, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Torrent_infohash(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Torrent",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _Torrent_bytesCompleted(ctx context.Context, field graphql.CollectedField, obj *model.Torrent) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Torrent_bytesCompleted(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.BytesCompleted, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Torrent_bytesCompleted(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Torrent",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _Torrent_torrentFilePath(ctx context.Context, field graphql.CollectedField, obj *model.Torrent) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Torrent_torrentFilePath(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.TorrentFilePath, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Torrent_torrentFilePath(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Torrent",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _Torrent_bytesMissing(ctx context.Context, field graphql.CollectedField, obj *model.Torrent) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Torrent_bytesMissing(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.BytesMissing, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Torrent_bytesMissing(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Torrent",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _Torrent_files(ctx context.Context, field graphql.CollectedField, obj *model.Torrent) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Torrent_files(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Torrent().Files(rctx, obj)
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]*model.TorrentFile)
+	fc.Result = res
+	return ec.marshalNTorrentFile2ᚕᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentFileᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Torrent_files(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Torrent",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "filename":
+				return ec.fieldContext_TorrentFile_filename(ctx, field)
+			case "size":
+				return ec.fieldContext_TorrentFile_size(ctx, field)
+			case "bytesCompleted":
+				return ec.fieldContext_TorrentFile_bytesCompleted(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type TorrentFile", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _Torrent_excludedFiles(ctx context.Context, field graphql.CollectedField, obj *model.Torrent) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Torrent_excludedFiles(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Torrent().ExcludedFiles(rctx, obj)
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]*model.TorrentFile)
+	fc.Result = res
+	return ec.marshalNTorrentFile2ᚕᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentFileᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Torrent_excludedFiles(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Torrent",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "filename":
+				return ec.fieldContext_TorrentFile_filename(ctx, field)
+			case "size":
+				return ec.fieldContext_TorrentFile_size(ctx, field)
+			case "bytesCompleted":
+				return ec.fieldContext_TorrentFile_bytesCompleted(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type TorrentFile", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _Torrent_peers(ctx context.Context, field graphql.CollectedField, obj *model.Torrent) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_Torrent_peers(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return ec.resolvers.Torrent().Peers(rctx, obj)
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]*model.TorrentPeer)
+	fc.Result = res
+	return ec.marshalNTorrentPeer2ᚕᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentPeerᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Torrent_peers(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "Torrent",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: true,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "ip":
+				return ec.fieldContext_TorrentPeer_ip(ctx, field)
+			case "downloadRate":
+				return ec.fieldContext_TorrentPeer_downloadRate(ctx, field)
+			case "discovery":
+				return ec.fieldContext_TorrentPeer_discovery(ctx, field)
+			case "port":
+				return ec.fieldContext_TorrentPeer_port(ctx, field)
+			case "clientName":
+				return ec.fieldContext_TorrentPeer_clientName(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type TorrentPeer", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentFS_name(ctx context.Context, field graphql.CollectedField, obj *model.TorrentFs) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentFS_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Name, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentFS_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentFS",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentFS_torrent(ctx context.Context, field graphql.CollectedField, obj *model.TorrentFs) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentFS_torrent(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Torrent, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(*model.Torrent)
+	fc.Result = res
+	return ec.marshalNTorrent2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrent(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentFS_torrent(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentFS",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "name":
+				return ec.fieldContext_Torrent_name(ctx, field)
+			case "infohash":
+				return ec.fieldContext_Torrent_infohash(ctx, field)
+			case "bytesCompleted":
+				return ec.fieldContext_Torrent_bytesCompleted(ctx, field)
+			case "torrentFilePath":
+				return ec.fieldContext_Torrent_torrentFilePath(ctx, field)
+			case "bytesMissing":
+				return ec.fieldContext_Torrent_bytesMissing(ctx, field)
+			case "files":
+				return ec.fieldContext_Torrent_files(ctx, field)
+			case "excludedFiles":
+				return ec.fieldContext_Torrent_excludedFiles(ctx, field)
+			case "peers":
+				return ec.fieldContext_Torrent_peers(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type Torrent", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentFile_filename(ctx context.Context, field graphql.CollectedField, obj *model.TorrentFile) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentFile_filename(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Filename, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentFile_filename(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentFile",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentFile_size(ctx context.Context, field graphql.CollectedField, obj *model.TorrentFile) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentFile_size(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Size, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentFile_size(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentFile",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentFile_bytesCompleted(ctx context.Context, field graphql.CollectedField, obj *model.TorrentFile) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentFile_bytesCompleted(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.BytesCompleted, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentFile_bytesCompleted(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentFile",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentPeer_ip(ctx context.Context, field graphql.CollectedField, obj *model.TorrentPeer) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentPeer_ip(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.IP, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentPeer_ip(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentPeer",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentPeer_downloadRate(ctx context.Context, field graphql.CollectedField, obj *model.TorrentPeer) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentPeer_downloadRate(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.DownloadRate, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(float64)
+	fc.Result = res
+	return ec.marshalNFloat2float64(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentPeer_downloadRate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentPeer",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Float does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentPeer_discovery(ctx context.Context, field graphql.CollectedField, obj *model.TorrentPeer) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentPeer_discovery(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Discovery, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentPeer_discovery(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentPeer",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentPeer_port(ctx context.Context, field graphql.CollectedField, obj *model.TorrentPeer) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentPeer_port(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Port, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentPeer_port(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentPeer",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentPeer_clientName(ctx context.Context, field graphql.CollectedField, obj *model.TorrentPeer) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentPeer_clientName(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.ClientName, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentPeer_clientName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentPeer",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) _TorrentProgress_torrent(ctx context.Context, field graphql.CollectedField, obj *model.TorrentProgress) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentProgress_torrent(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Torrent, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(*model.Torrent)
+	fc.Result = res
+	return ec.marshalNTorrent2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrent(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TorrentProgress_torrent(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentProgress",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "name":
+				return ec.fieldContext_Torrent_name(ctx, field)
+			case "infohash":
+				return ec.fieldContext_Torrent_infohash(ctx, field)
+			case "bytesCompleted":
+				return ec.fieldContext_Torrent_bytesCompleted(ctx, field)
+			case "torrentFilePath":
+				return ec.fieldContext_Torrent_torrentFilePath(ctx, field)
+			case "bytesMissing":
+				return ec.fieldContext_Torrent_bytesMissing(ctx, field)
+			case "files":
+				return ec.fieldContext_Torrent_files(ctx, field)
+			case "excludedFiles":
+				return ec.fieldContext_Torrent_excludedFiles(ctx, field)
+			case "peers":
+				return ec.fieldContext_Torrent_peers(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type Torrent", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+// _TorrentProgress_current resolves the non-null TorrentProgress.current
+// field from obj.Current; panics are recovered into a GraphQL error and a
+// nil result is reported as "must not be null".
+func (ec *executionContext) _TorrentProgress_current(ctx context.Context, field graphql.CollectedField, obj *model.TorrentProgress) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentProgress_current(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Current, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+// fieldContext_TorrentProgress_current describes TorrentProgress.current;
+// Int is a scalar, so Child always returns an error.
+func (ec *executionContext) fieldContext_TorrentProgress_current(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentProgress",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// _TorrentProgress_total resolves the non-null TorrentProgress.total field
+// from obj.Total; panics are recovered into a GraphQL error and a nil
+// result is reported as "must not be null".
+func (ec *executionContext) _TorrentProgress_total(ctx context.Context, field graphql.CollectedField, obj *model.TorrentProgress) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext_TorrentProgress_total(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Total, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(int64)
+	fc.Result = res
+	return ec.marshalNInt2int64(ctx, field.Selections, res)
+}
+
+// fieldContext_TorrentProgress_total describes TorrentProgress.total;
+// Int is a scalar, so Child always returns an error.
+func (ec *executionContext) fieldContext_TorrentProgress_total(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "TorrentProgress",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Int does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___Directive_name resolves the non-null introspection field
+// __Directive.name from obj.Name; panics are recovered into a GraphQL
+// error and a nil result is reported as "must not be null".
+func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Directive_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Name, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+// fieldContext___Directive_name describes __Directive.name; String is a
+// scalar, so Child always returns an error.
+func (ec *executionContext) fieldContext___Directive_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Directive",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___Directive_description resolves the nullable __Directive.description
+// field via the obj.Description() accessor; a nil result simply marshals
+// to graphql.Null (no error, the field is optional).
+func (ec *executionContext) ___Directive_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Directive_description(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Description(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+// fieldContext___Directive_description describes __Directive.description;
+// backed by a method (IsMethod), and String has no child fields.
+func (ec *executionContext) fieldContext___Directive_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Directive",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___Directive_locations resolves the non-null __Directive.locations list
+// from obj.Locations and marshals it as [__DirectiveLocation!]!; a nil
+// result is reported as "must not be null".
+func (ec *executionContext) ___Directive_locations(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Directive_locations(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Locations, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]string)
+	fc.Result = res
+	return ec.marshalN__DirectiveLocation2ᚕstringᚄ(ctx, field.Selections, res)
+}
+
+// fieldContext___Directive_locations describes __Directive.locations;
+// __DirectiveLocation is an enum, so Child always returns an error.
+func (ec *executionContext) fieldContext___Directive_locations(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Directive",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type __DirectiveLocation does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___Directive_args resolves the non-null __Directive.args list from
+// obj.Args and marshals it as [__InputValue!]!; a nil result is reported
+// as "must not be null".
+func (ec *executionContext) ___Directive_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Directive_args(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Args, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]introspection.InputValue)
+	fc.Result = res
+	return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
+}
+
+// fieldContext___Directive_args describes __Directive.args; Child routes
+// sub-selections to the __InputValue field contexts.
+func (ec *executionContext) fieldContext___Directive_args(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Directive",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "name":
+				return ec.fieldContext___InputValue_name(ctx, field)
+			case "description":
+				return ec.fieldContext___InputValue_description(ctx, field)
+			case "type":
+				return ec.fieldContext___InputValue_type(ctx, field)
+			case "defaultValue":
+				return ec.fieldContext___InputValue_defaultValue(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __InputValue", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+// ___Directive_isRepeatable resolves the non-null __Directive.isRepeatable
+// field from obj.IsRepeatable; a nil result is reported as
+// "must not be null".
+func (ec *executionContext) ___Directive_isRepeatable(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Directive_isRepeatable(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.IsRepeatable, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(bool)
+	fc.Result = res
+	return ec.marshalNBoolean2bool(ctx, field.Selections, res)
+}
+
+// fieldContext___Directive_isRepeatable describes __Directive.isRepeatable;
+// Boolean is a scalar, so Child always returns an error.
+func (ec *executionContext) fieldContext___Directive_isRepeatable(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Directive",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Boolean does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___EnumValue_name resolves the non-null __EnumValue.name field from
+// obj.Name; a nil result is reported as "must not be null".
+func (ec *executionContext) ___EnumValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___EnumValue_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Name, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+// fieldContext___EnumValue_name describes __EnumValue.name; String is a
+// scalar, so Child always returns an error.
+func (ec *executionContext) fieldContext___EnumValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__EnumValue",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___EnumValue_description resolves the nullable __EnumValue.description
+// field via obj.Description(); a nil result simply marshals to
+// graphql.Null (no error, the field is optional).
+func (ec *executionContext) ___EnumValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___EnumValue_description(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Description(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+// fieldContext___EnumValue_description describes __EnumValue.description;
+// backed by a method (IsMethod), and String has no child fields.
+func (ec *executionContext) fieldContext___EnumValue_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__EnumValue",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___EnumValue_isDeprecated resolves the non-null __EnumValue.isDeprecated
+// field via obj.IsDeprecated(); a nil result is reported as
+// "must not be null".
+func (ec *executionContext) ___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___EnumValue_isDeprecated(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.IsDeprecated(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(bool)
+	fc.Result = res
+	return ec.marshalNBoolean2bool(ctx, field.Selections, res)
+}
+
+// fieldContext___EnumValue_isDeprecated describes __EnumValue.isDeprecated;
+// backed by a method (IsMethod), and Boolean has no child fields.
+func (ec *executionContext) fieldContext___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__EnumValue",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Boolean does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___EnumValue_deprecationReason resolves the nullable
+// __EnumValue.deprecationReason field via obj.DeprecationReason(); a nil
+// result simply marshals to graphql.Null.
+func (ec *executionContext) ___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___EnumValue_deprecationReason(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.DeprecationReason(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+// fieldContext___EnumValue_deprecationReason describes
+// __EnumValue.deprecationReason; backed by a method, String has no children.
+func (ec *executionContext) fieldContext___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__EnumValue",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___Field_name resolves the non-null __Field.name field from obj.Name;
+// a nil result is reported as "must not be null".
+func (ec *executionContext) ___Field_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Field_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Name, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+// fieldContext___Field_name describes __Field.name; String is a scalar,
+// so Child always returns an error.
+func (ec *executionContext) fieldContext___Field_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Field",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___Field_description resolves the nullable __Field.description field via
+// obj.Description(); a nil result simply marshals to graphql.Null.
+func (ec *executionContext) ___Field_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Field_description(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Description(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+// fieldContext___Field_description describes __Field.description; backed
+// by a method (IsMethod), and String has no child fields.
+func (ec *executionContext) fieldContext___Field_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Field",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___Field_args resolves the non-null __Field.args list from obj.Args and
+// marshals it as [__InputValue!]!; a nil result is reported as
+// "must not be null".
+func (ec *executionContext) ___Field_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Field_args(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Args, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]introspection.InputValue)
+	fc.Result = res
+	return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
+}
+
+// fieldContext___Field_args describes __Field.args; Child routes
+// sub-selections to the __InputValue field contexts.
+func (ec *executionContext) fieldContext___Field_args(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Field",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "name":
+				return ec.fieldContext___InputValue_name(ctx, field)
+			case "description":
+				return ec.fieldContext___InputValue_description(ctx, field)
+			case "type":
+				return ec.fieldContext___InputValue_type(ctx, field)
+			case "defaultValue":
+				return ec.fieldContext___InputValue_defaultValue(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __InputValue", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+// ___Field_type resolves the non-null __Field.type field from obj.Type and
+// marshals it as __Type!; a nil result is reported as "must not be null".
+func (ec *executionContext) ___Field_type(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Field_type(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Type, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(*introspection.Type)
+	fc.Result = res
+	return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+// fieldContext___Field_type describes __Field.type; Child routes
+// sub-selections to the __Type field contexts.
+func (ec *executionContext) fieldContext___Field_type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Field",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "kind":
+				return ec.fieldContext___Type_kind(ctx, field)
+			case "name":
+				return ec.fieldContext___Type_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Type_description(ctx, field)
+			case "fields":
+				return ec.fieldContext___Type_fields(ctx, field)
+			case "interfaces":
+				return ec.fieldContext___Type_interfaces(ctx, field)
+			case "possibleTypes":
+				return ec.fieldContext___Type_possibleTypes(ctx, field)
+			case "enumValues":
+				return ec.fieldContext___Type_enumValues(ctx, field)
+			case "inputFields":
+				return ec.fieldContext___Type_inputFields(ctx, field)
+			case "ofType":
+				return ec.fieldContext___Type_ofType(ctx, field)
+			case "specifiedByURL":
+				return ec.fieldContext___Type_specifiedByURL(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+// ___Field_isDeprecated resolves the non-null __Field.isDeprecated field
+// via obj.IsDeprecated(); a nil result is reported as "must not be null".
+func (ec *executionContext) ___Field_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Field_isDeprecated(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.IsDeprecated(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(bool)
+	fc.Result = res
+	return ec.marshalNBoolean2bool(ctx, field.Selections, res)
+}
+
+// fieldContext___Field_isDeprecated describes __Field.isDeprecated; backed
+// by a method (IsMethod), and Boolean has no child fields.
+func (ec *executionContext) fieldContext___Field_isDeprecated(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Field",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type Boolean does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___Field_deprecationReason resolves the nullable
+// __Field.deprecationReason field via obj.DeprecationReason(); a nil
+// result simply marshals to graphql.Null.
+func (ec *executionContext) ___Field_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Field_deprecationReason(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.DeprecationReason(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+// fieldContext___Field_deprecationReason describes
+// __Field.deprecationReason; backed by a method, String has no children.
+func (ec *executionContext) fieldContext___Field_deprecationReason(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Field",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___InputValue_name resolves the non-null __InputValue.name field from
+// obj.Name; a nil result is reported as "must not be null".
+func (ec *executionContext) ___InputValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___InputValue_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Name, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+// fieldContext___InputValue_name describes __InputValue.name; String is a
+// scalar, so Child always returns an error.
+func (ec *executionContext) fieldContext___InputValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__InputValue",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___InputValue_description resolves the nullable __InputValue.description
+// field via obj.Description(); a nil result simply marshals to graphql.Null.
+func (ec *executionContext) ___InputValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___InputValue_description(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Description(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+// fieldContext___InputValue_description describes __InputValue.description;
+// backed by a method (IsMethod), and String has no child fields.
+func (ec *executionContext) fieldContext___InputValue_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__InputValue",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___InputValue_type resolves the non-null __InputValue.type field from
+// obj.Type and marshals it as __Type!; a nil result is reported as
+// "must not be null".
+func (ec *executionContext) ___InputValue_type(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___InputValue_type(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Type, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(*introspection.Type)
+	fc.Result = res
+	return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+// fieldContext___InputValue_type describes __InputValue.type; Child routes
+// sub-selections to the __Type field contexts.
+func (ec *executionContext) fieldContext___InputValue_type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__InputValue",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "kind":
+				return ec.fieldContext___Type_kind(ctx, field)
+			case "name":
+				return ec.fieldContext___Type_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Type_description(ctx, field)
+			case "fields":
+				return ec.fieldContext___Type_fields(ctx, field)
+			case "interfaces":
+				return ec.fieldContext___Type_interfaces(ctx, field)
+			case "possibleTypes":
+				return ec.fieldContext___Type_possibleTypes(ctx, field)
+			case "enumValues":
+				return ec.fieldContext___Type_enumValues(ctx, field)
+			case "inputFields":
+				return ec.fieldContext___Type_inputFields(ctx, field)
+			case "ofType":
+				return ec.fieldContext___Type_ofType(ctx, field)
+			case "specifiedByURL":
+				return ec.fieldContext___Type_specifiedByURL(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+// ___InputValue_defaultValue resolves the nullable
+// __InputValue.defaultValue field from obj.DefaultValue; a nil result
+// simply marshals to graphql.Null.
+func (ec *executionContext) ___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___InputValue_defaultValue(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.DefaultValue, nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+// fieldContext___InputValue_defaultValue describes
+// __InputValue.defaultValue; String is a scalar, so Child always errors.
+func (ec *executionContext) fieldContext___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__InputValue",
+		Field:      field,
+		IsMethod:   false,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// ___Schema_description resolves the nullable __Schema.description field
+// via obj.Description(); a nil result simply marshals to graphql.Null.
+func (ec *executionContext) ___Schema_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Schema_description(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Description(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Schema",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Schema_types(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Schema_types(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Types(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]introspection.Type)
+	fc.Result = res
+	return ec.marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_types(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Schema",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "kind":
+				return ec.fieldContext___Type_kind(ctx, field)
+			case "name":
+				return ec.fieldContext___Type_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Type_description(ctx, field)
+			case "fields":
+				return ec.fieldContext___Type_fields(ctx, field)
+			case "interfaces":
+				return ec.fieldContext___Type_interfaces(ctx, field)
+			case "possibleTypes":
+				return ec.fieldContext___Type_possibleTypes(ctx, field)
+			case "enumValues":
+				return ec.fieldContext___Type_enumValues(ctx, field)
+			case "inputFields":
+				return ec.fieldContext___Type_inputFields(ctx, field)
+			case "ofType":
+				return ec.fieldContext___Type_ofType(ctx, field)
+			case "specifiedByURL":
+				return ec.fieldContext___Type_specifiedByURL(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Schema_queryType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Schema_queryType(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.QueryType(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(*introspection.Type)
+	fc.Result = res
+	return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_queryType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Schema",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "kind":
+				return ec.fieldContext___Type_kind(ctx, field)
+			case "name":
+				return ec.fieldContext___Type_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Type_description(ctx, field)
+			case "fields":
+				return ec.fieldContext___Type_fields(ctx, field)
+			case "interfaces":
+				return ec.fieldContext___Type_interfaces(ctx, field)
+			case "possibleTypes":
+				return ec.fieldContext___Type_possibleTypes(ctx, field)
+			case "enumValues":
+				return ec.fieldContext___Type_enumValues(ctx, field)
+			case "inputFields":
+				return ec.fieldContext___Type_inputFields(ctx, field)
+			case "ofType":
+				return ec.fieldContext___Type_ofType(ctx, field)
+			case "specifiedByURL":
+				return ec.fieldContext___Type_specifiedByURL(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Schema_mutationType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Schema_mutationType(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.MutationType(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*introspection.Type)
+	fc.Result = res
+	return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_mutationType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Schema",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "kind":
+				return ec.fieldContext___Type_kind(ctx, field)
+			case "name":
+				return ec.fieldContext___Type_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Type_description(ctx, field)
+			case "fields":
+				return ec.fieldContext___Type_fields(ctx, field)
+			case "interfaces":
+				return ec.fieldContext___Type_interfaces(ctx, field)
+			case "possibleTypes":
+				return ec.fieldContext___Type_possibleTypes(ctx, field)
+			case "enumValues":
+				return ec.fieldContext___Type_enumValues(ctx, field)
+			case "inputFields":
+				return ec.fieldContext___Type_inputFields(ctx, field)
+			case "ofType":
+				return ec.fieldContext___Type_ofType(ctx, field)
+			case "specifiedByURL":
+				return ec.fieldContext___Type_specifiedByURL(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Schema_subscriptionType(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.SubscriptionType(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*introspection.Type)
+	fc.Result = res
+	return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Schema",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "kind":
+				return ec.fieldContext___Type_kind(ctx, field)
+			case "name":
+				return ec.fieldContext___Type_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Type_description(ctx, field)
+			case "fields":
+				return ec.fieldContext___Type_fields(ctx, field)
+			case "interfaces":
+				return ec.fieldContext___Type_interfaces(ctx, field)
+			case "possibleTypes":
+				return ec.fieldContext___Type_possibleTypes(ctx, field)
+			case "enumValues":
+				return ec.fieldContext___Type_enumValues(ctx, field)
+			case "inputFields":
+				return ec.fieldContext___Type_inputFields(ctx, field)
+			case "ofType":
+				return ec.fieldContext___Type_ofType(ctx, field)
+			case "specifiedByURL":
+				return ec.fieldContext___Type_specifiedByURL(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Schema_directives(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Schema_directives(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Directives(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]introspection.Directive)
+	fc.Result = res
+	return ec.marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_directives(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Schema",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "name":
+				return ec.fieldContext___Directive_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Directive_description(ctx, field)
+			case "locations":
+				return ec.fieldContext___Directive_locations(ctx, field)
+			case "args":
+				return ec.fieldContext___Directive_args(ctx, field)
+			case "isRepeatable":
+				return ec.fieldContext___Directive_isRepeatable(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Directive", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Type_kind(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Type_kind(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Kind(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		if !graphql.HasFieldError(ctx, fc) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(string)
+	fc.Result = res
+	return ec.marshalN__TypeKind2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Type",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type __TypeKind does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Type_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Type_name(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Name(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Type",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Type_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Type_description(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Description(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Type",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Type_fields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Type_fields(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Fields(fc.Args["includeDeprecated"].(bool)), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.([]introspection.Field)
+	fc.Result = res
+	return ec.marshalO__Field2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_fields(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Type",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "name":
+				return ec.fieldContext___Field_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Field_description(ctx, field)
+			case "args":
+				return ec.fieldContext___Field_args(ctx, field)
+			case "type":
+				return ec.fieldContext___Field_type(ctx, field)
+			case "isDeprecated":
+				return ec.fieldContext___Field_isDeprecated(ctx, field)
+			case "deprecationReason":
+				return ec.fieldContext___Field_deprecationReason(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Field", field.Name)
+		},
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = ec.Recover(ctx, r)
+			ec.Error(ctx, err)
+		}
+	}()
+	ctx = graphql.WithFieldContext(ctx, fc)
+	if fc.Args, err = ec.field___Type_fields_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+		ec.Error(ctx, err)
+		return fc, err
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Type_interfaces(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Type_interfaces(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.Interfaces(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.([]introspection.Type)
+	fc.Result = res
+	return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_interfaces(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Type",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "kind":
+				return ec.fieldContext___Type_kind(ctx, field)
+			case "name":
+				return ec.fieldContext___Type_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Type_description(ctx, field)
+			case "fields":
+				return ec.fieldContext___Type_fields(ctx, field)
+			case "interfaces":
+				return ec.fieldContext___Type_interfaces(ctx, field)
+			case "possibleTypes":
+				return ec.fieldContext___Type_possibleTypes(ctx, field)
+			case "enumValues":
+				return ec.fieldContext___Type_enumValues(ctx, field)
+			case "inputFields":
+				return ec.fieldContext___Type_inputFields(ctx, field)
+			case "ofType":
+				return ec.fieldContext___Type_ofType(ctx, field)
+			case "specifiedByURL":
+				return ec.fieldContext___Type_specifiedByURL(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Type_possibleTypes(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Type_possibleTypes(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.PossibleTypes(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.([]introspection.Type)
+	fc.Result = res
+	return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_possibleTypes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Type",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "kind":
+				return ec.fieldContext___Type_kind(ctx, field)
+			case "name":
+				return ec.fieldContext___Type_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Type_description(ctx, field)
+			case "fields":
+				return ec.fieldContext___Type_fields(ctx, field)
+			case "interfaces":
+				return ec.fieldContext___Type_interfaces(ctx, field)
+			case "possibleTypes":
+				return ec.fieldContext___Type_possibleTypes(ctx, field)
+			case "enumValues":
+				return ec.fieldContext___Type_enumValues(ctx, field)
+			case "inputFields":
+				return ec.fieldContext___Type_inputFields(ctx, field)
+			case "ofType":
+				return ec.fieldContext___Type_ofType(ctx, field)
+			case "specifiedByURL":
+				return ec.fieldContext___Type_specifiedByURL(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Type_enumValues(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Type_enumValues(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.EnumValues(fc.Args["includeDeprecated"].(bool)), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.([]introspection.EnumValue)
+	fc.Result = res
+	return ec.marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_enumValues(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Type",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "name":
+				return ec.fieldContext___EnumValue_name(ctx, field)
+			case "description":
+				return ec.fieldContext___EnumValue_description(ctx, field)
+			case "isDeprecated":
+				return ec.fieldContext___EnumValue_isDeprecated(ctx, field)
+			case "deprecationReason":
+				return ec.fieldContext___EnumValue_deprecationReason(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __EnumValue", field.Name)
+		},
+	}
+	defer func() {
+		if r := recover(); r != nil {
+			err = ec.Recover(ctx, r)
+			ec.Error(ctx, err)
+		}
+	}()
+	ctx = graphql.WithFieldContext(ctx, fc)
+	if fc.Args, err = ec.field___Type_enumValues_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+		ec.Error(ctx, err)
+		return fc, err
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Type_inputFields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Type_inputFields(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.InputFields(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.([]introspection.InputValue)
+	fc.Result = res
+	return ec.marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_inputFields(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Type",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "name":
+				return ec.fieldContext___InputValue_name(ctx, field)
+			case "description":
+				return ec.fieldContext___InputValue_description(ctx, field)
+			case "type":
+				return ec.fieldContext___InputValue_type(ctx, field)
+			case "defaultValue":
+				return ec.fieldContext___InputValue_defaultValue(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __InputValue", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Type_ofType(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Type_ofType(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.OfType(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*introspection.Type)
+	fc.Result = res
+	return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_ofType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Type",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			switch field.Name {
+			case "kind":
+				return ec.fieldContext___Type_kind(ctx, field)
+			case "name":
+				return ec.fieldContext___Type_name(ctx, field)
+			case "description":
+				return ec.fieldContext___Type_description(ctx, field)
+			case "fields":
+				return ec.fieldContext___Type_fields(ctx, field)
+			case "interfaces":
+				return ec.fieldContext___Type_interfaces(ctx, field)
+			case "possibleTypes":
+				return ec.fieldContext___Type_possibleTypes(ctx, field)
+			case "enumValues":
+				return ec.fieldContext___Type_enumValues(ctx, field)
+			case "inputFields":
+				return ec.fieldContext___Type_inputFields(ctx, field)
+			case "ofType":
+				return ec.fieldContext___Type_ofType(ctx, field)
+			case "specifiedByURL":
+				return ec.fieldContext___Type_specifiedByURL(ctx, field)
+			}
+			return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+		},
+	}
+	return fc, nil
+}
+
+func (ec *executionContext) ___Type_specifiedByURL(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+	fc, err := ec.fieldContext___Type_specifiedByURL(ctx, field)
+	if err != nil {
+		return graphql.Null
+	}
+	ctx = graphql.WithFieldContext(ctx, fc)
+	defer func() {
+		if r := recover(); r != nil {
+			ec.Error(ctx, ec.Recover(ctx, r))
+			ret = graphql.Null
+		}
+	}()
+	resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+		ctx = rctx // use context from middleware stack in children
+		return obj.SpecifiedByURL(), nil
+	})
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	if resTmp == nil {
+		return graphql.Null
+	}
+	res := resTmp.(*string)
+	fc.Result = res
+	return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_specifiedByURL(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+	fc = &graphql.FieldContext{
+		Object:     "__Type",
+		Field:      field,
+		IsMethod:   true,
+		IsResolver: false,
+		Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+			return nil, errors.New("field of type String does not have child fields")
+		},
+	}
+	return fc, nil
+}
+
+// endregion **************************** field.gotpl *****************************
+
+// region    **************************** input.gotpl *****************************
+
+func (ec *executionContext) unmarshalInputBooleanFilter(ctx context.Context, obj interface{}) (model.BooleanFilter, error) {
+	var it model.BooleanFilter
+	asMap := map[string]interface{}{}
+	for k, v := range obj.(map[string]interface{}) {
+		asMap[k] = v
+	}
+
+	fieldsInOrder := [...]string{"eq"}
+	for _, k := range fieldsInOrder {
+		v, ok := asMap[k]
+		if !ok {
+			continue
+		}
+		switch k {
+		case "eq":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("eq"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOBoolean2ᚖbool(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*bool); ok {
+				it.Eq = data
+			} else if tmp == nil {
+				it.Eq = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *bool`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		}
+	}
+
+	return it, nil
+}
+
+// unmarshalInputDateTimeFilter decodes a raw GraphQL input map into a
+// model.DateTimeFilter. Each comparison field (eq/gt/lt/gte/lte) is an
+// optional *time.Time and is passed through the oneOf directive before
+// assignment. gqlgen-generated (input.gotpl) — do not edit by hand.
+func (ec *executionContext) unmarshalInputDateTimeFilter(ctx context.Context, obj interface{}) (model.DateTimeFilter, error) {
+	var it model.DateTimeFilter
+	asMap := map[string]interface{}{}
+	for k, v := range obj.(map[string]interface{}) {
+		asMap[k] = v
+	}
+
+	// Fields are visited in schema order; keys absent from the input are skipped.
+	fieldsInOrder := [...]string{"eq", "gt", "lt", "gte", "lte"}
+	for _, k := range fieldsInOrder {
+		v, ok := asMap[k]
+		if !ok {
+			continue
+		}
+		switch k {
+		case "eq":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("eq"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalODateTime2ᚖtimeᚐTime(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*time.Time); ok {
+				it.Eq = data
+			} else if tmp == nil {
+				it.Eq = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *time.Time`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "gt":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("gt"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalODateTime2ᚖtimeᚐTime(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*time.Time); ok {
+				it.Gt = data
+			} else if tmp == nil {
+				it.Gt = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *time.Time`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "lt":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("lt"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalODateTime2ᚖtimeᚐTime(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*time.Time); ok {
+				it.Lt = data
+			} else if tmp == nil {
+				it.Lt = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *time.Time`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "gte":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("gte"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalODateTime2ᚖtimeᚐTime(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*time.Time); ok {
+				it.Gte = data
+			} else if tmp == nil {
+				it.Gte = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *time.Time`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "lte":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("lte"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalODateTime2ᚖtimeᚐTime(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*time.Time); ok {
+				it.Lte = data
+			} else if tmp == nil {
+				it.Lte = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *time.Time`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		}
+	}
+
+	return it, nil
+}
+
+// unmarshalInputIntFilter decodes a raw GraphQL input map into a
+// model.IntFilter. Comparison fields (eq/gt/lt/gte/lte) are optional *int64;
+// "in" is an optional []int64. Every field passes through the oneOf
+// directive. gqlgen-generated (input.gotpl) — do not edit by hand.
+func (ec *executionContext) unmarshalInputIntFilter(ctx context.Context, obj interface{}) (model.IntFilter, error) {
+	var it model.IntFilter
+	asMap := map[string]interface{}{}
+	for k, v := range obj.(map[string]interface{}) {
+		asMap[k] = v
+	}
+
+	// Fields are visited in schema order; keys absent from the input are skipped.
+	fieldsInOrder := [...]string{"eq", "gt", "lt", "gte", "lte", "in"}
+	for _, k := range fieldsInOrder {
+		v, ok := asMap[k]
+		if !ok {
+			continue
+		}
+		switch k {
+		case "eq":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("eq"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOInt2ᚖint64(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*int64); ok {
+				it.Eq = data
+			} else if tmp == nil {
+				it.Eq = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *int64`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "gt":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("gt"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOInt2ᚖint64(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*int64); ok {
+				it.Gt = data
+			} else if tmp == nil {
+				it.Gt = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *int64`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "lt":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("lt"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOInt2ᚖint64(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*int64); ok {
+				it.Lt = data
+			} else if tmp == nil {
+				it.Lt = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *int64`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "gte":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("gte"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOInt2ᚖint64(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*int64); ok {
+				it.Gte = data
+			} else if tmp == nil {
+				it.Gte = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *int64`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "lte":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("lte"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOInt2ᚖint64(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*int64); ok {
+				it.Lte = data
+			} else if tmp == nil {
+				it.Lte = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *int64`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "in":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("in"))
+			// "in" is a non-null-element list: []int64, not *int64.
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOInt2ᚕint64ᚄ(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.([]int64); ok {
+				it.In = data
+			} else if tmp == nil {
+				it.In = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be []int64`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		}
+	}
+
+	return it, nil
+}
+
+// unmarshalInputPagination decodes a raw GraphQL input map into a
+// model.Pagination. Both "offset" and "limit" are required Int! fields
+// (unmarshalNInt2int64) and no directives are applied to them.
+// gqlgen-generated (input.gotpl) — do not edit by hand.
+func (ec *executionContext) unmarshalInputPagination(ctx context.Context, obj interface{}) (model.Pagination, error) {
+	var it model.Pagination
+	asMap := map[string]interface{}{}
+	for k, v := range obj.(map[string]interface{}) {
+		asMap[k] = v
+	}
+
+	// Fields are visited in schema order; keys absent from the input are skipped.
+	fieldsInOrder := [...]string{"offset", "limit"}
+	for _, k := range fieldsInOrder {
+		v, ok := asMap[k]
+		if !ok {
+			continue
+		}
+		switch k {
+		case "offset":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("offset"))
+			data, err := ec.unmarshalNInt2int64(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Offset = data
+		case "limit":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("limit"))
+			data, err := ec.unmarshalNInt2int64(ctx, v)
+			if err != nil {
+				return it, err
+			}
+			it.Limit = data
+		}
+	}
+
+	return it, nil
+}
+
+// unmarshalInputStringFilter decodes a raw GraphQL input map into a
+// model.StringFilter. "eq" and "substr" are optional *string; "in" is an
+// optional []string. Every field passes through the oneOf directive.
+// gqlgen-generated (input.gotpl) — do not edit by hand.
+func (ec *executionContext) unmarshalInputStringFilter(ctx context.Context, obj interface{}) (model.StringFilter, error) {
+	var it model.StringFilter
+	asMap := map[string]interface{}{}
+	for k, v := range obj.(map[string]interface{}) {
+		asMap[k] = v
+	}
+
+	// Fields are visited in schema order; keys absent from the input are skipped.
+	fieldsInOrder := [...]string{"eq", "substr", "in"}
+	for _, k := range fieldsInOrder {
+		v, ok := asMap[k]
+		if !ok {
+			continue
+		}
+		switch k {
+		case "eq":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("eq"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOString2ᚖstring(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*string); ok {
+				it.Eq = data
+			} else if tmp == nil {
+				it.Eq = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *string`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "substr":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("substr"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOString2ᚖstring(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*string); ok {
+				it.Substr = data
+			} else if tmp == nil {
+				it.Substr = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *string`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "in":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("in"))
+			// "in" is a non-null-element list: []string, not *string.
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOString2ᚕstringᚄ(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.([]string); ok {
+				it.In = data
+			} else if tmp == nil {
+				it.In = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be []string`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		}
+	}
+
+	return it, nil
+}
+
+// unmarshalInputTorrentFilter decodes a raw GraphQL input map into a
+// model.TorrentFilter with optional "everything" (*bool) and "infohash"
+// (*string) fields, each gated by the oneOf directive.
+// gqlgen-generated (input.gotpl) — do not edit by hand.
+func (ec *executionContext) unmarshalInputTorrentFilter(ctx context.Context, obj interface{}) (model.TorrentFilter, error) {
+	var it model.TorrentFilter
+	asMap := map[string]interface{}{}
+	for k, v := range obj.(map[string]interface{}) {
+		asMap[k] = v
+	}
+
+	// Fields are visited in schema order; keys absent from the input are skipped.
+	fieldsInOrder := [...]string{"everything", "infohash"}
+	for _, k := range fieldsInOrder {
+		v, ok := asMap[k]
+		if !ok {
+			continue
+		}
+		switch k {
+		case "everything":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("everything"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOBoolean2ᚖbool(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*bool); ok {
+				it.Everything = data
+			} else if tmp == nil {
+				it.Everything = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *bool`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "infohash":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("infohash"))
+			directive0 := func(ctx context.Context) (interface{}, error) { return ec.unmarshalOString2ᚖstring(ctx, v) }
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*string); ok {
+				it.Infohash = data
+			} else if tmp == nil {
+				it.Infohash = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *string`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		}
+	}
+
+	return it, nil
+}
+
+// unmarshalInputTorrentsFilter decodes a raw GraphQL input map into a
+// model.TorrentsFilter. Each field is itself an optional nested filter input
+// ("name" → *model.StringFilter; the byte/peer counters → *model.IntFilter),
+// unmarshalled recursively and gated by the oneOf directive.
+// gqlgen-generated (input.gotpl) — do not edit by hand.
+func (ec *executionContext) unmarshalInputTorrentsFilter(ctx context.Context, obj interface{}) (model.TorrentsFilter, error) {
+	var it model.TorrentsFilter
+	asMap := map[string]interface{}{}
+	for k, v := range obj.(map[string]interface{}) {
+		asMap[k] = v
+	}
+
+	// Fields are visited in schema order; keys absent from the input are skipped.
+	fieldsInOrder := [...]string{"name", "bytesCompleted", "bytesMissing", "peersCount"}
+	for _, k := range fieldsInOrder {
+		v, ok := asMap[k]
+		if !ok {
+			continue
+		}
+		switch k {
+		case "name":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
+			directive0 := func(ctx context.Context) (interface{}, error) {
+				return ec.unmarshalOStringFilter2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐStringFilter(ctx, v)
+			}
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*model.StringFilter); ok {
+				it.Name = data
+			} else if tmp == nil {
+				it.Name = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model.StringFilter`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "bytesCompleted":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("bytesCompleted"))
+			directive0 := func(ctx context.Context) (interface{}, error) {
+				return ec.unmarshalOIntFilter2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐIntFilter(ctx, v)
+			}
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*model.IntFilter); ok {
+				it.BytesCompleted = data
+			} else if tmp == nil {
+				it.BytesCompleted = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model.IntFilter`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "bytesMissing":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("bytesMissing"))
+			directive0 := func(ctx context.Context) (interface{}, error) {
+				return ec.unmarshalOIntFilter2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐIntFilter(ctx, v)
+			}
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*model.IntFilter); ok {
+				it.BytesMissing = data
+			} else if tmp == nil {
+				it.BytesMissing = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model.IntFilter`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		case "peersCount":
+			ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("peersCount"))
+			directive0 := func(ctx context.Context) (interface{}, error) {
+				return ec.unmarshalOIntFilter2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐIntFilter(ctx, v)
+			}
+			directive1 := func(ctx context.Context) (interface{}, error) {
+				if ec.directives.OneOf == nil {
+					return nil, errors.New("directive oneOf is not implemented")
+				}
+				return ec.directives.OneOf(ctx, obj, directive0)
+			}
+
+			tmp, err := directive1(ctx)
+			if err != nil {
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+			if data, ok := tmp.(*model.IntFilter); ok {
+				it.PeersCount = data
+			} else if tmp == nil {
+				it.PeersCount = nil
+			} else {
+				err := fmt.Errorf(`unexpected type %T from directive, should be *git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model.IntFilter`, tmp)
+				return it, graphql.ErrorOnPath(ctx, err)
+			}
+		}
+	}
+
+	return it, nil
+}
+
+// endregion **************************** input.gotpl *****************************
+
+// region    ************************** interface.gotpl ***************************
+
+// _DirEntry dispatches marshaling of the DirEntry GraphQL interface to the
+// concrete object marshaler matching the runtime type (Dir, File, ResolverFS,
+// TorrentFS or ArchiveFS). Both value and pointer forms are accepted; nil
+// interfaces and nil pointers marshal to graphql.Null. An unregistered type
+// panics. gqlgen-generated (interface.gotpl) — do not edit by hand.
+func (ec *executionContext) _DirEntry(ctx context.Context, sel ast.SelectionSet, obj model.DirEntry) graphql.Marshaler {
+	switch obj := (obj).(type) {
+	case nil:
+		return graphql.Null
+	case model.Dir:
+		return ec._Dir(ctx, sel, &obj)
+	case *model.Dir:
+		if obj == nil {
+			return graphql.Null
+		}
+		return ec._Dir(ctx, sel, obj)
+	case model.File:
+		return ec._File(ctx, sel, &obj)
+	case *model.File:
+		if obj == nil {
+			return graphql.Null
+		}
+		return ec._File(ctx, sel, obj)
+	case model.ResolverFs:
+		return ec._ResolverFS(ctx, sel, &obj)
+	case *model.ResolverFs:
+		if obj == nil {
+			return graphql.Null
+		}
+		return ec._ResolverFS(ctx, sel, obj)
+	case model.TorrentFs:
+		return ec._TorrentFS(ctx, sel, &obj)
+	case *model.TorrentFs:
+		if obj == nil {
+			return graphql.Null
+		}
+		return ec._TorrentFS(ctx, sel, obj)
+	case model.ArchiveFs:
+		return ec._ArchiveFS(ctx, sel, &obj)
+	case *model.ArchiveFs:
+		if obj == nil {
+			return graphql.Null
+		}
+		return ec._ArchiveFS(ctx, sel, obj)
+	default:
+		panic(fmt.Errorf("unexpected type %T", obj))
+	}
+}
+
+// _Progress dispatches marshaling of the Progress GraphQL interface; the only
+// registered implementation is TorrentProgress (value or pointer). Nil
+// marshals to graphql.Null; any other type panics.
+// gqlgen-generated (interface.gotpl) — do not edit by hand.
+func (ec *executionContext) _Progress(ctx context.Context, sel ast.SelectionSet, obj model.Progress) graphql.Marshaler {
+	switch obj := (obj).(type) {
+	case nil:
+		return graphql.Null
+	case model.TorrentProgress:
+		return ec._TorrentProgress(ctx, sel, &obj)
+	case *model.TorrentProgress:
+		if obj == nil {
+			return graphql.Null
+		}
+		return ec._TorrentProgress(ctx, sel, obj)
+	default:
+		panic(fmt.Errorf("unexpected type %T", obj))
+	}
+}
+
+// endregion ************************** interface.gotpl ***************************
+
+// region    **************************** object.gotpl ****************************
+
+// archiveFSImplementors lists the type names used for fragment/selection
+// matching: the concrete object plus the DirEntry interface it implements.
+var archiveFSImplementors = []string{"ArchiveFS", "DirEntry"}
+
+// _ArchiveFS marshals *model.ArchiveFs as the "ArchiveFS" GraphQL object.
+// "name" and "size" are non-nullable: resolving either to graphql.Null
+// invalidates the whole object (returns graphql.Null).
+// gqlgen-generated (object.gotpl) — do not edit by hand.
+func (ec *executionContext) _ArchiveFS(ctx context.Context, sel ast.SelectionSet, obj *model.ArchiveFs) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, archiveFSImplementors)
+
+	out := graphql.NewFieldSet(fields)
+	// No field here populates `deferred`; the dispatch loop below is
+	// template boilerplate that runs zero times for this object.
+	deferred := make(map[string]*graphql.FieldSet)
+	for i, field := range fields {
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("ArchiveFS")
+		case "name":
+			out.Values[i] = ec._ArchiveFS_name(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "size":
+			out.Values[i] = ec._ArchiveFS_size(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	out.Dispatch(ctx)
+	if out.Invalids > 0 {
+		return graphql.Null
+	}
+
+	atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+	for label, dfs := range deferred {
+		ec.processDeferredGroup(graphql.DeferredGroup{
+			Label:    label,
+			Path:     graphql.GetPath(ctx),
+			FieldSet: dfs,
+			Context:  ctx,
+		})
+	}
+
+	return out
+}
+
+// cleanupResponseImplementors lists the type names used for fragment/selection
+// matching for the CleanupResponse object.
+var cleanupResponseImplementors = []string{"CleanupResponse"}
+
+// _CleanupResponse marshals *model.CleanupResponse as the "CleanupResponse"
+// GraphQL object. "count" and "list" are non-nullable: a graphql.Null result
+// on either invalidates the whole object.
+// gqlgen-generated (object.gotpl) — do not edit by hand.
+func (ec *executionContext) _CleanupResponse(ctx context.Context, sel ast.SelectionSet, obj *model.CleanupResponse) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, cleanupResponseImplementors)
+
+	out := graphql.NewFieldSet(fields)
+	// `deferred` is never populated for this object (no @defer fields).
+	deferred := make(map[string]*graphql.FieldSet)
+	for i, field := range fields {
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("CleanupResponse")
+		case "count":
+			out.Values[i] = ec._CleanupResponse_count(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "list":
+			out.Values[i] = ec._CleanupResponse_list(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	out.Dispatch(ctx)
+	if out.Invalids > 0 {
+		return graphql.Null
+	}
+
+	atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+	for label, dfs := range deferred {
+		ec.processDeferredGroup(graphql.DeferredGroup{
+			Label:    label,
+			Path:     graphql.GetPath(ctx),
+			FieldSet: dfs,
+			Context:  ctx,
+		})
+	}
+
+	return out
+}
+
+// dirImplementors lists the type names used for fragment/selection matching:
+// the concrete Dir object plus the DirEntry interface it implements.
+var dirImplementors = []string{"Dir", "DirEntry"}
+
+// _Dir marshals *model.Dir as the "Dir" GraphQL object. Its only field,
+// "name", is non-nullable; a graphql.Null result invalidates the object.
+// gqlgen-generated (object.gotpl) — do not edit by hand.
+func (ec *executionContext) _Dir(ctx context.Context, sel ast.SelectionSet, obj *model.Dir) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, dirImplementors)
+
+	out := graphql.NewFieldSet(fields)
+	// `deferred` is never populated for this object (no @defer fields).
+	deferred := make(map[string]*graphql.FieldSet)
+	for i, field := range fields {
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("Dir")
+		case "name":
+			out.Values[i] = ec._Dir_name(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	out.Dispatch(ctx)
+	if out.Invalids > 0 {
+		return graphql.Null
+	}
+
+	atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+	for label, dfs := range deferred {
+		ec.processDeferredGroup(graphql.DeferredGroup{
+			Label:    label,
+			Path:     graphql.GetPath(ctx),
+			FieldSet: dfs,
+			Context:  ctx,
+		})
+	}
+
+	return out
+}
+
+// downloadTorrentResponseImplementors lists the type names used for
+// fragment/selection matching for the DownloadTorrentResponse object.
+var downloadTorrentResponseImplementors = []string{"DownloadTorrentResponse"}
+
+// _DownloadTorrentResponse marshals *model.DownloadTorrentResponse as the
+// "DownloadTorrentResponse" GraphQL object. "task" is nullable — note there
+// is no Invalids bump after it, unlike non-nullable fields elsewhere.
+// gqlgen-generated (object.gotpl) — do not edit by hand.
+func (ec *executionContext) _DownloadTorrentResponse(ctx context.Context, sel ast.SelectionSet, obj *model.DownloadTorrentResponse) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, downloadTorrentResponseImplementors)
+
+	out := graphql.NewFieldSet(fields)
+	// `deferred` is never populated for this object (no @defer fields).
+	deferred := make(map[string]*graphql.FieldSet)
+	for i, field := range fields {
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("DownloadTorrentResponse")
+		case "task":
+			out.Values[i] = ec._DownloadTorrentResponse_task(ctx, field, obj)
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	out.Dispatch(ctx)
+	if out.Invalids > 0 {
+		return graphql.Null
+	}
+
+	atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+	for label, dfs := range deferred {
+		ec.processDeferredGroup(graphql.DeferredGroup{
+			Label:    label,
+			Path:     graphql.GetPath(ctx),
+			FieldSet: dfs,
+			Context:  ctx,
+		})
+	}
+
+	return out
+}
+
+// fileImplementors lists the type names used for fragment/selection matching:
+// the concrete File object plus the DirEntry interface it implements.
+var fileImplementors = []string{"File", "DirEntry"}
+
+// _File marshals *model.File as the "File" GraphQL object. "name" and "size"
+// are non-nullable: a graphql.Null result on either invalidates the object.
+// gqlgen-generated (object.gotpl) — do not edit by hand.
+func (ec *executionContext) _File(ctx context.Context, sel ast.SelectionSet, obj *model.File) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, fileImplementors)
+
+	out := graphql.NewFieldSet(fields)
+	// `deferred` is never populated for this object (no @defer fields).
+	deferred := make(map[string]*graphql.FieldSet)
+	for i, field := range fields {
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("File")
+		case "name":
+			out.Values[i] = ec._File_name(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "size":
+			out.Values[i] = ec._File_size(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	out.Dispatch(ctx)
+	if out.Invalids > 0 {
+		return graphql.Null
+	}
+
+	atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+	for label, dfs := range deferred {
+		ec.processDeferredGroup(graphql.DeferredGroup{
+			Label:    label,
+			Path:     graphql.GetPath(ctx),
+			FieldSet: dfs,
+			Context:  ctx,
+		})
+	}
+
+	return out
+}
+
+// listDirResponseImplementors lists the type names used for fragment/selection
+// matching for the ListDirResponse object.
+var listDirResponseImplementors = []string{"ListDirResponse"}
+
+// _ListDirResponse marshals *model.ListDirResponse as the "ListDirResponse"
+// GraphQL object. "root" and "entries" are non-nullable: a graphql.Null
+// result on either invalidates the whole object.
+// gqlgen-generated (object.gotpl) — do not edit by hand.
+func (ec *executionContext) _ListDirResponse(ctx context.Context, sel ast.SelectionSet, obj *model.ListDirResponse) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, listDirResponseImplementors)
+
+	out := graphql.NewFieldSet(fields)
+	// `deferred` is never populated for this object (no @defer fields).
+	deferred := make(map[string]*graphql.FieldSet)
+	for i, field := range fields {
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("ListDirResponse")
+		case "root":
+			out.Values[i] = ec._ListDirResponse_root(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "entries":
+			out.Values[i] = ec._ListDirResponse_entries(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	out.Dispatch(ctx)
+	if out.Invalids > 0 {
+		return graphql.Null
+	}
+
+	atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+	for label, dfs := range deferred {
+		ec.processDeferredGroup(graphql.DeferredGroup{
+			Label:    label,
+			Path:     graphql.GetPath(ctx),
+			FieldSet: dfs,
+			Context:  ctx,
+		})
+	}
+
+	return out
+}
+
+// mutationImplementors lists the type names used for fragment/selection
+// matching for the root Mutation object.
+var mutationImplementors = []string{"Mutation"}
+
+// _Mutation executes the selected root mutation fields. Per the GraphQL spec,
+// mutation fields run sequentially (no out.Concurrently here), each wrapped
+// in the root-resolver middleware. validateTorrents, cleanupTorrents and
+// dedupeStorage are non-nullable (Null invalidates the response);
+// downloadTorrent is nullable — no Invalids bump.
+// gqlgen-generated (object.gotpl) — do not edit by hand.
+func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, mutationImplementors)
+	ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
+		Object: "Mutation",
+	})
+
+	out := graphql.NewFieldSet(fields)
+	// `deferred` is never populated for this object (no @defer fields).
+	deferred := make(map[string]*graphql.FieldSet)
+	for i, field := range fields {
+		innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{
+			Object: field.Name,
+			Field:  field,
+		})
+
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("Mutation")
+		case "validateTorrents":
+			out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
+				return ec._Mutation_validateTorrents(ctx, field)
+			})
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "cleanupTorrents":
+			out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
+				return ec._Mutation_cleanupTorrents(ctx, field)
+			})
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		case "downloadTorrent":
+			out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
+				return ec._Mutation_downloadTorrent(ctx, field)
+			})
+		case "dedupeStorage":
+			out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
+				return ec._Mutation_dedupeStorage(ctx, field)
+			})
+			if out.Values[i] == graphql.Null {
+				out.Invalids++
+			}
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	out.Dispatch(ctx)
+	if out.Invalids > 0 {
+		return graphql.Null
+	}
+
+	atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+	for label, dfs := range deferred {
+		ec.processDeferredGroup(graphql.DeferredGroup{
+			Label:    label,
+			Path:     graphql.GetPath(ctx),
+			FieldSet: dfs,
+			Context:  ctx,
+		})
+	}
+
+	return out
+}
+
+// queryImplementors lists the type names used for fragment/selection matching
+// for the root Query object.
+var queryImplementors = []string{"Query"}
+
+// _Query executes the selected root query fields. "torrents" and "fsListDir"
+// are resolved concurrently via out.Concurrently with per-field panic
+// recovery; a Null result atomically bumps the field set's Invalids counter.
+// Introspection fields (__type, __schema) resolve inline.
+// gqlgen-generated (object.gotpl) — do not edit by hand.
+func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, queryImplementors)
+	ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
+		Object: "Query",
+	})
+
+	out := graphql.NewFieldSet(fields)
+	// `deferred` is never populated for this object (no @defer fields).
+	deferred := make(map[string]*graphql.FieldSet)
+	for i, field := range fields {
+		innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{
+			Object: field.Name,
+			Field:  field,
+		})
+
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("Query")
+		case "torrents":
+			field := field // capture per-iteration copy for the closures below
+
+			innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+				defer func() {
+					if r := recover(); r != nil {
+						ec.Error(ctx, ec.Recover(ctx, r))
+					}
+				}()
+				res = ec._Query_torrents(ctx, field)
+				if res == graphql.Null {
+					atomic.AddUint32(&fs.Invalids, 1)
+				}
+				return res
+			}
+
+			rrm := func(ctx context.Context) graphql.Marshaler {
+				return ec.OperationContext.RootResolverMiddleware(ctx,
+					func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+			}
+
+			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+		case "fsListDir":
+			field := field // capture per-iteration copy for the closures below
+
+			innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+				defer func() {
+					if r := recover(); r != nil {
+						ec.Error(ctx, ec.Recover(ctx, r))
+					}
+				}()
+				res = ec._Query_fsListDir(ctx, field)
+				if res == graphql.Null {
+					atomic.AddUint32(&fs.Invalids, 1)
+				}
+				return res
+			}
+
+			rrm := func(ctx context.Context) graphql.Marshaler {
+				return ec.OperationContext.RootResolverMiddleware(ctx,
+					func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+			}
+
+			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+		case "__type":
+			out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
+				return ec._Query___type(ctx, field)
+			})
+		case "__schema":
+			out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
+				return ec._Query___schema(ctx, field)
+			})
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	out.Dispatch(ctx)
+	if out.Invalids > 0 {
+		return graphql.Null
+	}
+
+	atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+	for label, dfs := range deferred {
+		ec.processDeferredGroup(graphql.DeferredGroup{
+			Label:    label,
+			Path:     graphql.GetPath(ctx),
+			FieldSet: dfs,
+			Context:  ctx,
+		})
+	}
+
+	return out
+}
+
var resolverFSImplementors = []string{"ResolverFS", "DirEntry"}

// _ResolverFS renders a *model.ResolverFs as the GraphQL "ResolverFS" object
// (which also satisfies the DirEntry interface). gqlgen-generated executor:
// collects the selected fields, marshals each, and returns graphql.Null if
// any non-nullable field resolved to null.
func (ec *executionContext) _ResolverFS(ctx context.Context, sel ast.SelectionSet, obj *model.ResolverFs) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, resolverFSImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // no @defer-able fields on this type; stays empty
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("ResolverFS")
		case "name":
			out.Values[i] = ec._ResolverFS_name(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var schemaImplementors = []string{"Schema"}

// _Schema renders a *model.Schema as the GraphQL "Schema" object. Neither
// field carries a null check here, i.e. both are nullable in the schema.
func (ec *executionContext) _Schema(ctx context.Context, sel ast.SelectionSet, obj *model.Schema) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, schemaImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("Schema")
		case "query":
			out.Values[i] = ec._Schema_query(ctx, field, obj)
		case "mutation":
			out.Values[i] = ec._Schema_mutation(ctx, field, obj)
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
+var subscriptionImplementors = []string{"Subscription"}
+
+func (ec *executionContext) _Subscription(ctx context.Context, sel ast.SelectionSet) func(ctx context.Context) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.OperationContext, sel, subscriptionImplementors)
+	ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
+		Object: "Subscription",
+	})
+	if len(fields) != 1 {
+		ec.Errorf(ctx, "must subscribe to exactly one stream")
+		return nil
+	}
+
+	switch fields[0].Name {
+	case "taskProgress":
+		return ec._Subscription_taskProgress(ctx, fields[0])
+	case "torrentDownloadUpdates":
+		return ec._Subscription_torrentDownloadUpdates(ctx, fields[0])
+	default:
+		panic("unknown field " + strconv.Quote(fields[0].Name))
+	}
+}
+
var taskImplementors = []string{"Task"}

// _Task renders a *model.Task as the GraphQL "Task" object. The id field is
// non-null: a null result marks the whole object invalid.
func (ec *executionContext) _Task(ctx context.Context, sel ast.SelectionSet, obj *model.Task) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, taskImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("Task")
		case "id":
			out.Values[i] = ec._Task_id(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var torrentImplementors = []string{"Torrent"}

// _Torrent renders a *model.Torrent as the GraphQL "Torrent" object.
// Resolver-backed fields (name, files, excludedFiles, peers) are executed
// concurrently via out.Concurrently and support @defer: a deferrable field's
// work is parked in the `deferred` map and handed to ec.processDeferredGroup
// after the main field set has been dispatched. Plain fields (infohash,
// bytesCompleted, torrentFilePath, bytesMissing) marshal inline and use
// atomic Invalids accounting because they may race with the concurrent ones.
func (ec *executionContext) _Torrent(ctx context.Context, sel ast.SelectionSet, obj *model.Torrent) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, torrentImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet)
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("Torrent")
		case "name":
			field := field // capture per-iteration copy for the closures below

			innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
				defer func() {
					if r := recover(); r != nil {
						ec.Error(ctx, ec.Recover(ctx, r))
					}
				}()
				res = ec._Torrent_name(ctx, field, obj)
				if res == graphql.Null {
					atomic.AddUint32(&fs.Invalids, 1)
				}
				return res
			}

			if field.Deferrable != nil {
				// Group deferred work by @defer label; one FieldSet per label.
				dfs, ok := deferred[field.Deferrable.Label]
				di := 0
				if ok {
					dfs.AddField(field)
					di = len(dfs.Values) - 1
				} else {
					dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
					deferred[field.Deferrable.Label] = dfs
				}
				dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
					return innerFunc(ctx, dfs)
				})

				// don't run the out.Concurrently() call below
				out.Values[i] = graphql.Null
				continue
			}

			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
		case "infohash":
			out.Values[i] = ec._Torrent_infohash(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				atomic.AddUint32(&out.Invalids, 1)
			}
		case "bytesCompleted":
			out.Values[i] = ec._Torrent_bytesCompleted(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				atomic.AddUint32(&out.Invalids, 1)
			}
		case "torrentFilePath":
			out.Values[i] = ec._Torrent_torrentFilePath(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				atomic.AddUint32(&out.Invalids, 1)
			}
		case "bytesMissing":
			out.Values[i] = ec._Torrent_bytesMissing(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				atomic.AddUint32(&out.Invalids, 1)
			}
		case "files":
			field := field // capture per-iteration copy for the closures below

			innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
				defer func() {
					if r := recover(); r != nil {
						ec.Error(ctx, ec.Recover(ctx, r))
					}
				}()
				res = ec._Torrent_files(ctx, field, obj)
				if res == graphql.Null {
					atomic.AddUint32(&fs.Invalids, 1)
				}
				return res
			}

			if field.Deferrable != nil {
				dfs, ok := deferred[field.Deferrable.Label]
				di := 0
				if ok {
					dfs.AddField(field)
					di = len(dfs.Values) - 1
				} else {
					dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
					deferred[field.Deferrable.Label] = dfs
				}
				dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
					return innerFunc(ctx, dfs)
				})

				// don't run the out.Concurrently() call below
				out.Values[i] = graphql.Null
				continue
			}

			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
		case "excludedFiles":
			field := field // capture per-iteration copy for the closures below

			innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
				defer func() {
					if r := recover(); r != nil {
						ec.Error(ctx, ec.Recover(ctx, r))
					}
				}()
				res = ec._Torrent_excludedFiles(ctx, field, obj)
				if res == graphql.Null {
					atomic.AddUint32(&fs.Invalids, 1)
				}
				return res
			}

			if field.Deferrable != nil {
				dfs, ok := deferred[field.Deferrable.Label]
				di := 0
				if ok {
					dfs.AddField(field)
					di = len(dfs.Values) - 1
				} else {
					dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
					deferred[field.Deferrable.Label] = dfs
				}
				dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
					return innerFunc(ctx, dfs)
				})

				// don't run the out.Concurrently() call below
				out.Values[i] = graphql.Null
				continue
			}

			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
		case "peers":
			field := field // capture per-iteration copy for the closures below

			innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
				defer func() {
					if r := recover(); r != nil {
						ec.Error(ctx, ec.Recover(ctx, r))
					}
				}()
				res = ec._Torrent_peers(ctx, field, obj)
				if res == graphql.Null {
					atomic.AddUint32(&fs.Invalids, 1)
				}
				return res
			}

			if field.Deferrable != nil {
				dfs, ok := deferred[field.Deferrable.Label]
				di := 0
				if ok {
					dfs.AddField(field)
					di = len(dfs.Values) - 1
				} else {
					dfs = graphql.NewFieldSet([]graphql.CollectedField{field})
					deferred[field.Deferrable.Label] = dfs
				}
				dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler {
					return innerFunc(ctx, dfs)
				})

				// don't run the out.Concurrently() call below
				out.Values[i] = graphql.Null
				continue
			}

			out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var torrentFSImplementors = []string{"TorrentFS", "DirEntry"}

// _TorrentFS renders a *model.TorrentFs as the GraphQL "TorrentFS" object
// (also satisfying the DirEntry interface). name and torrent are non-null.
func (ec *executionContext) _TorrentFS(ctx context.Context, sel ast.SelectionSet, obj *model.TorrentFs) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, torrentFSImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("TorrentFS")
		case "name":
			out.Values[i] = ec._TorrentFS_name(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		case "torrent":
			out.Values[i] = ec._TorrentFS_torrent(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var torrentFileImplementors = []string{"TorrentFile"}

// _TorrentFile renders a *model.TorrentFile as the GraphQL "TorrentFile"
// object. All three fields are non-null.
func (ec *executionContext) _TorrentFile(ctx context.Context, sel ast.SelectionSet, obj *model.TorrentFile) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, torrentFileImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("TorrentFile")
		case "filename":
			out.Values[i] = ec._TorrentFile_filename(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		case "size":
			out.Values[i] = ec._TorrentFile_size(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "bytesCompleted":
			out.Values[i] = ec._TorrentFile_bytesCompleted(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var torrentPeerImplementors = []string{"TorrentPeer"}

// _TorrentPeer renders a *model.TorrentPeer as the GraphQL "TorrentPeer"
// object. All five fields are non-null.
func (ec *executionContext) _TorrentPeer(ctx context.Context, sel ast.SelectionSet, obj *model.TorrentPeer) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, torrentPeerImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("TorrentPeer")
		case "ip":
			out.Values[i] = ec._TorrentPeer_ip(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		case "downloadRate":
			out.Values[i] = ec._TorrentPeer_downloadRate(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "discovery":
			out.Values[i] = ec._TorrentPeer_discovery(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "port":
			out.Values[i] = ec._TorrentPeer_port(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "clientName":
			out.Values[i] = ec._TorrentPeer_clientName(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var torrentProgressImplementors = []string{"TorrentProgress", "Progress"}

// _TorrentProgress renders a *model.TorrentProgress as the GraphQL
// "TorrentProgress" object (also satisfying the Progress interface).
func (ec *executionContext) _TorrentProgress(ctx context.Context, sel ast.SelectionSet, obj *model.TorrentProgress) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, torrentProgressImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("TorrentProgress")
		case "torrent":
			out.Values[i] = ec._TorrentProgress_torrent(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		case "current":
			out.Values[i] = ec._TorrentProgress_current(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "total":
			out.Values[i] = ec._TorrentProgress_total(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var __DirectiveImplementors = []string{"__Directive"}

// ___Directive renders an introspection.Directive as the GraphQL
// "__Directive" introspection object; description is the only nullable field.
func (ec *executionContext) ___Directive(ctx context.Context, sel ast.SelectionSet, obj *introspection.Directive) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, __DirectiveImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("__Directive")
		case "name":
			out.Values[i] = ec.___Directive_name(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		case "description":
			out.Values[i] = ec.___Directive_description(ctx, field, obj)
		case "locations":
			out.Values[i] = ec.___Directive_locations(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "args":
			out.Values[i] = ec.___Directive_args(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "isRepeatable":
			out.Values[i] = ec.___Directive_isRepeatable(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var __EnumValueImplementors = []string{"__EnumValue"}

// ___EnumValue renders an introspection.EnumValue as the GraphQL
// "__EnumValue" introspection object; description and deprecationReason
// are nullable.
func (ec *executionContext) ___EnumValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.EnumValue) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, __EnumValueImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("__EnumValue")
		case "name":
			out.Values[i] = ec.___EnumValue_name(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		case "description":
			out.Values[i] = ec.___EnumValue_description(ctx, field, obj)
		case "isDeprecated":
			out.Values[i] = ec.___EnumValue_isDeprecated(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "deprecationReason":
			out.Values[i] = ec.___EnumValue_deprecationReason(ctx, field, obj)
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var __FieldImplementors = []string{"__Field"}

// ___Field renders an introspection.Field as the GraphQL "__Field"
// introspection object; description and deprecationReason are nullable.
func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, __FieldImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("__Field")
		case "name":
			out.Values[i] = ec.___Field_name(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		case "description":
			out.Values[i] = ec.___Field_description(ctx, field, obj)
		case "args":
			out.Values[i] = ec.___Field_args(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "type":
			out.Values[i] = ec.___Field_type(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "isDeprecated":
			out.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "deprecationReason":
			out.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var __InputValueImplementors = []string{"__InputValue"}

// ___InputValue renders an introspection.InputValue as the GraphQL
// "__InputValue" introspection object; description and defaultValue are
// nullable.
func (ec *executionContext) ___InputValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.InputValue) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, __InputValueImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("__InputValue")
		case "name":
			out.Values[i] = ec.___InputValue_name(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		case "description":
			out.Values[i] = ec.___InputValue_description(ctx, field, obj)
		case "type":
			out.Values[i] = ec.___InputValue_type(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "defaultValue":
			out.Values[i] = ec.___InputValue_defaultValue(ctx, field, obj)
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var __SchemaImplementors = []string{"__Schema"}

// ___Schema renders an introspection.Schema as the GraphQL "__Schema"
// introspection object; description, mutationType and subscriptionType are
// nullable.
func (ec *executionContext) ___Schema(ctx context.Context, sel ast.SelectionSet, obj *introspection.Schema) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, __SchemaImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("__Schema")
		case "description":
			out.Values[i] = ec.___Schema_description(ctx, field, obj)
		case "types":
			out.Values[i] = ec.___Schema_types(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		case "queryType":
			out.Values[i] = ec.___Schema_queryType(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		case "mutationType":
			out.Values[i] = ec.___Schema_mutationType(ctx, field, obj)
		case "subscriptionType":
			out.Values[i] = ec.___Schema_subscriptionType(ctx, field, obj)
		case "directives":
			out.Values[i] = ec.___Schema_directives(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++
			}
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
var __TypeImplementors = []string{"__Type"}

// ___Type renders an introspection.Type as the GraphQL "__Type"
// introspection object; only kind is non-null.
func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, obj *introspection.Type) graphql.Marshaler {
	fields := graphql.CollectFields(ec.OperationContext, sel, __TypeImplementors)

	out := graphql.NewFieldSet(fields)
	deferred := make(map[string]*graphql.FieldSet) // never populated for this type
	for i, field := range fields {
		switch field.Name {
		case "__typename":
			out.Values[i] = graphql.MarshalString("__Type")
		case "kind":
			out.Values[i] = ec.___Type_kind(ctx, field, obj)
			if out.Values[i] == graphql.Null {
				out.Invalids++ // non-null field resolved to null
			}
		case "name":
			out.Values[i] = ec.___Type_name(ctx, field, obj)
		case "description":
			out.Values[i] = ec.___Type_description(ctx, field, obj)
		case "fields":
			out.Values[i] = ec.___Type_fields(ctx, field, obj)
		case "interfaces":
			out.Values[i] = ec.___Type_interfaces(ctx, field, obj)
		case "possibleTypes":
			out.Values[i] = ec.___Type_possibleTypes(ctx, field, obj)
		case "enumValues":
			out.Values[i] = ec.___Type_enumValues(ctx, field, obj)
		case "inputFields":
			out.Values[i] = ec.___Type_inputFields(ctx, field, obj)
		case "ofType":
			out.Values[i] = ec.___Type_ofType(ctx, field, obj)
		case "specifiedByURL":
			out.Values[i] = ec.___Type_specifiedByURL(ctx, field, obj)
		default:
			panic("unknown field " + strconv.Quote(field.Name))
		}
	}
	out.Dispatch(ctx)
	if out.Invalids > 0 {
		return graphql.Null
	}

	atomic.AddInt32(&ec.deferred, int32(len(deferred)))

	for label, dfs := range deferred {
		ec.processDeferredGroup(graphql.DeferredGroup{
			Label:    label,
			Path:     graphql.GetPath(ctx),
			FieldSet: dfs,
			Context:  ctx,
		})
	}

	return out
}
+
+// endregion **************************** object.gotpl ****************************
+
+// region    ***************************** type.gotpl *****************************
+
+func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
+	res, err := graphql.UnmarshalBoolean(v)
+	return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler {
+	res := graphql.MarshalBoolean(v)
+	if res == graphql.Null {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+	}
+	return res
+}
+
+func (ec *executionContext) marshalNCleanupResponse2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐCleanupResponse(ctx context.Context, sel ast.SelectionSet, v model.CleanupResponse) graphql.Marshaler {
+	return ec._CleanupResponse(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalNCleanupResponse2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐCleanupResponse(ctx context.Context, sel ast.SelectionSet, v *model.CleanupResponse) graphql.Marshaler {
+	if v == nil {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+		return graphql.Null
+	}
+	return ec._CleanupResponse(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNDirEntry2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDirEntry(ctx context.Context, sel ast.SelectionSet, v model.DirEntry) graphql.Marshaler {
+	if v == nil {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+		return graphql.Null
+	}
+	return ec._DirEntry(ctx, sel, v)
+}
+
// marshalNDirEntry2ᚕ… marshals a non-null list of non-null DirEntry values.
// Elements are marshalled concurrently (one goroutine each) unless the list
// has exactly one element, in which case it runs inline. A panic in any
// element is recovered and nils the whole result; any null element makes the
// whole list null (non-null element type).
func (ec *executionContext) marshalNDirEntry2ᚕgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDirEntryᚄ(ctx context.Context, sel ast.SelectionSet, v []model.DirEntry) graphql.Marshaler {
	ret := make(graphql.Array, len(v))
	var wg sync.WaitGroup
	isLen1 := len(v) == 1
	if !isLen1 {
		wg.Add(len(v))
	}
	for i := range v {
		i := i // per-iteration copy captured by the closure below
		fc := &graphql.FieldContext{
			Index:  &i,
			Result: &v[i],
		}
		ctx := graphql.WithFieldContext(ctx, fc)
		f := func(i int) {
			defer func() {
				if r := recover(); r != nil {
					ec.Error(ctx, ec.Recover(ctx, r))
					ret = nil
				}
			}()
			if !isLen1 {
				defer wg.Done()
			}
			ret[i] = ec.marshalNDirEntry2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDirEntry(ctx, sel, v[i])
		}
		if isLen1 {
			f(i)
		} else {
			go f(i)
		}

	}
	wg.Wait()

	for _, e := range ret {
		if e == graphql.Null {
			return graphql.Null
		}
	}

	return ret
}
+
+func (ec *executionContext) unmarshalNFloat2float64(ctx context.Context, v interface{}) (float64, error) {
+	res, err := graphql.UnmarshalFloatContext(ctx, v)
+	return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNFloat2float64(ctx context.Context, sel ast.SelectionSet, v float64) graphql.Marshaler {
+	res := graphql.MarshalFloatContext(v)
+	if res == graphql.Null {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+	}
+	return graphql.WrapContextMarshaler(ctx, res)
+}
+
+func (ec *executionContext) unmarshalNID2string(ctx context.Context, v interface{}) (string, error) {
+	res, err := graphql.UnmarshalID(v)
+	return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNID2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
+	res := graphql.MarshalID(v)
+	if res == graphql.Null {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+	}
+	return res
+}
+
+func (ec *executionContext) unmarshalNInt2int64(ctx context.Context, v interface{}) (int64, error) {
+	res, err := graphql.UnmarshalInt64(v)
+	return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNInt2int64(ctx context.Context, sel ast.SelectionSet, v int64) graphql.Marshaler {
+	res := graphql.MarshalInt64(v)
+	if res == graphql.Null {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+	}
+	return res
+}
+
+func (ec *executionContext) marshalNListDirResponse2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐListDirResponse(ctx context.Context, sel ast.SelectionSet, v model.ListDirResponse) graphql.Marshaler {
+	return ec._ListDirResponse(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalNListDirResponse2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐListDirResponse(ctx context.Context, sel ast.SelectionSet, v *model.ListDirResponse) graphql.Marshaler {
+	if v == nil {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+		return graphql.Null
+	}
+	return ec._ListDirResponse(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalNString2string(ctx context.Context, v interface{}) (string, error) {
+	res, err := graphql.UnmarshalString(v)
+	return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
+	res := graphql.MarshalString(v)
+	if res == graphql.Null {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+	}
+	return res
+}
+
+func (ec *executionContext) unmarshalNString2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) {
+	var vSlice []interface{}
+	if v != nil {
+		vSlice = graphql.CoerceList(v)
+	}
+	var err error
+	res := make([]string, len(vSlice))
+	for i := range vSlice {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
+		res[i], err = ec.unmarshalNString2string(ctx, vSlice[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	return res, nil
+}
+
+func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler {
+	ret := make(graphql.Array, len(v))
+	for i := range v {
+		ret[i] = ec.marshalNString2string(ctx, sel, v[i])
+	}
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
// marshalNTorrent2ᚕᚖ… marshals a non-null list of non-null *Torrent values.
// Elements are marshalled concurrently (one goroutine each) unless the list
// has exactly one element, in which case it runs inline. A panic in any
// element is recovered and nils the whole result; any null element makes the
// whole list null (non-null element type).
func (ec *executionContext) marshalNTorrent2ᚕᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Torrent) graphql.Marshaler {
	ret := make(graphql.Array, len(v))
	var wg sync.WaitGroup
	isLen1 := len(v) == 1
	if !isLen1 {
		wg.Add(len(v))
	}
	for i := range v {
		i := i // per-iteration copy captured by the closure below
		fc := &graphql.FieldContext{
			Index:  &i,
			Result: &v[i],
		}
		ctx := graphql.WithFieldContext(ctx, fc)
		f := func(i int) {
			defer func() {
				if r := recover(); r != nil {
					ec.Error(ctx, ec.Recover(ctx, r))
					ret = nil
				}
			}()
			if !isLen1 {
				defer wg.Done()
			}
			ret[i] = ec.marshalNTorrent2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrent(ctx, sel, v[i])
		}
		if isLen1 {
			f(i)
		} else {
			go f(i)
		}

	}
	wg.Wait()

	for _, e := range ret {
		if e == graphql.Null {
			return graphql.Null
		}
	}

	return ret
}
+
+func (ec *executionContext) marshalNTorrent2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrent(ctx context.Context, sel ast.SelectionSet, v *model.Torrent) graphql.Marshaler {
+	if v == nil {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+		return graphql.Null
+	}
+	return ec._Torrent(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNTorrentFile2ᚕᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentFileᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.TorrentFile) graphql.Marshaler {
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalNTorrentFile2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentFile(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+
+	}
+	wg.Wait()
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) marshalNTorrentFile2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentFile(ctx context.Context, sel ast.SelectionSet, v *model.TorrentFile) graphql.Marshaler {
+	if v == nil {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+		return graphql.Null
+	}
+	return ec._TorrentFile(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalNTorrentFilter2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentFilter(ctx context.Context, v interface{}) (model.TorrentFilter, error) {
+	res, err := ec.unmarshalInputTorrentFilter(ctx, v)
+	return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNTorrentPeer2ᚕᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentPeerᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.TorrentPeer) graphql.Marshaler {
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalNTorrentPeer2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentPeer(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+
+	}
+	wg.Wait()
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) marshalNTorrentPeer2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentPeer(ctx context.Context, sel ast.SelectionSet, v *model.TorrentPeer) graphql.Marshaler {
+	if v == nil {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+		return graphql.Null
+	}
+	return ec._TorrentPeer(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v introspection.Directive) graphql.Marshaler {
+	return ec.___Directive(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Directive) graphql.Marshaler {
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+
+	}
+	wg.Wait()
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) unmarshalN__DirectiveLocation2string(ctx context.Context, v interface{}) (string, error) {
+	res, err := graphql.UnmarshalString(v)
+	return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalN__DirectiveLocation2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
+	res := graphql.MarshalString(v)
+	if res == graphql.Null {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+	}
+	return res
+}
+
+func (ec *executionContext) unmarshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) {
+	var vSlice []interface{}
+	if v != nil {
+		vSlice = graphql.CoerceList(v)
+	}
+	var err error
+	res := make([]string, len(vSlice))
+	for i := range vSlice {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
+		res[i], err = ec.unmarshalN__DirectiveLocation2string(ctx, vSlice[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	return res, nil
+}
+
+func (ec *executionContext) marshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler {
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalN__DirectiveLocation2string(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+
+	}
+	wg.Wait()
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx context.Context, sel ast.SelectionSet, v introspection.EnumValue) graphql.Marshaler {
+	return ec.___EnumValue(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx context.Context, sel ast.SelectionSet, v introspection.Field) graphql.Marshaler {
+	return ec.___Field(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel ast.SelectionSet, v introspection.InputValue) graphql.Marshaler {
+	return ec.___InputValue(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler {
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+
+	}
+	wg.Wait()
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v introspection.Type) graphql.Marshaler {
+	return ec.___Type(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler {
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+
+	}
+	wg.Wait()
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler {
+	if v == nil {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+		return graphql.Null
+	}
+	return ec.___Type(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalN__TypeKind2string(ctx context.Context, v interface{}) (string, error) {
+	res, err := graphql.UnmarshalString(v)
+	return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
+	res := graphql.MarshalString(v)
+	if res == graphql.Null {
+		if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+			ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+		}
+	}
+	return res
+}
+
+func (ec *executionContext) unmarshalOBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
+	res, err := graphql.UnmarshalBoolean(v)
+	return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler {
+	res := graphql.MarshalBoolean(v)
+	return res
+}
+
+func (ec *executionContext) unmarshalOBoolean2ᚖbool(ctx context.Context, v interface{}) (*bool, error) {
+	if v == nil {
+		return nil, nil
+	}
+	res, err := graphql.UnmarshalBoolean(v)
+	return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOBoolean2ᚖbool(ctx context.Context, sel ast.SelectionSet, v *bool) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	res := graphql.MarshalBoolean(*v)
+	return res
+}
+
+func (ec *executionContext) unmarshalODateTime2ᚖtimeᚐTime(ctx context.Context, v interface{}) (*time.Time, error) {
+	if v == nil {
+		return nil, nil
+	}
+	res, err := graphql.UnmarshalTime(v)
+	return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalODateTime2ᚖtimeᚐTime(ctx context.Context, sel ast.SelectionSet, v *time.Time) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	res := graphql.MarshalTime(*v)
+	return res
+}
+
+func (ec *executionContext) marshalODownloadTorrentResponse2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐDownloadTorrentResponse(ctx context.Context, sel ast.SelectionSet, v *model.DownloadTorrentResponse) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	return ec._DownloadTorrentResponse(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalOInt2ᚕint64ᚄ(ctx context.Context, v interface{}) ([]int64, error) {
+	if v == nil {
+		return nil, nil
+	}
+	var vSlice []interface{}
+	if v != nil {
+		vSlice = graphql.CoerceList(v)
+	}
+	var err error
+	res := make([]int64, len(vSlice))
+	for i := range vSlice {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
+		res[i], err = ec.unmarshalNInt2int64(ctx, vSlice[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	return res, nil
+}
+
+func (ec *executionContext) marshalOInt2ᚕint64ᚄ(ctx context.Context, sel ast.SelectionSet, v []int64) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	ret := make(graphql.Array, len(v))
+	for i := range v {
+		ret[i] = ec.marshalNInt2int64(ctx, sel, v[i])
+	}
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) unmarshalOInt2ᚖint64(ctx context.Context, v interface{}) (*int64, error) {
+	if v == nil {
+		return nil, nil
+	}
+	res, err := graphql.UnmarshalInt64(v)
+	return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOInt2ᚖint64(ctx context.Context, sel ast.SelectionSet, v *int64) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	res := graphql.MarshalInt64(*v)
+	return res
+}
+
+func (ec *executionContext) unmarshalOIntFilter2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐIntFilter(ctx context.Context, v interface{}) (*model.IntFilter, error) {
+	if v == nil {
+		return nil, nil
+	}
+	res, err := ec.unmarshalInputIntFilter(ctx, v)
+	return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOMutation2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐMutation(ctx context.Context, sel ast.SelectionSet, v *model.Mutation) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	return ec._Mutation(ctx, sel)
+}
+
+func (ec *executionContext) unmarshalOPagination2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐPagination(ctx context.Context, v interface{}) (*model.Pagination, error) {
+	if v == nil {
+		return nil, nil
+	}
+	res, err := ec.unmarshalInputPagination(ctx, v)
+	return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOProgress2gitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐProgress(ctx context.Context, sel ast.SelectionSet, v model.Progress) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	return ec._Progress(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalOQuery2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐQuery(ctx context.Context, sel ast.SelectionSet, v *model.Query) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	return ec._Query(ctx, sel)
+}
+
+func (ec *executionContext) unmarshalOString2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) {
+	if v == nil {
+		return nil, nil
+	}
+	var vSlice []interface{}
+	if v != nil {
+		vSlice = graphql.CoerceList(v)
+	}
+	var err error
+	res := make([]string, len(vSlice))
+	for i := range vSlice {
+		ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
+		res[i], err = ec.unmarshalNString2string(ctx, vSlice[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	return res, nil
+}
+
+func (ec *executionContext) marshalOString2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	ret := make(graphql.Array, len(v))
+	for i := range v {
+		ret[i] = ec.marshalNString2string(ctx, sel, v[i])
+	}
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) unmarshalOString2ᚖstring(ctx context.Context, v interface{}) (*string, error) {
+	if v == nil {
+		return nil, nil
+	}
+	res, err := graphql.UnmarshalString(v)
+	return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOString2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	res := graphql.MarshalString(*v)
+	return res
+}
+
+func (ec *executionContext) unmarshalOStringFilter2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐStringFilter(ctx context.Context, v interface{}) (*model.StringFilter, error) {
+	if v == nil {
+		return nil, nil
+	}
+	res, err := ec.unmarshalInputStringFilter(ctx, v)
+	return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOTask2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTask(ctx context.Context, sel ast.SelectionSet, v *model.Task) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	return ec._Task(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalOTorrentProgress2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentProgress(ctx context.Context, sel ast.SelectionSet, v *model.TorrentProgress) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	return ec._TorrentProgress(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalOTorrentsFilter2ᚖgitᚗkmsignᚗruᚋroyalcatᚋtstorᚋsrcᚋdeliveryᚋgraphqlᚋmodelᚐTorrentsFilter(ctx context.Context, v interface{}) (*model.TorrentsFilter, error) {
+	if v == nil {
+		return nil, nil
+	}
+	res, err := ec.unmarshalInputTorrentsFilter(ctx, v)
+	return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.EnumValue) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+
+	}
+	wg.Wait()
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) marshalO__Field2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Field) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+
+	}
+	wg.Wait()
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+
+	}
+	wg.Wait()
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx context.Context, sel ast.SelectionSet, v *introspection.Schema) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	return ec.___Schema(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	ret := make(graphql.Array, len(v))
+	var wg sync.WaitGroup
+	isLen1 := len(v) == 1
+	if !isLen1 {
+		wg.Add(len(v))
+	}
+	for i := range v {
+		i := i
+		fc := &graphql.FieldContext{
+			Index:  &i,
+			Result: &v[i],
+		}
+		ctx := graphql.WithFieldContext(ctx, fc)
+		f := func(i int) {
+			defer func() {
+				if r := recover(); r != nil {
+					ec.Error(ctx, ec.Recover(ctx, r))
+					ret = nil
+				}
+			}()
+			if !isLen1 {
+				defer wg.Done()
+			}
+			ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i])
+		}
+		if isLen1 {
+			f(i)
+		} else {
+			go f(i)
+		}
+
+	}
+	wg.Wait()
+
+	for _, e := range ret {
+		if e == graphql.Null {
+			return graphql.Null
+		}
+	}
+
+	return ret
+}
+
+func (ec *executionContext) marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler {
+	if v == nil {
+		return graphql.Null
+	}
+	return ec.___Type(ctx, sel, v)
+}
+
+// endregion ***************************** type.gotpl *****************************
diff --git a/src/delivery/graphql/model/filter.go b/src/delivery/graphql/model/filter.go
new file mode 100644
index 0000000..0c96e46
--- /dev/null
+++ b/src/delivery/graphql/model/filter.go
@@ -0,0 +1,27 @@
+package model
+
+import "slices"
+
+// Include reports whether v satisfies the filter. Every non-nil condition
+// must hold (conditions are ANDed); a filter with no conditions matches any value.
+func (f *IntFilter) Include(v int64) bool {
+	if f.Eq != nil && v != *f.Eq {
+		return false
+	}
+	if f.Gt != nil && v <= *f.Gt {
+		return false
+	}
+	if f.Gte != nil && v < *f.Gte {
+		return false
+	}
+	if f.Lt != nil && v >= *f.Lt {
+		return false
+	}
+	if f.Lte != nil && v > *f.Lte {
+		return false
+	}
+	if f.In != nil && !slices.Contains(f.In, v) {
+		return false
+	}
+	return true
+}
diff --git a/src/delivery/graphql/model/mappers.go b/src/delivery/graphql/model/mappers.go
new file mode 100644
index 0000000..e2227e0
--- /dev/null
+++ b/src/delivery/graphql/model/mappers.go
@@ -0,0 +1,37 @@
+package model
+
+import (
+	"git.kmsign.ru/royalcat/tstor/src/host/controller"
+	"github.com/anacrolix/torrent"
+)
+
+func MapPeerSource(src torrent.PeerSource) string {
+	switch src {
+	case torrent.PeerSourceTracker:
+		return "Tracker"
+	case torrent.PeerSourceDhtAnnouncePeer:
+		return "DHT Announce"
+	case torrent.PeerSourceDhtGetPeers:
+		return "DHT"
+	case torrent.PeerSourcePex:
+		return "PEX"
+	case torrent.PeerSourceIncoming:
+		return "Incoming"
+	case torrent.PeerSourceDirect:
+		return "Direct"
+	case torrent.PeerSourceUtHolepunch:
+		return "Ut Holepunch"
+	default:
+		return "Unknown"
+	}
+}
+
+func MapTorrent(t *controller.Torrent) *Torrent { // builds the GraphQL Torrent model from a controller.Torrent
+	return &Torrent{
+		Infohash:       t.InfoHash(),
+		Name:           t.Name(),
+		BytesCompleted: t.BytesCompleted(),
+		BytesMissing:   t.BytesMissing(),
+		T:              t, // raw controller handle, tagged json:"-"; presumably used by field resolvers — confirm
+	}
+} // NOTE(review): Files/ExcludedFiles/Peers are left nil here — presumably resolved lazily elsewhere
diff --git a/src/delivery/graphql/model/models_gen.go b/src/delivery/graphql/model/models_gen.go
new file mode 100644
index 0000000..6852932
--- /dev/null
+++ b/src/delivery/graphql/model/models_gen.go
@@ -0,0 +1,173 @@
+// Code generated by github.com/99designs/gqlgen, DO NOT EDIT.
+
+package model
+
+import (
+	"time"
+
+	"git.kmsign.ru/royalcat/tstor/src/host/controller"
+	"github.com/anacrolix/torrent"
+)
+
+type DirEntry interface {
+	IsDirEntry()
+	GetName() string
+}
+
+type Progress interface {
+	IsProgress()
+	GetCurrent() int64
+	GetTotal() int64
+}
+
+type ArchiveFs struct {
+	Name string `json:"name"`
+	Size int64  `json:"size"`
+}
+
+func (ArchiveFs) IsDirEntry()          {}
+func (this ArchiveFs) GetName() string { return this.Name }
+
+type BooleanFilter struct {
+	Eq *bool `json:"eq,omitempty"`
+}
+
+type CleanupResponse struct {
+	Count int64    `json:"count"`
+	List  []string `json:"list"`
+}
+
+type DateTimeFilter struct {
+	Eq  *time.Time `json:"eq,omitempty"`
+	Gt  *time.Time `json:"gt,omitempty"`
+	Lt  *time.Time `json:"lt,omitempty"`
+	Gte *time.Time `json:"gte,omitempty"`
+	Lte *time.Time `json:"lte,omitempty"`
+}
+
+type Dir struct {
+	Name string `json:"name"`
+}
+
+func (Dir) IsDirEntry()          {}
+func (this Dir) GetName() string { return this.Name }
+
+type DownloadTorrentResponse struct {
+	Task *Task `json:"task,omitempty"`
+}
+
+type File struct {
+	Name string `json:"name"`
+	Size int64  `json:"size"`
+}
+
+func (File) IsDirEntry()          {}
+func (this File) GetName() string { return this.Name }
+
+type IntFilter struct {
+	Eq  *int64  `json:"eq,omitempty"`
+	Gt  *int64  `json:"gt,omitempty"`
+	Lt  *int64  `json:"lt,omitempty"`
+	Gte *int64  `json:"gte,omitempty"`
+	Lte *int64  `json:"lte,omitempty"`
+	In  []int64 `json:"in,omitempty"`
+}
+
+type ListDirResponse struct {
+	Root    DirEntry   `json:"root"`
+	Entries []DirEntry `json:"entries"`
+}
+
+type Mutation struct {
+}
+
+type Pagination struct {
+	Offset int64 `json:"offset"`
+	Limit  int64 `json:"limit"`
+}
+
+type Query struct {
+}
+
+type ResolverFs struct {
+	Name string `json:"name"`
+}
+
+func (ResolverFs) IsDirEntry()          {}
+func (this ResolverFs) GetName() string { return this.Name }
+
+type Schema struct {
+	Query    *Query    `json:"query,omitempty"`
+	Mutation *Mutation `json:"mutation,omitempty"`
+}
+
+type StringFilter struct {
+	Eq     *string  `json:"eq,omitempty"`
+	Substr *string  `json:"substr,omitempty"`
+	In     []string `json:"in,omitempty"`
+}
+
+type Subscription struct {
+}
+
+type Task struct {
+	ID string `json:"id"`
+}
+
+type Torrent struct {
+	Name            string              `json:"name"`
+	Infohash        string              `json:"infohash"`
+	BytesCompleted  int64               `json:"bytesCompleted"`
+	TorrentFilePath string              `json:"torrentFilePath"`
+	BytesMissing    int64               `json:"bytesMissing"`
+	Files           []*TorrentFile      `json:"files"`
+	ExcludedFiles   []*TorrentFile      `json:"excludedFiles"`
+	Peers           []*TorrentPeer      `json:"peers"`
+	T               *controller.Torrent `json:"-"`
+}
+
+type TorrentFs struct {
+	Name    string   `json:"name"`
+	Torrent *Torrent `json:"torrent"`
+}
+
+func (TorrentFs) IsDirEntry()          {}
+func (this TorrentFs) GetName() string { return this.Name }
+
+type TorrentFile struct {
+	Filename       string        `json:"filename"`
+	Size           int64         `json:"size"`
+	BytesCompleted int64         `json:"bytesCompleted"`
+	F              *torrent.File `json:"-"`
+}
+
+type TorrentFilter struct {
+	Everything *bool   `json:"everything,omitempty"`
+	Infohash   *string `json:"infohash,omitempty"`
+}
+
+type TorrentPeer struct {
+	IP           string            `json:"ip"`
+	DownloadRate float64           `json:"downloadRate"`
+	Discovery    string            `json:"discovery"`
+	Port         int64             `json:"port"`
+	ClientName   string            `json:"clientName"`
+	F            *torrent.PeerConn `json:"-"`
+}
+
+type TorrentProgress struct {
+	Torrent *Torrent `json:"torrent"`
+	Current int64    `json:"current"`
+	Total   int64    `json:"total"`
+}
+
+func (TorrentProgress) IsProgress()            {}
+func (this TorrentProgress) GetCurrent() int64 { return this.Current }
+func (this TorrentProgress) GetTotal() int64   { return this.Total }
+
+type TorrentsFilter struct {
+	Name           *StringFilter `json:"name,omitempty"`
+	BytesCompleted *IntFilter    `json:"bytesCompleted,omitempty"`
+	BytesMissing   *IntFilter    `json:"bytesMissing,omitempty"`
+	PeersCount     *IntFilter    `json:"peersCount,omitempty"`
+}
diff --git a/src/delivery/graphql/oneof.go b/src/delivery/graphql/oneof.go
new file mode 100644
index 0000000..93bf335
--- /dev/null
+++ b/src/delivery/graphql/oneof.go
@@ -0,0 +1,29 @@
+package graph
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/99designs/gqlgen/graphql"
+)
+
+// OneOf is a directive implementation that rejects input objects with more than
+// one non-null field set (zero set fields is allowed, matching prior behavior).
+func OneOf(ctx context.Context, obj interface{}, next graphql.Resolver) (res interface{}, err error) {
+	m, ok := obj.(map[string]any)
+	if !ok {
+		return nil, fmt.Errorf("OneOf error, unknown object type: %T", obj)
+	}
+
+	wasValue := false
+	for k, v := range m {
+		if v != nil {
+			if wasValue {
+				return nil, fmt.Errorf("OneOf with multiple fields: %s", k)
+			}
+			wasValue = true
+		}
+	}
+
+	return next(ctx)
+}
diff --git a/src/delivery/graphql/resolver/mutation.resolvers.go b/src/delivery/graphql/resolver/mutation.resolvers.go
new file mode 100644
index 0000000..ed7dc31
--- /dev/null
+++ b/src/delivery/graphql/resolver/mutation.resolvers.go
@@ -0,0 +1,101 @@
+package resolver
+
+// This file will be automatically regenerated based on the schema, any resolver implementations
+// will be copied through when generating and any unknown code will be moved to the end.
+// Code generated by github.com/99designs/gqlgen version v0.17.43
+
+import (
+	"context"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/uuid"
+	graph "git.kmsign.ru/royalcat/tstor/src/delivery/graphql"
+	"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model"
+	"git.kmsign.ru/royalcat/tstor/src/host/service"
+	aih "github.com/anacrolix/torrent/types/infohash"
+)
+
+// ValidateTorrents is the resolver for the validateTorrents field.
+// Returns true only when every targeted torrent validates successfully.
+func (r *mutationResolver) ValidateTorrents(ctx context.Context, filter model.TorrentFilter) (bool, error) {
+	if filter.Infohash != nil {
+		t, err := r.Resolver.Service.GetTorrent(*filter.Infohash)
+		if err != nil {
+			return false, err
+		}
+		if t == nil {
+			return false, nil // unknown infohash: nothing to validate
+		}
+		err = t.ValidateTorrent() // report validation failures instead of discarding them
+		return err == nil, err
+	}
+
+	if filter.Everything != nil && *filter.Everything {
+		torrents, err := r.Resolver.Service.ListTorrents(ctx)
+		if err != nil {
+			return false, err
+		}
+		for _, v := range torrents {
+			if err := v.ValidateTorrent(); err != nil {
+				return false, err
+			}
+		}
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// CleanupTorrents is the resolver for the cleanupTorrents field.
+// It delegates to Storage.CleanupFiles when files is set and true, otherwise
+// to Storage.CleanupDirs; dryRun is passed through to the storage layer.
+func (r *mutationResolver) CleanupTorrents(ctx context.Context, files *bool, dryRun bool) (*model.CleanupResponse, error) {
+	torrents, err := r.Service.ListTorrents(ctx)
+	if err != nil {
+		return nil, err
+	}
+	// "removed" avoids shadowing the resolver receiver r.
+	var removed []string
+	if files != nil && *files {
+		removed, err = r.Service.Storage.CleanupFiles(ctx, torrents, dryRun)
+	} else {
+		removed, err = r.Service.Storage.CleanupDirs(ctx, torrents, dryRun)
+	}
+
+	return &model.CleanupResponse{
+		Count: int64(len(removed)),
+		List:  removed,
+	}, err
+}
+
+// DownloadTorrent is the resolver for the downloadTorrent field.
+func (r *mutationResolver) DownloadTorrent(ctx context.Context, infohash string, file *string) (*model.DownloadTorrentResponse, error) {
+	f := "" // empty path when no specific file is requested — presumably means the whole torrent; confirm against Service.Download
+	if file != nil {
+		f = *file
+	}
+
+	err := r.Service.Download(ctx, &service.TorrentDownloadTask{
+		ID:       uuid.New(),
+		InfoHash: aih.FromHexString(infohash), // NOTE(review): infohash is not validated here — verify FromHexString's behavior on malformed input
+		File:     f,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &model.DownloadTorrentResponse{}, nil // Task is left nil despite creating a task ID above — TODO confirm intended
+}
+
+// DedupeStorage is the resolver for the dedupeStorage field.
+func (r *mutationResolver) DedupeStorage(ctx context.Context) (int64, error) {
+	n, err := r.Service.Storage.Dedupe(ctx) // n: result of the storage dedupe pass
+	if err != nil {
+		return 0, err
+	}
+	return int64(n), nil
+}
+
+// Mutation returns graph.MutationResolver implementation.
+func (r *Resolver) Mutation() graph.MutationResolver { return &mutationResolver{r} }
+
+type mutationResolver struct{ *Resolver } // embeds the root Resolver so mutation resolvers share its services
diff --git a/src/delivery/graphql/resolver/query.resolvers.go b/src/delivery/graphql/resolver/query.resolvers.go
new file mode 100644
index 0000000..1b6aa43
--- /dev/null
+++ b/src/delivery/graphql/resolver/query.resolvers.go
@@ -0,0 +1,142 @@
+package resolver
+
+// This file will be automatically regenerated based on the schema, any resolver implementations
+// will be copied through when generating and any unknown code will be moved to the end.
+// Code generated by github.com/99designs/gqlgen version v0.17.43
+
+import (
+	"context"
+	"io/fs"
+
+	graph "git.kmsign.ru/royalcat/tstor/src/delivery/graphql"
+	"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model"
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+)
+
+// Torrents is the resolver for the torrents field.
+func (r *queryResolver) Torrents(ctx context.Context, filter *model.TorrentsFilter, pagination *model.Pagination) ([]*model.Torrent, error) { // NOTE(review): pagination is accepted but not applied
+	torrents, err := r.Service.ListTorrents(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	filterFuncs := []func(torrent *model.Torrent) bool{} // one predicate per active filter field; AND-ed together below
+
+	if filter != nil {
+		if filter.BytesCompleted != nil {
+			filterFuncs = append(filterFuncs, func(torrent *model.Torrent) bool {
+				return filter.BytesCompleted.Include(torrent.BytesCompleted)
+			})
+		}
+		if filter.BytesMissing != nil {
+			filterFuncs = append(filterFuncs, func(torrent *model.Torrent) bool {
+				return filter.BytesMissing.Include(torrent.BytesMissing)
+			})
+		}
+		if filter.PeersCount != nil {
+			filterFuncs = append(filterFuncs, func(torrent *model.Torrent) bool {
+				return filter.PeersCount.Include( // counts live peer connections, not tracker-reported peers
+					int64(len(torrent.T.Torrent().PeerConns())),
+				)
+			})
+		}
+
+	}
+
+	filterFunc := func(torrent *model.Torrent) bool { // a torrent passes only if every predicate accepts it
+		for _, f := range filterFuncs {
+			if !f(torrent) {
+				return false
+			}
+		}
+		return true
+	}
+
+	tr := []*model.Torrent{}
+	for _, t := range torrents {
+		d := model.MapTorrent(t) // map service-layer torrent into the GraphQL model before filtering
+
+		if !filterFunc(d) {
+			continue
+		}
+		tr = append(tr, d)
+	}
+
+	return tr, nil
+}
+
+type dirEntry interface { // minimal view shared by fs.DirEntry, fs.FileInfo and the specialized vfs types
+	Name() string
+	IsDir() bool
+}
+
+func fillDirEntry(e dirEntry) model.DirEntry {
+	switch e.(type) { // NOTE(review): `switch e := e.(type)` would remove the repeated assertions below
+	case *vfs.ArchiveFS:
+		e := e.(*vfs.ArchiveFS)
+		return model.ArchiveFs{
+			Name: e.Name(),
+			Size: e.Size(),
+		}
+	case *vfs.ResolverFS:
+		e := e.(*vfs.ResolverFS)
+		return model.ResolverFs{
+			Name: e.Name(),
+		}
+	case *vfs.TorrentFs:
+		e := e.(*vfs.TorrentFs)
+		return model.TorrentFs{
+			Name:    e.Name(),
+			Torrent: model.MapTorrent(e.Torrent),
+		}
+	default:
+		if e.IsDir() { // plain directory with no specialized vfs type
+			return model.Dir{
+				Name: e.Name(),
+			}
+		}
+		if de, ok := e.(fs.DirEntry); ok {
+			info, _ := de.Info() // NOTE(review): Info error ignored — a nil info here would panic on Size()
+			return model.File{
+				Name: e.Name(),
+				Size: info.Size(),
+			}
+		}
+
+		if fe, ok := e.(fs.FileInfo); ok {
+			return model.File{
+				Name: fe.Name(),
+				Size: fe.Size(),
+			}
+		}
+	}
+
+	panic("this dir entry is strange af")
+}
+
+// FsListDir is the resolver for the fsListDir field.
+func (r *queryResolver) FsListDir(ctx context.Context, path string) (*model.ListDirResponse, error) {
+	root, err := r.VFS.Stat(ctx, path) // stat the directory itself for the Root field
+	if err != nil {
+		return nil, err
+	}
+
+	entries, err := r.VFS.ReadDir(ctx, path)
+	if err != nil {
+		return nil, err
+	}
+	out := []model.DirEntry{}
+	for _, e := range entries {
+		out = append(out, fillDirEntry(e)) // fillDirEntry panics on entry kinds it does not recognize
+	}
+
+	return &model.ListDirResponse{
+		Root:    fillDirEntry(root),
+		Entries: out,
+	}, nil
+}
+
+// Query returns graph.QueryResolver implementation.
+func (r *Resolver) Query() graph.QueryResolver { return &queryResolver{r} }
+
+type queryResolver struct{ *Resolver }
diff --git a/src/delivery/graphql/resolver/resolver.go b/src/delivery/graphql/resolver/resolver.go
new file mode 100644
index 0000000..9a76120
--- /dev/null
+++ b/src/delivery/graphql/resolver/resolver.go
@@ -0,0 +1,15 @@
+package resolver
+
+import (
+	"git.kmsign.ru/royalcat/tstor/src/host/service"
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+)
+
+// This file will not be regenerated automatically.
+//
+// It serves as dependency injection for your app, add any dependencies you require here.
+
+type Resolver struct {
+	Service *service.Service
+	VFS     vfs.Filesystem
+}
diff --git a/src/delivery/graphql/resolver/subscription.resolvers.go b/src/delivery/graphql/resolver/subscription.resolvers.go
new file mode 100644
index 0000000..f30af50
--- /dev/null
+++ b/src/delivery/graphql/resolver/subscription.resolvers.go
@@ -0,0 +1,54 @@
+package resolver
+
+// This file will be automatically regenerated based on the schema, any resolver implementations
+// will be copied through when generating and any unknown code will be moved to the end.
+// Code generated by github.com/99designs/gqlgen version v0.17.43
+
+import (
+	"context"
+	"fmt"
+
+	graph "git.kmsign.ru/royalcat/tstor/src/delivery/graphql"
+	"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model"
+)
+
+// TaskProgress is the resolver for the taskProgress field.
+func (r *subscriptionResolver) TaskProgress(ctx context.Context, taskID string) (<-chan model.Progress, error) {
+	panic(fmt.Errorf("not implemented: TaskProgress - taskProgress")) // gqlgen-generated stub; panics until implemented
+}
+
+// TorrentDownloadUpdates is the resolver for the torrentDownloadUpdates field.
+func (r *subscriptionResolver) TorrentDownloadUpdates(ctx context.Context) (<-chan *model.TorrentProgress, error) {
+	out := make(chan *model.TorrentProgress) // unbuffered: forwarding paces with the consumer
+	progress, err := r.Service.DownloadProgress(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		defer close(out) // signal end-of-stream when the source closes or ctx is cancelled
+		for p := range progress {
+			if p.Torrent == nil {
+				fmt.Println("nil torrent") // NOTE(review): stray stdout print; prefer structured logging
+				continue
+			}
+			po := &model.TorrentProgress{
+				Torrent: model.MapTorrent(p.Torrent),
+				Current: p.Current,
+				Total:   p.Total,
+			}
+			select {
+			case <-ctx.Done(): // subscriber gone; stop forwarding
+				return
+			case out <- po:
+			}
+		}
+	}()
+
+	return out, nil
+}
+
+// Subscription returns graph.SubscriptionResolver implementation.
+func (r *Resolver) Subscription() graph.SubscriptionResolver { return &subscriptionResolver{r} }
+
+type subscriptionResolver struct{ *Resolver }
diff --git a/src/delivery/graphql/resolver/torrent.resolvers.go b/src/delivery/graphql/resolver/torrent.resolvers.go
new file mode 100644
index 0000000..ef231ad
--- /dev/null
+++ b/src/delivery/graphql/resolver/torrent.resolvers.go
@@ -0,0 +1,76 @@
+package resolver
+
+// This file will be automatically regenerated based on the schema, any resolver implementations
+// will be copied through when generating and any unknown code will be moved to the end.
+// Code generated by github.com/99designs/gqlgen version v0.17.43
+
+import (
+	"context"
+
+	graph "git.kmsign.ru/royalcat/tstor/src/delivery/graphql"
+	"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/model"
+)
+
+// Name is the resolver for the name field.
+func (r *torrentResolver) Name(ctx context.Context, obj *model.Torrent) (string, error) {
+	return obj.T.Name(), nil
+}
+
+// Files is the resolver for the files field.
+func (r *torrentResolver) Files(ctx context.Context, obj *model.Torrent) ([]*model.TorrentFile, error) {
+	out := []*model.TorrentFile{}
+	files, err := obj.T.Files(ctx)
+	if err != nil {
+		return nil, err
+	}
+	for _, f := range files {
+		out = append(out, &model.TorrentFile{
+			Filename:       f.DisplayPath(),
+			Size:           f.Length(),
+			BytesCompleted: f.BytesCompleted(),
+			F:              f, // keep the underlying file handle for nested resolvers
+		})
+	}
+	return out, nil
+}
+
+// ExcludedFiles is the resolver for the excludedFiles field.
+func (r *torrentResolver) ExcludedFiles(ctx context.Context, obj *model.Torrent) ([]*model.TorrentFile, error) {
+	out := []*model.TorrentFile{} // NOTE(review): always returns an empty list until the commented code below is reintroduced
+	// files, err := obj.T.ExcludedFiles()
+	// if err != nil {
+	// 	return nil, err
+	// }
+	// for _, f := range files {
+	// 	out = append(out, &model.TorrentFile{
+	// 		Filename: f.DisplayPath(),
+	// 		Size:     f.Length(),
+	// 		F:        f,
+	// 	})
+	// }
+	return out, nil
+}
+
+// Peers is the resolver for the peers field.
+func (r *torrentResolver) Peers(ctx context.Context, obj *model.Torrent) ([]*model.TorrentPeer, error) {
+	peers := []*model.TorrentPeer{}
+	for _, peer := range obj.T.Torrent().PeerConns() {
+		clientName, _ := peer.PeerClientName.Load().(string) // failed assertion leaves clientName empty, which is acceptable here
+
+		peers = append(peers, &model.TorrentPeer{
+			IP:           peer.RemoteAddr.String(),
+			DownloadRate: peer.DownloadRate(),
+
+			Discovery:  model.MapPeerSource(peer.Discovery),
+			Port:       int64(peer.PeerListenPort),
+			ClientName: clientName,
+			F:          peer, // keep the live connection for nested resolvers
+		})
+	}
+	return peers, nil
+}
+
+// Torrent returns graph.TorrentResolver implementation.
+func (r *Resolver) Torrent() graph.TorrentResolver { return &torrentResolver{r} }
+
+type torrentResolver struct{ *Resolver }
diff --git a/src/http/http.go b/src/delivery/http.go
similarity index 72%
rename from src/http/http.go
rename to src/delivery/http.go
index 7f2fa27..92dbc55 100644
--- a/src/http/http.go
+++ b/src/delivery/http.go
@@ -1,24 +1,29 @@
-package http
+package delivery
 
 import (
 	"fmt"
+	"log/slog"
 	"net/http"
 
 	"git.kmsign.ru/royalcat/tstor"
 	"git.kmsign.ru/royalcat/tstor/src/config"
-	"git.kmsign.ru/royalcat/tstor/src/host/torrent"
+	"git.kmsign.ru/royalcat/tstor/src/host/service"
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
 	"github.com/anacrolix/missinggo/v2/filecache"
+	"github.com/gin-contrib/pprof"
 	"github.com/gin-gonic/gin"
-	"github.com/rs/zerolog/log"
 	"github.com/shurcooL/httpfs/html/vfstemplate"
 )
 
-func New(fc *filecache.Cache, ss *torrent.Stats, s *torrent.Service, logPath string, cfg *config.Config) error {
+func New(fc *filecache.Cache, ss *service.Stats, s *service.Service, vfs vfs.Filesystem, logPath string, cfg *config.Settings) error {
+	log := slog.With()
+
 	gin.SetMode(gin.ReleaseMode)
 	r := gin.New()
 	r.Use(gin.Recovery())
 	r.Use(gin.ErrorLogger())
 	r.Use(Logger())
+	pprof.Register(r)
 
 	r.GET("/assets/*filepath", func(c *gin.Context) {
 		c.FileFromFS(c.Request.URL.Path, http.FS(tstor.Assets))
@@ -35,20 +40,19 @@ func New(fc *filecache.Cache, ss *torrent.Stats, s *torrent.Service, logPath str
 	// r.GET("/routes", routesHandler(ss))
 	r.GET("/logs", logsHandler)
 	r.GET("/servers", serversFoldersHandler())
+	r.Any("/graphql", gin.WrapH(GraphQLHandler(s, vfs)))
 
 	api := r.Group("/api")
 	{
 		api.GET("/log", apiLogHandler(logPath))
 		api.GET("/status", apiStatusHandler(fc, ss))
 		// api.GET("/servers", apiServersHandler(tss))
-
 		// api.GET("/routes", apiRoutesHandler(ss))
 		// api.POST("/routes/:route/torrent", apiAddTorrentHandler(s))
 		// api.DELETE("/routes/:route/torrent/:torrent_hash", apiDelTorrentHandler(s))
-
 	}
 
-	log.Info().Str("host", fmt.Sprintf("%s:%d", cfg.WebUi.IP, cfg.WebUi.Port)).Msg("starting webserver")
+	log.Info("starting webserver", "host", fmt.Sprintf("%s:%d", cfg.WebUi.IP, cfg.WebUi.Port))
 
 	if err := r.Run(fmt.Sprintf("%s:%d", cfg.WebUi.IP, cfg.WebUi.Port)); err != nil {
 		return fmt.Errorf("error initializing server: %w", err)
@@ -58,7 +62,7 @@ func New(fc *filecache.Cache, ss *torrent.Stats, s *torrent.Service, logPath str
 }
 
 func Logger() gin.HandlerFunc {
-	l := log.Logger.With().Str("component", "http").Logger()
+	l := slog.With("component", "http")
 	return func(c *gin.Context) {
 		path := c.Request.URL.Path
 		raw := c.Request.URL.RawQuery
@@ -74,11 +78,11 @@ func Logger() gin.HandlerFunc {
 		s := c.Writer.Status()
 		switch {
 		case s >= 400 && s < 500:
-			l.Warn().Str("path", path).Int("status", s).Msg(msg)
+			l.Warn(msg, "path", path, "status", s)
 		case s >= 500:
-			l.Error().Str("path", path).Int("status", s).Msg(msg)
+			l.Error(msg, "path", path, "status", s)
 		default:
-			l.Debug().Str("path", path).Int("status", s).Msg(msg)
+			l.Debug(msg, "path", path, "status", s)
 		}
 	}
 }
diff --git a/src/http/model.go b/src/delivery/model.go
similarity index 88%
rename from src/http/model.go
rename to src/delivery/model.go
index c752eb0..3088d80 100644
--- a/src/http/model.go
+++ b/src/delivery/model.go
@@ -1,4 +1,4 @@
-package http
+package delivery
 
 type RouteAdd struct {
 	Magnet string `json:"magnet" binding:"required"`
diff --git a/src/delivery/router.go b/src/delivery/router.go
new file mode 100644
index 0000000..e9e108f
--- /dev/null
+++ b/src/delivery/router.go
@@ -0,0 +1,44 @@
+package delivery
+
+import (
+	"net/http"
+
+	graph "git.kmsign.ru/royalcat/tstor/src/delivery/graphql"
+	"git.kmsign.ru/royalcat/tstor/src/delivery/graphql/resolver"
+	"git.kmsign.ru/royalcat/tstor/src/host/service"
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+	"github.com/99designs/gqlgen/graphql"
+	"github.com/99designs/gqlgen/graphql/handler"
+	"github.com/99designs/gqlgen/graphql/handler/extension"
+	"github.com/99designs/gqlgen/graphql/handler/lru"
+	"github.com/99designs/gqlgen/graphql/handler/transport"
+	"github.com/ravilushqa/otelgqlgen"
+)
+
+func GraphQLHandler(service *service.Service, vfs vfs.Filesystem) http.Handler {
+	graphqlHandler := handler.NewDefaultServer(
+		graph.NewExecutableSchema(
+			graph.Config{
+				Resolvers: &resolver.Resolver{Service: service, VFS: vfs},
+				Directives: graph.DirectiveRoot{
+					OneOf: graph.OneOf,
+				},
+			},
+		),
+	)
+
+	graphqlHandler.AddTransport(&transport.POST{})
+	graphqlHandler.AddTransport(&transport.Websocket{})
+	graphqlHandler.AddTransport(&transport.SSE{})
+	graphqlHandler.AddTransport(&transport.UrlEncodedForm{})
+	graphqlHandler.SetQueryCache(lru.New(1000))
+	graphqlHandler.Use(extension.Introspection{})
+	graphqlHandler.Use(extension.AutomaticPersistedQuery{Cache: lru.New(100)})
+	graphqlHandler.Use(otelgqlgen.Middleware(
+		otelgqlgen.WithCreateSpanFromFields(func(ctx *graphql.FieldContext) bool {
+			return ctx.Field.Directives.ForName("link") != nil
+		}),
+	))
+
+	return graphqlHandler
+}
diff --git a/src/http/web.go b/src/delivery/web.go
similarity index 96%
rename from src/http/web.go
rename to src/delivery/web.go
index 860347f..9bcc202 100644
--- a/src/http/web.go
+++ b/src/delivery/web.go
@@ -1,4 +1,4 @@
-package http
+package delivery
 
 import (
 	"net/http"
diff --git a/src/mounts/fuse/handler.go b/src/export/fuse/handler.go
similarity index 73%
rename from src/mounts/fuse/handler.go
rename to src/export/fuse/handler.go
index c3b4af9..83c635f 100644
--- a/src/mounts/fuse/handler.go
+++ b/src/export/fuse/handler.go
@@ -1,14 +1,15 @@
+//go:build cgo
+
 package fuse
 
 import (
+	"log/slog"
 	"os"
 	"path/filepath"
 	"runtime"
 
 	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
 	"github.com/billziss-gh/cgofuse/fuse"
-
-	"github.com/rs/zerolog/log"
 )
 
 type Handler struct {
@@ -16,16 +17,20 @@ type Handler struct {
 	path           string
 
 	host *fuse.FileSystemHost
+	log  *slog.Logger
 }
 
 func NewHandler(fuseAllowOther bool, path string) *Handler {
 	return &Handler{
 		fuseAllowOther: fuseAllowOther,
 		path:           path,
+		log:            slog.With("component", "fuse-handler").With("path", path),
 	}
 }
 
 func (s *Handler) Mount(vfs vfs.Filesystem) error {
+	log := s.log.With("function", "Mount")
+
 	folder := s.path
 	// On windows, the folder must don't exist
 	if runtime.GOOS == "windows" {
@@ -38,7 +43,7 @@ func (s *Handler) Mount(vfs vfs.Filesystem) error {
 		}
 	}
 
-	host := fuse.NewFileSystemHost(NewFS(vfs))
+	host := fuse.NewFileSystemHost(newFuseFS(vfs))
 
 	// TODO improve error handling here
 	go func() {
@@ -50,18 +55,20 @@ func (s *Handler) Mount(vfs vfs.Filesystem) error {
 
 		ok := host.Mount(s.path, config)
 		if !ok {
-			log.Error().Str("path", s.path).Msg("error trying to mount filesystem")
+			log.Error("error trying to mount filesystem")
 		}
 	}()
 
 	s.host = host
 
-	log.Info().Str("path", s.path).Msg("starting FUSE mount")
+	log.Info("starting FUSE mount", "path", s.path)
 
 	return nil
 }
 
 func (s *Handler) Unmount() {
+	log := s.log.With("function", "Unmount")
+
 	if s.host == nil {
 		return
 	}
@@ -69,6 +76,6 @@ func (s *Handler) Unmount() {
 	ok := s.host.Unmount()
 	if !ok {
 		//TODO try to force unmount if possible
-		log.Error().Str("path", s.path).Msg("unmount failed")
+		log.Error("unmount failed")
 	}
 }
diff --git a/src/export/fuse/handler_nocgo.go b/src/export/fuse/handler_nocgo.go
new file mode 100644
index 0000000..4e0e92e
--- /dev/null
+++ b/src/export/fuse/handler_nocgo.go
@@ -0,0 +1,23 @@
+//go:build !cgo
+
+package fuse
+
+import (
+	"fmt"
+
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+)
+
+type Handler struct{} // no-cgo stub mirroring the cgo Handler's exported surface
+
+func NewHandler(fuseAllowOther bool, path string) *Handler {
+	return &Handler{} // arguments intentionally ignored in the no-cgo build
+}
+
+func (s *Handler) Mount(vfs vfs.Filesystem) error {
+	return fmt.Errorf("tstor was build without fuse support") // NOTE(review): message typo — should read "built"
+
+}
+
+func (s *Handler) Unmount() {
+}
diff --git a/src/mounts/fuse/mount.go b/src/export/fuse/mount.go
similarity index 63%
rename from src/mounts/fuse/mount.go
rename to src/export/fuse/mount.go
index e57a6f2..9248b70 100644
--- a/src/mounts/fuse/mount.go
+++ b/src/export/fuse/mount.go
@@ -1,43 +1,46 @@
+//go:build cgo
+
 package fuse
 
 import (
+	"context"
 	"errors"
 	"io"
+	"log/slog"
 	"math"
 	"os"
 	"sync"
 
 	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
 	"github.com/billziss-gh/cgofuse/fuse"
-
-	"github.com/rs/zerolog"
-	"github.com/rs/zerolog/log"
 )
 
-type FS struct {
+type fuseFS struct {
 	fuse.FileSystemBase
 	fh *fileHandler
 
-	log zerolog.Logger
+	log *slog.Logger
 }
 
-func NewFS(fs vfs.Filesystem) fuse.FileSystemInterface {
-	l := log.Logger.With().Str("component", "fuse").Logger()
-	return &FS{
+func newFuseFS(fs vfs.Filesystem) fuse.FileSystemInterface {
+	l := slog.With("component", "fuse")
+	return &fuseFS{
 		fh:  &fileHandler{fs: fs},
 		log: l,
 	}
 }
 
-func (fs *FS) Open(path string, flags int) (errc int, fh uint64) {
+func (fs *fuseFS) Open(path string, flags int) (errc int, fh uint64) {
+	log := fs.log.With("function", "Open", "path", path, "flags", flags)
+
 	fh, err := fs.fh.OpenHolder(path)
 	if os.IsNotExist(err) {
-		fs.log.Debug().Str("path", path).Msg("file does not exists")
+		log.Debug("file does not exists")
 		return -fuse.ENOENT, fhNone
 
 	}
 	if err != nil {
-		fs.log.Error().Err(err).Str("path", path).Msg("error opening file")
+		log.Error("error opening file", "err", err)
 		return -fuse.EIO, fhNone
 	}
 
@@ -46,15 +49,16 @@ func (fs *FS) Open(path string, flags int) (errc int, fh uint64) {
 
 // Unlink removes a file.
 // The FileSystemBase implementation returns -ENOSYS.
-func (fs *FS) Unlink(path string) int {
+func (fs *fuseFS) Unlink(path string) int {
 	return -fuse.ENOSYS
 }
 
-func (fs *FS) Opendir(path string) (errc int, fh uint64) {
+func (fs *fuseFS) Opendir(path string) (errc int, fh uint64) {
 	return fs.Open(path, 0)
 }
 
-func (fs *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
+func (fs *fuseFS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
+	log := fs.log.With("function", "Getattr", "path", path, "filehandler", fh)
 	if path == "/" {
 		stat.Mode = fuse.S_IFDIR | 0555
 		return 0
@@ -62,12 +66,12 @@ func (fs *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
 
 	file, err := fs.fh.GetFile(path, fh)
 	if os.IsNotExist(err) {
-		fs.log.Debug().Str("path", path).Msg("file does not exists")
+		log.Debug("file does not exists", "error", err)
 		return -fuse.ENOENT
 
 	}
 	if err != nil {
-		fs.log.Error().Err(err).Str("path", path).Msg("error getting holder when reading file attributes")
+		log.Error("error getting holder when reading file attributes", "error", err)
 		return -fuse.EIO
 	}
 
@@ -81,15 +85,16 @@ func (fs *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
 	return 0
 }
 
-func (fs *FS) Read(path string, dest []byte, off int64, fh uint64) int {
+func (fs *fuseFS) Read(path string, dest []byte, off int64, fh uint64) int {
+	log := fs.log.With("function", "Read", "path", path, "offset", off, "filehandler", fh)
 	file, err := fs.fh.GetFile(path, fh)
 	if os.IsNotExist(err) {
-		fs.log.Error().Err(err).Str("path", path).Msg("file not found on READ operation")
+		log.Error("file not found on READ operation", "path", path, "error", err)
 		return -fuse.ENOENT
 
 	}
 	if err != nil {
-		fs.log.Error().Err(err).Str("path", path).Msg("error getting holder reading data from file")
+		log.Error("error getting holder reading data from file", "path", path, "error", err) // use the contextual logger like the rest of this function
 		return -fuse.EIO
 	}
 
@@ -100,9 +105,9 @@ func (fs *FS) Read(path string, dest []byte, off int64, fh uint64) int {
 
 	buf := dest[:end]
 
-	n, err := file.ReadAt(buf, off)
+	n, err := file.ReadAt(context.TODO(), buf, off)
 	if err != nil && err != io.EOF {
-		log.Error().Err(err).Str("path", path).Msg("error reading data")
+		log.Error("error reading data", "error", err) // attach err: the slog conversion had dropped the failure cause
 		return -fuse.EIO
 	}
 
 	return n
 }
 }
 
-func (fs *FS) Release(path string, fh uint64) int {
+func (fs *fuseFS) Release(path string, fh uint64) int {
+	log := fs.log.With("function", "Release", "path", path, "filehandler", fh)
 	if err := fs.fh.Remove(fh); err != nil {
-		fs.log.Error().Err(err).Str("path", path).Msg("error getting holder when releasing file")
+		log.Error("error getting holder when releasing file", "path", path, "error", err)
 		return -fuse.EIO
 	}
 
 	return 0
 }
 
-func (fs *FS) Releasedir(path string, fh uint64) int {
+func (fs *fuseFS) Releasedir(path string, fh uint64) int {
 	return fs.Release(path, fh)
 }
 
-func (fs *FS) Readdir(path string,
+func (fs *fuseFS) Readdir(path string,
 	fill func(name string, stat *fuse.Stat_t, ofst int64) bool,
 	ofst int64,
 	fh uint64) (errc int) {
+	log := fs.log.With("function", "Readdir", "path", path, "offset", ofst, "filehandler", fh)
 	fill(".", nil, 0)
 	fill("..", nil, 0)
 
 	//TODO improve this function to make use of fh index if possible
 	paths, err := fs.fh.ListDir(path)
 	if err != nil {
-		fs.log.Error().Err(err).Str("path", path).Msg("error reading directory")
+		log.Error("error reading directory", "error", err)
 		return -fuse.ENOSYS
 	}
 
 	for _, p := range paths {
 		if !fill(p, nil, 0) {
-			fs.log.Error().Str("path", path).Msg("error adding directory")
+			log.Error("error adding directory")
 			break
 		}
 	}
@@ -172,13 +179,13 @@ func (fh *fileHandler) ListDir(path string) ([]string, error) {
 	fh.mu.RLock()
 	defer fh.mu.RUnlock()
 
-	var out []string
-	files, err := fh.fs.ReadDir(path)
+	files, err := fh.fs.ReadDir(context.TODO(), path)
 	if err != nil {
 		return nil, err
 	}
-	for p := range files {
-		out = append(out, p)
+	out := make([]string, 0, len(files))
+	for _, p := range files {
+		out = append(out, p.Name())
 	}
 
 	return out, nil
@@ -231,7 +238,7 @@ func (fh *fileHandler) Remove(fhi uint64) error {
 		return ErrHolderEmpty
 	}
 
-	if err := f.Close(); err != nil {
+	if err := f.Close(context.TODO()); err != nil {
 		return err
 	}
 
@@ -241,7 +248,7 @@ func (fh *fileHandler) Remove(fhi uint64) error {
 }
 
 func (fh *fileHandler) lookupFile(path string) (vfs.File, error) {
-	file, err := fh.fs.Open(path)
+	file, err := fh.fs.Open(context.TODO(), path)
 	if err != nil {
 		return nil, err
 	}
diff --git a/src/mounts/fuse/mount_test.go b/src/export/fuse/mount_test.go
similarity index 79%
rename from src/mounts/fuse/mount_test.go
rename to src/export/fuse/mount_test.go
index 0717955..8280bb1 100644
--- a/src/mounts/fuse/mount_test.go
+++ b/src/export/fuse/mount_test.go
@@ -1,3 +1,5 @@
+//go:build cgo
+
 package fuse
 
 import (
@@ -22,8 +24,8 @@ func TestHandler(t *testing.T) {
 
 	h := NewHandler(false, p)
 
-	mem := vfs.NewMemoryFS(map[string]*vfs.MemoryFile{
-		"/test.txt": vfs.NewMemoryFile([]byte("test")),
+	mem := vfs.NewMemoryFS("/", map[string]*vfs.MemoryFile{
+		"/test.txt": vfs.NewMemoryFile("test.txt", []byte("test")),
 	})
 
 	err := h.Mount(mem)
@@ -49,8 +51,8 @@ func TestHandlerDriveLetter(t *testing.T) {
 
 	h := NewHandler(false, p)
 
-	mem := vfs.NewMemoryFS(map[string]*vfs.MemoryFile{
-		"/test.txt": vfs.NewMemoryFile([]byte("test")),
+	mem := vfs.NewMemoryFS("/", map[string]*vfs.MemoryFile{
+		"/test.txt": vfs.NewMemoryFile("test.txt", []byte("test")),
 	})
 
 	err := h.Mount(mem)
diff --git a/src/export/httpfs/httpfs.go b/src/export/httpfs/httpfs.go
new file mode 100644
index 0000000..9a03a23
--- /dev/null
+++ b/src/export/httpfs/httpfs.go
@@ -0,0 +1,132 @@
+package httpfs
+
+import (
+	"context"
+	"io"
+	"io/fs"
+	"net/http"
+	"os"
+	"sync"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+var _ http.FileSystem = &HTTPFS{}
+
+var httpFsTracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/src/export/httpfs.HTTPFS")
+
+type HTTPFS struct {
+	fs vfs.Filesystem
+}
+
+func NewHTTPFS(fs vfs.Filesystem) *HTTPFS {
+	return &HTTPFS{fs: fs}
+}
+
+func (fs *HTTPFS) ctx() context.Context {
+	return context.Background()
+}
+
+func (hfs *HTTPFS) Open(name string) (http.File, error) {
+	ctx, span := httpFsTracer.Start(hfs.ctx(), "Open",
+		trace.WithAttributes(attribute.String("name", name)),
+	)
+	defer span.End()
+	f, err := hfs.fs.Open(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+
+	var fis []fs.FileInfo
+	if f.IsDir() {
+		// TODO make this lazy
+		fis, err = hfs.filesToFileInfo(name) // pre-read the listing so Readdir can serve from memory
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return newHTTPFile(ctx, f, fis), nil // NOTE(review): ctx outlives the span ended above — confirm reads on the returned file trace as intended
+}
+
+func (hfs *HTTPFS) filesToFileInfo(name string) ([]fs.FileInfo, error) {
+	ctx, span := httpFsTracer.Start(hfs.ctx(), "filesToFileInfo", // span was mislabeled "Open" (copy-paste); name the actual operation
+		trace.WithAttributes(attribute.String("name", name)),
+	)
+	defer span.End()
+
+	files, err := hfs.fs.ReadDir(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make([]os.FileInfo, 0, len(files))
+	for _, f := range files {
+		info, err := f.Info()
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, info)
+	}
+
+	return out, nil
+}
+
+var _ http.File = (*httpFile)(nil)
+
+type httpFile struct {
+	f vfs.File
+
+	io.ReadSeekCloser
+
+	mu sync.Mutex
+	// dirPos is protected by mu.
+	dirPos     int
+	dirContent []os.FileInfo
+}
+
+func newHTTPFile(ctx context.Context, f vfs.File, dirContent []os.FileInfo) *httpFile {
+	return &httpFile{
+		f:              f,
+		dirContent:     dirContent,
+		ReadSeekCloser: ctxio.IoReadSeekCloserWrapper(ctx, f, f.Size()), // adapts the ctx-aware vfs file to the io interfaces http.File requires
+	}
+}
+
+func (f *httpFile) Readdir(count int) ([]fs.FileInfo, error) {
+	if !f.f.IsDir() {
+		return nil, os.ErrInvalid
+	}
+
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	old := f.dirPos
+	if old >= len(f.dirContent) {
+		// The os.File Readdir docs say that at the end of a directory,
+		// the error is io.EOF if count > 0 and nil if count <= 0.
+		if count > 0 {
+			return nil, io.EOF
+		}
+		return nil, nil
+	}
+	if count > 0 {
+		f.dirPos += count
+		if f.dirPos > len(f.dirContent) {
+			f.dirPos = len(f.dirContent)
+		}
+	} else {
+		f.dirPos = len(f.dirContent)
+		old = 0 // count <= 0 returns the whole directory, matching os.File.Readdir semantics
+	}
+
+	return f.dirContent[old:f.dirPos], nil
+}
+
+func (f *httpFile) Stat() (fs.FileInfo, error) {
+	return f.f.Info()
+}
diff --git a/src/export/nfs/handler.go b/src/export/nfs/handler.go
new file mode 100644
index 0000000..18ef6bd
--- /dev/null
+++ b/src/export/nfs/handler.go
@@ -0,0 +1,29 @@
+package nfs
+
+import (
+	"log/slog"
+	"time"
+
+	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+	nfshelper "git.kmsign.ru/royalcat/tstor/pkg/go-nfs/helpers"
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+	"git.kmsign.ru/royalcat/tstor/src/log"
+)
+
+func NewNFSv3Handler(fs vfs.Filesystem) (nfs.Handler, error) {
+	nfslog := slog.With("component", "nfs")
+	nfs.SetLogger(log.NewNFSLog(nfslog)) // route the nfs package's logging through slog
+	nfs.Log.SetLevel(nfs.InfoLevel)
+
+	bfs := &fsWrapper{fs: fs, log: nfslog, timeout: time.Minute}
+	handler := nfshelper.NewNullAuthHandler(bfs) // null auth: any client is accepted
+
+	cacheHelper, err := NewKvHandler(handler, bfs)
+	if err != nil {
+		return nil, err
+	}
+
+	//  cacheHelper := NewCachingHandler(handler)
+
+	return cacheHelper, nil
+}
diff --git a/src/export/nfs/kvhandler.go b/src/export/nfs/kvhandler.go
new file mode 100644
index 0000000..c814405
--- /dev/null
+++ b/src/export/nfs/kvhandler.go
@@ -0,0 +1,127 @@
+package nfs
+
+import (
+	"context"
+	"fmt"
+	"path"
+	"slices"
+	"time"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+	"git.kmsign.ru/royalcat/tstor/src/config"
+
+	"github.com/google/uuid"
+	"github.com/royalcat/kv"
+)
+
+const lifetime = time.Hour * 24
+
+// NewKvHandler provides a basic to/from-file handle cache that can be tuned with a smaller cache of active directory listings.
+func NewKvHandler(h nfs.Handler, fs nfs.Filesystem) (nfs.Handler, error) {
+	activeHandles, err := kv.NewBadgerKVMarhsler[uuid.UUID, []string](path.Join(config.Config.Mounts.NFS.CachePath, "handlers")) // disk-backed store so handles survive restarts
+	if err != nil {
+		return nil, err
+	}
+
+	// if s, ok := activeHandles.(kv.BadgerStore); ok {
+	// 	db := s.BadgerDB()
+	// enable with managed database
+	// 	go func() {
+	// 		for n := range time.NewTimer(lifetime / 2).C {
+	// 			db.SetDiscardTs(uint64(n.Add(-lifetime).Unix()))
+	// 		}
+	// 	}()
+	// }
+
+	return &CachingHandler{
+		Handler:       h,
+		fs:            fs,
+		activeHandles: activeHandles,
+	}, nil
+}
+
+// CachingHandler implements to/from handle via an LRU cache.
+type CachingHandler struct {
+	nfs.Handler
+
+	fs            nfs.Filesystem
+	activeHandles kv.Store[uuid.UUID, []string]
+}
+
+// ToHandle takes a file and represents it with an opaque handle to reference it.
+// In stateless nfs (when it's serving a unix fs) this can be the device + inode
+// but we can generalize with a stateful local cache of handed out IDs.
+func (c *CachingHandler) ToHandle(_ nfs.Filesystem, path []string) []byte {
+
+	ctx := context.Background()
+
+	var id uuid.UUID
+	c.activeHandles.Range(ctx, func(k uuid.UUID, v []string) bool { // linear scan for an existing handle: O(active handles) per call
+		if slices.Equal(path, v) {
+			id = k
+			return false // found — stop iterating
+		}
+		return true
+	})
+
+	if id != uuid.Nil {
+		return id[:]
+	}
+
+	id = uuid.New()
+
+	c.activeHandles.Set(ctx, id, path) // NOTE(review): Set error ignored; concurrent calls could also mint duplicate ids for one path
+
+	return id[:]
+}
+
+// FromHandle converts from an opaque handle to the file it represents
+func (c *CachingHandler) FromHandle(fh []byte) (nfs.Filesystem, []string, error) {
+	ctx := context.Background()
+
+	id, err := uuid.FromBytes(fh)
+	if err != nil {
+		return nil, []string{}, err // malformed handle (wrong length for a uuid)
+	}
+
+	paths, found, err := c.activeHandles.Get(ctx, id)
+	if err != nil {
+		return nil, nil, fmt.Errorf("kv error: %w", err)
+	}
+
+	if found {
+		return c.fs, paths, nil
+	}
+
+	return nil, []string{}, &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale} // unknown id: tell the client its handle is stale
+}
+
+func (c *CachingHandler) InvalidateHandle(fs nfs.Filesystem, handle []byte) error {
+	ctx := context.Background()
+	// Remove from cache
+	id, err := uuid.FromBytes(handle)
+	if err != nil {
+		return err
+	}
+	c.activeHandles.Delete(ctx, id) // NOTE(review): Delete error ignored
+	return nil
+}
+
+const maxInt = int(^uint(0) >> 1) // largest value representable by int on this platform
+
+// HandleLimit exports how many file handles can be safely stored by this cache.
+func (c *CachingHandler) HandleLimit() int {
+	return maxInt // effectively unlimited: the store is disk-backed
+}
+
+func hasPrefix(path, prefix []string) bool { // NOTE(review): appears unused in this chunk — confirm before removing
+	if len(prefix) > len(path) {
+		return false
+	}
+	for i, e := range prefix {
+		if path[i] != e {
+			return false
+		}
+	}
+	return true
+}
diff --git a/src/export/nfs/wrapper-v4.go b/src/export/nfs/wrapper-v4.go
new file mode 100644
index 0000000..e1e7bf3
--- /dev/null
+++ b/src/export/nfs/wrapper-v4.go
@@ -0,0 +1,164 @@
+package nfs
+
+// import (
+// 	"io/fs"
+
+// 	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+// 	nfsfs "github.com/smallfz/libnfs-go/fs"
+// )
+
+// type nfsFsWrapper struct {
+// 	fs vfs.Filesystem
+// }
+
+// var _ nfsfs.FS = (*nfsFsWrapper)(nil)
+
+// // Attributes implements fs.FS.
+// func (*nfsFsWrapper) Attributes() *nfsfs.Attributes {
+// 	return &nfsfs.Attributes{
+// 		LinkSupport:     true,
+// 		SymlinkSupport:  false, // unsupported
+// 		ChownRestricted: true,  // unsupported
+// 		MaxName:         255,   // common value
+// 		NoTrunc:         false,
+// 	}
+// }
+
+// // Stat implements fs.FS.
+// func (*nfsFsWrapper) Stat(string) (nfsfs.FileInfo, error) {
+// 	panic("unimplemented")
+// }
+
+// // Chmod implements fs.FS.
+// func (*nfsFsWrapper) Chmod(string, fs.FileMode) error {
+// 	panic("unimplemented")
+// }
+
+// // Chown implements fs.FS.
+// func (*nfsFsWrapper) Chown(string, int, int) error {
+// 	panic("unimplemented")
+// }
+
+// // GetFileId implements fs.FS.
+// func (*nfsFsWrapper) GetFileId(nfsfs.FileInfo) uint64 {
+// 	panic("unimplemented")
+// }
+
+// // GetHandle implements fs.FS.
+// func (*nfsFsWrapper) GetHandle(nfsfs.FileInfo) ([]byte, error) {
+// 	panic("unimplemented")
+// }
+
+// // GetRootHandle implements fs.FS.
+// func (*nfsFsWrapper) GetRootHandle() []byte {
+// 	panic("unimplemented")
+// }
+
+// // Link implements fs.FS.
+// func (*nfsFsWrapper) Link(string, string) error {
+// 	panic("unimplemented")
+// }
+
+// // MkdirAll implements fs.FS.
+// func (*nfsFsWrapper) MkdirAll(string, fs.FileMode) error {
+// 	panic("unimplemented")
+// }
+
+// // Open implements fs.FS.
+// func (w *nfsFsWrapper) Open(name string) (nfsfs.File, error) {
+// 	f, err := w.fs.Open(name)
+// 	if err != nil {
+// 		return nil, nfsErr(err)
+// 	}
+// }
+
+// // OpenFile implements fs.FS.
+// func (w *nfsFsWrapper) OpenFile(string, int, fs.FileMode) (nfsfs.File, error) {
+// 	panic("unimplemented")
+// }
+
+// // Readlink implements fs.FS.
+// func (*nfsFsWrapper) Readlink(string) (string, error) {
+// 	panic("unimplemented")
+// }
+
+// // Remove implements fs.FS.
+// func (*nfsFsWrapper) Remove(string) error {
+// 	panic("unimplemented")
+// }
+
+// // Rename implements fs.FS.
+// func (*nfsFsWrapper) Rename(string, string) error {
+// 	panic("unimplemented")
+// }
+
+// // ResolveHandle implements fs.FS.
+// func (*nfsFsWrapper) ResolveHandle([]byte) (string, error) {
+// 	panic("unimplemented")
+// }
+
+// // Symlink implements fs.FS.
+// func (*nfsFsWrapper) Symlink(string, string) error {
+// 	return NotImplementedError
+// }
+
+// var NotImplementedError = vfs.NotImplemented
+
+// func nfsErr(err error) error {
+// 	if err == vfs.NotImplemented {
+// 		return NotImplementedError
+// 	}
+// 	return err
+// }
+
+// type nfsFile struct {
+// 	name string
+// 	f    vfs.File
+// }
+
+// // Close implements fs.File.
+// func (f *nfsFile) Close() error {
+// 	return f.f.Close()
+// }
+
+// // Name implements fs.File.
+// func (f *nfsFile) Name() string {
+// 	return f.name
+// }
+
+// // Read implements fs.File.
+// func (f *nfsFile) Read(p []byte) (n int, err error) {
+// 	return f.f.Read(p)
+// }
+
+// // Readdir implements fs.File.
+// func (f *nfsFile) Readdir(int) ([]nfsfs.FileInfo, error) {
+// 	f.f.IsDir()
+// }
+
+// // Seek implements fs.File.
+// func (*nfsFile) Seek(offset int64, whence int) (int64, error) {
+// 	panic("unimplemented")
+// }
+
+// // Stat implements fs.File.
+// func (*nfsFile) Stat() (nfsfs.FileInfo, error) {
+// 	panic("unimplemented")
+// }
+
+// // Sync implements fs.File.
+// func (*nfsFile) Sync() error {
+// 	panic("unimplemented")
+// }
+
+// // Truncate implements fs.File.
+// func (*nfsFile) Truncate() error {
+// 	panic("unimplemented")
+// }
+
+// // Write implements fs.File.
+// func (*nfsFile) Write(p []byte) (n int, err error) {
+// 	panic("unimplemented")
+// }
+
+// var _ nfsfs.File = (*nfsFile)(nil)
diff --git a/src/export/nfs/wrapper.go b/src/export/nfs/wrapper.go
new file mode 100644
index 0000000..753a5f9
--- /dev/null
+++ b/src/export/nfs/wrapper.go
@@ -0,0 +1,227 @@
+package nfs
+
+import (
+	"context"
+	"errors"
+	"io/fs"
+	"log/slog"
+	"path/filepath"
+	"time"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxbilly"
+	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+	"github.com/go-git/go-billy/v5"
+)
+
+type fsWrapper struct {
+	fs  vfs.Filesystem
+	log *slog.Logger
+
+	timeout time.Duration
+}
+
+var _ nfs.Filesystem = (*fsWrapper)(nil)
+
+// var _ ctxbilly.Dir = (*billyFsWrapper)(nil)
+
+// Chroot implements billy.Filesystem.
+func (*fsWrapper) Chroot(path string) (nfs.Filesystem, error) {
+	return nil, billy.ErrNotSupported
+}
+
+// Create implements billy.Filesystem.
+func (*fsWrapper) Create(ctx context.Context, filename string) (nfs.File, error) {
+	return nil, billy.ErrNotSupported
+}
+
+// Join implements billy.Filesystem.
+func (*fsWrapper) Join(elem ...string) string {
+	return filepath.Join(elem...)
+}
+
+// Lstat implements billy.Filesystem.
+func (fs *fsWrapper) Lstat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	ctx, cancel := context.WithTimeout(ctx, fs.timeout)
+	defer cancel()
+
+	info, err := fs.fs.Stat(ctx, filename)
+	if err != nil {
+		return nil, billyErr(ctx, err, fs.log)
+	}
+	return info, nil
+}
+
+// MkdirAll implements billy.Filesystem.
+func (*fsWrapper) MkdirAll(ctx context.Context, filename string, perm fs.FileMode) error {
+	return billy.ErrNotSupported
+}
+
+// Open implements billy.Filesystem.
+func (fs *fsWrapper) Open(ctx context.Context, filename string) (nfs.File, error) {
+	ctx, cancel := context.WithTimeout(ctx, fs.timeout)
+	defer cancel()
+
+	file, err := fs.fs.Open(ctx, filename)
+	if err != nil {
+		return nil, billyErr(ctx, err, fs.log)
+	}
+	return &billyFile{
+		name: filename,
+		file: file,
+		log:  fs.log.With("filename", filename),
+	}, nil
+}
+
+// OpenFile implements billy.Filesystem.
+func (fs *fsWrapper) OpenFile(ctx context.Context, filename string, flag int, perm fs.FileMode) (nfs.File, error) {
+	ctx, cancel := context.WithTimeout(ctx, fs.timeout)
+	defer cancel()
+
+	file, err := fs.fs.Open(ctx, filename)
+	if err != nil {
+		return nil, billyErr(ctx, err, fs.log)
+	}
+	return &billyFile{
+		name: filename,
+		file: file,
+		log:  fs.log.With("filename", filename, "flag", flag, "perm", perm.String()),
+	}, nil
+}
+
+// ReadDir implements billy.Filesystem.
+func (bfs *fsWrapper) ReadDir(ctx context.Context, path string) ([]fs.FileInfo, error) {
+	ctx, cancel := context.WithTimeout(ctx, bfs.timeout)
+	defer cancel()
+
+	ffs, err := bfs.fs.ReadDir(ctx, path)
+	if err != nil {
+		return nil, billyErr(ctx, err, bfs.log)
+	}
+
+	out := make([]fs.FileInfo, 0, len(ffs))
+	for _, v := range ffs {
+		if info, ok := v.(fs.FileInfo); ok {
+			out = append(out, info)
+		} else {
+			info, err := v.Info()
+			if err != nil {
+				return nil, err
+			}
+			out = append(out, info)
+		}
+
+	}
+	return out, nil
+}
+
+// Readlink implements billy.Filesystem.
+func (*fsWrapper) Readlink(ctx context.Context, link string) (string, error) {
+	return "", billy.ErrNotSupported
+}
+
+// Remove implements billy.Filesystem.
+func (bfs *fsWrapper) Remove(ctx context.Context, filename string) error {
+	ctx, cancel := context.WithTimeout(ctx, bfs.timeout)
+	defer cancel()
+
+	return bfs.fs.Unlink(ctx, filename)
+}
+
+// Rename implements billy.Filesystem.
+func (*fsWrapper) Rename(ctx context.Context, oldpath string, newpath string) error {
+	return billy.ErrNotSupported
+}
+
+// Root implements billy.Filesystem.
+func (*fsWrapper) Root() string {
+	return "/"
+}
+
+// Stat implements billy.Filesystem.
+func (bfs *fsWrapper) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	ctx, cancel := context.WithTimeout(ctx, bfs.timeout)
+	defer cancel()
+
+	info, err := bfs.fs.Stat(ctx, filename)
+	if err != nil {
+		return nil, billyErr(ctx, err, bfs.log)
+	}
+	return info, nil
+}
+
+// Symlink implements billy.Filesystem.
+func (fs *fsWrapper) Symlink(ctx context.Context, target string, link string) error {
+	return billyErr(nil, vfs.ErrNotImplemented, fs.log)
+}
+
+type billyFile struct {
+	name string
+	file vfs.File
+	log  *slog.Logger
+}
+
+var _ ctxbilly.File = (*billyFile)(nil)
+
+// Close implements billy.File.
+func (f *billyFile) Close(ctx context.Context) error {
+	return f.file.Close(ctx)
+}
+
+// Name implements billy.File.
+func (f *billyFile) Name() string {
+	return f.name
+}
+
+// Read implements billy.File.
+func (bf *billyFile) Read(ctx context.Context, p []byte) (n int, err error) {
+	return bf.file.Read(ctx, p)
+}
+
+// ReadAt implements billy.File.
+func (bf *billyFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	return bf.file.ReadAt(ctx, p, off)
+}
+
+// Seek implements billy.File.
+func (f *billyFile) Seek(offset int64, whence int) (int64, error) {
+	return 0, billyErr(nil, vfs.ErrNotImplemented, f.log)
+}
+
+// Truncate implements billy.File.
+func (f *billyFile) Truncate(ctx context.Context, size int64) error {
+	return billyErr(nil, vfs.ErrNotImplemented, f.log)
+}
+
+// Write implements billy.File.
+func (f *billyFile) Write(ctx context.Context, p []byte) (n int, err error) {
+	return 0, billyErr(nil, vfs.ErrNotImplemented, f.log)
+}
+
+// Lock implements billy.File.
+func (*billyFile) Lock() error {
+	return nil // TODO
+}
+
+// Unlock implements billy.File.
+func (*billyFile) Unlock() error {
+	return nil // TODO
+}
+
+func billyErr(ctx context.Context, err error, log *slog.Logger) error {
+	if errors.Is(err, vfs.ErrNotImplemented) {
+		return billy.ErrNotSupported
+	}
+	if errors.Is(err, vfs.ErrNotExist) {
+		if err, ok := asErr[*fs.PathError](err); ok {
+			log.ErrorContext(ctx, "file not found", "op", err.Op, "path", err.Path, "error", err.Err)
+		}
+		return fs.ErrNotExist
+	}
+	return err
+}
+
+func asErr[E error](err error) (e E, ok bool) {
+	return e, errors.As(err, &e)
+}
diff --git a/src/mounts/webdav/fs.go b/src/export/webdav/fs.go
similarity index 74%
rename from src/mounts/webdav/fs.go
rename to src/export/webdav/fs.go
index 29f2727..8b21345 100644
--- a/src/mounts/webdav/fs.go
+++ b/src/export/webdav/fs.go
@@ -3,13 +3,13 @@ package webdav
 import (
 	"context"
 	"io"
+	"io/fs"
 	"os"
-	"path/filepath"
+	"path"
 	"sync"
 	"time"
 
 	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
-	"git.kmsign.ru/royalcat/tstor/src/iio"
 	"golang.org/x/net/webdav"
 )
 
@@ -24,54 +24,53 @@ func newFS(fs vfs.Filesystem) *WebDAV {
 }
 
 func (wd *WebDAV) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
-	p := "/" + name
+	name = vfs.AbsPath(name)
+
 	// TODO handle flag and permissions
-	f, err := wd.lookupFile(p)
+	f, err := wd.lookupFile(ctx, name)
 	if err != nil {
 		return nil, err
 	}
 
-	wdf := newFile(filepath.Base(p), f, func() ([]os.FileInfo, error) {
-		return wd.listDir(p)
+	wdf := newFile(ctx, path.Base(name), f, func() ([]fs.FileInfo, error) {
+		return wd.listDir(ctx, name)
 	})
 	return wdf, nil
 }
 
-func (wd *WebDAV) Stat(ctx context.Context, name string) (os.FileInfo, error) {
-	p := "/" + name
-	f, err := wd.lookupFile(p)
-	if err != nil {
-		return nil, err
-	}
-	fi := newFileInfo(name, f.Size(), f.IsDir())
-	return fi, nil
+func (wd *WebDAV) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
+	return wd.fs.Stat(ctx, vfs.AbsPath(name))
 }
 
-func (wd *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
+func (wd *WebDAV) Mkdir(ctx context.Context, name string, perm fs.FileMode) error {
 	return webdav.ErrNotImplemented
 }
 
 func (wd *WebDAV) RemoveAll(ctx context.Context, name string) error {
-	return webdav.ErrNotImplemented
+	return wd.fs.Unlink(ctx, name)
 }
 
 func (wd *WebDAV) Rename(ctx context.Context, oldName, newName string) error {
 	return webdav.ErrNotImplemented
 }
 
-func (wd *WebDAV) lookupFile(path string) (vfs.File, error) {
-	return wd.fs.Open(path)
+func (wd *WebDAV) lookupFile(ctx context.Context, name string) (vfs.File, error) {
+	return wd.fs.Open(ctx, path.Clean(name))
 }
 
-func (wd *WebDAV) listDir(path string) ([]os.FileInfo, error) {
-	files, err := wd.fs.ReadDir(path)
+func (wd *WebDAV) listDir(ctx context.Context, path string) ([]os.FileInfo, error) {
+	files, err := wd.fs.ReadDir(ctx, path)
 	if err != nil {
 		return nil, err
 	}
 
-	var out []os.FileInfo
-	for n, f := range files {
-		out = append(out, newFileInfo(n, f.Size(), f.IsDir()))
+	out := make([]os.FileInfo, 0, len(files))
+	for _, f := range files {
+		info, err := f.Info()
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, info)
 	}
 
 	return out, nil
@@ -80,9 +79,10 @@ func (wd *WebDAV) listDir(path string) ([]os.FileInfo, error) {
 var _ webdav.File = &webDAVFile{}
 
 type webDAVFile struct {
-	iio.Reader
+	ctx context.Context
 
 	fi os.FileInfo
+	f  vfs.File
 
 	mudp   sync.Mutex
 	dirPos int
@@ -93,11 +93,12 @@ type webDAVFile struct {
 	dirContent []os.FileInfo
 }
 
-func newFile(name string, f vfs.File, df func() ([]os.FileInfo, error)) *webDAVFile {
+func newFile(ctx context.Context, name string, f vfs.File, df func() ([]os.FileInfo, error)) *webDAVFile {
 	return &webDAVFile{
+		ctx:     ctx,
+		f:       f,
 		fi:      newFileInfo(name, f.Size(), f.IsDir()),
 		dirFunc: df,
-		Reader:  f,
 	}
 }
 
@@ -147,7 +148,7 @@ func (wdf *webDAVFile) Read(p []byte) (int, error) {
 	wdf.mup.Lock()
 	defer wdf.mup.Unlock()
 
-	n, err := wdf.Reader.ReadAt(p, wdf.pos)
+	n, err := wdf.f.ReadAt(wdf.ctx, p, wdf.pos)
 	wdf.pos += int64(n)
 
 	return n, err
@@ -173,6 +174,11 @@ func (wdf *webDAVFile) Write(p []byte) (n int, err error) {
 	return 0, webdav.ErrNotImplemented
 }
 
+// Close implements webdav.File.
+func (wdf *webDAVFile) Close() error {
+	return wdf.f.Close(wdf.ctx)
+}
+
 type webDAVFileInfo struct {
 	name  string
 	size  int64
diff --git a/src/mounts/webdav/fs_test.go b/src/export/webdav/fs_test.go
similarity index 64%
rename from src/mounts/webdav/fs_test.go
rename to src/export/webdav/fs_test.go
index 5d703e8..a43a50e 100644
--- a/src/mounts/webdav/fs_test.go
+++ b/src/export/webdav/fs_test.go
@@ -13,16 +13,17 @@ import (
 
 func TestWebDAVFilesystem(t *testing.T) {
 	t.Parallel()
+	ctx := context.Background()
 
 	require := require.New(t)
 
-	mfs := vfs.NewMemoryFS(map[string]*vfs.MemoryFile{
-		"/folder/file.txt": vfs.NewMemoryFile([]byte("test file content.")),
+	mfs := vfs.NewMemoryFS("/", map[string]*vfs.MemoryFile{
+		"/folder/file.txt": vfs.NewMemoryFile("file.txt", []byte("test file content.")),
 	})
 
 	wfs := newFS(mfs)
 
-	dir, err := wfs.OpenFile(context.Background(), "/", 0, 0)
+	dir, err := wfs.OpenFile(ctx, "/", 0, 0)
 	require.NoError(err)
 
 	fi, err := dir.Readdir(0)
@@ -30,7 +31,7 @@ func TestWebDAVFilesystem(t *testing.T) {
 	require.Len(fi, 1)
 	require.Equal("folder", fi[0].Name())
 
-	file, err := wfs.OpenFile(context.Background(), "/folder/file.txt", 0, 0)
+	file, err := wfs.OpenFile(ctx, "/folder/file.txt", 0, 0)
 	require.NoError(err)
 	_, err = file.Readdir(0)
 	require.ErrorIs(err, os.ErrInvalid)
@@ -56,8 +57,8 @@ func TestWebDAVFilesystem(t *testing.T) {
 
 	fInfo, err := wfs.Stat(context.Background(), "/folder/file.txt")
 	require.NoError(err)
-	require.Equal("/folder/file.txt", fInfo.Name())
-	require.Equal(false, fInfo.IsDir())
+	require.Equal("file.txt", fInfo.Name())
+	require.False(fInfo.IsDir())
 	require.Equal(int64(18), fInfo.Size())
 }
 
@@ -66,13 +67,13 @@ func TestErrNotImplemented(t *testing.T) {
 
 	require := require.New(t)
 
-	mfs := vfs.NewMemoryFS(map[string]*vfs.MemoryFile{
-		"/folder/file.txt": vfs.NewMemoryFile([]byte("test file content.")),
+	mfs := vfs.NewMemoryFS("/", map[string]*vfs.MemoryFile{
+		"/folder/file.txt": vfs.NewMemoryFile("file.txt", []byte("test file content.")),
 	})
 
 	wfs := newFS(mfs)
 
 	require.ErrorIs(wfs.Mkdir(context.Background(), "test", 0), webdav.ErrNotImplemented)
-	require.ErrorIs(wfs.RemoveAll(context.Background(), "test"), webdav.ErrNotImplemented)
-	require.ErrorIs(wfs.Rename(context.Background(), "test", "newTest"), webdav.ErrNotImplemented)
+	// require.ErrorIs(wfs.RemoveAll(context.Background(), "test"), webdav.ErrNotImplemented)
+	// require.ErrorIs(wfs.Rename(context.Background(), "test", "newTest"), webdav.ErrNotImplemented)
 }
diff --git a/src/mounts/webdav/handler.go b/src/export/webdav/handler.go
similarity index 68%
rename from src/mounts/webdav/handler.go
rename to src/export/webdav/handler.go
index 3c478ce..c44528b 100644
--- a/src/mounts/webdav/handler.go
+++ b/src/export/webdav/handler.go
@@ -1,22 +1,22 @@
 package webdav
 
 import (
+	"log/slog"
 	"net/http"
 
 	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
-	"github.com/rs/zerolog/log"
 	"golang.org/x/net/webdav"
 )
 
 func newHandler(fs vfs.Filesystem) *webdav.Handler {
-	l := log.Logger.With().Str("component", "webDAV").Logger()
+	log := slog.With("component", "webDAV")
 	return &webdav.Handler{
 		Prefix:     "/",
 		FileSystem: newFS(fs),
 		LockSystem: webdav.NewMemLS(),
 		Logger: func(req *http.Request, err error) {
 			if err != nil {
-				l.Error().Err(err).Str("path", req.RequestURI).Msg("webDAV error")
+				log.Error("webDAV error", "path", req.RequestURI, "error", err)
 			}
 		},
 	}
diff --git a/src/export/webdav/http.go b/src/export/webdav/http.go
new file mode 100644
index 0000000..f88336b
--- /dev/null
+++ b/src/export/webdav/http.go
@@ -0,0 +1,78 @@
+package webdav
+
+import (
+	"fmt"
+	"log/slog"
+	"net/http"
+
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+	"golang.org/x/net/webdav"
+)
+
+func NewWebDAVServer(fs vfs.Filesystem, port int, user, pass string) error {
+
+	srv := newHandler(fs)
+
+	serveMux := http.NewServeMux()
+
+	serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		username, password, _ := r.BasicAuth()
+		if username == user && password == pass {
+			srv.ServeHTTP(w, r)
+			return
+		}
+
+		w.Header().Set("WWW-Authenticate", `Basic realm="BASIC WebDAV REALM"`)
+		w.WriteHeader(401)
+		_, _ = w.Write([]byte("401 Unauthorized\n"))
+	})
+
+	//nolint:exhaustruct
+	httpServer := &http.Server{
+		Addr:    fmt.Sprintf("0.0.0.0:%d", port),
+		Handler: serveMux,
+	}
+
+	slog.With("host", httpServer.Addr).Info("starting webDAV server")
+
+	return httpServer.ListenAndServe()
+}
+
+func NewDirServer(dir string, port int, user, pass string) error {
+
+	log := slog.With("component", "webDAV")
+	srv := &webdav.Handler{
+		Prefix:     "/",
+		FileSystem: webdav.Dir(dir),
+		LockSystem: webdav.NewMemLS(),
+		Logger: func(req *http.Request, err error) {
+			if err != nil {
+				log.Error("webDAV error", "path", req.RequestURI)
+			}
+		},
+	}
+
+	serveMux := http.NewServeMux()
+
+	serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		username, password, _ := r.BasicAuth()
+		if username == user && password == pass {
+			srv.ServeHTTP(w, r)
+			return
+		}
+
+		w.Header().Set("WWW-Authenticate", `Basic realm="BASIC WebDAV REALM"`)
+		w.WriteHeader(401)
+		_, _ = w.Write([]byte("401 Unauthorized\n"))
+	})
+
+	//nolint:exhaustruct
+	httpServer := &http.Server{
+		Addr:    fmt.Sprintf("0.0.0.0:%d", port),
+		Handler: serveMux,
+	}
+
+	log.Info("starting webDAV server", "host", httpServer.Addr)
+
+	return httpServer.ListenAndServe()
+}
diff --git a/src/host/controller/torrent.go b/src/host/controller/torrent.go
new file mode 100644
index 0000000..30aa6cc
--- /dev/null
+++ b/src/host/controller/torrent.go
@@ -0,0 +1,106 @@
+package controller
+
+import (
+	"context"
+	"slices"
+	"strings"
+
+	"git.kmsign.ru/royalcat/tstor/src/host/store"
+	"github.com/anacrolix/torrent"
+)
+
+type Torrent struct {
+	torrentFilePath string
+	t               *torrent.Torrent
+	rep             *store.FilesMappings
+}
+
+func NewTorrent(t *torrent.Torrent, rep *store.FilesMappings) *Torrent {
+	return &Torrent{t: t, rep: rep}
+}
+
+func (s *Torrent) TorrentFilePath() string {
+	return s.torrentFilePath
+}
+
+func (s *Torrent) Torrent() *torrent.Torrent {
+	return s.t
+}
+
+func (c *Torrent) Name() string {
+	<-c.t.GotInfo()
+	if name := c.t.Name(); name != "" {
+		return name
+	}
+
+	return c.InfoHash()
+}
+
+func (s *Torrent) InfoHash() string {
+	<-s.t.GotInfo()
+	return s.t.InfoHash().HexString()
+}
+
+func (s *Torrent) BytesCompleted() int64 {
+	<-s.t.GotInfo()
+	return s.t.BytesCompleted()
+}
+
+func (s *Torrent) BytesMissing() int64 {
+	<-s.t.GotInfo()
+	return s.t.BytesMissing()
+}
+
+func (s *Torrent) Length() int64 {
+	<-s.t.GotInfo()
+	return s.t.Length()
+}
+
+func (s *Torrent) Files(ctx context.Context) ([]*torrent.File, error) {
+	fileMappings, err := s.rep.FileMappings(ctx, s.t.InfoHash())
+	if err != nil {
+		return nil, err
+	}
+
+	<-s.t.GotInfo()
+	files := s.t.Files()
+	files = slices.DeleteFunc(files, func(file *torrent.File) bool {
+		p := file.Path()
+		if strings.Contains(p, "/.pad/") {
+			return true
+		}
+		if target, ok := fileMappings[p]; ok && target == "" {
+			return true
+		}
+		return false
+	})
+
+	return files, nil
+}
+
+func Map[T, U any](ts []T, f func(T) U) []U {
+	us := make([]U, len(ts))
+	for i := range ts {
+		us[i] = f(ts[i])
+	}
+	return us
+}
+
+func (s *Torrent) ExcludeFile(ctx context.Context, f *torrent.File) error {
+	return s.rep.ExcludeFile(ctx, f)
+}
+
+func (s *Torrent) isFileComplete(startIndex int, endIndex int) bool {
+	for i := startIndex; i < endIndex; i++ {
+		if !s.t.Piece(i).State().Complete {
+			return false
+		}
+	}
+	return true
+}
+
+func (s *Torrent) ValidateTorrent() error {
+	<-s.t.GotInfo()
+	s.t.VerifyData()
+	return nil
+}
diff --git a/src/host/datastorage/piece_storage.go b/src/host/datastorage/piece_storage.go
new file mode 100644
index 0000000..5197e3b
--- /dev/null
+++ b/src/host/datastorage/piece_storage.go
@@ -0,0 +1,169 @@
+package datastorage
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path"
+
+	"git.kmsign.ru/royalcat/tstor/src/host/controller"
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/storage"
+	"github.com/anacrolix/torrent/types/infohash"
+	"github.com/hashicorp/go-multierror"
+)
+
+// NOT USED
+type PieceStorage struct {
+	basePath   string
+	completion storage.PieceCompletion
+}
+
+func NewPieceStorage(path string, completion storage.PieceCompletion) *PieceStorage {
+	return &PieceStorage{
+		basePath:   path,
+		completion: completion,
+	}
+}
+
+// OpenTorrent implements FileStorageDeleter.
+func (p *PieceStorage) OpenTorrent(info *metainfo.Info, infoHash infohash.T) (storage.TorrentImpl, error) {
+	torrentPath := path.Join(p.basePath, infoHash.HexString())
+	descriptors := []*os.File{}
+
+	return storage.TorrentImpl{
+		Piece: func(piece metainfo.Piece) storage.PieceImpl {
+			hash := piece.Hash().HexString()
+			piecePrefixDir := path.Join(torrentPath, hash[:2])
+			err := os.MkdirAll(piecePrefixDir, os.ModePerm|os.ModeDir)
+			if err != nil {
+				return &errPiece{err: err}
+			}
+			piecePath := path.Join(torrentPath, hash[:2], hash)
+			file, err := os.OpenFile(piecePath, os.O_CREATE|os.O_RDWR, os.ModePerm)
+			if err != nil {
+				return &errPiece{err: err}
+			}
+			pk := metainfo.PieceKey{
+				InfoHash: infoHash,
+				Index:    piece.Index(),
+			}
+			return newPieceFile(pk, file, p.completion)
+
+			// file, err os.OpenFile(piecePath)
+		},
+		Flush: func() error {
+			var res error
+			for _, f := range descriptors {
+				if err := f.Sync(); err != nil {
+					res = multierror.Append(res, err)
+				}
+			}
+			return res
+		},
+		Close: func() error {
+			var res error
+			for _, f := range descriptors {
+				if err := f.Close(); err != nil {
+					res = multierror.Append(res, err)
+				}
+			}
+			return res
+		},
+	}, nil
+}
+
+// Close implements FileStorageDeleter.
+func (p *PieceStorage) Close() error {
+	return nil
+}
+
+// DeleteFile implements FileStorageDeleter.
+func (p *PieceStorage) DeleteFile(file *torrent.File) error {
+	return fmt.Errorf("not implemented")
+}
+
+// CleanupDirs implements DataStorage.
+func (p *PieceStorage) CleanupDirs(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error) {
+	return 0, nil // TODO
+}
+
+// CleanupFiles implements DataStorage.
+func (p *PieceStorage) CleanupFiles(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error) {
+	return 0, nil // TODO
+}
+
+func newPieceFile(pk metainfo.PieceKey, file *os.File, completion storage.PieceCompletionGetSetter) *piece {
+	return &piece{
+		pk:         pk,
+		File:       file,
+		completion: completion,
+	}
+}
+
+type piece struct {
+	*os.File
+	pk         metainfo.PieceKey
+	completion storage.PieceCompletionGetSetter
+}
+
+// Completion implements storage.PieceImpl.
+func (p *piece) Completion() storage.Completion {
+	compl, err := p.completion.Get(p.pk)
+	if err != nil {
+		return storage.Completion{Complete: false, Ok: false, Err: err}
+	}
+	return compl
+}
+
+// MarkComplete implements storage.PieceImpl.
+func (p *piece) MarkComplete() error {
+	return p.completion.Set(p.pk, true)
+}
+
+// MarkNotComplete implements storage.PieceImpl.
+func (p *piece) MarkNotComplete() error {
+	return p.completion.Set(p.pk, false)
+}
+
+var _ storage.PieceImpl = (*piece)(nil)
+var _ io.WriterTo = (*piece)(nil)
+
+type errPiece struct {
+	err error
+}
+
+// WriteTo implements io.WriterTo.
+func (p *errPiece) WriteTo(io.Writer) (int64, error) {
+	return 0, p.err
+}
+
+// ReadAt implements storage.PieceImpl.
+func (p *errPiece) ReadAt([]byte, int64) (int, error) {
+	return 0, p.err
+}
+
+// WriteAt implements storage.PieceImpl.
+func (p *errPiece) WriteAt([]byte, int64) (int, error) {
+	return 0, p.err
+}
+
+// Completion implements storage.PieceImpl.
+func (p *errPiece) Completion() storage.Completion {
+	return storage.Completion{Complete: false, Ok: false, Err: p.err}
+}
+
+// MarkComplete implements storage.PieceImpl.
+func (p *errPiece) MarkComplete() error {
+	return p.err
+}
+
+// MarkNotComplete implements storage.PieceImpl.
+func (p *errPiece) MarkNotComplete() error {
+	return p.err
+}
+
+var _ storage.PieceImpl = (*errPiece)(nil)
+var _ io.WriterTo = (*errPiece)(nil)
diff --git a/src/host/datastorage/setup.go b/src/host/datastorage/setup.go
new file mode 100644
index 0000000..209a5f3
--- /dev/null
+++ b/src/host/datastorage/setup.go
@@ -0,0 +1,51 @@
+package datastorage
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"git.kmsign.ru/royalcat/tstor/src/config"
+	"git.kmsign.ru/royalcat/tstor/src/host/store"
+	"github.com/anacrolix/torrent/storage"
+)
+
+func Setup(cfg config.TorrentClient) (*DataStorage, storage.PieceCompletion, error) {
+	pcp := filepath.Join(cfg.MetadataFolder, "piece-completion")
+	if err := os.MkdirAll(pcp, 0744); err != nil {
+		return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
+	}
+	pc, err := store.NewBadgerPieceCompletion(pcp)
+	if err != nil {
+		return nil, nil, fmt.Errorf("error creating servers piece completion: %w", err)
+	}
+
+	// TODO implement cache/storage switching
+	// cacheDir := filepath.Join(tcfg.DataFolder, "cache")
+	// if err := os.MkdirAll(cacheDir, 0744); err != nil {
+	// 	return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
+	// }
+	// fc, err := filecache.NewCache(cacheDir)
+	// if err != nil {
+	// 	return nil, nil, fmt.Errorf("error creating cache: %w", err)
+	// }
+	// log.Info().Msg(fmt.Sprintf("setting cache size to %d MB", 1024))
+	// fc.SetCapacity(1024 * 1024 * 1024)
+
+	// rp := storage.NewResourcePieces(fc.AsResourceProvider())
+	// st := &stc{rp}
+
+	filesDir := cfg.DataFolder
+	if err := os.MkdirAll(filesDir, 0744); err != nil {
+		return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
+	}
+	st := NewFileStorage(filesDir, pc)
+
+	// piecesDir := filepath.Join(cfg.DataFolder, ".pieces")
+	// if err := os.MkdirAll(piecesDir, 0744); err != nil {
+	// 	return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
+	// }
+	// st := storage.NewMMapWithCompletion(piecesDir, pc)
+
+	return st, pc, nil
+}
diff --git a/src/host/datastorage/storage.go b/src/host/datastorage/storage.go
new file mode 100644
index 0000000..3587beb
--- /dev/null
+++ b/src/host/datastorage/storage.go
@@ -0,0 +1,429 @@
+package datastorage
+
+import (
+	"context"
+	"crypto/sha1"
+	"fmt"
+	"io"
+	"io/fs"
+	"log/slog"
+	"os"
+	"path"
+	"path/filepath"
+	"slices"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/rlog"
+	"git.kmsign.ru/royalcat/tstor/src/host/controller"
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/storage"
+	"github.com/dustin/go-humanize"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+	"golang.org/x/exp/maps"
+	"golang.org/x/sys/unix"
+)
+
+// type DataStorage interface {
+// 	storage.ClientImplCloser
+// 	DeleteFile(file *torrent.File) error
+// 	CleanupDirs(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error)
+// 	CleanupFiles(ctx context.Context, expected []*controller.Torrent, dryRun bool) (int, error)
+// }
+
+var tracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/src/host/datastorage")
+
+// NewFileStorage creates a new ClientImplCloser that stores files using the OS native filesystem.
+// Torrent directories are named after the torrent (falling back to the
+// infohash) and file layout follows the in-torrent paths; see torrentDir
+// and filePath below.
+func NewFileStorage(baseDir string, pc storage.PieceCompletion) *DataStorage {
+	return &DataStorage{
+		ClientImplCloser: storage.NewFileOpts(storage.NewFileClientOpts{
+			ClientBaseDir:   baseDir,
+			PieceCompletion: pc,
+			TorrentDirMaker: torrentDir,
+			FilePathMaker:   filePath,
+		}),
+		baseDir:         baseDir,
+		pieceCompletion: pc,
+		log:             slog.With("component", "torrent-client"),
+	}
+}
+
+// File-based storage for torrents, that isn't yet bound to a particular torrent.
+type DataStorage struct {
+	baseDir string // root directory all torrent data lives under
+	storage.ClientImplCloser
+	pieceCompletion storage.PieceCompletion
+	log             *slog.Logger
+}
+
+// Close releases the piece-completion database.
+// NOTE(review): this shadows the embedded ClientImplCloser's Close; whether
+// the file-storage client needs its own Close as well is not visible from
+// this file — confirm.
+func (me *DataStorage) Close() error {
+	return me.pieceCompletion.Close()
+}
+
+// torrentDir returns the directory a torrent's files are stored under:
+// baseDir joined with the torrent name, or with the hex infohash when the
+// torrent carries no name.
+func torrentDir(baseDir string, info *metainfo.Info, infoHash metainfo.Hash) string {
+	name := info.Name
+	if name == "" {
+		name = infoHash.HexString()
+	}
+	return filepath.Join(baseDir, name)
+}
+
+// filePath maps a torrent file to its relative path inside the torrent
+// directory by joining the in-torrent path components.
+func filePath(opts storage.FilePathMakerOpts) string {
+	return filepath.Join(opts.File.Path...)
+}
+
+// filePath returns the on-disk path of a single torrent file: the torrent
+// directory combined with the file's in-torrent path.
+func (fs *DataStorage) filePath(info *metainfo.Info, infoHash metainfo.Hash, fileInfo *metainfo.FileInfo) string {
+	return filepath.Join(torrentDir(fs.baseDir, info, infoHash), filePath(storage.FilePathMakerOpts{
+		Info: info,
+		File: fileInfo,
+	}))
+}
+
+// DeleteFile removes a single torrent file from disk and marks every piece
+// overlapping that file as incomplete in the piece-completion store.
+func (fs *DataStorage) DeleteFile(file *torrent.File) error {
+	info := file.Torrent().Info()
+	infoHash := file.Torrent().InfoHash()
+	torrentDir := torrentDir(fs.baseDir, info, infoHash)
+	fileInfo := file.FileInfo()
+	relFilePath := filePath(storage.FilePathMakerOpts{
+		Info: info,
+		File: &fileInfo,
+	})
+	filePath := path.Join(torrentDir, relFilePath)
+	// Invalidate piece completion before touching the file so a crash in
+	// between cannot leave pieces marked complete without backing data.
+	for i := file.BeginPieceIndex(); i < file.EndPieceIndex(); i++ {
+		pk := metainfo.PieceKey{InfoHash: infoHash, Index: i}
+		err := fs.pieceCompletion.Set(pk, false)
+		if err != nil {
+			return err
+		}
+	}
+	return os.Remove(filePath)
+}
+
+// CleanupDirs removes top-level entries under baseDir that do not belong to
+// any torrent in expected. It returns the entry names slated for deletion;
+// with dryRun=true nothing is actually removed. On a removal error it
+// returns the entries deleted so far.
+func (fs *DataStorage) CleanupDirs(ctx context.Context, expected []*controller.Torrent, dryRun bool) ([]string, error) {
+	log := fs.log.With("function", "CleanupDirs", "expectedTorrents", len(expected), "dryRun", dryRun)
+
+	expectedEntries := []string{}
+	for _, e := range expected {
+		expectedEntries = append(expectedEntries, e.Torrent().Name())
+	}
+
+	entries, err := os.ReadDir(fs.baseDir)
+	if err != nil {
+		return nil, err
+	}
+
+	// Anything on disk that no expected torrent claims is trash.
+	toDelete := []string{}
+	for _, v := range entries {
+		if !slices.Contains(expectedEntries, v.Name()) {
+			toDelete = append(toDelete, v.Name())
+		}
+	}
+
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
+	}
+
+	log.Info("deleting trash data", "dirsCount", len(toDelete))
+	if !dryRun {
+		for i, name := range toDelete {
+			p := path.Join(fs.baseDir, name)
+			log.Warn("deleting trash data", "path", p)
+			err := os.RemoveAll(p)
+			if err != nil {
+				// return only what was actually removed so far
+				return toDelete[:i], err
+			}
+		}
+	}
+
+	return toDelete, nil
+}
+
+// CleanupFiles walks baseDir and removes every regular file that is not part
+// of any torrent in expected. It returns the paths slated for deletion; with
+// dryRun=true nothing is removed. On a removal error it returns the paths
+// deleted so far (mirroring CleanupDirs).
+func (s *DataStorage) CleanupFiles(ctx context.Context, expected []*controller.Torrent, dryRun bool) ([]string, error) {
+	log := s.log.With("function", "CleanupFiles", "expectedTorrents", len(expected), "dryRun", dryRun)
+
+	expectedEntries := []string{}
+	{
+		for _, e := range expected {
+			files, err := e.Files(ctx)
+			if err != nil {
+				return nil, err
+			}
+
+			for _, f := range files {
+				expectedEntries = append(expectedEntries, s.filePath(e.Torrent().Info(), e.Torrent().InfoHash(), ptr(f.FileInfo())))
+			}
+		}
+	}
+
+	entries := []string{}
+	err := filepath.WalkDir(s.baseDir,
+		func(path string, info fs.DirEntry, err error) error {
+			if err != nil {
+				return err
+			}
+			if ctx.Err() != nil {
+				return ctx.Err()
+			}
+
+			if info.IsDir() {
+				return nil
+			}
+			entries = append(entries, path)
+			return nil
+		})
+	if err != nil {
+		return nil, err
+	}
+
+	toDelete := []string{}
+	for _, v := range entries {
+		if !slices.Contains(expectedEntries, v) {
+			toDelete = append(toDelete, v)
+		}
+	}
+
+	if ctx.Err() != nil {
+		return toDelete, ctx.Err()
+	}
+
+	log.Info("deleting trash data", "filesCount", len(toDelete))
+	if !dryRun {
+		for i, p := range toDelete {
+			s.log.Warn("deleting trash data", "path", p)
+			err := os.Remove(p)
+			if err != nil {
+				// fixed: previously returned toDelete[i:] (the files NOT yet
+				// deleted); return the successfully deleted prefix instead,
+				// consistent with CleanupDirs
+				return toDelete[:i], err
+			}
+		}
+	}
+	return toDelete, nil
+}
+
+// iterFiles walks baseDir and invokes iter for every regular file,
+// aborting on walk errors or context cancellation.
+func (s *DataStorage) iterFiles(ctx context.Context, iter func(ctx context.Context, path string, entry fs.FileInfo) error) error {
+	return filepath.Walk(s.baseDir,
+		func(path string, info fs.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+			if ctx.Err() != nil {
+				return ctx.Err()
+			}
+
+			if info.IsDir() {
+				return nil
+			}
+
+			return iter(ctx, path, info)
+		})
+}
+
+// Dedupe groups files under baseDir by size and asks the filesystem to share
+// extents between same-sized candidates (see dedupeFiles). It returns the
+// total number of bytes the kernel reported as deduplicated.
+func (s *DataStorage) Dedupe(ctx context.Context) (uint64, error) {
+	// fixed: fmt.Sprintf with no formatting arguments (go vet) — use the literal
+	ctx, span := tracer.Start(ctx, "Dedupe")
+	defer span.End()
+
+	log := rlog.FunctionLog(s.log, "Dedupe")
+
+	// size -> candidate paths; only groups with >1 entry can contain dupes
+	sizeMap := map[int64][]string{}
+	err := s.iterFiles(ctx, func(ctx context.Context, path string, info fs.FileInfo) error {
+		size := info.Size()
+		sizeMap[size] = append(sizeMap[size], path)
+		return nil
+	})
+	if err != nil {
+		return 0, err
+	}
+
+	maps.DeleteFunc(sizeMap, func(k int64, v []string) bool {
+		return len(v) <= 1
+	})
+
+	span.AddEvent("collected files with same size", trace.WithAttributes(
+		attribute.Int("count", len(sizeMap)),
+	))
+
+	var deduped uint64 = 0
+
+	i := 0
+	for _, paths := range sizeMap {
+		if i%100 == 0 {
+			log.Info("deduping in progress", "current", i, "total", len(sizeMap))
+		}
+		i++
+
+		if ctx.Err() != nil {
+			return deduped, ctx.Err()
+		}
+
+		slices.Sort(paths)
+		paths = slices.Compact(paths)
+		if len(paths) <= 1 {
+			continue
+		}
+
+		paths, err = applyErr(paths, filepath.Abs)
+		if err != nil {
+			return deduped, err
+		}
+
+		dedupedGroup, err := s.dedupeFiles(ctx, paths)
+		if err != nil {
+			// best-effort: a failed group should not abort the whole pass
+			log.Error("Error applying dedupe", "files", paths, "error", err.Error())
+			continue
+		}
+
+		if dedupedGroup > 0 {
+			deduped += dedupedGroup
+			log.Info("deduped file group",
+				slog.String("files", fmt.Sprint(paths)),
+				slog.String("deduped", humanize.Bytes(dedupedGroup)),
+				slog.String("deduped_total", humanize.Bytes(deduped)),
+			)
+		}
+
+	}
+
+	return deduped, nil
+}
+
+// applyErr maps apply over in, stopping at the first error; the partial
+// result accumulated so far is returned alongside that error.
+func applyErr[E, O any](in []E, apply func(E) (O, error)) ([]O, error) {
+	out := make([]O, 0, len(in))
+	for _, item := range in {
+		mapped, err := apply(item)
+		if err != nil {
+			return out, err
+		}
+		out = append(out, mapped)
+	}
+	return out, nil
+}
+
+// const blockSize uint64 = 4096
+
+// dedupeFiles attempts a block-level dedupe (FIDEDUPERANGE) of paths[1:]
+// against paths[0]. Candidates are pre-filtered by hashing their first
+// compareBlockSize bytes; the kernel then verifies full extent equality
+// before sharing, so a partial-hash collision cannot corrupt data.
+// Returns the number of bytes the kernel reports as deduplicated.
+func (s *DataStorage) dedupeFiles(ctx context.Context, paths []string) (deduped uint64, err error) {
+	// fixed: fmt.Sprintf with no formatting arguments (go vet) — use the literal
+	ctx, span := tracer.Start(ctx, "dedupeFiles", trace.WithAttributes(
+		attribute.StringSlice("files", paths),
+	))
+	defer func() {
+		span.SetAttributes(attribute.Int64("deduped", int64(deduped)))
+		if err != nil {
+			span.RecordError(err)
+		}
+		span.End()
+	}()
+
+	log := rlog.FunctionLog(s.log, "dedupeFiles")
+
+	srcF, err := os.Open(paths[0])
+	if err != nil {
+		return deduped, err
+	}
+	defer srcF.Close()
+	srcStat, err := srcF.Stat()
+	if err != nil {
+		return deduped, err
+	}
+
+	srcFd := int(srcF.Fd())
+	srcSize := srcStat.Size()
+
+	fsStat := unix.Statfs_t{}
+	err = unix.Fstatfs(srcFd, &fsStat)
+	if err != nil {
+		span.RecordError(err)
+		return deduped, err
+	}
+
+	srcHash, err := filehash(srcF)
+	if err != nil {
+		return deduped, err
+	}
+
+	if fsStat.Bsize > srcSize { // for btrfs it means file in residing in not deduplicatable metadata
+		return deduped, nil
+	}
+
+	// fixed: was (srcSize % Bsize) * Bsize — the remainder scaled by the
+	// block size, not a length. Dedupe ranges must be block-aligned, so
+	// round the source size DOWN to a whole number of filesystem blocks.
+	blockSize := uint64((srcSize / fsStat.Bsize) * fsStat.Bsize)
+
+	span.SetAttributes(attribute.Int64("blocksize", int64(blockSize)))
+
+	rng := unix.FileDedupeRange{
+		Src_offset: 0,
+		Src_length: blockSize,
+		Info:       []unix.FileDedupeRangeInfo{},
+	}
+
+	for _, dst := range paths[1:] {
+		if ctx.Err() != nil {
+			return deduped, ctx.Err()
+		}
+
+		destF, err := os.OpenFile(dst, os.O_RDWR, os.ModePerm)
+		if err != nil {
+			return deduped, err
+		}
+
+		dstHash, err := filehash(destF)
+		if err != nil {
+			destF.Close()
+			return deduped, err
+		}
+
+		// cheap pre-check: skip files whose first block differs
+		if srcHash != dstHash {
+			destF.Close()
+			continue
+		}
+
+		// fixed: the defer used to be registered before the hash check, so
+		// skipped files were closed twice; defer only for candidates whose
+		// fd must stay open until the ioctl below
+		defer destF.Close()
+		rng.Info = append(rng.Info, unix.FileDedupeRangeInfo{
+			Dest_fd:     int64(destF.Fd()),
+			Dest_offset: 0,
+		})
+	}
+
+	if len(rng.Info) == 0 {
+		return deduped, nil
+	}
+
+	log.Info("found same files, deduping", "files", paths, "size", humanize.Bytes(uint64(srcStat.Size())))
+
+	if ctx.Err() != nil {
+		return deduped, ctx.Err()
+	}
+
+	rng.Src_offset = 0
+	for i := range rng.Info {
+		rng.Info[i].Dest_offset = 0
+	}
+
+	err = unix.IoctlFileDedupeRange(srcFd, &rng)
+	if err != nil {
+		return deduped, err
+	}
+
+	// Sum the per-destination byte counts reported by the kernel and reset
+	// the out-params for hygiene.
+	for i := range rng.Info {
+		deduped += rng.Info[i].Bytes_deduped
+
+		rng.Info[i].Status = 0
+		rng.Info[i].Bytes_deduped = 0
+	}
+
+	return deduped, nil
+}
+
+const compareBlockSize = 1024 * 128
+
+// filehash hashes the first compareBlockSize bytes of r as a cheap
+// equality pre-check for dedupe candidates.
+func filehash(r io.Reader) ([20]byte, error) {
+	buf := make([]byte, compareBlockSize)
+	// fixed: a bare r.Read may return fewer bytes than available without an
+	// error, making the hash depend on read chunking; io.ReadFull reads the
+	// whole prefix (EOF/ErrUnexpectedEOF just mean a short file)
+	n, err := io.ReadFull(r, buf)
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		return [20]byte{}, err
+	}
+
+	// fixed: hash exactly the bytes read, not the zero-padded buffer
+	return sha1.Sum(buf[:n]), nil
+}
+
+// ptr returns a pointer to a copy of v; convenience for taking the address
+// of a function result.
+func ptr[D any](v D) *D {
+	return &v
+}
diff --git a/src/host/service/queue.go b/src/host/service/queue.go
new file mode 100644
index 0000000..a67f286
--- /dev/null
+++ b/src/host/service/queue.go
@@ -0,0 +1,130 @@
+package service
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/uuid"
+	"git.kmsign.ru/royalcat/tstor/src/host/controller"
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/types/infohash"
+)
+
+// TorrentDownloadTask describes a download request: a whole torrent
+// (File == "") or a single file within it.
+type TorrentDownloadTask struct {
+	ID       uuid.UUID  // task identifier
+	InfoHash infohash.T // torrent to download
+	File     string     // optional in-torrent file path; empty means all files
+}
+
+func (s *Service) Download(ctx context.Context, task *TorrentDownloadTask) error {
+	t, ok := s.c.Torrent(task.InfoHash)
+	if !ok {
+		return fmt.Errorf("torrent with IH %s not found", task.InfoHash.HexString())
+	}
+
+	if task.File != "" {
+		var file *torrent.File
+		for _, tf := range t.Files() {
+			if tf.Path() == task.File {
+				file = tf
+				break
+			}
+		}
+
+		if file == nil {
+			return fmt.Errorf("file %s not found in torrent torrent with IH %s", task.File, task.InfoHash.HexString())
+		}
+
+		file.Download()
+		return nil
+	}
+
+	t.DownloadAll()
+	return nil
+}
+
+// func (s *Service) DownloadAndWait(ctx context.Context, task *TorrentDownloadTask) error {
+// 	t, ok := s.c.Torrent(task.InfoHash)
+// 	if !ok {
+// 		return fmt.Errorf("torrent with IH %s not found", task.InfoHash.HexString())
+// 	}
+
+// 	if task.File != "" {
+// 		var file *torrent.File
+// 		for _, tf := range t.Files() {
+// 			if tf.Path() == task.File {
+// 				file = tf
+// 				break
+// 			}
+// 		}
+
+// 		if file == nil {
+// 			return fmt.Errorf("file %s not found in torrent torrent with IH %s", task.File, task.InfoHash.HexString())
+// 		}
+
+// 		file.Download()
+// 		return waitPieceRange(ctx, t, file.BeginPieceIndex(), file.EndPieceIndex())
+
+// 	}
+
+// 	t.DownloadAll()
+// 	select {
+// 	case <-ctx.Done():
+// 		return ctx.Err()
+// 	case <-t.Complete.On():
+// 		return nil
+// 	}
+// }
+
+// func waitPieceRange(ctx context.Context, t *torrent.Torrent, start, end int) error {
+// 	for i := start; i < end; i++ {
+// 		timer := time.NewTimer(time.Millisecond)
+// 		for {
+// 			select {
+// 			case <-ctx.Done():
+// 				return ctx.Err()
+// 			case <-timer.C:
+// 				if t.PieceState(i).Complete {
+// 					continue
+// 				}
+// 			}
+
+// 		}
+// 	}
+// 	return nil
+// }
+
+// TorrentProgress is a point-in-time snapshot of a torrent's completed
+// bytes out of its total length.
+type TorrentProgress struct {
+	Torrent *controller.Torrent
+	Current int64 // bytes completed so far
+	Total   int64 // total torrent length
+}
+
+func (s *Service) DownloadProgress(ctx context.Context) (<-chan TorrentProgress, error) {
+	torrents, err := s.ListTorrents(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make(chan TorrentProgress, 1)
+	go func() {
+		defer close(out)
+		for _, t := range torrents {
+			sub := t.Torrent().SubscribePieceStateChanges()
+			go func() {
+				for range sub.Values {
+					out <- TorrentProgress{
+						Torrent: t,
+						Current: t.BytesCompleted(),
+						Total:   t.Length(),
+					}
+				}
+			}()
+			defer sub.Close()
+		}
+
+		<-ctx.Done()
+	}()
+
+	return out, nil
+}
diff --git a/src/host/service/service.go b/src/host/service/service.go
new file mode 100644
index 0000000..a5809c7
--- /dev/null
+++ b/src/host/service/service.go
@@ -0,0 +1,436 @@
+package service
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"log/slog"
+	"os"
+	"path/filepath"
+	"slices"
+	"strings"
+	"sync"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
+	"git.kmsign.ru/royalcat/tstor/pkg/rlog"
+	"git.kmsign.ru/royalcat/tstor/src/config"
+	"git.kmsign.ru/royalcat/tstor/src/host/controller"
+	"git.kmsign.ru/royalcat/tstor/src/host/datastorage"
+	"git.kmsign.ru/royalcat/tstor/src/host/store"
+	"git.kmsign.ru/royalcat/tstor/src/host/tkv"
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+	"go.uber.org/multierr"
+	"golang.org/x/exp/maps"
+
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/bencode"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/types"
+	"github.com/anacrolix/torrent/types/infohash"
+	"github.com/royalcat/kv"
+)
+
+var tracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/src/service")
+
+// DirAquire records which infohashes are known to be mutually compatible
+// for a single torrent directory name.
+// NOTE(review): "Aquire" is a misspelling of "Acquire"; kept because the
+// type is persisted via the kv store and referenced across this file.
+type DirAquire struct {
+	Name   string
+	Hashes []infohash.T
+}
+
+// Service is the central torrent orchestrator: it owns the client, the
+// exclusion and info-bytes stores, the data storage and the background
+// loading of torrent files from SourceDir.
+type Service struct {
+	c             *torrent.Client
+	excludedFiles *store.FilesMappings
+	infoBytes     *store.InfoBytes
+
+	// closed once the initial SourceDir scan finishes; readers block on it
+	torrentLoaded chan struct{}
+
+	loadMutex sync.Mutex // NOTE(review): initialized but never locked in this file — confirm intent
+
+	// stats *Stats
+	DefaultPriority types.PiecePriority
+	Storage         *datastorage.DataStorage
+	SourceDir       string
+
+	// per-directory-name compatibility records, persisted in a kv store
+	dirsAquire kv.Store[string, DirAquire]
+
+	log *slog.Logger
+}
+
+// NewService wires together the torrent client, its metadata stores and the
+// kv store used for directory-compatibility tracking, then starts the
+// initial torrent load from sourceDir in the background. torrentLoaded is
+// closed when that load finishes; ListTorrents/GetTorrent block on it.
+func NewService(sourceDir string, cfg config.TorrentClient, c *torrent.Client,
+	storage *datastorage.DataStorage, excludedFiles *store.FilesMappings, infoBytes *store.InfoBytes,
+) (*Service, error) {
+	dirsAcquire, err := tkv.New[string, DirAquire](cfg.MetadataFolder, "dir-acquire")
+	if err != nil {
+		return nil, err
+	}
+
+	s := &Service{
+		log:             slog.With("component", "torrent-service"),
+		c:               c,
+		DefaultPriority: types.PiecePriorityNone,
+		excludedFiles:   excludedFiles,
+		infoBytes:       infoBytes,
+		Storage:         storage,
+		SourceDir:       sourceDir,
+		torrentLoaded:   make(chan struct{}),
+		loadMutex:       sync.Mutex{},
+		dirsAquire:      dirsAcquire,
+
+		// stats:       newStats(), // TODO persistent
+	}
+
+	// Background load; errors are logged, not fatal — the service stays up
+	// even if some torrent files fail to load.
+	go func() {
+		ctx := context.Background()
+		err := s.loadTorrentFiles(ctx)
+		if err != nil {
+			s.log.Error("initial torrent load failed", "error", err)
+		}
+		close(s.torrentLoaded)
+	}()
+
+	return s, nil
+}
+
+// Compile-time check that NewTorrentFs satisfies vfs.FsFactory.
+var _ vfs.FsFactory = (*Service)(nil).NewTorrentFs
+
+// Close shuts down the torrent client (combining its errors) and then the
+// underlying data storage.
+func (s *Service) Close() error {
+	err := multierr.Combine(s.c.Close()...)
+	err = multierr.Append(err, s.Storage.Close())
+	return err
+}
+
+// LoadTorrent reads torrent metadata from f and returns the corresponding
+// client torrent, adding it to the client if it is not already present.
+// Info bytes are recovered from the info-bytes cache when the file itself
+// carries none. The input file is closed before returning.
+func (s *Service) LoadTorrent(ctx context.Context, f vfs.File) (*torrent.Torrent, error) {
+	ctx, span := tracer.Start(ctx, "LoadTorrent")
+	defer span.End()
+
+	log := rlog.FunctionLog(s.log, "LoadTorrent")
+
+	defer f.Close(ctx)
+
+	stat, err := f.Info()
+	if err != nil {
+		return nil, fmt.Errorf("call stat failed: %w", err)
+	}
+
+	span.SetAttributes(attribute.String("filename", stat.Name()))
+
+	mi, err := metainfo.Load(bufio.NewReader(ctxio.IoReader(ctx, f)))
+	if err != nil {
+		return nil, fmt.Errorf("loading torrent metadata from file %s, error: %w", stat.Name(), err)
+	}
+
+	t, ok := s.c.Torrent(mi.HashInfoBytes())
+	if !ok {
+
+		span.AddEvent("torrent not found, loading from file")
+		log.InfoContext(ctx, "torrent not found, loading from file")
+
+		spec, err := torrent.TorrentSpecFromMetaInfoErr(mi)
+		if err != nil {
+			return nil, fmt.Errorf("parse spec from metadata: %w", err)
+		}
+		infoBytes := spec.InfoBytes
+
+		if !isValidInfoHashBytes(infoBytes) {
+			log.WarnContext(ctx, "info loaded from spec not valid")
+			infoBytes = nil
+		}
+
+		if len(infoBytes) == 0 {
+			log.InfoContext(ctx, "no info loaded from file, try to load from cache")
+			infoBytes, err = s.infoBytes.GetBytes(spec.InfoHash)
+			if err != nil && err != store.ErrNotFound {
+				return nil, fmt.Errorf("get info bytes from database: %w", err)
+			}
+		}
+
+		t, _ = s.c.AddTorrentOpt(torrent.AddTorrentOpts{
+			InfoHash:  spec.InfoHash,
+			Storage:   s.Storage,
+			InfoBytes: infoBytes,
+			ChunkSize: spec.ChunkSize,
+		})
+		t.AllowDataDownload()
+		t.AllowDataUpload()
+
+		span.AddEvent("torrent added to client")
+
+		// Wait for the client to obtain the info dict (possibly from peers),
+		// then persist it so later loads don't depend on the network.
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		case <-t.GotInfo():
+			err := s.infoBytes.Set(t.InfoHash(), t.Metainfo())
+			if err != nil {
+				// fixed: slog is not printf-style — pass structured key/value
+				// pairs instead of a format string
+				s.log.Error("error setting info bytes for torrent", "name", t.Name(), "error", err.Error())
+			}
+		}
+		span.AddEvent("got info")
+
+		info := t.Info()
+		if info == nil {
+			return nil, fmt.Errorf("info is nil")
+		}
+
+		compatable, _, err := s.checkTorrentCompatable(ctx, spec.InfoHash, *info)
+		if err != nil {
+			return nil, err
+		}
+		if !compatable {
+			return nil, fmt.Errorf(
+				"torrent with name '%s' not compatable existing infohash: %s, new: %s",
+				t.Name(), t.InfoHash().HexString(), spec.InfoHash.HexString(),
+			)
+		}
+	}
+
+	return t, nil
+}
+
+// checkTorrentCompatable decides whether a torrent with infohash ih and the
+// given info can share the directory named info.BestName() with torrents
+// already known under that name. Newly proven-compatible hashes are
+// persisted in dirsAquire. tryLater is true when no torrent with that name
+// is loaded yet, so the decision cannot be made now.
+func (s *Service) checkTorrentCompatable(ctx context.Context, ih infohash.T, info metainfo.Info) (compatable bool, tryLater bool, err error) {
+	log := s.log.With("new-name", info.BestName(), "new-infohash", ih.String())
+
+	name := info.BestName()
+
+	aq, found, err := s.dirsAquire.Get(ctx, info.BestName())
+	if err != nil {
+		return false, false, err
+	}
+	if !found {
+		// First torrent to claim this directory name — record it.
+		err = s.dirsAquire.Set(ctx, name, DirAquire{
+			Name:   name,
+			Hashes: slices.Compact([]infohash.T{ih}),
+		})
+		if err != nil {
+			return false, false, err
+		}
+
+		log.Debug("acquiring was not found, so created")
+		return true, false, nil
+	}
+
+	if slices.Contains(aq.Hashes, ih) {
+		log.Debug("hash already know to be compatable")
+		return true, false, nil
+	}
+
+	// Compare against every loaded torrent that shares the directory name.
+	for _, existingTorrent := range s.c.Torrents() {
+		if existingTorrent.Name() != name || existingTorrent.InfoHash() == ih {
+			continue
+		}
+
+		existingInfo := existingTorrent.Info()
+
+		existingFiles := slices.Clone(existingInfo.Files)
+		newFiles := slices.Clone(info.Files)
+
+		if !s.checkTorrentFilesCompatable(aq, existingFiles, newFiles) {
+			return false, false, nil
+		}
+
+		aq.Hashes = slicesUnique(append(aq.Hashes, ih))
+		err = s.dirsAquire.Set(ctx, aq.Name, aq)
+		if err != nil {
+			// fixed: this branch is a kv persistence failure, not an
+			// incompatibility — log the actual error instead of a
+			// misleading "torrent not compatible"
+			log.Warn("failed to persist compatable hashes", "error", err.Error())
+			return false, false, err
+		}
+
+	}
+
+	if slices.Contains(aq.Hashes, ih) {
+		log.Debug("hash is compatable")
+		return true, false, nil
+	}
+
+	log.Debug("torrent with same name not found, try later")
+	return false, true, nil
+}
+
+// checkTorrentFilesCompatable reports whether two torrents' file lists can
+// coexist in one directory: either the lists are identical, or no shared
+// path has conflicting lengths. Both input slices are sorted in place.
+func (s *Service) checkTorrentFilesCompatable(aq DirAquire, existingFiles, newFiles []metainfo.FileInfo) bool {
+	log := s.log.With("name", aq.Name)
+
+	pathCmp := func(a, b metainfo.FileInfo) int {
+		return slices.Compare(a.BestPath(), b.BestPath())
+	}
+	slices.SortStableFunc(existingFiles, pathCmp)
+	slices.SortStableFunc(newFiles, pathCmp)
+
+	// torrents basically equals
+	if slices.EqualFunc(existingFiles, newFiles, func(fi1, fi2 metainfo.FileInfo) bool {
+		// fixed: compared fi1.BestPath() against itself, so any two torrents
+		// with matching file lengths were declared equal
+		return fi1.Length == fi2.Length && slices.Equal(fi1.BestPath(), fi2.BestPath())
+	}) {
+		return true
+	}
+
+	if len(newFiles) > len(existingFiles) {
+		type fileInfo struct {
+			Path   string
+			Length int64
+		}
+		mapInfo := func(fi metainfo.FileInfo) fileInfo {
+			return fileInfo{
+				Path:   strings.Join(fi.BestPath(), "/"),
+				Length: fi.Length,
+			}
+		}
+
+		existingFiles := apply(existingFiles, mapInfo)
+		newFiles := apply(newFiles, mapInfo)
+
+		for _, n := range newFiles {
+			if slices.Contains(existingFiles, n) {
+				continue
+			}
+
+			// A same-path file with a different length cannot coexist.
+			for _, e := range existingFiles {
+				if e.Path == n.Path && e.Length != n.Length {
+					// fixed: "new-length" previously logged e.Length twice
+					log.Warn("torrents not compatible, has files with different length", "path", n.Path, "existing-length", e.Length, "new-length", n.Length)
+					return false
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+// func (s *Service) getTorrentsByName(name string) []*torrent.Torrent {
+// 	out := []*torrent.Torrent{}
+// 	for _, t := range s.c.Torrents() {
+// 		if t.Name() == name {
+// 			out = append(out, t)
+// 		}
+// 	}
+// 	return out
+// }
+
+// isValidInfoHashBytes reports whether d bencode-decodes into a
+// metainfo.Info.
+// NOTE(review): despite the name this validates the structure of info
+// *bytes*, not an infohash — it does not verify any hash of d.
+func isValidInfoHashBytes(d []byte) bool {
+	var info metainfo.Info
+	err := bencode.Unmarshal(d, &info)
+	return err == nil
+}
+
+// NewTorrentFs loads the torrent described by f and wraps it in a virtual
+// filesystem named after the source file.
+// NOTE(review): both this defer and LoadTorrent close f — confirm a double
+// Close is harmless for the vfs.File implementations used here.
+func (s *Service) NewTorrentFs(ctx context.Context, f vfs.File) (vfs.Filesystem, error) {
+	defer f.Close(ctx)
+
+	info, err := f.Info()
+	if err != nil {
+		return nil, err
+	}
+
+	t, err := s.LoadTorrent(ctx, f)
+	if err != nil {
+		return nil, err
+	}
+
+	return vfs.NewTorrentFs(info.Name(), controller.NewTorrent(t, s.excludedFiles)), nil
+}
+
+// Stats returns service-level statistics.
+// NOTE(review): currently a stub — always returns an empty Stats value.
+func (s *Service) Stats() (*Stats, error) {
+	return &Stats{}, nil
+}
+
+// GetStats returns the torrent client's aggregated connection statistics.
+func (s *Service) GetStats() torrent.ConnStats {
+	return s.c.ConnStats()
+}
+
+const loadWorkers = 5
+
+// loadTorrentFiles walks SourceDir and feeds every *.torrent file to a pool
+// of loadWorkers workers, each adding it to the client via LoadTorrent.
+// Per-file failures are logged and skipped; only walk errors are returned.
+func (s *Service) loadTorrentFiles(ctx context.Context) error {
+	ctx, span := tracer.Start(ctx, "loadTorrentFiles", trace.WithAttributes(
+		attribute.Int("workers", loadWorkers),
+	))
+	defer span.End()
+
+	log := rlog.FunctionLog(s.log, "loadTorrentFiles")
+
+	loaderPaths := make(chan string)
+	wg := sync.WaitGroup{}
+
+	defer func() {
+		close(loaderPaths)
+		wg.Wait()
+	}()
+
+	loaderWorker := func() {
+		defer wg.Done()
+		for path := range loaderPaths {
+			file, err := vfs.NewLazyOsFile(path)
+			if err != nil {
+				log.Error("error opening torrent file", "filename", path, rlog.Err(err))
+				continue
+			}
+
+			_, err = s.LoadTorrent(ctx, file)
+			if err != nil {
+				s.log.Error("failed adding torrent", "error", err)
+			}
+
+			// fixed: close per file instead of defer — the worker lives for
+			// the whole walk, so deferred closes piled up until shutdown.
+			// NOTE(review): LoadTorrent also closes the file it is handed;
+			// confirm a second Close on LazyOsFile is harmless.
+			file.Close(ctx)
+		}
+	}
+
+	for range loadWorkers {
+		// fixed: wg.Add must happen before the goroutine starts; the old
+		// wg.Add(1) inside the worker raced with the deferred wg.Wait above
+		wg.Add(1)
+		go loaderWorker()
+	}
+
+	return filepath.Walk(s.SourceDir, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return fmt.Errorf("fs walk error: %w", err)
+		}
+
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
+		if strings.HasSuffix(path, ".torrent") {
+			loaderPaths <- path
+		}
+
+		return nil
+	})
+}
+
+// ListTorrents blocks until the initial torrent load has finished, then
+// returns a controller wrapper for every torrent known to the client.
+func (s *Service) ListTorrents(ctx context.Context) ([]*controller.Torrent, error) {
+	<-s.torrentLoaded
+
+	out := []*controller.Torrent{}
+	for _, v := range s.c.Torrents() {
+		out = append(out, controller.NewTorrent(v, s.excludedFiles))
+	}
+	return out, nil
+}
+
+// GetTorrent blocks until the initial torrent load has finished, then looks
+// up a torrent by its hex infohash.
+// NOTE(review): returns (nil, nil) when the torrent is absent — callers
+// must nil-check the result rather than rely on the error.
+func (s *Service) GetTorrent(infohashHex string) (*controller.Torrent, error) {
+	<-s.torrentLoaded
+
+	t, ok := s.c.Torrent(infohash.FromHexString(infohashHex))
+	if !ok {
+		return nil, nil
+	}
+
+	return controller.NewTorrent(t, s.excludedFiles), nil
+}
+
+// slicesUnique returns in with duplicates removed, preserving first-seen
+// order.
+func slicesUnique[S ~[]E, E comparable](in S) S {
+	// fixed: the previous implementation returned maps.Keys, whose order is
+	// randomized per run; keep output deterministic for stable persistence
+	// and comparisons
+	seen := make(map[E]struct{}, len(in))
+	out := make(S, 0, len(in))
+	for _, v := range in {
+		if _, ok := seen[v]; ok {
+			continue
+		}
+		seen[v] = struct{}{}
+		out = append(out, v)
+	}
+	return out
+}
+
+// apply maps f over in, returning the results in input order.
+func apply[I, O any](in []I, f func(e I) O) []O {
+	mapped := make([]O, 0, len(in))
+	for _, item := range in {
+		mapped = append(mapped, f(item))
+	}
+	return mapped
+}
diff --git a/src/host/torrent/stats.go b/src/host/service/stats.go
similarity index 99%
rename from src/host/torrent/stats.go
rename to src/host/service/stats.go
index 9b6f956..962e9e2 100644
--- a/src/host/torrent/stats.go
+++ b/src/host/service/stats.go
@@ -1,4 +1,4 @@
-package torrent
+package service
 
 import (
 	"errors"
@@ -79,7 +79,7 @@ type Stats struct {
 	gTime time.Time
 }
 
-func newStats() *Stats {
+func NewStats() *Stats {
 	return &Stats{
 		gTime:    time.Now(),
 		torrents: make(map[string]*torrent.Torrent),
diff --git a/src/host/storage.go b/src/host/storage.go
index 8372068..04af4b4 100644
--- a/src/host/storage.go
+++ b/src/host/storage.go
@@ -1,15 +1,11 @@
 package host
 
 import (
-	"git.kmsign.ru/royalcat/tstor/src/host/torrent"
+	"git.kmsign.ru/royalcat/tstor/src/host/service"
 	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
 )
 
-type storage struct {
-	factories map[string]vfs.FsFactory
-}
-
-func NewStorage(downPath string, tsrv *torrent.Service) vfs.Filesystem {
+func NewTorrentStorage(dataPath string, tsrv *service.Service) vfs.Filesystem {
 	factories := map[string]vfs.FsFactory{
 		".torrent": tsrv.NewTorrentFs,
 	}
@@ -19,102 +15,5 @@ func NewStorage(downPath string, tsrv *torrent.Service) vfs.Filesystem {
 		factories[k] = v
 	}
 
-	return vfs.NewResolveFS(downPath, factories)
+	return vfs.NewResolveFS(vfs.NewOsFs(dataPath), factories)
 }
-
-// func (s *storage) Clear() {
-// 	s.files = make(map[string]vfs.File)
-// }
-
-// func (s *storage) Has(path string) bool {
-// 	path = clean(path)
-
-// 	f := s.files[path]
-// 	if f != nil {
-// 		return true
-// 	}
-
-// 	if f, _ := s.getFileFromFs(path); f != nil {
-// 		return true
-// 	}
-
-// 	return false
-// }
-
-// func (s *storage) createParent(p string, f File) error {
-// 	base, filename := path.Split(p)
-// 	base = clean(base)
-
-// 	if err := s.Add(&Dir{}, base); err != nil {
-// 		return err
-// 	}
-
-// 	if _, ok := s.children[base]; !ok {
-// 		s.children[base] = make(map[string]File)
-// 	}
-
-// 	if filename != "" {
-// 		s.children[base][filename] = f
-// 	}
-
-// 	return nil
-// }
-
-// func (s *storage) Children(path string) (map[string]File, error) {
-// 	path = clean(path)
-
-// 	files, err := s.getDirFromFs(path)
-// 	if err == nil {
-// 		return files, nil
-// 	}
-
-// 	if !os.IsNotExist(err) {
-// 		return nil, err
-// 	}
-
-// 	l := make(map[string]File)
-// 	for n, f := range s.children[path] {
-// 		l[n] = f
-// 	}
-
-// 	return l, nil
-// }
-
-// func (s *storage) Get(path string) (File, error) {
-// 	path = clean(path)
-// 	if !s.Has(path) {
-// 		return nil, os.ErrNotExist
-// 	}
-
-// 	file, ok := s.files[path]
-// 	if ok {
-// 		return file, nil
-// 	}
-
-// 	return s.getFileFromFs(path)
-// }
-
-// func (s *storage) getFileFromFs(p string) (File, error) {
-// 	for fsp, fs := range s.filesystems {
-// 		if strings.HasPrefix(p, fsp) {
-// 			return fs.Open(separator + strings.TrimPrefix(p, fsp))
-// 		}
-// 	}
-
-// 	return nil, os.ErrNotExist
-// }
-
-// func (s *storage) getDirFromFs(p string) (map[string]File, error) {
-// 	for fsp, fs := range s.filesystems {
-// 		if strings.HasPrefix(p, fsp) {
-// 			path := strings.TrimPrefix(p, fsp)
-// 			return fs.ReadDir(path)
-// 		}
-// 	}
-
-// 	return nil, os.ErrNotExist
-// }
-
-// func clean(p string) string {
-// 	return path.Clean(separator + strings.ReplaceAll(p, "\\", "/"))
-// }
diff --git a/src/host/store/client.go b/src/host/store/client.go
new file mode 100644
index 0000000..904251d
--- /dev/null
+++ b/src/host/store/client.go
@@ -0,0 +1,64 @@
+package store
+
+import (
+	"log/slog"
+
+	"github.com/anacrolix/dht/v2/bep44"
+	tlog "github.com/anacrolix/log"
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/storage"
+
+	"git.kmsign.ru/royalcat/tstor/src/config"
+	dlog "git.kmsign.ru/royalcat/tstor/src/log"
+)
+
+// MOVE
+// NewClient builds a torrent.Client backed by the given storage, with
+// seeding enabled, slog-backed client logging, and debug logs on peer
+// connect/disconnect.
+// NOTE(review): the fis (bep44.Store) parameter is currently unused — the
+// DHT server configuration that would consume it is commented out below.
+func NewClient(st storage.ClientImpl, fis bep44.Store, cfg *config.TorrentClient, id [20]byte) (*torrent.Client, error) {
+	l := slog.With("component", "torrent-client")
+
+	// TODO download and upload limits
+	torrentCfg := torrent.NewDefaultClientConfig()
+	torrentCfg.PeerID = string(id[:])
+	torrentCfg.DefaultStorage = st
+	torrentCfg.AlwaysWantConns = true
+	torrentCfg.AcceptPeerConnections = true
+	torrentCfg.DisableAggressiveUpload = false
+
+	torrentCfg.Seed = true
+	// torrentCfg.DownloadRateLimiter = rate.NewLimiter(rate.Inf, 0)
+	// torrentCfg
+
+	// Route the anacrolix logger through the structured slog handler.
+	tl := tlog.NewLogger()
+	tl.SetHandlers(&dlog.Torrent{L: l})
+	torrentCfg.Logger = tl
+	torrentCfg.Callbacks.NewPeer = append(torrentCfg.Callbacks.NewPeer, func(p *torrent.Peer) {
+		l := l.With("ip", p.RemoteAddr.String())
+		if p.Torrent() != nil {
+			l = l.With("torrent", p.Torrent().Name())
+		}
+
+		l.Debug("new peer")
+
+	})
+
+	torrentCfg.Callbacks.PeerClosed = append(torrentCfg.Callbacks.PeerClosed, func(p *torrent.Peer) {
+		l := l.With("ip", p.RemoteAddr.String())
+		if p.Torrent() != nil {
+			l = l.With("torrent", p.Torrent().Name())
+		}
+
+		l.Debug("peer closed")
+	})
+
+	// torrentCfg.Callbacks.PeerConnClosed = append(torrentCfg.Callbacks.PeerConnClosed, func(c *torrent.PeerConn) {
+	// 	l.Debug("peer closed", "ip", c.RemoteAddr.String())
+	// })
+
+	// torrentCfg.ConfigureAnacrolixDhtServer = func(cfg *dht.ServerConfig) {
+	// 	cfg.Store = fis
+	// 	cfg.Exp = 2 * time.Hour
+	// 	cfg.NoSecurity = false
+	// }
+
+	return torrent.NewClient(torrentCfg)
+}
diff --git a/src/host/store/file-mappings.go b/src/host/store/file-mappings.go
new file mode 100644
index 0000000..2a1c5b2
--- /dev/null
+++ b/src/host/store/file-mappings.go
@@ -0,0 +1,57 @@
+package store
+
+import (
+	"context"
+	"errors"
+	"path/filepath"
+
+	"github.com/anacrolix/torrent"
+	"github.com/anacrolix/torrent/types/infohash"
+	"github.com/royalcat/kv"
+)
+
+// NewFileMappings opens the badger-backed file-mapping store under
+// metaDir/file-mappings.
+func NewFileMappings(metaDir string, storage TorrentFileDeleter) (*FilesMappings, error) {
+	str, err := kv.NewBadgerKVBytes[string, string](filepath.Join(metaDir, "file-mappings"))
+	if err != nil {
+		return nil, err
+	}
+
+	r := &FilesMappings{
+		mappings: str,
+		storage:  storage,
+	}
+
+	return r, nil
+}
+
+// FilesMappings stores per-torrent-file target mappings keyed by
+// "<infohash-hex>/<in-torrent path>"; an empty target marks the file as
+// excluded.
+type FilesMappings struct {
+	mappings kv.Store[string, string]
+	storage  TorrentFileDeleter // NOTE(review): stored but not used in this file
+}
+
+// ErrNotFound is returned by store lookups when a key is absent.
+var ErrNotFound = errors.New("not found")
+
+// TorrentFileDeleter deletes a torrent file's on-disk data.
+type TorrentFileDeleter interface {
+	DeleteFile(file *torrent.File) error
+}
+
+// fileKey builds the store key for a torrent file.
+func fileKey(file *torrent.File) string {
+	return file.Torrent().InfoHash().HexString() + "/" + file.Path()
+}
+
+// MapFile records target as the mapping for the given torrent file.
+func (r *FilesMappings) MapFile(ctx context.Context, file *torrent.File, target string) error {
+	return r.mappings.Set(ctx, fileKey(file), target)
+}
+
+// ExcludeFile marks the file as excluded by mapping it to the empty string.
+func (r *FilesMappings) ExcludeFile(ctx context.Context, file *torrent.File) error {
+	return r.mappings.Set(ctx, fileKey(file), "")
+}
+
+// FileMappings returns all mappings recorded for one torrent.
+// NOTE(review): result keys keep the "<infohash>/" prefix — confirm callers
+// expect full keys rather than bare file paths.
+func (r *FilesMappings) FileMappings(ctx context.Context, ih infohash.T) (map[string]string, error) {
+	out := map[string]string{}
+	err := r.mappings.RangeWithPrefix(ctx, ih.HexString(), func(k, v string) bool {
+		out[k] = v
+		return true
+	})
+	return out, err
+}
diff --git a/src/host/torrent/store.go b/src/host/store/fileitem.go
similarity index 94%
rename from src/host/torrent/store.go
rename to src/host/store/fileitem.go
index f762671..8192b67 100644
--- a/src/host/torrent/store.go
+++ b/src/host/store/fileitem.go
@@ -1,14 +1,14 @@
-package torrent
+package store
 
 import (
 	"bytes"
 	"encoding/gob"
+	"log/slog"
 	"time"
 
 	dlog "git.kmsign.ru/royalcat/tstor/src/log"
 	"github.com/anacrolix/dht/v2/bep44"
 	"github.com/dgraph-io/badger/v4"
-	"github.com/rs/zerolog/log"
 )
 
 var _ bep44.Store = &FileItemStore{}
@@ -19,7 +19,7 @@ type FileItemStore struct {
 }
 
 func NewFileItemStore(path string, itemsTTL time.Duration) (*FileItemStore, error) {
-	l := log.Logger.With().Str("component", "item-store").Logger()
+	l := slog.With("component", "item-store")
 
 	opts := badger.DefaultOptions(path).
 		WithLogger(&dlog.Badger{L: l}).
diff --git a/src/host/torrent/id.go b/src/host/store/id.go
similarity index 96%
rename from src/host/torrent/id.go
rename to src/host/store/id.go
index e18471c..d8b59a7 100644
--- a/src/host/torrent/id.go
+++ b/src/host/store/id.go
@@ -1,4 +1,4 @@
-package torrent
+package store
 
 import (
 	"crypto/rand"
diff --git a/src/host/store/info.go b/src/host/store/info.go
new file mode 100644
index 0000000..205a60c
--- /dev/null
+++ b/src/host/store/info.go
@@ -0,0 +1,90 @@
+package store
+
+import (
+	"bytes"
+	"fmt"
+	"log/slog"
+	"path/filepath"
+
+	dlog "git.kmsign.ru/royalcat/tstor/src/log"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/types/infohash"
+	"github.com/dgraph-io/badger/v4"
+)
+
+type InfoBytes struct {
+	db *badger.DB
+}
+
+func NewInfoBytes(metaDir string) (*InfoBytes, error) {
+	l := slog.With("component", "badger", "db", "info-bytes")
+
+	opts := badger.
+		DefaultOptions(filepath.Join(metaDir, "infobytes")).
+		WithLogger(&dlog.Badger{L: l})
+	db, err := badger.Open(opts)
+	if err != nil {
+		return nil, err
+	}
+	return &InfoBytes{db}, nil
+}
+
+func (k *InfoBytes) GetBytes(ih infohash.T) ([]byte, error) { // raw metainfo bytes keyed by infohash; ErrNotFound when absent
+	var data []byte
+	err := k.db.View(func(tx *badger.Txn) error {
+		item, err := tx.Get(ih.Bytes())
+		if err != nil {
+			if err == badger.ErrKeyNotFound {
+				return ErrNotFound // translate badger's sentinel into the package-level one
+			}
+
+			return fmt.Errorf("error getting value: %w", err)
+		}
+
+		data, err = item.ValueCopy(data) // copy out: item bytes are only valid inside the txn
+		return err
+	})
+	return data, err
+}
+
+func (k *InfoBytes) Get(ih infohash.T) (*metainfo.MetaInfo, error) {
+	data, err := k.GetBytes(ih)
+	if err != nil {
+		return nil, err
+	}
+
+	return metainfo.Load(bytes.NewReader(data))
+}
+
+func (me *InfoBytes) SetBytes(ih infohash.T, data []byte) error { // write-if-changed to avoid needless disk churn
+	return me.db.Update(func(txn *badger.Txn) error {
+		item, err := txn.Get(ih.Bytes())
+		if err != nil {
+			if err == badger.ErrKeyNotFound {
+				return txn.Set(ih.Bytes(), data) // first write for this infohash
+			}
+			return err
+		}
+
+		return item.Value(func(val []byte) error {
+			if !bytes.Equal(val, data) {
+				return txn.Set(ih.Bytes(), data) // overwrite only when stored bytes differ
+			}
+			return nil
+		})
+	})
+}
+
+func (me *InfoBytes) Set(ih infohash.T, info metainfo.MetaInfo) error {
+	return me.SetBytes(ih, info.InfoBytes)
+}
+
+func (k *InfoBytes) Delete(ih infohash.T) error {
+	return k.db.Update(func(txn *badger.Txn) error {
+		return txn.Delete(ih.Bytes())
+	})
+}
+
+func (me *InfoBytes) Close() error {
+	return me.db.Close()
+}
diff --git a/src/host/store/piece-completion.go b/src/host/store/piece-completion.go
new file mode 100644
index 0000000..61011bb
--- /dev/null
+++ b/src/host/store/piece-completion.go
@@ -0,0 +1,134 @@
+package store
+
+import (
+	"encoding/binary"
+	"fmt"
+	"log/slog"
+
+	dlog "git.kmsign.ru/royalcat/tstor/src/log"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/storage"
+	"github.com/dgraph-io/badger/v4"
+)
+
+type PieceCompletionState byte
+
+const (
+	PieceNotComplete PieceCompletionState = 0
+	PieceComplete    PieceCompletionState = 1<<8 - 1
+)
+
+func pieceCompletionState(i bool) PieceCompletionState {
+	if i {
+		return PieceComplete
+	} else {
+		return PieceNotComplete
+	}
+}
+
+type badgerPieceCompletion struct {
+	db *badger.DB
+}
+
+var _ storage.PieceCompletion = (*badgerPieceCompletion)(nil)
+
+func NewBadgerPieceCompletion(dir string) (storage.PieceCompletion, error) {
+	l := slog.With("component", "badger", "db", "piece-completion")
+
+	opts := badger.
+		DefaultOptions(dir).
+		WithLogger(&dlog.Badger{L: l})
+	db, err := badger.Open(opts)
+	if err != nil {
+		return nil, err
+	}
+	return &badgerPieceCompletion{db}, nil
+}
+
+func pkToBytes(pk metainfo.PieceKey) []byte { // key = <infohash bytes> + big-endian uint32 piece index
+	key := make([]byte, 0, len(pk.InfoHash.Bytes())+4) // room for hash plus the 4-byte index
+	key = append(key, pk.InfoHash.Bytes()...)
+	key = binary.BigEndian.AppendUint32(key, uint32(pk.Index)) // fix: AppendUint32 returns the grown slice; result was discarded, so the index was never encoded
+	return key
+}
+
+func (k *badgerPieceCompletion) Get(pk metainfo.PieceKey) (storage.Completion, error) { // missing key reports Completion{Ok: false}
+	completion := storage.Completion{
+		Ok: true,
+	}
+	err := k.db.View(func(tx *badger.Txn) error {
+		item, err := tx.Get(pkToBytes(pk))
+		if err != nil {
+			if err == badger.ErrKeyNotFound {
+				completion.Ok = false // unknown piece: not an error, just "no data"
+				return nil
+			}
+
+			return fmt.Errorf("getting value: %w", err)
+		}
+
+		valCopy, err := item.ValueCopy(nil)
+		if err != nil {
+			return fmt.Errorf("copying value: %w", err)
+		}
+		compl := PieceCompletionState(valCopy[0]) // NOTE(review): panics if a stored value is empty — confirm values are always exactly 1 byte (Set writes 1 byte)
+
+		completion.Ok = true
+		switch compl {
+		case PieceComplete:
+			completion.Complete = true
+		case PieceNotComplete:
+			completion.Complete = false
+		}
+
+		return nil
+	})
+	return completion, err
+}
+
+func (me *badgerPieceCompletion) Set(pk metainfo.PieceKey, b bool) error { // pointer receiver for consistency with Get/Delete/Close; *badgerPieceCompletion still satisfies storage.PieceCompletion
+	if c, err := me.Get(pk); err == nil && c.Ok && c.Complete == b {
+		return nil // state already recorded; skip the redundant write
+	}
+
+	return me.db.Update(func(txn *badger.Txn) error {
+		return txn.Set(pkToBytes(pk), []byte{byte(pieceCompletionState(b))})
+	})
+}
+
+func (k *badgerPieceCompletion) Delete(key string) error {
+	return k.db.Update(
+		func(txn *badger.Txn) error {
+			return txn.Delete([]byte(key))
+		})
+}
+
+func (me *badgerPieceCompletion) Close() error {
+	return me.db.Close()
+}
+
+type badgerSlog struct {
+	slog *slog.Logger
+}
+
+// Debugf implements badger.Logger.
+func (log badgerSlog) Debugf(f string, a ...interface{}) {
+	log.slog.Debug(fmt.Sprintf(f, a...)) // fix: slog treats variadic args as key-value attrs; printf verbs must be expanded first
+}
+
+// Errorf implements badger.Logger.
+func (log badgerSlog) Errorf(f string, a ...interface{}) {
+	log.slog.Error(fmt.Sprintf(f, a...))
+}
+
+// Infof implements badger.Logger.
+func (log badgerSlog) Infof(f string, a ...interface{}) {
+	log.slog.Info(fmt.Sprintf(f, a...))
+}
+
+// Warningf implements badger.Logger.
+func (log badgerSlog) Warningf(f string, a ...interface{}) {
+	log.slog.Warn(fmt.Sprintf(f, a...))
+}
+
+var _ badger.Logger = (*badgerSlog)(nil)
diff --git a/src/host/store/stats.go b/src/host/store/stats.go
new file mode 100644
index 0000000..0986566
--- /dev/null
+++ b/src/host/store/stats.go
@@ -0,0 +1,106 @@
+package store
+
+import (
+	"context"
+	"encoding/json"
+	"path"
+	"time"
+
+	"github.com/anacrolix/torrent/types/infohash"
+	"github.com/dgraph-io/badger/v4"
+	"github.com/dgraph-io/ristretto/z"
+)
+
+func NewStatsHistory(metaDir string, lifetime time.Duration) (*StatsHistory, error) { // managed-ts badger DB; versions older than `lifetime` become discardable
+	db, err := badger.OpenManaged(
+		badger.
+			DefaultOptions(path.Join(metaDir, "stats-history")).
+			WithNumVersionsToKeep(int(^uint(0) >> 1)), // Infinity
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		for n := range time.NewTicker(lifetime / 2).C { // fix: NewTimer fires once, so the loop ran a single iteration then blocked forever; Ticker fires periodically
+			db.SetDiscardTs(uint64(n.Add(-lifetime).Unix()))
+		}
+	}()
+	r := &StatsHistory{
+		db: db,
+	}
+
+	return r, nil
+}
+
+type StatsHistory struct {
+	db *badger.DB
+}
+
+type TorrentStat struct {
+	Name            string        `json:"name"`
+	Hash            string        `json:"hash"`
+	DownloadedBytes int64         `json:"downloadedBytes"`
+	UploadedBytes   int64         `json:"uploadedBytes"`
+	Peers           int           `json:"peers"`
+	Seeders         int           `json:"seeders"`
+	PieceChunks     []*PieceChunk `json:"pieceChunks"`
+	TotalPieces     int           `json:"totalPieces"`
+	PieceSize       int64         `json:"pieceSize"`
+}
+
+type PieceChunk struct {
+	Status    PieceStatus `json:"status"`
+	NumPieces int         `json:"numPieces"`
+}
+
+type PieceStatus string
+
+const (
+	Checking PieceStatus = "H"
+	Partial  PieceStatus = "P"
+	Complete PieceStatus = "C"
+	Waiting  PieceStatus = "W"
+	Error    PieceStatus = "?"
+)
+
+func (r *StatsHistory) AddStat(ih infohash.T, stat TorrentStat) error {
+	data, err := json.Marshal(stat)
+	if err != nil {
+		return err
+	}
+
+	return r.db.Update(func(txn *badger.Txn) error {
+		return txn.Set(ih.Bytes(), data)
+	})
+}
+
+func (r *StatsHistory) ReadStatsHistory(ctx context.Context, since time.Time) (GlobalTorrentStats, error) { // sums download/upload bytes over versions newer than `since`
+	var stats GlobalTorrentStats
+	stream := r.db.NewStream()
+	stream.SinceTs = uint64(since.Unix()) // managed-mode timestamps are unix seconds (see AddStat writes)
+
+	var tstat TorrentStat
+	stream.Send = func(buf *z.Buffer) error { // NOTE(review): badger streams deliver a KVList-encoded buffer, not a raw value — verify json.Unmarshal of buf.Bytes() ever succeeds
+		err := json.Unmarshal(buf.Bytes(), &tstat)
+		if err != nil {
+			return err
+		}
+
+		stats.DownloadedBytes += tstat.DownloadedBytes
+		stats.UploadedBytes += tstat.UploadedBytes
+
+		return nil
+	}
+
+	err := stream.Orchestrate(ctx)
+	if err != nil {
+		return stats, err
+	}
+	return stats, nil
+}
+
+type GlobalTorrentStats struct {
+	DownloadedBytes int64 `json:"downloadedBytes"`
+	UploadedBytes   int64 `json:"uploadedBytes"`
+}
diff --git a/src/host/tkv/new.go b/src/host/tkv/new.go
new file mode 100644
index 0000000..3f740ea
--- /dev/null
+++ b/src/host/tkv/new.go
@@ -0,0 +1,21 @@
+package tkv
+
+import (
+	"path"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/kvtrace"
+	"github.com/royalcat/kv"
+	"go.opentelemetry.io/otel/attribute"
+)
+
+func New[K kv.Bytes, V any](dbdir, name string) (store kv.Store[K, V], err error) { // badger-backed store at <dbdir>/<name>, wrapped with OTel tracing
+	dir := path.Join(dbdir, name)
+	store, err = kv.NewBadgerKV[K, V](dir)
+	if err != nil {
+		return nil, err
+	}
+
+	store = kvtrace.WrapTracing(store, attribute.String("collection", name), attribute.String("database", "badger"))
+
+	return store, err // err is nil here; returned for signature symmetry
+}
diff --git a/src/host/torrent/client.go b/src/host/torrent/client.go
deleted file mode 100644
index f0936d7..0000000
--- a/src/host/torrent/client.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package torrent
-
-import (
-	"time"
-
-	"github.com/anacrolix/dht/v2"
-	"github.com/anacrolix/dht/v2/bep44"
-	tlog "github.com/anacrolix/log"
-	"github.com/anacrolix/torrent"
-	"github.com/anacrolix/torrent/storage"
-	"github.com/rs/zerolog/log"
-
-	"git.kmsign.ru/royalcat/tstor/src/config"
-	dlog "git.kmsign.ru/royalcat/tstor/src/log"
-)
-
-func NewClient(st storage.ClientImpl, fis bep44.Store, cfg *config.TorrentClient, id [20]byte) (*torrent.Client, error) {
-	// TODO download and upload limits
-	torrentCfg := torrent.NewDefaultClientConfig()
-	torrentCfg.Seed = true
-	torrentCfg.PeerID = string(id[:])
-	torrentCfg.DefaultStorage = st
-	torrentCfg.DisableIPv6 = cfg.DisableIPv6
-
-	l := log.Logger.With().Str("component", "torrent-client").Logger()
-
-	tl := tlog.NewLogger()
-	tl.SetHandlers(&dlog.Torrent{L: l})
-	torrentCfg.Logger = tl
-
-	torrentCfg.ConfigureAnacrolixDhtServer = func(cfg *dht.ServerConfig) {
-		cfg.Store = fis
-		cfg.Exp = 2 * time.Hour
-		cfg.NoSecurity = false
-	}
-
-	return torrent.NewClient(torrentCfg)
-}
diff --git a/src/host/torrent/service.go b/src/host/torrent/service.go
deleted file mode 100644
index de87544..0000000
--- a/src/host/torrent/service.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package torrent
-
-import (
-	"sync"
-
-	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
-	"github.com/anacrolix/torrent"
-	"github.com/anacrolix/torrent/metainfo"
-	"github.com/rs/zerolog"
-	"github.com/rs/zerolog/log"
-)
-
-type Service struct {
-	c *torrent.Client
-
-	// stats *Stats
-
-	mu sync.Mutex
-
-	log                     zerolog.Logger
-	addTimeout, readTimeout int
-}
-
-func NewService(c *torrent.Client, addTimeout, readTimeout int) *Service {
-	l := log.Logger.With().Str("component", "torrent-service").Logger()
-	return &Service{
-		log: l,
-		c:   c,
-		// stats:       newStats(), // TODO persistent
-		addTimeout:  addTimeout,
-		readTimeout: readTimeout,
-	}
-}
-
-var _ vfs.FsFactory = (*Service)(nil).NewTorrentFs
-
-func (s *Service) NewTorrentFs(f vfs.File) (vfs.Filesystem, error) {
-	defer f.Close()
-
-	mi, err := metainfo.Load(f)
-	if err != nil {
-		return nil, err
-	}
-	t, err := s.c.AddTorrent(mi)
-	if err != nil {
-		return nil, err
-	}
-	<-t.GotInfo()
-	t.AllowDataDownload()
-	return vfs.NewTorrentFs(t, s.readTimeout), nil
-}
-
-func (s *Service) Stats() (*Stats, error) {
-	return &Stats{}, nil
-}
-
-// func (s *Service) Load() (map[string]vfs.Filesystem, error) {
-// 	// Load from config
-// 	s.log.Info().Msg("adding torrents from configuration")
-// 	for _, loader := range s.loaders {
-// 		if err := s.load(loader); err != nil {
-// 			return nil, err
-// 		}
-// 	}
-
-// 	// Load from DB
-// 	s.log.Info().Msg("adding torrents from database")
-// 	return s.fss, s.load(s.db)
-// }
-
-// func (s *Service) load(l loader.Loader) error {
-// 	list, err := l.ListMagnets()
-// 	if err != nil {
-// 		return err
-// 	}
-// 	for r, ms := range list {
-// 		s.addRoute(r)
-// 		for _, m := range ms {
-// 			if err := s.addMagnet(r, m); err != nil {
-// 				return err
-// 			}
-// 		}
-// 	}
-
-// 	list, err = l.ListTorrentPaths()
-// 	if err != nil {
-// 		return err
-// 	}
-// 	for r, ms := range list {
-// 		s.addRoute(r)
-// 		for _, p := range ms {
-// 			if err := s.addTorrentPath(r, p); err != nil {
-// 				return err
-// 			}
-// 		}
-// 	}
-
-// 	return nil
-// }
-
-// func (s *Service) AddMagnet(r, m string) error {
-// 	if err := s.addMagnet(r, m); err != nil {
-// 		return err
-// 	}
-
-// 	// Add to db
-// 	return s.db.AddMagnet(r, m)
-// }
-
-// func (s *Service) addTorrentPath(r, p string) error {
-// 	// Add to client
-// 	t, err := s.c.AddTorrentFromFile(p)
-// 	if err != nil {
-// 		return err
-// 	}
-
-// 	return s.addTorrent(r, t)
-// }
-
-// func (s *Service) addMagnet(r, m string) error {
-// 	// Add to client
-// 	t, err := s.c.AddMagnet(m)
-// 	if err != nil {
-// 		return err
-// 	}
-
-// 	return s.addTorrent(r, t)
-
-// }
-
-// func (s *Service) addRoute(r string) {
-// 	s.s.AddRoute(r)
-
-// 	// Add to filesystems
-// 	folder := path.Join("/", r)
-// 	s.mu.Lock()
-// 	defer s.mu.Unlock()
-// 	_, ok := s.fss[folder]
-// 	if !ok {
-// 		s.fss[folder] = vfs.NewTorrentFs(s.readTimeout)
-// 	}
-// }
-
-// func (s *Service) addTorrent(r string, t *torrent.Torrent) error {
-// 	// only get info if name is not available
-// 	if t.Info() == nil {
-// 		s.log.Info().Str("hash", t.InfoHash().String()).Msg("getting torrent info")
-// 		select {
-// 		case <-time.After(time.Duration(s.addTimeout) * time.Second):
-// 			s.log.Error().Str("hash", t.InfoHash().String()).Msg("timeout getting torrent info")
-// 			return errors.New("timeout getting torrent info")
-// 		case <-t.GotInfo():
-// 			s.log.Info().Str("hash", t.InfoHash().String()).Msg("obtained torrent info")
-// 		}
-
-// 	}
-
-// 	// Add to stats
-// 	s.s.Add(r, t)
-
-// 	// Add to filesystems
-// 	folder := path.Join("/", r)
-// 	s.mu.Lock()
-// 	defer s.mu.Unlock()
-
-// 	tfs, ok := s.fss[folder].(*vfs.TorrentFs)
-// 	if !ok {
-// 		return errors.New("error adding torrent to filesystem")
-// 	}
-
-// 	tfs.AddTorrent(t)
-// 	s.log.Info().Str("name", t.Info().Name).Str("route", r).Msg("torrent added")
-
-// 	return nil
-// }
-
-// func (s *Service) RemoveFromHash(r, h string) error {
-// 	// Remove from db
-// 	deleted, err := s.db.RemoveFromHash(r, h)
-// 	if err != nil {
-// 		return err
-// 	}
-
-// 	if !deleted {
-// 		return fmt.Errorf("element with hash %v on route %v cannot be removed", h, r)
-// 	}
-
-// 	// Remove from stats
-// 	s.s.Del(r, h)
-
-// 	// Remove from fs
-// 	folder := path.Join("/", r)
-
-// 	tfs, ok := s.fss[folder].(*vfs.TorrentFs)
-// 	if !ok {
-// 		return errors.New("error removing torrent from filesystem")
-// 	}
-
-// 	tfs.RemoveTorrent(h)
-
-// 	// Remove from client
-// 	var mh metainfo.Hash
-// 	if err := mh.FromHexString(h); err != nil {
-// 		return err
-// 	}
-
-// 	t, ok := s.c.Torrent(metainfo.NewHashFromHex(h))
-// 	if ok {
-// 		t.Drop()
-// 	}
-
-// 	return nil
-// }
diff --git a/src/host/vfs/archive.go b/src/host/vfs/archive.go
index 3963689..4717ec8 100644
--- a/src/host/vfs/archive.go
+++ b/src/host/vfs/archive.go
@@ -2,200 +2,356 @@ package vfs
 
 import (
 	"archive/zip"
+	"context"
+	"fmt"
 	"io"
-	"os"
-	"path/filepath"
+	"io/fs"
+	"path"
+	"strings"
 	"sync"
+	"time"
 
-	"git.kmsign.ru/royalcat/tstor/src/iio"
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
 	"github.com/bodgit/sevenzip"
 	"github.com/nwaples/rardecode/v2"
 )
 
 var ArchiveFactories = map[string]FsFactory{
-	".zip": func(f File) (Filesystem, error) {
-		return NewArchive(f, f.Size(), ZipLoader), nil
+	".zip": func(ctx context.Context, f File) (Filesystem, error) {
+		stat, err := f.Info()
+		if err != nil {
+			return nil, err
+		}
+		return NewArchive(ctx, stat.Name(), f, stat.Size(), ZipLoader)
 	},
-	".rar": func(f File) (Filesystem, error) {
-		return NewArchive(f, f.Size(), RarLoader), nil
+	".rar": func(ctx context.Context, f File) (Filesystem, error) {
+		stat, err := f.Info()
+		if err != nil {
+			return nil, err
+		}
+		return NewArchive(ctx, stat.Name(), f, stat.Size(), RarLoader)
 	},
-	".7z": func(f File) (Filesystem, error) {
-		return NewArchive(f, f.Size(), SevenZipLoader), nil
+	".7z": func(ctx context.Context, f File) (Filesystem, error) {
+		stat, err := f.Info()
+		if err != nil {
+			return nil, err
+		}
+		return NewArchive(ctx, stat.Name(), f, stat.Size(), SevenZipLoader)
 	},
 }
 
-type ArchiveLoader func(r iio.Reader, size int64) (map[string]*archiveFile, error)
+type archiveLoader func(ctx context.Context, r ctxio.ReaderAt, size int64) (map[string]*archiveFile, error)
 
-var _ Filesystem = &archive{}
+var _ Filesystem = &ArchiveFS{}
 
-type archive struct {
-	r iio.Reader
+type ArchiveFS struct {
+	name string
 
 	size int64
 
-	files func() (map[string]*archiveFile, error)
+	files map[string]File
 }
 
-func NewArchive(r iio.Reader, size int64, loader ArchiveLoader) *archive {
-	return &archive{
-		r:    r,
-		size: size,
-		files: sync.OnceValues(func() (map[string]*archiveFile, error) {
-			return loader(r, size)
-		}),
-	}
+// ModTime implements Filesystem.
+func (a *ArchiveFS) ModTime() time.Time {
+	return time.Time{}
 }
 
-func (a *archive) Open(filename string) (File, error) {
-	files, err := a.files()
-	if err != nil {
-		return nil, err
-	}
-
-	return getFile(files, filename)
+// Mode implements Filesystem.
+func (a *ArchiveFS) Mode() fs.FileMode {
+	return fs.ModeDir
 }
 
-func (fs *archive) ReadDir(path string) (map[string]File, error) {
-	files, err := fs.files()
-	if err != nil {
-		return nil, err
-	}
-
-	return listFilesInDir(files, path)
+// Size implements Filesystem.
+func (a *ArchiveFS) Size() int64 {
+	return int64(a.size)
 }
 
-var _ File = &archiveFile{}
-
-func NewArchiveFile(readerFunc func() (iio.Reader, error), len int64) *archiveFile {
-	return &archiveFile{
-		readerFunc: readerFunc,
-		len:        len,
-	}
-}
-
-type archiveFile struct {
-	readerFunc func() (iio.Reader, error)
-	reader     iio.Reader
-	len        int64
-}
-
-func (d *archiveFile) load() error {
-	if d.reader != nil {
-		return nil
-	}
-	r, err := d.readerFunc()
-	if err != nil {
-		return err
-	}
-
-	d.reader = r
-
+// Sys implements Filesystem.
+func (a *ArchiveFS) Sys() any {
 	return nil
 }
 
+// FsName implements Filesystem.
+func (a *ArchiveFS) FsName() string {
+	return "archivefs"
+}
+
+func NewArchive(ctx context.Context, name string, r ctxio.ReaderAt, size int64, loader archiveLoader) (*ArchiveFS, error) {
+	archiveFiles, err := loader(ctx, r, size)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO make optional
+	singleDir := true
+	for k := range archiveFiles {
+		if !strings.HasPrefix(k, "/"+name+"/") {
+			singleDir = false
+			break
+		}
+	}
+
+	files := make(map[string]File, len(archiveFiles))
+	for k, v := range archiveFiles {
+		// TODO make optional
+		if strings.Contains(k, "/__MACOSX/") {
+			continue
+		}
+
+		if singleDir {
+			k, _ = strings.CutPrefix(k, "/"+name)
+		}
+
+		files[k] = v
+	}
+
+	// FIXME
+	files["/.forcegallery"] = NewMemoryFile(".forcegallery", []byte{})
+
+	return &ArchiveFS{
+		name:  name,
+		size:  size,
+		files: files,
+	}, nil
+}
+
+// Unlink implements Filesystem.
+func (a *ArchiveFS) Unlink(ctx context.Context, filename string) error {
+	return ErrNotImplemented
+}
+
+func (a *ArchiveFS) Open(ctx context.Context, filename string) (File, error) {
+	return getFile(a.files, filename)
+}
+
+func (a *ArchiveFS) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
+	return listDirFromFiles(a.files, path)
+}
+
+// Stat implements Filesystem.
+func (afs *ArchiveFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
+
+	if file, ok := afs.files[filename]; ok {
+		return file.Info()
+	}
+
+	dirPrefix := strings.TrimSuffix(filename, "/") + "/" // fix: require a path-segment boundary so "/foo" does not match "/foobar"
+	for p := range afs.files {
+		if strings.HasPrefix(p, dirPrefix) {
+			return newDirInfo(path.Base(filename)), nil
+		}
+	}
+	return nil, ErrNotExist
+}
+
+// Info implements Filesystem.
+func (a *ArchiveFS) Info() (fs.FileInfo, error) {
+	return a, nil
+}
+
+// IsDir implements Filesystem.
+func (a *ArchiveFS) IsDir() bool {
+	return true
+}
+
+// Name implements Filesystem.
+func (a *ArchiveFS) Name() string {
+	return a.name
+}
+
+// Type implements Filesystem.
+func (a *ArchiveFS) Type() fs.FileMode {
+	return fs.ModeDir
+}
+
+var _ File = (*archiveFile)(nil)
+
+func NewArchiveFile(name string, size int64, af archiveFileReaderFactory) *archiveFile {
+	return &archiveFile{
+		name: name,
+		size: size,
+		af:   af,
+
+		buffer: ctxio.NewFileBuffer(nil),
+	}
+}
+
+const readahead = 1024 * 16
+
+type archiveFile struct {
+	name string
+	size int64
+	af   archiveFileReaderFactory
+
+	m sync.Mutex
+
+	offset int64
+	readen int64
+	buffer *ctxio.FileBuffer
+}
+
+// Name implements File.
+func (d *archiveFile) Name() string {
+	return d.name
+}
+
+// Type implements File.
+func (d *archiveFile) Type() fs.FileMode {
+	return roMode
+}
+
+func (d *archiveFile) Info() (fs.FileInfo, error) {
+	return newFileInfo(d.name, d.size), nil
+}
+
 func (d *archiveFile) Size() int64 {
-	return d.len
+	return d.size
 }
 
 func (d *archiveFile) IsDir() bool {
 	return false
 }
 
-func (d *archiveFile) Close() (err error) {
-	if d.reader != nil {
-		err = d.reader.Close()
-		d.reader = nil
-	}
-
-	return
+func (d *archiveFile) Close(ctx context.Context) error {
+	return d.buffer.Close(ctx)
 }
 
-func (d *archiveFile) Read(p []byte) (n int, err error) {
-	if err := d.load(); err != nil {
-		return 0, err
+func (d *archiveFile) loadMore(ctx context.Context, to int64) error {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	if to < d.readen {
+		return nil
 	}
 
-	return d.reader.Read(p)
-}
-
-func (d *archiveFile) ReadAt(p []byte, off int64) (n int, err error) {
-	if err := d.load(); err != nil {
-		return 0, err
+	reader, err := d.af(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to get file reader: %w", err)
+	}
+	_, err = d.buffer.Seek(0, io.SeekStart)
+	if err != nil {
+		return fmt.Errorf("failed to seek to start of the file: %w", err)
+	}
+	d.readen, err = ctxio.CopyN(ctx, d.buffer, ctxio.WrapIoReader(reader), to+readahead)
+	if err != nil && err != io.EOF {
+		return fmt.Errorf("error copying from archive file reader: %w", err)
 	}
 
-	return d.reader.ReadAt(p, off)
+	return nil
 }
 
-var _ ArchiveLoader = ZipLoader
+func (d *archiveFile) Read(ctx context.Context, p []byte) (n int, err error) {
+	err = d.loadMore(ctx, d.offset+int64(len(p)))
+	if err != nil {
+		return 0, fmt.Errorf("failed to load more from archive file: %w", err)
+	}
+	n, err = d.buffer.Read(ctx, p)
+	if err != nil && err != io.EOF {
+		return n, fmt.Errorf("failed to read from buffer: %w", err)
+	}
+	return n, nil
+}
+
+func (d *archiveFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	err = d.loadMore(ctx, off+int64(len(p)))
+	if err != nil {
+		return 0, fmt.Errorf("failed to load more from archive file: %w", err)
+	}
+	n, err = d.buffer.ReadAt(ctx, p, off)
+	if err != nil && err != io.EOF {
+		return n, fmt.Errorf("failed to read from buffer: %w", err)
+	}
+	return n, nil
+}
+
+type archiveFileReaderFactory func(ctx context.Context) (io.ReadCloser, error)
+
+var _ archiveLoader = ZipLoader
+
+func ZipLoader(ctx context.Context, ctxreader ctxio.ReaderAt, size int64) (map[string]*archiveFile, error) {
+	reader := ctxio.IoReaderAt(ctx, ctxreader)
 
-func ZipLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {
 	zr, err := zip.NewReader(reader, size)
 	if err != nil {
 		return nil, err
 	}
 
 	out := make(map[string]*archiveFile)
-	for _, f := range zr.File {
-		f := f
-		if f.FileInfo().IsDir() {
+	for i := range zr.File {
+		zipFile := zr.File[i]
+		if zipFile.FileInfo().IsDir() {
 			continue
 		}
 
-		rf := func() (iio.Reader, error) {
-			zr, err := f.Open()
+		i := i
+		af := func(ctx context.Context) (io.ReadCloser, error) {
+			reader := ctxio.IoReaderAt(ctx, ctxreader)
+
+			zr, err := zip.NewReader(reader, size)
 			if err != nil {
 				return nil, err
 			}
 
-			return iio.NewDiskTeeReader(zr)
+			rc, err := zr.File[i].Open()
+			if err != nil {
+				return nil, err
+			}
+
+			return rc, nil
 		}
 
-		n := filepath.Join(string(os.PathSeparator), f.Name)
-		af := NewArchiveFile(rf, f.FileInfo().Size())
-
-		out[n] = af
+		out[AbsPath(zipFile.Name)] = NewArchiveFile(zipFile.Name, zipFile.FileInfo().Size(), af)
 	}
 
 	return out, nil
 }
 
-var _ ArchiveLoader = SevenZipLoader
+var _ archiveLoader = SevenZipLoader
+
+func SevenZipLoader(ctx context.Context, ctxreader ctxio.ReaderAt, size int64) (map[string]*archiveFile, error) {
+	reader := ctxio.IoReaderAt(ctx, ctxreader)
 
-func SevenZipLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {
 	r, err := sevenzip.NewReader(reader, size)
 	if err != nil {
 		return nil, err
 	}
 
 	out := make(map[string]*archiveFile)
-	for _, f := range r.File {
+	for i, f := range r.File {
 		f := f
 		if f.FileInfo().IsDir() {
 			continue
 		}
 
-		rf := func() (iio.Reader, error) {
-			zr, err := f.Open()
+		i := i
+		af := func(ctx context.Context) (io.ReadCloser, error) {
+			reader := ctxio.IoReaderAt(ctx, ctxreader)
+			zr, err := sevenzip.NewReader(reader, size)
 			if err != nil {
 				return nil, err
 			}
 
-			return iio.NewDiskTeeReader(zr)
+			rc, err := zr.File[i].Open()
+			if err != nil {
+				return nil, err
+			}
+
+			return rc, nil
 		}
 
-		af := NewArchiveFile(rf, f.FileInfo().Size())
-		n := filepath.Join(string(os.PathSeparator), f.Name)
-
-		out[n] = af
+		out[AbsPath(f.Name)] = NewArchiveFile(f.Name, f.FileInfo().Size(), af)
 	}
 
 	return out, nil
 }
 
-var _ ArchiveLoader = RarLoader
+var _ archiveLoader = RarLoader
 
-func RarLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {
-	r, err := rardecode.NewReader(iio.NewSeekerWrapper(reader, size))
+func RarLoader(ctx context.Context, ctxreader ctxio.ReaderAt, size int64) (map[string]*archiveFile, error) {
+	reader := ctxio.IoReadSeekerWrapper(ctx, ctxreader, size)
+
+	r, err := rardecode.NewReader(reader)
 	if err != nil {
 		return nil, err
 	}
@@ -210,15 +366,26 @@ func RarLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {
 			return nil, err
 		}
 
-		rf := func() (iio.Reader, error) {
-			return iio.NewDiskTeeReader(r)
+		name := header.Name
+		af := func(ctx context.Context) (io.ReadCloser, error) {
+			reader := ctxio.IoReadSeekerWrapper(ctx, ctxreader, size)
+			r, err := rardecode.NewReader(reader)
+			if err != nil {
+				return nil, err
+			}
+
+			for header, err := r.Next(); err != io.EOF; header, err = r.Next() {
+				if err != nil {
+					return nil, err
+				}
+				if header.Name == name {
+					return io.NopCloser(r), nil
+				}
+			}
+			return nil, fmt.Errorf("file with name '%s' not found", name)
 		}
 
-		n := filepath.Join(string(os.PathSeparator), header.Name)
-
-		af := NewArchiveFile(rf, header.UnPackedSize)
-
-		out[n] = af
+		out[AbsPath(header.Name)] = NewArchiveFile(header.Name, header.UnPackedSize, af)
 	}
 
 	return out, nil
diff --git a/src/host/vfs/archive_test.go b/src/host/vfs/archive_test.go
index 0383b56..443abe2 100644
--- a/src/host/vfs/archive_test.go
+++ b/src/host/vfs/archive_test.go
@@ -1,41 +1,75 @@
-package vfs
+package vfs_test
 
 import (
 	"archive/zip"
 	"bytes"
+	"context"
 	"io"
 	"testing"
 
-	"git.kmsign.ru/royalcat/tstor/src/iio"
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
 	"github.com/stretchr/testify/require"
 )
 
+// TODO
+// func TestArchiveFactories(t *testing.T) {
+// 	t.Parallel()
+
+// 	ctx := context.Background()
+
+// 	require := require.New(t)
+
+// 	require.Contains(vfs.ArchiveFactories, ".zip")
+// 	require.Contains(vfs.ArchiveFactories, ".rar")
+// 	require.Contains(vfs.ArchiveFactories, ".7z")
+
+// 	fs, err := vfs.ArchiveFactories[".zip"](ctx, &vfs.DummyFile{})
+// 	require.NoError(err)
+// 	require.NotNil(fs)
+
+// 	fs, err = vfs.ArchiveFactories[".rar"](ctx, &vfs.DummyFile{})
+// 	require.NoError(err)
+// 	require.NotNil(fs)
+
+// 	fs, err = vfs.ArchiveFactories[".7z"](ctx, &vfs.DummyFile{})
+// 	require.NoError(err)
+// 	require.NotNil(fs)
+// }
+
 var fileContent []byte = []byte("Hello World")
 
 func TestZipFilesystem(t *testing.T) {
 	t.Parallel()
 	require := require.New(t)
 
-	zReader, len := createTestZip(require)
+	zReader, size := createTestZip(require)
 
-	zfs := NewArchive(zReader, len, ZipLoader)
+	ctx := context.Background()
 
-	files, err := zfs.ReadDir("/path/to/test/file")
+	// TODO add single dir collapse test
+	zfs, err := vfs.NewArchive(ctx, "test", zReader, size, vfs.ZipLoader)
+	require.NoError(err)
+
+	files, err := zfs.ReadDir(ctx, "/path/to/test/file")
 	require.NoError(err)
 
 	require.Len(files, 1)
-	f := files["1.txt"]
-	require.NotNil(f)
+	e := files[0]
+	require.Equal("1.txt", e.Name())
+	require.NotNil(e)
 
 	out := make([]byte, 11)
-	n, err := f.Read(out)
+	f, err := zfs.Open(ctx, "/path/to/test/file/1.txt")
+	require.NoError(err)
+	n, err := f.Read(ctx, out)
 	require.Equal(io.EOF, err)
 	require.Equal(11, n)
 	require.Equal(fileContent, out)
 
 }
 
-func createTestZip(require *require.Assertions) (iio.Reader, int64) {
+func createTestZip(require *require.Assertions) (ctxio.ReaderAt, int64) {
 	buf := bytes.NewBuffer([]byte{})
 
 	zWriter := zip.NewWriter(buf)
@@ -52,15 +86,16 @@ func createTestZip(require *require.Assertions) (iio.Reader, int64) {
 }
 
 type closeableByteReader struct {
-	*bytes.Reader
+	data *bytes.Reader
 }
 
 func newCBR(b []byte) *closeableByteReader {
 	return &closeableByteReader{
-		Reader: bytes.NewReader(b),
+		data: bytes.NewReader(b),
 	}
 }
 
-func (*closeableByteReader) Close() error {
-	return nil
+// ReadAt implements ctxio.ReaderAt.
+func (c *closeableByteReader) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	return c.data.ReadAt(p, off)
 }
diff --git a/src/host/vfs/dir.go b/src/host/vfs/dir.go
index 3077321..51b78cc 100644
--- a/src/host/vfs/dir.go
+++ b/src/host/vfs/dir.go
@@ -1,26 +1,59 @@
 package vfs
 
-var _ File = &Dir{}
+import (
+	"context"
+	"io/fs"
+	"path"
+)
 
-type Dir struct {
+var _ File = &dirFile{}
+
+func newDirFile(name string) File {
+	return &dirFile{
+		name: path.Base(name),
+	}
 }
 
-func (d *Dir) Size() int64 {
-	return 0
+type dirFile struct {
+	name string
 }
 
-func (d *Dir) IsDir() bool {
-	return true
-}
-
-func (d *Dir) Close() error {
+// Close implements File.
+func (d *dirFile) Close(ctx context.Context) error {
 	return nil
 }
 
-func (d *Dir) Read(p []byte) (n int, err error) {
-	return 0, nil
+// Info implements File.
+func (d *dirFile) Info() (fs.FileInfo, error) {
+	return newDirInfo(d.name), nil
 }
 
-func (d *Dir) ReadAt(p []byte, off int64) (n int, err error) {
-	return 0, nil
+// IsDir implements File.
+func (d *dirFile) IsDir() bool {
+	return true
+}
+
+// Name implements File.
+func (d *dirFile) Name() string {
+	return d.name
+}
+
+// Read implements File.
+func (d *dirFile) Read(ctx context.Context, p []byte) (n int, err error) {
+	return 0, fs.ErrInvalid
+}
+
+// ReadAt implements File.
+func (d *dirFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	return 0, fs.ErrInvalid
+}
+
+// Size implements File.
+func (d *dirFile) Size() int64 {
+	return 0
+}
+
+// Type implements File.
+func (d *dirFile) Type() fs.FileMode {
+	return roMode | fs.ModeDir
 }
diff --git a/src/host/vfs/dummy.go b/src/host/vfs/dummy.go
new file mode 100644
index 0000000..0f57da7
--- /dev/null
+++ b/src/host/vfs/dummy.go
@@ -0,0 +1,125 @@
+package vfs
+
+import (
+	"context"
+	"io/fs"
+	"os"
+	"path"
+	"time"
+)
+
+var _ Filesystem = &DummyFs{}
+
+type DummyFs struct {
+	name string
+}
+
+// ModTime implements Filesystem.
+func (d *DummyFs) ModTime() time.Time {
+	return time.Time{}
+}
+
+// Mode implements Filesystem.
+func (d *DummyFs) Mode() fs.FileMode {
+	return fs.ModeDir
+}
+
+// Size implements Filesystem.
+func (d *DummyFs) Size() int64 {
+	panic("unimplemented")
+}
+
+// Sys implements Filesystem.
+func (d *DummyFs) Sys() any {
+	panic("unimplemented")
+}
+
+// FsName implements Filesystem.
+func (d *DummyFs) FsName() string {
+	return "dummyfs"
+}
+
+// Stat implements Filesystem.
+func (*DummyFs) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	return newFileInfo(path.Base(filename), 0), nil // TODO
+}
+
+func (d *DummyFs) Open(ctx context.Context, filename string) (File, error) {
+	return &DummyFile{}, nil
+}
+
+func (d *DummyFs) Unlink(ctx context.Context, filename string) error {
+	return ErrNotImplemented
+}
+
+func (d *DummyFs) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
+	if path == "/dir/here" {
+		return []fs.DirEntry{
+			newFileInfo("file1.txt", 0),
+			newFileInfo("file2.txt", 0),
+		}, nil
+	}
+
+	return nil, os.ErrNotExist
+}
+
+// Info implements Filesystem.
+func (d *DummyFs) Info() (fs.FileInfo, error) {
+	return newDirInfo(d.name), nil
+}
+
+// IsDir implements Filesystem.
+func (d *DummyFs) IsDir() bool {
+	return true
+}
+
+// Name implements Filesystem.
+func (d *DummyFs) Name() string {
+	return d.name
+}
+
+// Type implements Filesystem.
+func (d *DummyFs) Type() fs.FileMode {
+	return fs.ModeDir
+}
+
+var _ File = &DummyFile{}
+
+type DummyFile struct {
+	name string
+}
+
+// Name implements File.
+func (d *DummyFile) Name() string {
+	panic("unimplemented")
+}
+
+// Type implements File.
+func (d *DummyFile) Type() fs.FileMode {
+	panic("unimplemented")
+}
+
+// Stat implements File.
+func (d *DummyFile) Info() (fs.FileInfo, error) {
+	return newFileInfo(d.name, 0), nil
+}
+
+func (d *DummyFile) Size() int64 {
+	return 0
+}
+
+func (d *DummyFile) IsDir() bool {
+	return false
+}
+
+func (d *DummyFile) Close(ctx context.Context) error {
+	return nil
+}
+
+func (d *DummyFile) Read(ctx context.Context, p []byte) (n int, err error) {
+	return 0, nil
+}
+
+func (d *DummyFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	return 0, nil
+}
diff --git a/src/host/vfs/fs.go b/src/host/vfs/fs.go
index 8a73aad..64f5547 100644
--- a/src/host/vfs/fs.go
+++ b/src/host/vfs/fs.go
@@ -1,44 +1,88 @@
 package vfs
 
 import (
-	"os"
+	"context"
+	"errors"
+	"io/fs"
+	"path"
 	"time"
 
-	"git.kmsign.ru/royalcat/tstor/src/iio"
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
+	"go.opentelemetry.io/otel"
 )
 
 type File interface {
 	IsDir() bool
 	Size() int64
 
-	iio.Reader
+	fs.DirEntry
+
+	ctxio.Reader
+	ctxio.ReaderAt
+	ctxio.Closer
 }
 
+var ErrNotImplemented = errors.New("not implemented")
+
+var tracer = otel.Tracer("git.kmsign.ru/royalcat/tstor/src/host/vfs")
+
 type Filesystem interface {
 	// Open opens the named file for reading. If successful, methods on the
 	// returned file can be used for reading; the associated file descriptor has
 	// mode O_RDONLY.
-	Open(filename string) (File, error)
+	Open(ctx context.Context, filename string) (File, error)
 
 	// ReadDir reads the directory named by dirname and returns a list of
 	// directory entries.
-	ReadDir(path string) (map[string]File, error)
+	ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error)
+
+	Stat(ctx context.Context, filename string) (fs.FileInfo, error)
+	Unlink(ctx context.Context, filename string) error
+
+	// As filesystem mounted to some path, make sense to have the filesystem implement DirEntry
+	fs.DirEntry
 }
 
+// readonly
+const roMode = fs.FileMode(0555)
+
 type fileInfo struct {
 	name  string
 	size  int64
 	isDir bool
 }
 
-func NewFileInfo(name string, size int64, isDir bool) *fileInfo {
+var _ fs.FileInfo = &fileInfo{}
+var _ fs.DirEntry = &fileInfo{}
+
+func newDirInfo(name string) *fileInfo {
 	return &fileInfo{
-		name:  name,
-		size:  size,
-		isDir: isDir,
+		name:  path.Base(name),
+		size:  0,
+		isDir: true,
 	}
 }
 
+func newFileInfo(name string, size int64) *fileInfo {
+	return &fileInfo{
+		name:  path.Base(name),
+		size:  size,
+		isDir: false,
+	}
+}
+
+func (fi *fileInfo) Info() (fs.FileInfo, error) {
+	return fi, nil
+}
+
+func (fi *fileInfo) Type() fs.FileMode {
+	if fi.isDir {
+		return fs.ModeDir
+	}
+
+	return 0
+}
+
 func (fi *fileInfo) Name() string {
 	return fi.name
 }
@@ -47,17 +91,17 @@ func (fi *fileInfo) Size() int64 {
 	return fi.size
 }
 
-func (fi *fileInfo) Mode() os.FileMode {
+func (fi *fileInfo) Mode() fs.FileMode {
 	if fi.isDir {
-		return 0555 | os.ModeDir
+		return roMode | fs.ModeDir
 	}
 
-	return 0555
+	return roMode
 }
 
 func (fi *fileInfo) ModTime() time.Time {
 	// TODO fix it
-	return time.Now()
+	return time.Time{}
 }
 
 func (fi *fileInfo) IsDir() bool {
diff --git a/src/host/vfs/fs_test.go b/src/host/vfs/fs_test.go
index 8a79fdd..3ea2dcc 100644
--- a/src/host/vfs/fs_test.go
+++ b/src/host/vfs/fs_test.go
@@ -12,13 +12,32 @@ func TestFileinfo(t *testing.T) {
 
 	require := require.New(t)
 
-	fi := NewFileInfo("name", 42, false)
+	fi := newFileInfo("abc/name", 42)
 
-	require.Equal(fi.IsDir(), false)
-	require.Equal(fi.Name(), "name")
-	require.Equal(fi.Size(), int64(42))
+	require.Equal("name", fi.Name())
+	require.False(fi.IsDir())
+	require.Equal(int64(42), fi.Size())
 	require.NotNil(fi.ModTime())
-	require.Equal(fi.Mode(), fs.FileMode(0555))
-	require.Equal(fi.Sys(), nil)
+	require.Zero(fi.Type() & fs.ModeDir)
+	require.Zero(fi.Mode() & fs.ModeDir)
+	require.Equal(fs.FileMode(0555), fi.Mode())
+	require.Nil(fi.Sys())
+}
+
+func TestDirInfo(t *testing.T) {
+	t.Parallel()
+
+	require := require.New(t)
+
+	fi := newDirInfo("abc/name")
+
+	require.True(fi.IsDir())
+	require.Equal("name", fi.Name())
+	require.Equal(int64(0), fi.Size())
+	require.NotNil(fi.ModTime())
+	require.NotZero(fi.Type() & fs.ModeDir)
+	require.NotZero(fi.Mode() & fs.ModeDir)
+	require.Equal(roMode|fs.ModeDir, fi.Mode())
+	require.Nil(fi.Sys())
 
 }
diff --git a/src/host/vfs/log.go b/src/host/vfs/log.go
new file mode 100644
index 0000000..1c19e1f
--- /dev/null
+++ b/src/host/vfs/log.go
@@ -0,0 +1,286 @@
+package vfs
+
+import (
+	"context"
+	"io/fs"
+	"log/slog"
+	"reflect"
+	"time"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/rlog"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+type LogFS struct {
+	fs  Filesystem
+	log *slog.Logger
+
+	timeout     time.Duration
+	readTimeout time.Duration
+}
+
+var _ Filesystem = (*LogFS)(nil)
+
+func WrapLogFS(fs Filesystem) *LogFS {
+	return &LogFS{
+		fs:          fs,
+		log:         rlog.ComponentLog("fs"),
+		timeout:     time.Minute * 3,
+		readTimeout: time.Minute,
+	}
+}
+
+// ModTime implements Filesystem.
+func (lfs *LogFS) ModTime() time.Time {
+	return time.Time{}
+}
+
+// Mode implements Filesystem.
+func (lfs *LogFS) Mode() fs.FileMode {
+	return fs.ModeDir
+}
+
+// Size implements Filesystem.
+func (lfs *LogFS) Size() int64 {
+	return 0
+}
+
+// Sys implements Filesystem.
+func (lfs *LogFS) Sys() any {
+	return nil
+}
+
+func (fs *LogFS) FsName() string {
+	return "logfs"
+}
+
+func (fs *LogFS) traceAttrs(add ...attribute.KeyValue) trace.SpanStartOption {
+	return trace.WithAttributes(append([]attribute.KeyValue{
+		attribute.String("fs", fs.FsName()),
+	}, add...)...)
+}
+
+// Info implements Filesystem.
+func (fs *LogFS) Info() (fs.FileInfo, error) {
+	return fs.fs.Info()
+}
+
+// IsDir implements Filesystem.
+func (fs *LogFS) IsDir() bool {
+	return fs.fs.IsDir()
+}
+
+// Name implements Filesystem.
+func (fs *LogFS) Name() string {
+	return fs.fs.Name()
+}
+
+// Type implements Filesystem.
+func (fs *LogFS) Type() fs.FileMode {
+	return fs.fs.Type()
+}
+
+// Open implements Filesystem.
+func (fs *LogFS) Open(ctx context.Context, filename string) (file File, err error) {
+	ctx, cancel := context.WithTimeout(ctx, fs.timeout)
+	defer cancel()
+	ctx, span := tracer.Start(ctx, "Open",
+		fs.traceAttrs(attribute.String("filename", filename)),
+	)
+	defer func() {
+		if err != nil {
+			span.RecordError(err)
+		}
+		span.End()
+	}()
+
+	file, err = fs.fs.Open(ctx, filename)
+	if err != nil {
+		fs.log.ErrorContext(ctx, "Failed to open file", "filename", filename, "error", err)
+		return nil, err
+	}
+	return WrapLogFile(file, filename, fs.log, fs.readTimeout), nil
+}
+
+// ReadDir implements Filesystem.
+func (fs *LogFS) ReadDir(ctx context.Context, path string) (entries []fs.DirEntry, err error) {
+	ctx, cancel := context.WithTimeout(ctx, fs.timeout)
+	defer cancel()
+	ctx, span := tracer.Start(ctx, "ReadDir",
+		fs.traceAttrs(attribute.String("path", path)),
+	)
+	defer func() {
+		if err != nil {
+			span.RecordError(err)
+		}
+		span.End()
+	}()
+
+	entries, err = fs.fs.ReadDir(ctx, path)
+	if err != nil {
+		fs.log.ErrorContext(ctx, "Failed to read dir", "path", path, "error", err.Error(), "fs-type", reflect.TypeOf(fs.fs).Name())
+	}
+	return entries, err
+}
+
+// Stat implements Filesystem.
+func (fs *LogFS) Stat(ctx context.Context, filename string) (info fs.FileInfo, err error) {
+	ctx, cancel := context.WithTimeout(ctx, fs.timeout)
+	defer cancel()
+	ctx, span := tracer.Start(ctx, "Stat",
+		fs.traceAttrs(attribute.String("filename", filename)),
+	)
+	defer func() {
+		if err != nil {
+			span.RecordError(err)
+		}
+		span.End()
+	}()
+
+	info, err = fs.fs.Stat(ctx, filename)
+	if err != nil {
+		fs.log.Error("Failed to stat", "filename", filename, "error", err)
+	}
+	return info, err
+}
+
+// Unlink implements Filesystem.
+func (fs *LogFS) Unlink(ctx context.Context, filename string) (err error) {
+	ctx, cancel := context.WithTimeout(ctx, fs.timeout)
+	defer cancel()
+	ctx, span := tracer.Start(ctx, "Unlink",
+		fs.traceAttrs(attribute.String("filename", filename)),
+	)
+	defer func() {
+		if err != nil {
+			span.RecordError(err)
+		}
+		span.End()
+	}()
+
+	err = fs.fs.Unlink(ctx, filename)
+	if err != nil {
+		fs.log.Error("Failed to unlink", "filename", filename, "error", err)
+	}
+	return err
+}
+
+type LogFile struct {
+	filename string
+	f        File
+
+	log     *slog.Logger
+	timeout time.Duration
+}
+
+// Name implements File.
+func (f *LogFile) Name() string {
+	return f.f.Name()
+}
+
+// Type implements File.
+func (f *LogFile) Type() fs.FileMode {
+	return f.f.Type()
+}
+
+var _ File = (*LogFile)(nil)
+
+func WrapLogFile(f File, filename string, log *slog.Logger, timeout time.Duration) *LogFile {
+	return &LogFile{
+		filename: filename,
+		f:        f,
+		log:      log.With("filename", filename),
+		timeout:  timeout,
+	}
+}
+
+// Close implements File.
+func (f *LogFile) Close(ctx context.Context) (err error) {
+	ctx, cancel := context.WithTimeout(ctx, f.timeout)
+	defer cancel()
+	ctx, span := tracer.Start(ctx, "Close",
+		trace.WithAttributes(attribute.String("filename", f.filename)),
+	)
+	defer func() {
+		if err != nil {
+			span.RecordError(err)
+		}
+		span.End()
+	}()
+
+	err = f.f.Close(ctx)
+	if err != nil {
+		f.log.ErrorContext(ctx, "Failed to close", "error", err)
+	}
+	return err
+}
+
+// IsDir implements File.
+func (f *LogFile) IsDir() bool {
+	return f.f.IsDir()
+}
+
+// Read implements File.
+func (f *LogFile) Read(ctx context.Context, p []byte) (n int, err error) {
+	ctx, cancel := context.WithTimeout(ctx, f.timeout)
+	defer cancel()
+	ctx, span := tracer.Start(ctx, "Read",
+		trace.WithAttributes(
+			attribute.String("filename", f.filename),
+			attribute.Int("length", len(p)),
+		),
+	)
+	defer func() {
+		span.SetAttributes(attribute.Int("read", n))
+		if err != nil {
+			span.RecordError(err)
+		}
+		span.End()
+	}()
+
+	n, err = f.f.Read(ctx, p)
+	if err != nil {
+		f.log.Error("Failed to read", "error", err)
+	}
+	return n, err
+}
+
+// ReadAt implements File.
+func (f *LogFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	ctx, cancel := context.WithTimeout(ctx, f.timeout)
+	defer cancel()
+	ctx, span := tracer.Start(ctx, "ReadAt",
+		trace.WithAttributes(
+			attribute.String("filename", f.filename),
+			attribute.Int("length", len(p)),
+		),
+	)
+	defer func() {
+		span.SetAttributes(attribute.Int("read", n))
+		if err != nil {
+			span.RecordError(err)
+		}
+		span.End()
+	}()
+
+	n, err = f.f.ReadAt(ctx, p, off)
+	if err != nil {
+		f.log.Error("Failed to read", "offset", off, "error", err)
+	}
+	return n, err
+}
+
+// Size implements File.
+func (f *LogFile) Size() int64 {
+	return f.f.Size()
+}
+
+// Stat implements File.
+func (f *LogFile) Info() (fs.FileInfo, error) {
+	info, err := f.f.Info()
+	if err != nil {
+		f.log.Error("Failed to get file info", "error", err)
+	}
+	return info, err
+}
diff --git a/src/host/vfs/memory.go b/src/host/vfs/memory.go
index 1031a7d..8f4f84f 100644
--- a/src/host/vfs/memory.go
+++ b/src/host/vfs/memory.go
@@ -2,48 +2,139 @@ package vfs
 
 import (
 	"bytes"
+	"context"
+	"io/fs"
+	"path"
+	"time"
 )
 
-var _ Filesystem = &MemoryFs{}
-
 type MemoryFs struct {
+	name  string
 	files map[string]*MemoryFile
 }
 
-func NewMemoryFS(files map[string]*MemoryFile) *MemoryFs {
+var _ Filesystem = (*MemoryFs)(nil)
+
+// ModTime implements Filesystem.
+func (mfs *MemoryFs) ModTime() time.Time {
+	return time.Time{}
+}
+
+// Mode implements Filesystem.
+func (mfs *MemoryFs) Mode() fs.FileMode {
+	return fs.ModeDir
+}
+
+// Size implements Filesystem.
+func (fs *MemoryFs) Size() int64 {
+	return 0
+}
+
+// Sys implements Filesystem.
+func (fs *MemoryFs) Sys() any {
+	return nil
+}
+
+// FsKind implements Filesystem.
+func (fs *MemoryFs) FsName() string {
+	return "memoryfs"
+}
+
+// Info implements Filesystem.
+func (fs *MemoryFs) Info() (fs.FileInfo, error) {
+	return newDirInfo(fs.name), nil
+}
+
+// IsDir implements Filesystem.
+func (fs *MemoryFs) IsDir() bool {
+	return true
+}
+
+// Name implements Filesystem.
+func (fs *MemoryFs) Name() string {
+	return fs.name
+}
+
+// Type implements Filesystem.
+func (mfs *MemoryFs) Type() fs.FileMode {
+	return fs.ModeDir
+}
+
+func NewMemoryFS(name string, files map[string]*MemoryFile) *MemoryFs {
 	return &MemoryFs{
+		name:  name,
 		files: files,
 	}
 }
 
-func (m *MemoryFs) Open(filename string) (File, error) {
+func (m *MemoryFs) Open(ctx context.Context, filename string) (File, error) {
 	return getFile(m.files, filename)
 }
 
-func (fs *MemoryFs) ReadDir(path string) (map[string]File, error) {
-	return listFilesInDir(fs.files, path)
+func (fs *MemoryFs) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) {
+	return listDirFromFiles(fs.files, path)
 }
 
-var _ File = &MemoryFile{}
+// Stat implements Filesystem.
+func (mfs *MemoryFs) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	file, ok := mfs.files[filename]
+	if !ok {
+		return nil, ErrNotExist
+	}
+	return newFileInfo(path.Base(filename), file.Size()), nil
+}
+
+// Unlink implements Filesystem.
+func (fs *MemoryFs) Unlink(ctx context.Context, filename string) error {
+	return ErrNotImplemented
+}
+
+var _ File = (*MemoryFile)(nil)
 
 type MemoryFile struct {
-	*bytes.Reader
+	name string
+	data *bytes.Reader
 }
 
-func NewMemoryFile(data []byte) *MemoryFile {
+func NewMemoryFile(name string, data []byte) *MemoryFile {
 	return &MemoryFile{
-		Reader: bytes.NewReader(data),
+		name: name,
+		data: bytes.NewReader(data),
 	}
 }
 
+// Name implements File.
+func (d *MemoryFile) Name() string {
+	return d.name
+}
+
+// Type implements File.
+func (d *MemoryFile) Type() fs.FileMode {
+	return roMode
+}
+
+func (d *MemoryFile) Info() (fs.FileInfo, error) {
+	return newFileInfo(d.name, int64(d.data.Len())), nil
+}
+
 func (d *MemoryFile) Size() int64 {
-	return int64(d.Reader.Len())
+	return int64(d.data.Len())
 }
 
 func (d *MemoryFile) IsDir() bool {
 	return false
 }
 
-func (d *MemoryFile) Close() (err error) {
+func (d *MemoryFile) Close(ctx context.Context) (err error) {
 	return
 }
+
+// Read implements File.
+func (d *MemoryFile) Read(ctx context.Context, p []byte) (n int, err error) {
+	return d.data.Read(p)
+}
+
+// ReadAt implements File.
+func (d *MemoryFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	return d.data.ReadAt(p, off)
+}
diff --git a/src/host/vfs/memory_test.go b/src/host/vfs/memory_test.go
index 9d090d8..6f6788f 100644
--- a/src/host/vfs/memory_test.go
+++ b/src/host/vfs/memory_test.go
@@ -1,6 +1,7 @@
 package vfs
 
 import (
+	"context"
 	"testing"
 
 	"github.com/stretchr/testify/require"
@@ -11,9 +12,10 @@ func TestMemory(t *testing.T) {
 
 	require := require.New(t)
 	testData := "Hello"
+	ctx := context.Background()
 
-	c := NewMemoryFS(map[string]*MemoryFile{
-		"/dir/here": NewMemoryFile([]byte(testData)),
+	c := NewMemoryFS("/", map[string]*MemoryFile{
+		"/dir/here": NewMemoryFile("here", []byte(testData)),
 	})
 
 	// fss := map[string]Filesystem{
@@ -23,23 +25,23 @@ func TestMemory(t *testing.T) {
 	// c, err := NewContainerFs(fss)
 	// require.NoError(err)
 
-	f, err := c.Open("/dir/here")
+	f, err := c.Open(ctx, "/dir/here")
 	require.NoError(err)
 	require.NotNil(f)
 	require.Equal(int64(5), f.Size())
-	require.NoError(f.Close())
+	require.NoError(f.Close(ctx))
 
 	data := make([]byte, 5)
-	n, err := f.Read(data)
+	n, err := f.Read(ctx, data)
 	require.NoError(err)
-	require.Equal(n, 5)
+	require.Equal(5, n)
 	require.Equal(string(data), testData)
 
-	files, err := c.ReadDir("/")
+	files, err := c.ReadDir(ctx, "/")
 	require.NoError(err)
 	require.Len(files, 1)
 
-	files, err = c.ReadDir("/dir")
+	files, err = c.ReadDir(ctx, "/dir")
 	require.NoError(err)
 	require.Len(files, 1)
 
diff --git a/src/host/vfs/os.go b/src/host/vfs/os.go
index 035b66d..27e8f1a 100644
--- a/src/host/vfs/os.go
+++ b/src/host/vfs/os.go
@@ -1,6 +1,7 @@
 package vfs
 
 import (
+	"context"
 	"io/fs"
 	"os"
 	"path"
@@ -11,35 +12,58 @@ type OsFS struct {
 	hostDir string
 }
 
-// Open implements Filesystem.
-func (fs *OsFS) Open(filename string) (File, error) {
+var _ Filesystem = (*OsFS)(nil)
+
+// Stat implements Filesystem.
+func (fs *OsFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
 	if path.Clean(filename) == Separator {
-		return &Dir{}, nil
+		return newDirInfo(Separator), nil
 	}
 
-	osfile, err := os.Open(path.Join(fs.hostDir, filename))
+	info, err := os.Stat(path.Join(fs.hostDir, filename))
 	if err != nil {
 		return nil, err
 	}
-	return NewOsFile(osfile), nil
+	return info, nil
+}
+
+// Unlink implements Filesystem.
+func (fs *OsFS) Unlink(ctx context.Context, filename string) error {
+	return os.RemoveAll(path.Join(fs.hostDir, filename))
+}
+
+// Open implements Filesystem.
+func (fs *OsFS) Open(ctx context.Context, filename string) (File, error) {
+	if isRoot(filename) {
+		return newDirFile(fs.Name()), nil
+	}
+
+	return NewLazyOsFile(path.Join(fs.hostDir, filename))
 }
 
 // ReadDir implements Filesystem.
-func (o *OsFS) ReadDir(dir string) (map[string]File, error) {
-	dir = path.Join(o.hostDir, dir)
-	entries, err := os.ReadDir(dir)
-	if err != nil {
-		return nil, err
-	}
-	out := map[string]File{}
-	for _, e := range entries {
-		if e.IsDir() {
-			out[e.Name()] = &Dir{}
-		} else {
-			out[e.Name()] = NewLazyOsFile(path.Join(dir, e.Name()))
-		}
-	}
-	return out, nil
+func (o *OsFS) ReadDir(ctx context.Context, dir string) ([]fs.DirEntry, error) {
+	return os.ReadDir(path.Join(o.hostDir, dir))
+}
+
+// Info implements Filesystem.
+func (fs *OsFS) Info() (fs.FileInfo, error) {
+	return newDirInfo(fs.Name()), nil
+}
+
+// IsDir implements Filesystem.
+func (fs *OsFS) IsDir() bool {
+	return true
+}
+
+// Name implements Filesystem.
+func (fs *OsFS) Name() string {
+	return path.Base(fs.hostDir)
+}
+
+// Type implements Filesystem.
+func (ofs *OsFS) Type() fs.FileMode {
+	return fs.ModeDir
 }
 
 func NewOsFs(osDir string) *OsFS {
@@ -50,64 +74,27 @@ func NewOsFs(osDir string) *OsFS {
 
 var _ Filesystem = &OsFS{}
 
-type OsFile struct {
-	f *os.File
-}
-
-func NewOsFile(f *os.File) *OsFile {
-	return &OsFile{f: f}
-}
-
-var _ File = &OsFile{}
-
-// Close implements File.
-func (f *OsFile) Close() error {
-	return f.f.Close()
-}
-
-// Read implements File.
-func (f *OsFile) Read(p []byte) (n int, err error) {
-	return f.f.Read(p)
-}
-
-// ReadAt implements File.
-func (f *OsFile) ReadAt(p []byte, off int64) (n int, err error) {
-	return f.f.ReadAt(p, off)
-}
-
-func (f *OsFile) Stat() (fs.FileInfo, error) {
-	return f.f.Stat()
-}
-
-// Size implements File.
-func (f *OsFile) Size() int64 {
-	stat, err := f.Stat()
+func NewLazyOsFile(path string) (*LazyOsFile, error) {
+	info, err := os.Stat(path)
 	if err != nil {
-		return 0
+		return nil, err
 	}
-	return stat.Size()
-}
 
-// IsDir implements File.
-func (f *OsFile) IsDir() bool {
-	stat, err := f.Stat()
-	if err != nil {
-		return false
-	}
-	return stat.IsDir()
+	return &LazyOsFile{
+		path: path,
+		info: info,
+	}, nil
 }
 
 type LazyOsFile struct {
 	m    sync.Mutex
 	path string
 	file *os.File
+
+	info fs.FileInfo
 }
 
-func NewLazyOsFile(path string) *LazyOsFile {
-	return &LazyOsFile{path: path}
-}
-
-var _ File = &OsFile{}
+var _ File = (*LazyOsFile)(nil)
 
 func (f *LazyOsFile) open() error {
 	f.m.Lock()
@@ -125,43 +112,53 @@ func (f *LazyOsFile) open() error {
 	return nil
 }
 
+// Name implements File.
+func (f *LazyOsFile) Name() string {
+	return path.Base(f.path)
+}
+
+// Type implements File.
+func (f *LazyOsFile) Type() fs.FileMode {
+	return f.info.Mode()
+}
+
 // Close implements File.
-func (f *LazyOsFile) Close() error {
+func (f *LazyOsFile) Close(ctx context.Context) error {
+	if f.file == nil {
+		return nil
+	}
 	return f.file.Close()
 }
 
 // Read implements File.
-func (f *LazyOsFile) Read(p []byte) (n int, err error) {
+func (f *LazyOsFile) Read(ctx context.Context, p []byte) (n int, err error) {
+	err = f.open()
+	if err != nil {
+		return 0, err
+	}
 	return f.file.Read(p)
 }
 
 // ReadAt implements File.
-func (f *LazyOsFile) ReadAt(p []byte, off int64) (n int, err error) {
+func (f *LazyOsFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	err = f.open()
+	if err != nil {
+		return 0, err
+	}
 	return f.file.ReadAt(p, off)
 }
 
-func (f *LazyOsFile) Stat() (fs.FileInfo, error) {
-	if f.file == nil {
-		return os.Stat(f.path)
-	} else {
-		return f.file.Stat()
-	}
+func (f *LazyOsFile) Info() (fs.FileInfo, error) {
+
+	return f.info, nil
 }
 
 // Size implements File.
 func (f *LazyOsFile) Size() int64 {
-	stat, err := f.Stat()
-	if err != nil {
-		return 0
-	}
-	return stat.Size()
+	return f.info.Size()
 }
 
 // IsDir implements File.
 func (f *LazyOsFile) IsDir() bool {
-	stat, err := f.Stat()
-	if err != nil {
-		return false
-	}
-	return stat.IsDir()
+	return f.info.IsDir()
 }
diff --git a/src/host/vfs/os_test.go b/src/host/vfs/os_test.go
new file mode 100644
index 0000000..06f26e3
--- /dev/null
+++ b/src/host/vfs/os_test.go
@@ -0,0 +1,75 @@
+package vfs_test
+
+import (
+	"context"
+	"os"
+	"testing"
+
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+	"github.com/stretchr/testify/require"
+)
+
+func TestOsFs(t *testing.T) {
+	const testDir = "/tmp/tstor-test"
+	require := require.New(t)
+	ctx := context.Background()
+
+	err := os.RemoveAll(testDir)
+	require.NotErrorIs(err, os.ErrNotExist)
+	defer func() {
+		err = os.RemoveAll(testDir)
+		require.NotErrorIs(err, os.ErrNotExist)
+	}()
+
+	err = os.MkdirAll(testDir, os.ModePerm)
+	require.NoError(err)
+
+	err = os.MkdirAll(testDir+"/dir1", os.ModePerm)
+	require.NoError(err)
+	err = os.MkdirAll(testDir+"/dir1/dir2", os.ModePerm)
+	require.NoError(err)
+	err = os.MkdirAll(testDir+"/dir1/dir3", os.ModePerm)
+	require.NoError(err)
+	osfile, err := os.Create(testDir + "/dir1/dir2/file")
+	require.NoError(err)
+	err = osfile.Close()
+	require.NoError(err)
+
+	fs := vfs.NewOsFs(testDir)
+
+	dirs := []string{"/", "/.", "/dir1", "/dir1/dir2"}
+
+	for _, dir := range dirs {
+		file, err := fs.Open(ctx, dir)
+		require.NoError(err)
+		require.True(file.IsDir())
+		stat, err := file.Info()
+		require.NoError(err)
+		require.True(stat.IsDir())
+		require.NoError(file.Close(ctx))
+
+		info, err := fs.Stat(ctx, dir)
+		require.NoError(err)
+		require.True(info.IsDir())
+
+		entries, err := fs.ReadDir(ctx, dir)
+		require.NoError(err)
+
+		for _, e := range entries {
+			switch e.Name() {
+			case "dir2", "dir1", "dir3":
+				require.False(e.Type().IsRegular())
+				require.True(e.Type().IsDir())
+				require.True(e.IsDir())
+			case "file":
+				require.True(e.Type().IsRegular())
+				require.False(e.Type().IsDir())
+				require.False(e.IsDir())
+			}
+		}
+	}
+
+	file, err := fs.Open(ctx, "/dir1/dir2/file")
+	require.NoError(err)
+	require.False(file.IsDir())
+}
diff --git a/src/host/vfs/resolver.go b/src/host/vfs/resolver.go
index 2b058e8..6573ab5 100644
--- a/src/host/vfs/resolver.go
+++ b/src/host/vfs/resolver.go
@@ -1,56 +1,203 @@
 package vfs
 
 import (
+	"context"
+	"errors"
 	"fmt"
+	"io/fs"
+	"log/slog"
+	"path"
+	"reflect"
+	"slices"
 	"strings"
 	"sync"
+	"time"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/rlog"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+	"golang.org/x/exp/maps"
 )
 
-type ResolveFS struct {
-	osDir    string
-	osFS     *OsFS
+type ResolverFS struct {
+	rootFS   Filesystem
 	resolver *resolver
+
+	log *slog.Logger
 }
 
-func NewResolveFS(osDir string, factories map[string]FsFactory) *ResolveFS {
-	return &ResolveFS{
-		osDir:    osDir,
-		osFS:     NewOsFs(osDir),
+func NewResolveFS(rootFs Filesystem, factories map[string]FsFactory) *ResolverFS {
+	return &ResolverFS{
+		rootFS:   rootFs,
 		resolver: newResolver(factories),
+		log:      rlog.ComponentLog("fs/resolverfs"),
 	}
 }
 
+// ModTime implements Filesystem.
+func (r *ResolverFS) ModTime() time.Time {
+	return time.Time{}
+}
+
+// Mode implements Filesystem.
+func (r *ResolverFS) Mode() fs.FileMode {
+	return fs.ModeDir
+}
+
+// Size implements Filesystem.
+func (r *ResolverFS) Size() int64 {
+	return 0
+}
+
+// Sys implements Filesystem.
+func (r *ResolverFS) Sys() any {
+	return nil
+}
+
+// FsName implements Filesystem.
+func (r *ResolverFS) FsName() string {
+	return "resolverfs"
+}
+
+func (fs *ResolverFS) traceAttrs(add ...attribute.KeyValue) trace.SpanStartOption {
+	return trace.WithAttributes(append([]attribute.KeyValue{
+		attribute.String("fs", fs.FsName()),
+	}, add...)...)
+}
+
+func (r *ResolverFS) ResolvablesExtensions() []string {
+	return maps.Keys(r.resolver.factories)
+}
+
 // Open implements Filesystem.
-func (r *ResolveFS) Open(filename string) (File, error) {
-	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(filename, r.osFS.Open)
+func (r *ResolverFS) Open(ctx context.Context, filename string) (File, error) {
+	ctx, span := tracer.Start(ctx, "Open",
+		r.traceAttrs(attribute.String("filename", filename)),
+	)
+	defer span.End()
+
+	if path.Clean(filename) == Separator {
+		return newDirFile(r.Name()), nil
+	}
+
+	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(ctx, filename, r.rootFS.Open)
 	if err != nil {
 		return nil, err
 	}
 	if nestedFs != nil {
-		return nestedFs.Open(nestedFsPath)
+		return nestedFs.Open(ctx, nestedFsPath)
 	}
 
-	return r.osFS.Open(fsPath)
+	return r.rootFS.Open(ctx, fsPath)
 }
 
 // ReadDir implements Filesystem.
-func (r *ResolveFS) ReadDir(dir string) (map[string]File, error) {
-	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(dir, r.osFS.Open)
+func (r *ResolverFS) ReadDir(ctx context.Context, dir string) ([]fs.DirEntry, error) {
+	ctx, span := tracer.Start(ctx, "ReadDir",
+		r.traceAttrs(attribute.String("name", dir)),
+	)
+	defer span.End()
+
+	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(ctx, dir, r.rootFS.Open)
 	if err != nil {
 		return nil, err
 	}
 	if nestedFs != nil {
-		return nestedFs.ReadDir(nestedFsPath)
+		return nestedFs.ReadDir(ctx, nestedFsPath)
 	}
 
-	return r.osFS.ReadDir(fsPath)
+	entries, err := r.rootFS.ReadDir(ctx, fsPath)
+	if err != nil {
+		return nil, err
+	}
+	out := make([]fs.DirEntry, 0, len(entries))
+	for _, e := range entries {
+		if r.resolver.isNestedFs(e.Name()) {
+			filepath := path.Join("/", dir, e.Name())
+			file, err := r.Open(ctx, filepath)
+			if err != nil {
+				return nil, err
+			}
+			defer file.Close(ctx)
+			nestedfs, err := r.resolver.nestedFs(ctx, filepath, file)
+			if err != nil {
+				if errors.Is(err, context.DeadlineExceeded) {
+					r.log.ErrorContext(ctx, "creating fs timed out", "filename", e.Name())
+					continue
+				}
+
+				return nil, err
+			}
+
+			out = append(out, nestedfs)
+		} else {
+			out = append(out, e)
+		}
+	}
+	return out, nil
 }
 
-var _ Filesystem = &ResolveFS{}
+// Stat implements Filesystem.
+func (r *ResolverFS) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	ctx, span := tracer.Start(ctx, "Stat",
+		r.traceAttrs(attribute.String("filename", filename)),
+	)
+	defer span.End()
 
-type FsFactory func(f File) (Filesystem, error)
+	if isRoot(filename) {
+		return r, nil
+	}
 
-const Separator = "/"
+	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(ctx, filename, r.rootFS.Open)
+	if err != nil {
+		return nil, err
+	}
+	span.SetAttributes(attribute.String("fsPath", fsPath), attribute.String("nestedFsPath", nestedFsPath))
+
+	if nestedFs != nil {
+		span.AddEvent("calling nested fs")
+		return nestedFs.Stat(ctx, nestedFsPath)
+	}
+
+	return r.rootFS.Stat(ctx, fsPath)
+}
+
+// Unlink implements Filesystem.
+func (r *ResolverFS) Unlink(ctx context.Context, filename string) error {
+	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(ctx, filename, r.rootFS.Open)
+	if err != nil {
+		return err
+	}
+	if nestedFs != nil {
+		return nestedFs.Unlink(ctx, nestedFsPath)
+	}
+
+	return r.rootFS.Unlink(ctx, fsPath)
+}
+
+// Info implements Filesystem.
+func (r *ResolverFS) Info() (fs.FileInfo, error) {
+	return r, nil
+}
+
+// IsDir implements Filesystem.
+func (r *ResolverFS) IsDir() bool {
+	return true
+}
+
+// Name implements Filesystem.
+func (r *ResolverFS) Name() string {
+	return r.rootFS.Name()
+}
+
+// Type implements Filesystem.
+func (r *ResolverFS) Type() fs.FileMode {
+	return fs.ModeDir
+}
+
+var _ Filesystem = &ResolverFS{}
+
+type FsFactory func(ctx context.Context, f File) (Filesystem, error)
 
 func newResolver(factories map[string]FsFactory) *resolver {
 	return &resolver{
@@ -66,10 +213,45 @@ type resolver struct {
 	// TODO: add fsmap clean
 }
 
-type openFile func(path string) (File, error)
+type openFile func(ctx context.Context, path string) (File, error)
+
+func (r *resolver) isNestedFs(f string) bool {
+	for ext := range r.factories {
+		if strings.HasSuffix(f, ext) {
+			return true
+		}
+	}
+	return false
+}
+
+func (r *resolver) nestedFs(ctx context.Context, fsPath string, file File) (Filesystem, error) {
+	for ext, nestFactory := range r.factories {
+		if !strings.HasSuffix(fsPath, ext) {
+			continue
+		}
+
+		if nestedFs, ok := r.fsmap[fsPath]; ok {
+			return nestedFs, nil
+		}
+
+		nestedFs, err := nestFactory(ctx, file)
+		if err != nil {
+			return nil, fmt.Errorf("error creating filesystem from file: %s with error: %w", fsPath, err)
+		}
+		r.fsmap[fsPath] = nestedFs
+
+		return nestedFs, nil
+
+	}
+	return nil, nil
+}
 
 // open requeue raw open, without resolver call
-func (r *resolver) resolvePath(name string, rawOpen openFile) (fsPath string, nestedFs Filesystem, nestedFsPath string, err error) {
+func (r *resolver) resolvePath(ctx context.Context, name string, rawOpen openFile) (fsPath string, nestedFs Filesystem, nestedFsPath string, err error) {
+	ctx, span := tracer.Start(ctx, "resolvePath")
+	defer span.End()
+
+	name = path.Clean(name)
 	name = strings.TrimPrefix(name, Separator)
 	parts := strings.Split(name, Separator)
 
@@ -88,11 +270,12 @@ PARTS_LOOP:
 	}
 
 	if nestOn == -1 {
-		return name, nil, "", nil
+		return AbsPath(name), nil, "", nil
 	}
 
-	fsPath = Clean(strings.Join(parts[:nestOn], Separator))
-	nestedFsPath = Clean(strings.Join(parts[nestOn:], Separator))
+	fsPath = AbsPath(path.Join(parts[:nestOn]...))
+
+	nestedFsPath = AbsPath(path.Join(parts[nestOn:]...))
 
 	// we dont need lock until now
 	// it must be before fsmap read to exclude race condition:
@@ -102,45 +285,70 @@ PARTS_LOOP:
 	defer r.m.Unlock()
 
 	if nestedFs, ok := r.fsmap[fsPath]; ok {
+		span.AddEvent("fs loaded from cache", trace.WithAttributes(attribute.String("nestedFs", reflect.TypeOf(nestedFs).Name())))
 		return fsPath, nestedFs, nestedFsPath, nil
 	} else {
-		fsFile, err := rawOpen(fsPath)
+		ctx, span := tracer.Start(ctx, "CreateFS")
+		defer span.End()
+
+		fsFile, err := rawOpen(ctx, fsPath)
 		if err != nil {
 			return "", nil, "", fmt.Errorf("error opening filesystem file: %s with error: %w", fsPath, err)
 		}
-		nestedFs, err := nestFactory(fsFile)
+		nestedFs, err := nestFactory(ctx, fsFile)
 		if err != nil {
 			return "", nil, "", fmt.Errorf("error creating filesystem from file: %s with error: %w", fsPath, err)
 		}
 		r.fsmap[fsPath] = nestedFs
 
+		span.AddEvent("fs created", trace.WithAttributes(attribute.String("nestedFs", reflect.TypeOf(nestedFs).Name())))
+
 		return fsPath, nestedFs, nestedFsPath, nil
 	}
 
 }
 
-// func (r *resolver) resolveFile(name string, fs Filesystem) (File, error) {
-// 	fsPath, nestedFs, nestedFsPath, err := r.resolvePath(name, fs)
-// 	if err != nil {
-// 		return nil, err
-// 	}
+var ErrNotExist = fs.ErrNotExist
 
-// 	if nestedFs == nil {
-// 		return fs.Open(fsPath)
-// 	}
+func getFile[F File](m map[string]F, name string) (File, error) {
+	if name == Separator {
+		return newDirFile(name), nil
+	}
 
-// 	return nestedFs.Open(nestedFsPath)
-// }
+	f, ok := m[name]
+	if ok {
+		return f, nil
+	}
 
-// func (r *resolver) resolveDir(name string, fs Filesystem) (map[string]File, error) {
-// 	fsPath, nestedFs, nestedFsPath, err := r.resolvePath(name, fs)
-// 	if err != nil {
-// 		return nil, err
-// 	}
+	for p := range m {
+		if strings.HasPrefix(p, name) {
+			return newDirFile(name), nil
+		}
+	}
 
-// 	if nestedFs == nil {
-// 		return fs.ReadDir(fsPath)
-// 	}
+	return nil, ErrNotExist
+}
 
-// 	return nestedFs.ReadDir(nestedFsPath)
-// }
+func listDirFromFiles[F File](m map[string]F, name string) ([]fs.DirEntry, error) {
+	out := make([]fs.DirEntry, 0, len(m))
+	name = AddTrailSlash(name)
+	for p, f := range m {
+		if strings.HasPrefix(p, name) {
+			parts := strings.Split(trimRelPath(p, name), Separator)
+			if len(parts) == 1 {
+				out = append(out, newFileInfo(parts[0], f.Size()))
+			} else {
+				out = append(out, newDirInfo(parts[0]))
+			}
+
+		}
+	}
+	slices.SortStableFunc(out, func(de1, de2 fs.DirEntry) int {
+		return strings.Compare(de1.Name(), de2.Name())
+	})
+	out = slices.CompactFunc(out, func(de1, de2 fs.DirEntry) bool {
+		return de1.Name() == de2.Name()
+	})
+
+	return out, nil
+}
diff --git a/src/host/vfs/resolver_test.go b/src/host/vfs/resolver_test.go
index 953ad82..4b927fd 100644
--- a/src/host/vfs/resolver_test.go
+++ b/src/host/vfs/resolver_test.go
@@ -1,192 +1,237 @@
-package vfs
+package vfs_test
 
 import (
-	"os"
+	"archive/zip"
+	"bytes"
+	"context"
+	"testing"
+
+	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
+	"github.com/stretchr/testify/require"
 )
 
-type Dummy struct {
-}
+func createZip(files map[string][]byte) ([]byte, error) {
+	buf := bytes.NewBuffer(nil)
+	zw := zip.NewWriter(buf)
 
-func (d *Dummy) Size() int64 {
-	return 0
-}
+	for name, data := range files {
+		fw, err := zw.Create(name)
+		if err != nil {
+			return nil, err
+		}
 
-func (d *Dummy) IsDir() bool {
-	return false
-}
-
-func (d *Dummy) Close() error {
-	return nil
-}
-
-func (d *Dummy) Read(p []byte) (n int, err error) {
-	return 0, nil
-}
-
-func (d *Dummy) ReadAt(p []byte, off int64) (n int, err error) {
-	return 0, nil
-}
-
-var _ File = &Dummy{}
-
-type DummyFs struct {
-}
-
-func (d *DummyFs) Open(filename string) (File, error) {
-	return &Dummy{}, nil
-}
-
-func (d *DummyFs) ReadDir(path string) (map[string]File, error) {
-	if path == "/dir/here" {
-		return map[string]File{
-			"file1.txt": &Dummy{},
-			"file2.txt": &Dummy{},
-		}, nil
+		_, err = fw.Write(data)
+		if err != nil {
+			return nil, err
+		}
+	}
+	err := zw.Flush()
+	if err != nil {
+		return nil, err
 	}
 
-	return nil, os.ErrNotExist
+	err = zw.Close()
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
 }
 
-var _ Filesystem = &DummyFs{}
+func TestResolverFs(t *testing.T) {
+	t.Parallel()
+	ctx := context.Background()
 
-// func TestDefaultFactories(t *testing.T) {
+	testZip, err := createZip(map[string][]byte{
+		"123.txt":       []byte("123"),
+		"files/321.txt": []byte("321"),
+	})
+	require.NoError(t, err)
+
+	fs := vfs.NewResolveFS(vfs.NewMemoryFS("/", map[string]*vfs.MemoryFile{
+		"/data/123.zip": vfs.NewMemoryFile("123.zip", testZip),
+	}), vfs.ArchiveFactories)
+
+	t.Run("dir", func(t *testing.T) {
+		t.Parallel()
+		require := require.New(t)
+
+		dirs := []string{
+			"/data", "/", "/.",
+			"/data/123.zip", "/data/123.zip/files", "/data/123.zip/files/.",
+		}
+
+		for _, dir := range dirs {
+			file, err := fs.Open(ctx, dir)
+			require.NoError(err)
+			require.True(file.IsDir())
+
+			stat, err := file.Info()
+			require.NoError(err)
+			require.True(stat.IsDir())
+		}
+
+		entries, err := fs.ReadDir(ctx, "/data")
+		require.NoError(err)
+		require.Len(entries, 1)
+
+		for _, e := range entries {
+			switch e.Name() {
+			case "123.zip":
+				require.True(e.IsDir())
+				require.IsType(&vfs.ArchiveFS{}, e)
+			}
+		}
+
+		entries, err = fs.ReadDir(ctx, "/data/123.zip/files")
+		require.NoError(err)
+		require.Len(entries, 1)
+
+		entries, err = fs.ReadDir(ctx, "/data/123.zip")
+		require.NoError(err)
+		require.Len(entries, 3)
+
+		for _, e := range entries {
+			switch e.Name() {
+			case "files":
+				require.True(e.IsDir())
+			case "123.txt":
+				require.False(e.IsDir())
+			}
+		}
+	})
+}
+
+// func TestResolver(t *testing.T) {
 // 	t.Parallel()
+// 	resolver := newResolver(ArchiveFactories)
+// 	ctx := context.Background()
 
+// 	t.Run("nested fs", func(t *testing.T) {
+// 		t.Parallel()
+// 		require := require.New(t)
+
+// 		fsPath, nestedFs, nestedFsPath, err := resolver.resolvePath(ctx, "/f1.rar/f2.rar", func(_ context.Context, path string) (File, error) {
+// 			require.Equal("/f1.rar", path)
+// 			return &vfs.Dummy{}, nil
+// 		})
+// 		require.NoError(err)
+// 		require.Equal("/f1.rar", fsPath)
+// 		require.Equal("/f2.rar", nestedFsPath)
+// 		require.IsType(&vfs.ArchiveFS{}, nestedFs)
+// 	})
+// 	t.Run("root", func(t *testing.T) {
+// 		t.Parallel()
+// 		require := require.New(t)
+
+// 		fsPath, nestedFs, nestedFsPath, err := resolver.resolvePath(ctx, "/", func(_ context.Context, path string) (File, error) {
+// 			require.Equal("/", path)
+// 			return &Dummy{}, nil
+// 		})
+// 		require.NoError(err)
+// 		require.Nil(nestedFs)
+// 		require.Equal("/", fsPath)
+// 		require.Equal("", nestedFsPath)
+// 	})
+
+// 	t.Run("root dirty", func(t *testing.T) {
+// 		t.Parallel()
+// 		require := require.New(t)
+
+// 		fsPath, nestedFs, nestedFsPath, err := resolver.resolvePath(ctx, "//.//", func(_ context.Context, path string) (File, error) {
+// 			require.Equal("/", path)
+// 			return &Dummy{}, nil
+// 		})
+// 		require.NoError(err)
+// 		require.Nil(nestedFs)
+// 		require.Equal("/", fsPath)
+// 		require.Equal("", nestedFsPath)
+// 	})
+
+// 	t.Run("root dirty 2", func(t *testing.T) {
+// 		t.Parallel()
+// 		require := require.New(t)
+
+// 		fsPath, nestedFs, nestedFsPath, err := resolver.resolvePath(ctx, "/.", func(_ context.Context, path string) (File, error) {
+// 			require.Equal("/", path)
+// 			return &Dummy{}, nil
+// 		})
+// 		require.NoError(err)
+// 		require.Nil(nestedFs)
+// 		require.Equal("/", fsPath)
+// 		require.Equal("", nestedFsPath)
+// 	})
+
+// 	t.Run("fs dirty", func(t *testing.T) {
+// 		t.Parallel()
+// 		require := require.New(t)
+
+// 		fsPath, nestedFs, nestedFsPath, err := resolver.resolvePath(ctx, "//.//f1.rar", func(_ context.Context, path string) (File, error) {
+// 			require.Equal("/f1.rar", path)
+// 			return &Dummy{}, nil
+// 		})
+// 		require.NoError(err)
+// 		require.Equal("/f1.rar", fsPath)
+// 		require.Equal("/", nestedFsPath)
+// 		require.IsType(&ArchiveFS{}, nestedFs)
+// 	})
+// 	t.Run("inside folder", func(t *testing.T) {
+// 		t.Parallel()
+// 		require := require.New(t)
+
+// 		fsPath, nestedFs, nestedFsPath, err := resolver.resolvePath(ctx, "//test1/f1.rar", func(_ context.Context, path string) (File, error) {
+// 			require.Equal("/test1/f1.rar", path)
+// 			return &Dummy{}, nil
+// 		})
+// 		require.NoError(err)
+// 		require.IsType(&ArchiveFS{}, nestedFs)
+// 		require.Equal("/test1/f1.rar", fsPath)
+// 		require.Equal("/", nestedFsPath)
+// 	})
+// }
+
+// func TestFiles(t *testing.T) {
+// 	t.Parallel()
 // 	require := require.New(t)
 
-// 	require.Contains(defaultFactories, ".zip")
-// 	require.Contains(defaultFactories, ".rar")
-// 	require.Contains(defaultFactories, ".7z")
-
-// 	fs, err := defaultFactories[".zip"](&Dummy{}, nil)
-// 	require.NoError(err)
-// 	require.NotNil(fs)
-
-// 	fs, err = defaultFactories[".rar"](&Dummy{}, nil)
-// 	require.NoError(err)
-// 	require.NotNil(fs)
-
-// 	fs, err = defaultFactories[".7z"](&Dummy{}, nil)
-// 	require.NoError(err)
-// 	require.NotNil(fs)
-// }
-
-// func TestStorageAddFs(t *testing.T) {
-// 	t.Parallel()
-
-// 	require := require.New(t)
-
-// 	s := newStorage(dummyFactories)
-
-// 	err := s.AddFS(&DummyFs{}, "/test")
-// 	require.NoError(err)
-
-// 	f, err := s.Get("/test/dir/here/file1.txt")
-// 	require.NoError(err)
-// 	require.NotNil(f)
-
-// 	err = s.AddFS(&DummyFs{}, "/test")
-// 	require.Error(err)
-// }
-
-// func TestStorageWindowsPath(t *testing.T) {
-// 	t.Parallel()
-
-// 	require := require.New(t)
-
-// 	s := newStorage(dummyFactories)
-
-// 	err := s.Add(&Dummy{}, "\\path\\to\\dummy\\file.txt")
-// 	require.NoError(err)
-
-// 	file, err := s.Get("\\path\\to\\dummy\\file.txt")
-// 	require.NoError(err)
-// 	require.Equal(&Dummy{}, file)
-
-// 	file, err = s.Get("/path/to/dummy/file.txt")
-// 	require.NoError(err)
-// 	require.Equal(&Dummy{}, file)
-// }
-
-// var dummyFactories = map[string]vfs.FsFactory{
-// 	".test": func(f vfs.File, factories map[string]vfs.FsFactory) (vfs.Filesystem, error) {
-// 		return &DummyFs{}, nil
-// 	},
-// }
-
-// func TestStorage(t *testing.T) {
-// 	t.Parallel()
-
-// 	require := require.New(t)
-
-// 	s := newStorage(dummyFactories)
-
-// 	err := s.Add(&Dummy{}, "/path/to/dummy/file.txt")
-// 	require.NoError(err)
-
-// 	err = s.Add(&Dummy{}, "/path/to/dummy/file2.txt")
-// 	require.NoError(err)
-
-// 	contains := s.Has("/path")
-// 	require.True(contains)
-
-// 	contains = s.Has("/path/to/dummy/")
-// 	require.True(contains)
-
-// 	file, err := s.Get("/path/to/dummy/file.txt")
-// 	require.NoError(err)
-// 	require.Equal(&Dummy{}, file)
-
-// 	file, err = s.Get("/path/to/dummy/file3.txt")
-// 	require.Error(err)
-// 	require.Nil(file)
-
-// 	files, err := s.Children("/path/to/dummy/")
-// 	require.NoError(err)
-// 	require.Len(files, 2)
-// 	require.Contains(files, "file.txt")
-// 	require.Contains(files, "file2.txt")
-
-// 	err = s.Add(&Dummy{}, "/path/to/dummy/folder/file.txt")
-// 	require.NoError(err)
-
-// 	files, err = s.Children("/path/to/dummy/")
-// 	require.NoError(err)
-// 	require.Len(files, 3)
-// 	require.Contains(files, "file.txt")
-// 	require.Contains(files, "file2.txt")
-// 	require.Contains(files, "folder")
-
-// 	err = s.Add(&Dummy{}, "path/file4.txt")
-// 	require.NoError(err)
-
-// 	require.True(s.Has("/path/file4.txt"))
-
-// 	files, err = s.Children("/")
-// 	require.NoError(err)
-// 	require.Len(files, 1)
-
-// 	err = s.Add(&Dummy{}, "/path/special_file.test")
-// 	require.NoError(err)
-
-// 	file, err = s.Get("/path/special_file.test/dir/here/file1.txt")
-// 	require.NoError(err)
-// 	require.Equal(&Dummy{}, file)
-
-// 	files, err = s.Children("/path/special_file.test")
-// 	require.NoError(err)
-// 	require.NotNil(files)
-
-// 	files, err = s.Children("/path/special_file.test/dir/here")
-// 	require.NoError(err)
-// 	require.Len(files, 2)
-
-// 	err = s.Add(&Dummy{}, "/path/to/__special__path/file3.txt")
-// 	require.NoError(err)
-
-// 	file, err = s.Get("/path/to/__special__path/file3.txt")
-// 	require.NoError(err)
-// 	require.Equal(&Dummy{}, file)
-
-// 	s.Clear()
+// 	files := map[string]*vfs.DummyFile{
+// 		"/test/file.txt":  &vfs.DummyFile{},
+// 		"/test/file2.txt": &vfs.DummyFile{},
+// 		"/test1/file.txt": &vfs.DummyFile{},
+// 	}
+// 	{
+// 		file, err := getFile(files, "/test")
+// 		require.NoError(err)
+// 		require.Equal(&dir{name: "test"}, file)
+// 	}
+// 	{
+// 		file, err := getFile(files, "/test/file.txt")
+// 		require.NoError(err)
+// 		require.Equal(&Dummy{}, file)
+// 	}
+// 	{
+// 		out, err := listDirFromFiles(files, "/test")
+// 		require.NoError(err)
+// 		require.Len(out, 2)
+// 		require.Equal("file.txt", out[0].Name())
+// 		require.Equal("file2.txt", out[1].Name())
+// 		require.False(out[0].IsDir())
+// 		require.False(out[1].IsDir())
+// 	}
+// 	{
+// 		out, err := listDirFromFiles(files, "/test1")
+// 		require.NoError(err)
+// 		require.Len(out, 1)
+// 		require.Equal("file.txt", out[0].Name())
+// 		require.False(out[0].IsDir())
+// 	}
+// 	{
+// 		out, err := listDirFromFiles(files, "/")
+// 		require.NoError(err)
+// 		require.Len(out, 2)
+// 		require.Equal("test", out[0].Name())
+// 		require.Equal("test1", out[1].Name())
+// 		require.True(out[0].IsDir())
+// 		require.True(out[1].IsDir())
+// 	}
 // }
diff --git a/src/host/vfs/torrent.go b/src/host/vfs/torrent.go
index 99b2af1..f33c326 100644
--- a/src/host/vfs/torrent.go
+++ b/src/host/vfs/torrent.go
@@ -3,126 +3,485 @@ package vfs
 import (
 	"context"
 	"io"
+	"io/fs"
+	"path"
+	"slices"
+	"strings"
 	"sync"
 	"time"
 
-	"git.kmsign.ru/royalcat/tstor/src/iio"
-	"github.com/anacrolix/missinggo/v2"
+	"git.kmsign.ru/royalcat/tstor/src/host/controller"
 	"github.com/anacrolix/torrent"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+	"golang.org/x/exp/maps"
 )
 
-var _ Filesystem = &TorrentFs{}
-
 type TorrentFs struct {
-	mu          sync.RWMutex
-	t           *torrent.Torrent
-	readTimeout int
+	name string
+
+	mu      sync.Mutex
+	Torrent *controller.Torrent
+
+	filesCache map[string]File
 
 	resolver *resolver
 }
 
-func NewTorrentFs(t *torrent.Torrent, readTimeout int) *TorrentFs {
+var _ Filesystem = (*TorrentFs)(nil)
+
+func NewTorrentFs(name string, c *controller.Torrent) *TorrentFs {
 	return &TorrentFs{
-		t:           t,
-		readTimeout: readTimeout,
-		resolver:    newResolver(ArchiveFactories),
+		name:     name,
+		Torrent:  c,
+		resolver: newResolver(ArchiveFactories),
 	}
 }
 
-func (fs *TorrentFs) files() map[string]*torrentFile {
-	files := make(map[string]*torrentFile)
-	<-fs.t.GotInfo()
-	for _, file := range fs.t.Files() {
-		p := Clean(file.Path())
-		files[p] = &torrentFile{
-			readerFunc: file.NewReader,
-			len:        file.Length(),
-			timeout:    fs.readTimeout,
+var _ fs.DirEntry = (*TorrentFs)(nil)
+
+// Name implements fs.DirEntry.
+func (tfs *TorrentFs) Name() string {
+	return tfs.name
+}
+
+// Info implements fs.DirEntry.
+func (tfs *TorrentFs) Info() (fs.FileInfo, error) {
+	return tfs, nil
+}
+
+// IsDir implements fs.DirEntry.
+func (tfs *TorrentFs) IsDir() bool {
+	return true
+}
+
+// Type implements fs.DirEntry.
+func (tfs *TorrentFs) Type() fs.FileMode {
+	return fs.ModeDir
+}
+
+// ModTime implements fs.FileInfo.
+func (tfs *TorrentFs) ModTime() time.Time {
+	return time.Time{}
+}
+
+// Mode implements fs.FileInfo.
+func (tfs *TorrentFs) Mode() fs.FileMode {
+	return fs.ModeDir
+}
+
+// Size implements fs.FileInfo.
+func (tfs *TorrentFs) Size() int64 {
+	return 0
+}
+
+// Sys implements fs.FileInfo.
+func (tfs *TorrentFs) Sys() any {
+	return nil
+}
+
+// FsName implements Filesystem.
+func (tfs *TorrentFs) FsName() string {
+	return "torrentfs"
+}
+
+func (fs *TorrentFs) files(ctx context.Context) (map[string]File, error) {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+
+	if fs.filesCache != nil {
+		return fs.filesCache, nil
+	}
+
+	ctx, span := tracer.Start(ctx, "files", fs.traceAttrs())
+	defer span.End()
+
+	files, err := fs.Torrent.Files(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	fs.filesCache = make(map[string]File)
+	for _, file := range files {
+		file.SetPriority(torrent.PiecePriorityNormal)
+		p := AbsPath(file.Path())
+		tf, err := openTorrentFile(ctx, path.Base(p), file)
+		if err != nil {
+			return nil, err
+		}
+		fs.filesCache[p] = tf
+	}
+
+	// TODO optional
+	if len(fs.filesCache) == 1 && fs.resolver.isNestedFs(fs.Torrent.Name()) {
+		filepath := "/" + fs.Torrent.Name()
+		if file, ok := fs.filesCache[filepath]; ok {
+			nestedFs, err := fs.resolver.nestedFs(ctx, filepath, file)
+			if err != nil {
+				return nil, err
+			}
+			if nestedFs == nil {
+				goto DEFAULT_DIR // FIXME
+			}
+			fs.filesCache, err = fs.listFilesRecursive(ctx, nestedFs, "/")
+			if err != nil {
+				return nil, err
+			}
+
+			return fs.filesCache, nil
+		}
+
+	}
+
+DEFAULT_DIR:
+	rootDir := "/" + fs.Torrent.Name() + "/"
+	singleDir := true
+	for k, _ := range fs.filesCache {
+		if !strings.HasPrefix(k, rootDir) {
+			singleDir = false
+		}
+	}
+	if singleDir {
+		for k, f := range fs.filesCache {
+			delete(fs.filesCache, k)
+			k, _ = strings.CutPrefix(k, rootDir)
+			k = AbsPath(k)
+			fs.filesCache[k] = f
 		}
 	}
 
-	return files
+	return fs.filesCache, nil
 }
 
-func (fs *TorrentFs) rawOpen(path string) (File, error) {
-	file, err := getFile(fs.files(), path)
+// func anyPeerHasFiles(file *torrent.File) bool {
+// 	for _, conn := range file.Torrent().PeerConns() {
+// 		if bitmapHaveFile(conn.PeerPieces(), file) {
+// 			return true
+// 		}
+// 	}
+// 	return false
+// }
+
+// func bitmapHaveFile(bitmap *roaring.Bitmap, file *torrent.File) bool {
+// 	for i := file.BeginPieceIndex(); i < file.EndPieceIndex(); i++ {
+// 		if !bitmap.ContainsInt(i) {
+// 			return false
+// 		}
+// 	}
+// 	return true
+// }
+
+func (fs *TorrentFs) listFilesRecursive(ctx context.Context, vfs Filesystem, start string) (map[string]File, error) {
+	ctx, span := tracer.Start(ctx, "listFilesRecursive",
+		fs.traceAttrs(attribute.String("start", start)),
+	)
+	defer span.End()
+
+	out := make(map[string]File, 0)
+	entries, err := vfs.ReadDir(ctx, start)
+	if err != nil {
+		return nil, err
+	}
+	for _, entry := range entries {
+		filename := path.Join(start, entry.Name())
+		if entry.IsDir() {
+			rec, err := fs.listFilesRecursive(ctx, vfs, filename)
+			if err != nil {
+				return nil, err
+			}
+			maps.Copy(out, rec)
+		} else {
+			file, err := vfs.Open(ctx, filename)
+			if err != nil {
+				return nil, err
+			}
+			out[filename] = file
+		}
+	}
+
+	return out, nil
+}
+
+func (fs *TorrentFs) rawOpen(ctx context.Context, filename string) (file File, err error) {
+	ctx, span := tracer.Start(ctx, "rawOpen",
+		fs.traceAttrs(attribute.String("filename", filename)),
+	)
+	defer func() {
+		if err != nil {
+			span.RecordError(err)
+		}
+		span.End()
+	}()
+
+	files, err := fs.files(ctx)
+	if err != nil {
+		return nil, err
+	}
+	file, err = getFile(files, filename)
 	return file, err
 }
 
-func (fs *TorrentFs) Open(filename string) (File, error) {
-	fsPath, nestedFs, nestedFsPath, err := fs.resolver.resolvePath(filename, fs.rawOpen)
+func (fs *TorrentFs) rawStat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	ctx, span := tracer.Start(ctx, "rawStat",
+		fs.traceAttrs(attribute.String("filename", filename)),
+	)
+	defer span.End()
+
+	files, err := fs.files(ctx)
+	if err != nil {
+		return nil, err
+	}
+	file, err := getFile(files, filename)
+	if err != nil {
+		return nil, err
+	}
+	return file.Info()
+}
+
+func (fs *TorrentFs) traceAttrs(add ...attribute.KeyValue) trace.SpanStartOption {
+	return trace.WithAttributes(append([]attribute.KeyValue{
+		attribute.String("fs", fs.FsName()),
+		attribute.String("torrent", fs.Torrent.Name()),
+		attribute.String("infohash", fs.Torrent.InfoHash()),
+	}, add...)...)
+}
+
+// Stat implements Filesystem.
+func (fs *TorrentFs) Stat(ctx context.Context, filename string) (fs.FileInfo, error) {
+	ctx, span := tracer.Start(ctx, "Stat",
+		fs.traceAttrs(attribute.String("filename", filename)),
+	)
+	defer span.End()
+
+	if path.Clean(filename) == Separator {
+		return fs, nil
+	}
+
+	fsPath, nestedFs, nestedFsPath, err := fs.resolver.resolvePath(ctx, filename, fs.rawOpen)
 	if err != nil {
 		return nil, err
 	}
 	if nestedFs != nil {
-		return nestedFs.Open(nestedFsPath)
+		return nestedFs.Stat(ctx, nestedFsPath)
 	}
 
-	return fs.rawOpen(fsPath)
+	return fs.rawStat(ctx, fsPath)
 }
 
-func (fs *TorrentFs) ReadDir(name string) (map[string]File, error) {
-	fsPath, nestedFs, nestedFsPath, err := fs.resolver.resolvePath(name, fs.rawOpen)
+func (fs *TorrentFs) Open(ctx context.Context, filename string) (File, error) {
+	ctx, span := tracer.Start(ctx, "Open",
+		fs.traceAttrs(attribute.String("filename", filename)),
+	)
+	defer span.End()
+
+	if path.Clean(filename) == Separator {
+		return newDirFile(fs.name), nil
+	}
+
+	fsPath, nestedFs, nestedFsPath, err := fs.resolver.resolvePath(ctx, filename, fs.rawOpen)
 	if err != nil {
 		return nil, err
 	}
 	if nestedFs != nil {
-		return nestedFs.ReadDir(nestedFsPath)
+		return nestedFs.Open(ctx, nestedFsPath)
 	}
 
-	return listFilesInDir(fs.files(), fsPath)
+	return fs.rawOpen(ctx, fsPath)
 }
 
-type reader interface {
-	iio.Reader
-	missinggo.ReadContexter
+func (fs *TorrentFs) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) {
+	ctx, span := tracer.Start(ctx, "ReadDir",
+		fs.traceAttrs(attribute.String("name", name)),
+	)
+	defer span.End()
+
+	fsPath, nestedFs, nestedFsPath, err := fs.resolver.resolvePath(ctx, name, fs.rawOpen)
+	if err != nil {
+		return nil, err
+	}
+	if nestedFs != nil {
+		return nestedFs.ReadDir(ctx, nestedFsPath)
+	}
+	files, err := fs.files(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return listDirFromFiles(files, fsPath)
 }
 
-type readAtWrapper struct {
-	timeout int
-	mu      sync.Mutex
+func (fs *TorrentFs) Unlink(ctx context.Context, name string) error {
+	ctx, span := tracer.Start(ctx, "Unlink",
+		fs.traceAttrs(attribute.String("name", name)),
+	)
+	defer span.End()
 
-	torrent.Reader
-	io.ReaderAt
-	io.Closer
+	name = AbsPath(name)
+
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+
+	files, err := fs.files(ctx)
+	if err != nil {
+		return err
+	}
+
+	if !slices.Contains(maps.Keys(files), name) {
+		return ErrNotExist
+	}
+
+	file := files[name]
+	delete(fs.filesCache, name)
+
+	tfile, ok := file.(*torrentFile)
+	if !ok {
+		return ErrNotImplemented
+	}
+
+	return fs.Torrent.ExcludeFile(ctx, tfile.file)
 }
 
-func newReadAtWrapper(r torrent.Reader, timeout int) reader {
-	w := &readAtWrapper{Reader: r, timeout: timeout}
-	w.SetResponsive()
-	return w
+var _ File = (*torrentFile)(nil)
+
+type torrentFile struct {
+	name string
+
+	mu sync.Mutex
+
+	tr torrent.Reader
+
+	lastReadTimeout time.Time
+
+	file *torrent.File
 }
 
-func (rw *readAtWrapper) ReadAt(p []byte, off int64) (int, error) {
+const secondaryTimeout = time.Hour
+
+func openTorrentFile(ctx context.Context, name string, file *torrent.File) (*torrentFile, error) {
+	// select {
+	// case <-file.Torrent().GotInfo():
+	// 	break
+	// case <-ctx.Done():
+	// 	return nil, ctx.Err()
+	// }
+
+	r := file.NewReader()
+	r.SetReadahead(1024 * 1024 * 16) // TODO configurable
+	// r.SetResponsive()
+
+	return &torrentFile{
+		name: name,
+		tr:   r,
+		file: file,
+	}, nil
+}
+
+// Name implements File.
+func (tf *torrentFile) Name() string {
+	return tf.name
+}
+
+// Type implements File.
+func (tf *torrentFile) Type() fs.FileMode {
+	return roMode | fs.ModeDir
+}
+
+func (tf *torrentFile) Info() (fs.FileInfo, error) {
+	return newFileInfo(tf.name, tf.file.Length()), nil
+}
+
+func (tf *torrentFile) Size() int64 {
+	return tf.file.Length()
+}
+
+func (tf *torrentFile) IsDir() bool {
+	return false
+}
+
+func (rw *torrentFile) Close(ctx context.Context) error {
 	rw.mu.Lock()
 	defer rw.mu.Unlock()
-	_, err := rw.Seek(off, io.SeekStart)
+
+	return rw.tr.Close()
+}
+
+// Read implements ctxio.Reader.
+func (tf *torrentFile) Read(ctx context.Context, p []byte) (n int, err error) {
+	ctx, span := tracer.Start(ctx, "Read",
+		trace.WithAttributes(attribute.Int("length", len(p))),
+	)
+	defer func() {
+		span.SetAttributes(attribute.Int("read", n))
+		span.End()
+	}()
+
+	tf.mu.Lock()
+	defer tf.mu.Unlock()
+
+	if time.Since(tf.lastReadTimeout) < secondaryTimeout { // make short timeout for already failed files
+		span.SetAttributes(attribute.Bool("short_timeout", true))
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, time.Millisecond)
+		defer cancel()
+	}
+	defer func() {
+		if err == context.DeadlineExceeded {
+			tf.lastReadTimeout = time.Now()
+		}
+	}()
+
+	return tf.tr.ReadContext(ctx, p)
+}
+
+func (tf *torrentFile) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
+	ctx, span := tracer.Start(ctx, "ReadAt",
+		trace.WithAttributes(attribute.Int("length", len(p)), attribute.Int64("offset", off)),
+	)
+	defer func() {
+		span.SetAttributes(attribute.Int("read", n))
+		span.End()
+	}()
+
+	tf.mu.Lock()
+	defer tf.mu.Unlock()
+
+	if time.Since(tf.lastReadTimeout) < secondaryTimeout { // make short timeout for already failed files
+		span.SetAttributes(attribute.Bool("short_timeout", true))
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, time.Millisecond)
+		defer cancel()
+	}
+	defer func() {
+		if err == context.DeadlineExceeded {
+			tf.lastReadTimeout = time.Now()
+		}
+	}()
+
+	_, err = tf.tr.Seek(off, io.SeekStart)
 	if err != nil {
 		return 0, err
 	}
 
-	return readAtLeast(rw, rw.timeout, p, len(p))
+	// return tf.tr.ReadContext(ctx, p)
+	n, err = readAtLeast(ctx, tf.tr, p, len(p))
+
+	_, err = tf.tr.Seek(0, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return n, err
 }
 
-func readAtLeast(r missinggo.ReadContexter, timeout int, buf []byte, min int) (n int, err error) {
+func readAtLeast(ctx context.Context, r torrent.Reader, buf []byte, min int) (n int, err error) {
 	if len(buf) < min {
 		return 0, io.ErrShortBuffer
 	}
 	for n < min && err == nil {
 		var nn int
 
-		ctx, cancel := context.WithCancel(context.Background())
-		timer := time.AfterFunc(
-			time.Duration(timeout)*time.Second,
-			func() {
-				cancel()
-			},
-		)
-
 		nn, err = r.ReadContext(ctx, buf[n:])
 		n += nn
-
-		timer.Stop()
 	}
 	if n >= min {
 		err = nil
@@ -131,64 +490,3 @@ func readAtLeast(r missinggo.ReadContexter, timeout int, buf []byte, min int) (n
 	}
 	return
 }
-
-func (rw *readAtWrapper) Close() error {
-	rw.mu.Lock()
-	defer rw.mu.Unlock()
-	return rw.Reader.Close()
-}
-
-var _ File = &torrentFile{}
-
-type torrentFile struct {
-	readerFunc func() torrent.Reader
-	reader     reader
-	len        int64
-	timeout    int
-}
-
-func (d *torrentFile) load() {
-	if d.reader != nil {
-		return
-	}
-	d.reader = newReadAtWrapper(d.readerFunc(), d.timeout)
-}
-
-func (d *torrentFile) Size() int64 {
-	return d.len
-}
-
-func (d *torrentFile) IsDir() bool {
-	return false
-}
-
-func (d *torrentFile) Close() error {
-	var err error
-	if d.reader != nil {
-		err = d.reader.Close()
-	}
-
-	d.reader = nil
-
-	return err
-}
-
-func (d *torrentFile) Read(p []byte) (n int, err error) {
-	d.load()
-	ctx, cancel := context.WithCancel(context.Background())
-	timer := time.AfterFunc(
-		time.Duration(d.timeout)*time.Second,
-		func() {
-			cancel()
-		},
-	)
-
-	defer timer.Stop()
-
-	return d.reader.ReadContext(ctx, p)
-}
-
-func (d *torrentFile) ReadAt(p []byte, off int64) (n int, err error) {
-	d.load()
-	return d.reader.ReadAt(p, off)
-}
diff --git a/src/host/vfs/torrent_test.go b/src/host/vfs/torrent_test.go
index 5f5c3cc..fcac812 100644
--- a/src/host/vfs/torrent_test.go
+++ b/src/host/vfs/torrent_test.go
@@ -5,8 +5,6 @@ import (
 	"testing"
 
 	"github.com/anacrolix/torrent"
-
-	"github.com/stretchr/testify/require"
 )
 
 const testMagnet = "magnet:?xt=urn:btih:a88fda5954e89178c372716a6a78b8180ed4dad3&dn=The+WIRED+CD+-+Rip.+Sample.+Mash.+Share&tr=udp%3A%2F%2Fexplodie.org%3A6969&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Ftracker.empire-js.us%3A1337&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337&tr=wss%3A%2F%2Ftracker.btorrent.xyz&tr=wss%3A%2F%2Ftracker.fastcast.nz&tr=wss%3A%2F%2Ftracker.openwebtorrent.com&ws=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2F&xs=https%3A%2F%2Fwebtorrent.io%2Ftorrents%2Fwired-cd.torrent"
@@ -84,57 +82,61 @@ func TestMain(m *testing.M) {
 // 	require.NoError(f.Close())
 // }
 
-func TestReadAtTorrent(t *testing.T) {
-	require := require.New(t)
+// func TestReadAtTorrent(t *testing.T) {
+// 	t.Parallel()
 
-	to, err := Cli.AddMagnet(testMagnet)
-	require.NoError(err)
+// 	ctx := context.Background()
 
-	<-to.GotInfo()
-	torrFile := to.Files()[0]
+// 	require := require.New(t)
 
-	tf := torrentFile{
-		readerFunc: torrFile.NewReader,
-		len:        torrFile.Length(),
-		timeout:    500,
-	}
+// 	to, err := Cli.AddMagnet(testMagnet)
+// 	require.NoError(err)
 
-	defer tf.Close()
+// 	<-to.GotInfo()
+// 	torrFile := to.Files()[0]
 
-	toRead := make([]byte, 5)
-	n, err := tf.ReadAt(toRead, 6)
-	require.NoError(err)
-	require.Equal(5, n)
-	require.Equal([]byte{0x0, 0x0, 0x1f, 0x76, 0x54}, toRead)
+// 	tf, err := openTorrentFile(ctx, "torr", torrFile)
+// 	require.NoError(err)
 
-	n, err = tf.ReadAt(toRead, 0)
-	require.NoError(err)
-	require.Equal(5, n)
-	require.Equal([]byte{0x49, 0x44, 0x33, 0x3, 0x0}, toRead)
-}
+// 	defer tf.Close(ctx)
 
-func TestReadAtWrapper(t *testing.T) {
-	t.Parallel()
+// 	toRead := make([]byte, 5)
+// 	n, err := tf.ReadAt(ctx, toRead, 6)
+// 	require.NoError(err)
+// 	require.Equal(5, n)
+// 	require.Equal([]byte{0x0, 0x0, 0x1f, 0x76, 0x54}, toRead)
 
-	require := require.New(t)
+// 	n, err = tf.ReadAt(ctx, toRead, 0)
+// 	require.NoError(err)
+// 	require.Equal(5, n)
+// 	require.Equal([]byte{0x49, 0x44, 0x33, 0x3, 0x0}, toRead)
+// }
 
-	to, err := Cli.AddMagnet(testMagnet)
-	require.NoError(err)
+// func TestReadAtWrapper(t *testing.T) {
+// 	t.Parallel()
 
-	<-to.GotInfo()
-	torrFile := to.Files()[0]
+// 	ctx := context.Background()
 
-	r := newReadAtWrapper(torrFile.NewReader(), 10)
-	defer r.Close()
+// 	require := require.New(t)
 
-	toRead := make([]byte, 5)
-	n, err := r.ReadAt(toRead, 6)
-	require.NoError(err)
-	require.Equal(5, n)
-	require.Equal([]byte{0x0, 0x0, 0x1f, 0x76, 0x54}, toRead)
+// 	to, err := Cli.AddMagnet(testMagnet)
+// 	require.NoError(err)
 
-	n, err = r.ReadAt(toRead, 0)
-	require.NoError(err)
-	require.Equal(5, n)
-	require.Equal([]byte{0x49, 0x44, 0x33, 0x3, 0x0}, toRead)
-}
+// 	<-to.GotInfo()
+// 	torrFile := to.Files()[0]
+
+// 	r, err := openTorrentFile(ctx, "file", torrFile)
+// 	require.NoError(err)
+// 	defer r.Close(ctx)
+
+// 	toRead := make([]byte, 5)
+// 	n, err := r.ReadAt(ctx, toRead, 6)
+// 	require.NoError(err)
+// 	require.Equal(5, n)
+// 	require.Equal([]byte{0x0, 0x0, 0x1f, 0x76, 0x54}, toRead)
+
+// 	n, err = r.ReadAt(ctx, toRead, 0)
+// 	require.NoError(err)
+// 	require.Equal(5, n)
+// 	require.Equal([]byte{0x49, 0x44, 0x33, 0x3, 0x0}, toRead)
+// }
diff --git a/src/host/vfs/utils.go b/src/host/vfs/utils.go
index d4b2d77..0cfca90 100644
--- a/src/host/vfs/utils.go
+++ b/src/host/vfs/utils.go
@@ -1,55 +1,60 @@
 package vfs
 
 import (
-	"io/fs"
 	"path"
 	"strings"
+	"sync"
 )
 
-var ErrNotExist = fs.ErrNotExist
+const Separator = "/"
 
-func getFile[F File](m map[string]F, name string) (File, error) {
-	name = Clean(name)
-	if name == Separator {
-		return &Dir{}, nil
-	}
-
-	f, ok := m[name]
-	if ok {
-		return f, nil
-	}
-
-	for p := range m {
-		if strings.HasPrefix(p, name) {
-			return &Dir{}, nil
-		}
-	}
-
-	return nil, ErrNotExist
-}
-
-func listFilesInDir[F File](m map[string]F, name string) (map[string]File, error) {
-	name = Clean(name)
-
-	out := map[string]File{}
-	for p, f := range m {
-		if strings.HasPrefix(p, name) {
-			parts := strings.Split(trimRelPath(p, name), Separator)
-			if len(parts) == 1 {
-				out[parts[0]] = f
-			} else {
-				out[parts[0]] = &Dir{}
-			}
-		}
-	}
-
-	return out, nil
+func isRoot(filename string) bool {
+	return path.Clean(filename) == Separator
 }
 
 func trimRelPath(p, t string) string {
 	return strings.Trim(strings.TrimPrefix(p, t), "/")
 }
 
-func Clean(p string) string {
-	return path.Clean(Separator + strings.ReplaceAll(p, "\\", "/"))
+// func clean(p string) string {
+// 	return path.Clean(Separator + strings.ReplaceAll(p, "\\", "/"))
+// }
+
+func AbsPath(p string) string {
+	if p == "" || p[0] != '/' {
+		return Separator + p
+	}
+	return p
+}
+
+func AddTrailSlash(p string) string {
+	if p == "" || p[len(p)-1] != '/' {
+		return p + Separator
+	}
+	return p
+}
+
+// OnceValueWOErr returns a function that invokes f until a call succeeds, then
+// caches that first successful value and returns it on all subsequent calls.
+// Calls that return an error are not cached, so f is retried on the next call.
+// The returned function is safe for concurrent use.
+func OnceValueWOErr[T any](f func() (T, error)) func() (T, error) {
+	var (
+		mu         sync.Mutex
+		isExecuted bool
+		r1         T
+		err        error
+	)
+
+	return func() (T, error) {
+		mu.Lock()
+		defer mu.Unlock()
+
+		if isExecuted && err == nil {
+			return r1, nil
+		}
+
+		r1, err, isExecuted = f(), nil, true
+		return r1, err
+	}
 }
diff --git a/src/host/vfs/virtdir/vds.go b/src/host/vfs/virtdir/vds.go
new file mode 100644
index 0000000..31ccd60
--- /dev/null
+++ b/src/host/vfs/virtdir/vds.go
@@ -0,0 +1,21 @@
+package virtdir
+
+type SourceType string
+
+const (
+	VirtDirYtDlp SourceType = "yt-dlp"
+)
+
+type VirtDirSource interface {
+	SourceType() SourceType
+}
+
+var _ VirtDirSource = (*VirtDirSourceYtDlp)(nil)
+
+type VirtDirSourceYtDlp struct {
+	URL string `json:"url"`
+}
+
+func (VirtDirSourceYtDlp) SourceType() SourceType {
+	return VirtDirYtDlp
+}
diff --git a/src/iio/disk_test.go b/src/iio/disk_test.go
index c540d02..5df8814 100644
--- a/src/iio/disk_test.go
+++ b/src/iio/disk_test.go
@@ -26,7 +26,7 @@ func TestReadData(t *testing.T) {
 	require.Equal(5, n)
 	require.Equal("World", string(toRead))
 
-	r.ReadAt(toRead, 0)
+	n, err = r.ReadAt(toRead, 0)
 	require.NoError(err)
 	require.Equal(5, n)
 	require.Equal("Hello", string(toRead))
diff --git a/src/iio/wrapper_test.go b/src/iio/wrapper_test.go
index 071ce92..e53471f 100644
--- a/src/iio/wrapper_test.go
+++ b/src/iio/wrapper_test.go
@@ -1,11 +1,12 @@
 package iio_test
 
 import (
+	"context"
 	"io"
 	"testing"
 
+	"git.kmsign.ru/royalcat/tstor/pkg/ctxio"
 	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
-	"git.kmsign.ru/royalcat/tstor/src/iio"
 	"github.com/stretchr/testify/require"
 )
 
@@ -14,11 +15,12 @@ var testData []byte = []byte("Hello World")
 func TestSeekerWrapper(t *testing.T) {
 	t.Parallel()
 
+	ctx := context.Background()
 	require := require.New(t)
 
-	mf := vfs.NewMemoryFile(testData)
+	mf := vfs.NewMemoryFile("text.txt", testData)
 
-	r := iio.NewSeekerWrapper(mf, mf.Size())
+	r := ctxio.IoReadSeekCloserWrapper(ctx, mf, mf.Size())
 	defer r.Close()
 
 	n, err := r.Seek(6, io.SeekStart)
diff --git a/src/log/badger.go b/src/log/badger.go
index b6bc40f..f547dde 100644
--- a/src/log/badger.go
+++ b/src/log/badger.go
@@ -1,27 +1,35 @@
 package log
 
 import (
+	"fmt"
+	"log/slog"
 	"strings"
 
-	"github.com/rs/zerolog"
+	"github.com/dgraph-io/badger/v4"
 )
 
+var _ badger.Logger = (*Badger)(nil)
+
 type Badger struct {
-	L zerolog.Logger
+	L *slog.Logger
+}
+
+func fmtBadgerLog(m string, f ...any) string {
+	return fmt.Sprintf(strings.ReplaceAll(m, "\n", ""), f...)
 }
 
 func (l *Badger) Errorf(m string, f ...interface{}) {
-	l.L.Error().Msgf(strings.ReplaceAll(m, "\n", ""), f...)
+	l.L.Error(fmtBadgerLog(m, f...))
 }
 
 func (l *Badger) Warningf(m string, f ...interface{}) {
-	l.L.Warn().Msgf(strings.ReplaceAll(m, "\n", ""), f...)
+	l.L.Warn(fmtBadgerLog(m, f...))
 }
 
 func (l *Badger) Infof(m string, f ...interface{}) {
-	l.L.Info().Msgf(strings.ReplaceAll(m, "\n", ""), f...)
+	l.L.Info(fmtBadgerLog(m, f...))
 }
 
 func (l *Badger) Debugf(m string, f ...interface{}) {
-	l.L.Debug().Msgf(strings.ReplaceAll(m, "\n", ""), f...)
+	l.L.Debug(fmtBadgerLog(m, f...))
 }
diff --git a/src/log/log.go b/src/log/log.go
index d7539ac..6d0bcdb 100644
--- a/src/log/log.go
+++ b/src/log/log.go
@@ -1,50 +1,32 @@
 package log
 
-import (
-	"io"
-	"os"
-	"path/filepath"
-
-	"git.kmsign.ru/royalcat/tstor/src/config"
-	"github.com/mattn/go-colorable"
-	"github.com/rs/zerolog"
-	"github.com/rs/zerolog/log"
-	"gopkg.in/natefinch/lumberjack.v2"
-)
-
 const FileName = "tstor.log"
 
-func Load(config *config.Log) {
-	var writers []io.Writer
+// func Load(config *config.Log) {
+// 	level := slog.LevelInfo
+// 	if config.Debug {
+// 		level = slog.LevelDebug
+// 	}
 
-	// fix console colors on windows
-	cso := colorable.NewColorableStdout()
+// 	slog.SetDefault(slog.New(
+// 		tint.NewHandler(os.Stdout, &tint.Options{
+// 			Level:      level,
+// 			TimeFormat: time.Kitchen,
+// 			// NoColor:    !isatty.IsTerminal(os.Stdout.Fd()),
+// 		}),
+// 	))
+// }
 
-	writers = append(writers, zerolog.ConsoleWriter{Out: cso})
-	writers = append(writers, newRollingFile(config))
-	mw := io.MultiWriter(writers...)
+// func newRollingFile(config *config.Log) io.Writer {
+// 	if err := os.MkdirAll(config.Path, 0744); err != nil {
+// 		log.Error().Err(err).Str("path", config.Path).Msg("can't create log directory")
+// 		return nil
+// 	}
 
-	log.Logger = log.Output(mw)
-	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
-
-	l := zerolog.InfoLevel
-	if config.Debug {
-		l = zerolog.DebugLevel
-	}
-
-	zerolog.SetGlobalLevel(l)
-}
-
-func newRollingFile(config *config.Log) io.Writer {
-	if err := os.MkdirAll(config.Path, 0744); err != nil {
-		log.Error().Err(err).Str("path", config.Path).Msg("can't create log directory")
-		return nil
-	}
-
-	return &lumberjack.Logger{
-		Filename:   filepath.Join(config.Path, FileName),
-		MaxBackups: config.MaxBackups, // files
-		MaxSize:    config.MaxSize,    // megabytes
-		MaxAge:     config.MaxAge,     // days
-	}
-}
+// 	return &lumberjack.Logger{
+// 		Filename:   filepath.Join(config.Path, FileName),
+// 		MaxBackups: config.MaxBackups, // files
+// 		MaxSize:    config.MaxSize,    // megabytes
+// 		MaxAge:     config.MaxAge,     // days
+// 	}
+// }
diff --git a/src/log/nfs.go b/src/log/nfs.go
new file mode 100644
index 0000000..fb08422
--- /dev/null
+++ b/src/log/nfs.go
@@ -0,0 +1,194 @@
+package log
+
+import (
+	"fmt"
+	"log"
+	"log/slog"
+
+	nfs "git.kmsign.ru/royalcat/tstor/pkg/go-nfs"
+)
+
+var _ nfs.Logger = (*NFSLog)(nil)
+
+type NFSLog struct {
+	level nfs.LogLevel
+	l     *slog.Logger
+}
+
+func NewNFSLog(r *slog.Logger) *NFSLog {
+	return &NFSLog{
+		level: nfs.DebugLevel,
+		l:     r,
+	}
+}
+
+// Debug implements nfs.Logger.
+func (l *NFSLog) Debug(args ...interface{}) {
+	if l.level < nfs.DebugLevel {
+		return
+	}
+
+	l.l.Debug(fmt.Sprint(args...))
+}
+
+// Debugf implements nfs.Logger.
+func (l *NFSLog) Debugf(format string, args ...interface{}) {
+	if l.level < nfs.DebugLevel {
+		return
+	}
+
+	l.l.Debug(fmt.Sprintf(format, args...))
+}
+
+// Error implements nfs.Logger.
+func (l *NFSLog) Error(args ...interface{}) {
+	if l.level < nfs.ErrorLevel {
+		return
+	}
+
+	l.l.Error(fmt.Sprint(args...))
+}
+
+// Errorf implements nfs.Logger.
+func (l *NFSLog) Errorf(format string, args ...interface{}) {
+	if l.level < nfs.ErrorLevel {
+		return
+	}
+
+	l.l.Error(fmt.Sprintf(format, args...))
+}
+
+// Fatal implements nfs.Logger.
+func (l *NFSLog) Fatal(args ...interface{}) {
+	if l.level < nfs.FatalLevel {
+		return
+	}
+
+	l.l.Error(fmt.Sprint(args...))
+	log.Fatal(args...)
+}
+
+// Fatalf implements nfs.Logger.
+func (l *NFSLog) Fatalf(format string, args ...interface{}) {
+	if l.level < nfs.FatalLevel {
+		return
+	}
+
+	l.l.Error(fmt.Sprintf(format, args...))
+	log.Fatalf(format, args...)
+}
+
+// Info implements nfs.Logger.
+func (l *NFSLog) Info(args ...interface{}) {
+	if l.level < nfs.InfoLevel {
+		return
+	}
+
+	l.l.Info(fmt.Sprint(args...))
+}
+
+// Infof implements nfs.Logger.
+func (l *NFSLog) Infof(format string, args ...interface{}) {
+	if l.level < nfs.InfoLevel {
+		return
+	}
+
+	l.l.Info(fmt.Sprintf(format, args...))
+}
+
+// Panic implements nfs.Logger.
+func (l *NFSLog) Panic(args ...interface{}) {
+	l.l.Error(fmt.Sprint(args...))
+	panic(fmt.Sprint(args...))
+}
+
+// Panicf implements nfs.Logger.
+func (l *NFSLog) Panicf(format string, args ...interface{}) {
+	l.l.Error(fmt.Sprintf(format, args...))
+	panic(fmt.Sprintf(format, args...))
+}
+
+// Print implements nfs.Logger.
+func (l *NFSLog) Print(args ...interface{}) {
+	if l.level < nfs.InfoLevel {
+		return
+	}
+
+	l.l.Info(fmt.Sprint(args...))
+}
+
+// Printf implements nfs.Logger.
+func (l *NFSLog) Printf(format string, args ...interface{}) {
+	if l.level < nfs.InfoLevel {
+		return
+	}
+
+	l.l.Info(fmt.Sprintf(format, args...))
+}
+
+// Trace implements nfs.Logger.
+func (l *NFSLog) Trace(args ...interface{}) {
+	if l.level < nfs.TraceLevel {
+		return
+	}
+
+	l.l.Debug(fmt.Sprint(args...))
+}
+
+// Tracef implements nfs.Logger.
+func (l *NFSLog) Tracef(format string, args ...interface{}) {
+	if l.level < nfs.TraceLevel {
+		return
+	}
+
+	l.l.Debug(fmt.Sprintf(format, args...))
+}
+
+// Warn implements nfs.Logger.
+func (l *NFSLog) Warn(args ...interface{}) {
+	if l.level < nfs.WarnLevel {
+		return
+	}
+
+	l.l.Warn(fmt.Sprint(args...))
+}
+
+// Warnf implements nfs.Logger.
+func (l *NFSLog) Warnf(format string, args ...interface{}) {
+	if l.level < nfs.WarnLevel {
+		return
+	}
+
+	l.l.Warn(fmt.Sprintf(format, args...))
+}
+
+// GetLevel implements nfs.Logger.
+func (l *NFSLog) GetLevel() nfs.LogLevel {
+	return l.level
+}
+
+// ParseLevel implements nfs.Logger.
+func (l *NFSLog) ParseLevel(level string) (nfs.LogLevel, error) {
+	switch level {
+	case "panic":
+		return nfs.PanicLevel, nil
+	case "fatal":
+		return nfs.FatalLevel, nil
+	case "error":
+		return nfs.ErrorLevel, nil
+	case "warn":
+		return nfs.WarnLevel, nil
+	case "info":
+		return nfs.InfoLevel, nil
+	case "debug":
+		return nfs.DebugLevel, nil
+	case "trace":
+		return nfs.TraceLevel, nil
+	}
+	return 0, fmt.Errorf("invalid log level %q", level)
+}
+
+// SetLevel implements nfs.Logger.
+func (l *NFSLog) SetLevel(level nfs.LogLevel) {
+	l.level = level
+}
diff --git a/src/log/torrent.go b/src/log/torrent.go
index 4d28d8d..62c2c8d 100644
--- a/src/log/torrent.go
+++ b/src/log/torrent.go
@@ -1,32 +1,34 @@
 package log
 
 import (
+	"context"
+	"log/slog"
+
 	"github.com/anacrolix/log"
-	"github.com/rs/zerolog"
 )
 
 var _ log.Handler = &Torrent{}
 
 type Torrent struct {
-	L zerolog.Logger
+	L *slog.Logger
 }
 
 func (l *Torrent) Handle(r log.Record) {
-	e := l.L.Info()
+	lvl := slog.LevelInfo
 	switch r.Level {
 	case log.Debug:
-		e = l.L.Debug()
+		lvl = slog.LevelDebug
 	case log.Info:
-		e = l.L.Debug().Str("error-type", "info")
+		lvl = slog.LevelInfo
 	case log.Warning:
-		e = l.L.Warn()
+		lvl = slog.LevelWarn
 	case log.Error:
-		e = l.L.Warn().Str("error-type", "error")
+		lvl = slog.LevelError
 	case log.Critical:
-		e = l.L.Warn().Str("error-type", "critical")
+		lvl = slog.LevelError
 	}
 
 	// TODO set log values somehow
 
-	e.Msgf(r.Text())
+	l.L.Log(context.Background(), lvl, r.Msg.String())
 }
diff --git a/src/mounts/httpfs/httpfs.go b/src/mounts/httpfs/httpfs.go
deleted file mode 100644
index f593903..0000000
--- a/src/mounts/httpfs/httpfs.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package httpfs
-
-import (
-	"io"
-	"io/fs"
-	"net/http"
-	"os"
-	"sync"
-
-	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
-	"git.kmsign.ru/royalcat/tstor/src/iio"
-)
-
-var _ http.FileSystem = &HTTPFS{}
-
-type HTTPFS struct {
-	fs vfs.Filesystem
-}
-
-func NewHTTPFS(fs vfs.Filesystem) *HTTPFS {
-	return &HTTPFS{fs: fs}
-}
-
-func (fs *HTTPFS) Open(name string) (http.File, error) {
-	f, err := fs.fs.Open(name)
-	if err != nil {
-		return nil, err
-	}
-
-	fi := vfs.NewFileInfo(name, f.Size(), f.IsDir())
-
-	// TODO make this lazy
-	fis, err := fs.filesToFileInfo(name)
-	if err != nil {
-		return nil, err
-	}
-
-	return newHTTPFile(f, fis, fi), nil
-}
-
-func (fs *HTTPFS) filesToFileInfo(path string) ([]fs.FileInfo, error) {
-	files, err := fs.fs.ReadDir(path)
-	if err != nil {
-		return nil, err
-	}
-
-	var out []os.FileInfo
-	for n, f := range files {
-		out = append(out, vfs.NewFileInfo(n, f.Size(), f.IsDir()))
-	}
-
-	return out, nil
-}
-
-var _ http.File = &httpFile{}
-
-type httpFile struct {
-	iio.ReaderSeeker
-
-	mu sync.Mutex
-	// dirPos is protected by mu.
-	dirPos     int
-	dirContent []os.FileInfo
-
-	fi fs.FileInfo
-}
-
-func newHTTPFile(f vfs.File, fis []fs.FileInfo, fi fs.FileInfo) *httpFile {
-	return &httpFile{
-		dirContent: fis,
-		fi:         fi,
-
-		ReaderSeeker: iio.NewSeekerWrapper(f, f.Size()),
-	}
-}
-
-func (f *httpFile) Readdir(count int) ([]fs.FileInfo, error) {
-	f.mu.Lock()
-	defer f.mu.Unlock()
-
-	if !f.fi.IsDir() {
-		return nil, os.ErrInvalid
-	}
-
-	old := f.dirPos
-	if old >= len(f.dirContent) {
-		// The os.File Readdir docs say that at the end of a directory,
-		// the error is io.EOF if count > 0 and nil if count <= 0.
-		if count > 0 {
-			return nil, io.EOF
-		}
-		return nil, nil
-	}
-	if count > 0 {
-		f.dirPos += count
-		if f.dirPos > len(f.dirContent) {
-			f.dirPos = len(f.dirContent)
-		}
-	} else {
-		f.dirPos = len(f.dirContent)
-		old = 0
-	}
-
-	return f.dirContent[old:f.dirPos], nil
-}
-
-func (f *httpFile) Stat() (fs.FileInfo, error) {
-	return f.fi, nil
-}
diff --git a/src/mounts/webdav/http.go b/src/mounts/webdav/http.go
deleted file mode 100644
index 3b95a83..0000000
--- a/src/mounts/webdav/http.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package webdav
-
-import (
-	"fmt"
-	"net/http"
-
-	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
-	"github.com/rs/zerolog/log"
-)
-
-func NewWebDAVServer(fs vfs.Filesystem, port int, user, pass string) error {
-	log.Info().Str("host", fmt.Sprintf("0.0.0.0:%d", port)).Msg("starting webDAV server")
-
-	srv := newHandler(fs)
-
-	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-		username, password, _ := r.BasicAuth()
-		if username == user && password == pass {
-			srv.ServeHTTP(w, r)
-			return
-		}
-
-		w.Header().Set("WWW-Authenticate", `Basic realm="BASIC WebDAV REALM"`)
-		w.WriteHeader(401)
-		w.Write([]byte("401 Unauthorized\n"))
-	})
-
-	return http.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", port), nil)
-}
diff --git a/src/telemetry/setup.go b/src/telemetry/setup.go
new file mode 100644
index 0000000..5e86ba0
--- /dev/null
+++ b/src/telemetry/setup.go
@@ -0,0 +1,126 @@
+package telemetry
+
+import (
+	"context"
+	"log/slog"
+	"os"
+
+	"git.kmsign.ru/royalcat/tstor/pkg/rlog"
+	"github.com/agoda-com/opentelemetry-go/otelslog"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs"
+	"github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp"
+	logsdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+	"go.opentelemetry.io/otel/exporters/prometheus"
+	"go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/sdk/trace"
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+type Client struct {
+	log *slog.Logger
+
+	tracerProvider *trace.TracerProvider
+	metricProvider *metric.MeterProvider
+	loggerProvider *logsdk.LoggerProvider
+}
+
+func (client *Client) Shutdown(ctx context.Context) {
+	log := rlog.FunctionLog(client.log, "Shutdown")
+	if client.metricProvider != nil {
+		err := client.metricProvider.Shutdown(ctx)
+		if err != nil {
+			log.Error("error shutting down metric provider", rlog.Err(err))
+		}
+	}
+	if client.tracerProvider != nil {
+		err := client.tracerProvider.Shutdown(ctx)
+		if err != nil {
+			log.Error("error shutting down tracer provider", rlog.Err(err))
+		}
+	}
+	if client.loggerProvider != nil {
+		err := client.loggerProvider.Shutdown(ctx)
+		if err != nil {
+			log.Error("error shutting down logger provider", rlog.Err(err))
+		}
+	}
+}
+
+const appName = "tstor"
+
+func Setup(ctx context.Context, endpoint string) (*Client, error) {
+	log := rlog.ComponentLog("telemetry")
+
+	client := &Client{
+		log: log,
+	}
+	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(cause error) {
+		log.Error("otel error", rlog.Err(cause))
+	}))
+
+	hostName, _ := os.Hostname()
+
+	r, err := resource.Merge(
+		resource.Default(),
+		resource.NewWithAttributes(
+			semconv.SchemaURL,
+			semconv.ServiceName(appName),
+			semconv.HostName(hostName),
+		),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	metricExporter, err := prometheus.New(prometheus.WithNamespace(appName))
+	if err != nil {
+		return nil, err
+	}
+	client.metricProvider = metric.NewMeterProvider(
+		metric.WithReader(metricExporter),
+		metric.WithResource(r),
+	)
+	otel.SetMeterProvider(client.metricProvider)
+	log.Info("prometheus metrics provider initialized")
+
+	traceExporter, err := otlptracehttp.New(ctx,
+		otlptracehttp.WithEndpoint(endpoint),
+		otlptracehttp.WithRetry(otlptracehttp.RetryConfig{
+			Enabled: false,
+		}),
+	)
+	if err != nil {
+		return nil, err
+	}
+	client.tracerProvider = trace.NewTracerProvider(
+		trace.WithBatcher(traceExporter),
+		trace.WithResource(r),
+	)
+	otel.SetTracerProvider(client.tracerProvider)
+	log.Info("otel tracing provider initialized")
+
+	logExporter, err := otlplogs.NewExporter(ctx,
+		otlplogs.WithClient(
+			otlplogshttp.NewClient(otlplogshttp.WithEndpoint(endpoint)),
+		),
+	)
+	if err != nil {
+		return nil, err
+	}
+	client.loggerProvider = logsdk.NewLoggerProvider(
+		logsdk.WithBatcher(logExporter),
+		logsdk.WithResource(r),
+	)
+
+	rlog.AddHandler(otelslog.NewOtelHandler(client.loggerProvider,
+		&otelslog.HandlerOptions{
+			Level: slog.LevelDebug,
+		}),
+	)
+	client.log = slog.Default()
+
+	return client, nil
+}
diff --git a/tools.go b/tools.go
new file mode 100644
index 0000000..a33b059
--- /dev/null
+++ b/tools.go
@@ -0,0 +1,9 @@
+//go:build tools
+// +build tools
+
+//go:generate go run github.com/99designs/gqlgen
+package tstor
+
+import (
+	_ "github.com/99designs/gqlgen"
+)