optimized, working

parent 2b39afca3b
commit 0350ecba9a

38 changed files with 1809 additions and 826 deletions
src
@@ -20,6 +20,10 @@ var defaultConfig = Config{
	Fuse: Fuse{
		Enabled: false,
	},
	NFS: NFS{
		Enabled: false,
		Port:    8122,
	},
},

	TorrentClient: TorrentClient{
@@ -42,7 +42,10 @@ func Load(path string) (*Config, error) {
	}

	conf := Config{}
	k.Unmarshal("", &conf)
	err = k.Unmarshal("", &conf)
	if err != nil {
		return nil, err
	}

	return &conf, nil
}
@@ -61,6 +61,12 @@ type Mounts struct {
	WebDAV WebDAV `koanf:"webdav"`
	HttpFs HttpFs `koanf:"httpfs"`
	Fuse   Fuse   `koanf:"fuse"`
	NFS    NFS    `koanf:"nfs"`
}

type NFS struct {
	Enabled bool `koanf:"enabled"`
	Port    int  `koanf:"port"`
}

type HttpFs struct {
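The new NFS config section above only carries an enable flag and a port. As a rough sketch of how it is presumably consumed when mounts are wired up (the startNFS helper, rootFS, and this wiring are illustrative assumptions, not part of the commit; only the Mounts.NFS fields come from the diff):

	// Hypothetical wiring sketch, not from this commit.
	conf, err := config.Load(configPath)
	if err != nil {
		return err
	}
	if conf.Mounts.NFS.Enabled {
		addr := fmt.Sprintf(":%d", conf.Mounts.NFS.Port) // defaults to 8122
		if err := startNFS(addr, rootFS); err != nil {   // startNFS is an assumed helper
			return err
		}
	}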
@@ -5,11 +5,7 @@ import (
	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
)

type storage struct {
	factories map[string]vfs.FsFactory
}

func NewStorage(downPath string, tsrv *torrent.Service) vfs.Filesystem {
func NewStorage(dataPath string, tsrv *torrent.Service) vfs.Filesystem {
	factories := map[string]vfs.FsFactory{
		".torrent": tsrv.NewTorrentFs,
	}
@@ -19,102 +15,5 @@ func NewStorage(downPath string, tsrv *torrent.Service) vfs.Filesystem {
		factories[k] = v
	}

	return vfs.NewResolveFS(downPath, factories)
	return vfs.NewResolveFS(vfs.NewOsFs(dataPath), factories)
}

// func (s *storage) Clear() {
// 	s.files = make(map[string]vfs.File)
// }

// func (s *storage) Has(path string) bool {
// 	path = clean(path)

// 	f := s.files[path]
// 	if f != nil {
// 		return true
// 	}

// 	if f, _ := s.getFileFromFs(path); f != nil {
// 		return true
// 	}

// 	return false
// }

// func (s *storage) createParent(p string, f File) error {
// 	base, filename := path.Split(p)
// 	base = clean(base)

// 	if err := s.Add(&Dir{}, base); err != nil {
// 		return err
// 	}

// 	if _, ok := s.children[base]; !ok {
// 		s.children[base] = make(map[string]File)
// 	}

// 	if filename != "" {
// 		s.children[base][filename] = f
// 	}

// 	return nil
// }

// func (s *storage) Children(path string) (map[string]File, error) {
// 	path = clean(path)

// 	files, err := s.getDirFromFs(path)
// 	if err == nil {
// 		return files, nil
// 	}

// 	if !os.IsNotExist(err) {
// 		return nil, err
// 	}

// 	l := make(map[string]File)
// 	for n, f := range s.children[path] {
// 		l[n] = f
// 	}

// 	return l, nil
// }

// func (s *storage) Get(path string) (File, error) {
// 	path = clean(path)
// 	if !s.Has(path) {
// 		return nil, os.ErrNotExist
// 	}

// 	file, ok := s.files[path]
// 	if ok {
// 		return file, nil
// 	}

// 	return s.getFileFromFs(path)
// }

// func (s *storage) getFileFromFs(p string) (File, error) {
// 	for fsp, fs := range s.filesystems {
// 		if strings.HasPrefix(p, fsp) {
// 			return fs.Open(separator + strings.TrimPrefix(p, fsp))
// 		}
// 	}

// 	return nil, os.ErrNotExist
// }

// func (s *storage) getDirFromFs(p string) (map[string]File, error) {
// 	for fsp, fs := range s.filesystems {
// 		if strings.HasPrefix(p, fsp) {
// 			path := strings.TrimPrefix(p, fsp)
// 			return fs.ReadDir(path)
// 		}
// 	}

// 	return nil, os.ErrNotExist
// }

// func clean(p string) string {
// 	return path.Clean(separator + strings.ReplaceAll(p, "\\", "/"))
// }
@@ -17,10 +17,22 @@ import (
func NewClient(st storage.ClientImpl, fis bep44.Store, cfg *config.TorrentClient, id [20]byte) (*torrent.Client, error) {
	// TODO download and upload limits
	torrentCfg := torrent.NewDefaultClientConfig()
	torrentCfg.Seed = true
	torrentCfg.PeerID = string(id[:])
	torrentCfg.DefaultStorage = st
	torrentCfg.DisableIPv6 = cfg.DisableIPv6

	// torrentCfg.DisableIPv6 = cfg.DisableIPv6
	// torrentCfg.DropDuplicatePeerIds = true
	// torrentCfg.TorrentPeersLowWater = 10
	// torrentCfg.TorrentPeersHighWater = 100
	// torrentCfg.DisableWebtorrent = true
	// torrentCfg.DisableAggressiveUpload = true
	// torrentCfg.DisableWebseeds = true
	// torrentCfg.DisableUTP = false
	// torrentCfg.NoDefaultPortForwarding = true
	// torrentCfg.AlwaysWantConns = false
	// torrentCfg.ClientDhtConfig = torrent.ClientDhtConfig{
	// 	NoDHT: true,
	// }

	l := log.Logger.With().Str("component", "torrent-client").Logger()
src/host/torrent/piece-completion.go (new file, 131 lines)
@@ -0,0 +1,131 @@
package torrent

import (
	"encoding/binary"
	"fmt"
	"log/slog"

	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/storage"
	"github.com/dgraph-io/badger/v4"
)

type PieceCompletionState byte

const (
	PieceNotComplete PieceCompletionState = 0
	PieceComplete    PieceCompletionState = 1<<8 - 1
)

func pieceCompletionState(i bool) PieceCompletionState {
	if i {
		return PieceComplete
	} else {
		return PieceNotComplete
	}
}

type badgerPieceCompletion struct {
	db *badger.DB
}

var _ storage.PieceCompletion = (*badgerPieceCompletion)(nil)

func NewBadgerPieceCompletion(dir string) (storage.PieceCompletion, error) {
	opts := badger.
		DefaultOptions(dir).
		WithLogger(badgerSlog{slog: slog.With("component", "piece-completion")})
	db, err := badger.Open(opts)
	if err != nil {
		return nil, err
	}
	return &badgerPieceCompletion{db}, nil
}

func pkToBytes(pk metainfo.PieceKey) []byte {
	key := make([]byte, len(pk.InfoHash.Bytes()))
	copy(key, pk.InfoHash.Bytes())
	binary.BigEndian.AppendUint32(key, uint32(pk.Index))
	return key
}

func (k *badgerPieceCompletion) Get(pk metainfo.PieceKey) (storage.Completion, error) {
	completion := storage.Completion{
		Ok: true,
	}
	err := k.db.View(func(tx *badger.Txn) error {
		item, err := tx.Get(pkToBytes(pk))
		if err != nil {
			if err == badger.ErrKeyNotFound {
				completion.Ok = false
				return nil
			}

			return fmt.Errorf("getting value: %w", err)
		}

		valCopy, err := item.ValueCopy(nil)
		if err != nil {
			return fmt.Errorf("copying value: %w", err)
		}
		compl := PieceCompletionState(valCopy[0])

		completion.Ok = true
		switch compl {
		case PieceComplete:
			completion.Complete = true
		case PieceNotComplete:
			completion.Complete = false
		}

		return nil
	})
	return completion, err
}

func (me badgerPieceCompletion) Set(pk metainfo.PieceKey, b bool) error {
	if c, err := me.Get(pk); err == nil && c.Ok && c.Complete == b {
		return nil
	}

	return me.db.Update(func(txn *badger.Txn) error {
		return txn.Set(pkToBytes(pk), []byte{byte(pieceCompletionState(b))})
	})
}

func (k *badgerPieceCompletion) Delete(key string) error {
	return k.db.Update(
		func(txn *badger.Txn) error {
			return txn.Delete([]byte(key))
		})
}

func (me *badgerPieceCompletion) Close() error {
	return me.db.Close()
}

type badgerSlog struct {
	slog *slog.Logger
}

// Debugf implements badger.Logger.
func (log badgerSlog) Debugf(f string, a ...interface{}) {
	log.slog.Debug(f, a...)
}

// Errorf implements badger.Logger.
func (log badgerSlog) Errorf(f string, a ...interface{}) {
	log.slog.Error(f, a...)
}

// Infof implements badger.Logger.
func (log badgerSlog) Infof(f string, a ...interface{}) {
	log.slog.Info(f, a...)
}

// Warningf implements badger.Logger.
func (log badgerSlog) Warningf(f string, a ...interface{}) {
	log.slog.Warn(f, a...)
}

var _ badger.Logger = (*badgerSlog)(nil)
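A minimal usage sketch of the badger-backed piece completion added above (the directory path and the infoHash value are placeholder assumptions; the constructor and the Get/Set signatures come from the file itself):

	pc, err := NewBadgerPieceCompletion("/var/lib/tstor/piece-completion") // path is an assumption
	if err != nil {
		panic(err)
	}
	defer pc.Close()

	// Mark piece 0 of a torrent as complete, then read the flag back.
	pk := metainfo.PieceKey{InfoHash: infoHash, Index: 0} // infoHash: some metainfo.Hash
	_ = pc.Set(pk, true)
	c, _ := pc.Get(pk)
	fmt.Println(c.Ok, c.Complete) // true true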
@@ -1,31 +1,33 @@
package torrent

import (
	"sync"
	"context"
	"fmt"
	"log/slog"
	"time"

	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
	"github.com/anacrolix/torrent"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"github.com/anacrolix/torrent/types"
)

type Service struct {
	c *torrent.Client

	// stats *Stats
	DefaultPriority types.PiecePriority

	mu sync.Mutex

	log zerolog.Logger
	log *slog.Logger
	addTimeout, readTimeout int
}

func NewService(c *torrent.Client, addTimeout, readTimeout int) *Service {
	l := log.Logger.With().Str("component", "torrent-service").Logger()
	l := slog.With("component", "torrent-service")
	return &Service{
		log: l,
		c:   c,
		log: l,
		c:   c,
		DefaultPriority: types.PiecePriorityNone,
		// stats: newStats(), // TODO persistent
		addTimeout:  addTimeout,
		readTimeout: readTimeout,
@@ -35,20 +37,30 @@ func NewService(c *torrent.Client, addTimeout, readTimeout int) *Service {
var _ vfs.FsFactory = (*Service)(nil).NewTorrentFs

func (s *Service) NewTorrentFs(f vfs.File) (vfs.Filesystem, error) {
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*time.Duration(s.addTimeout))
	defer cancel()
	defer f.Close()

	mi, err := metainfo.Load(f)
	if err != nil {
		return nil, err
	}
	t, err := s.c.AddTorrent(mi)
	if err != nil {
		return nil, err
	}
	<-t.GotInfo()
	t.AllowDataDownload()
	for _, f := range t.Files() {
		f.SetPriority(torrent.PiecePriorityReadahead)

	t, ok := s.c.Torrent(mi.HashInfoBytes())
	if !ok {
		t, err = s.c.AddTorrent(mi)
		if err != nil {
			return nil, err
		}
		select {
		case <-ctx.Done():
			return nil, fmt.Errorf("creating torrent fs timed out")
		case <-t.GotInfo():
		}
		for _, f := range t.Files() {
			f.SetPriority(s.DefaultPriority)
		}
		t.AllowDataDownload()
	}

	return vfs.NewTorrentFs(t, s.readTimeout), nil
@@ -57,161 +69,3 @@ func (s *Service) NewTorrentFs(f vfs.File) (vfs.Filesystem, error) {
func (s *Service) Stats() (*Stats, error) {
	return &Stats{}, nil
}

// func (s *Service) Load() (map[string]vfs.Filesystem, error) {
// 	// Load from config
// 	s.log.Info().Msg("adding torrents from configuration")
// 	for _, loader := range s.loaders {
// 		if err := s.load(loader); err != nil {
// 			return nil, err
// 		}
// 	}

// 	// Load from DB
// 	s.log.Info().Msg("adding torrents from database")
// 	return s.fss, s.load(s.db)
// }

// func (s *Service) load(l loader.Loader) error {
// 	list, err := l.ListMagnets()
// 	if err != nil {
// 		return err
// 	}
// 	for r, ms := range list {
// 		s.addRoute(r)
// 		for _, m := range ms {
// 			if err := s.addMagnet(r, m); err != nil {
// 				return err
// 			}
// 		}
// 	}

// 	list, err = l.ListTorrentPaths()
// 	if err != nil {
// 		return err
// 	}
// 	for r, ms := range list {
// 		s.addRoute(r)
// 		for _, p := range ms {
// 			if err := s.addTorrentPath(r, p); err != nil {
// 				return err
// 			}
// 		}
// 	}

// 	return nil
// }

// func (s *Service) AddMagnet(r, m string) error {
// 	if err := s.addMagnet(r, m); err != nil {
// 		return err
// 	}

// 	// Add to db
// 	return s.db.AddMagnet(r, m)
// }

// func (s *Service) addTorrentPath(r, p string) error {
// 	// Add to client
// 	t, err := s.c.AddTorrentFromFile(p)
// 	if err != nil {
// 		return err
// 	}

// 	return s.addTorrent(r, t)
// }

// func (s *Service) addMagnet(r, m string) error {
// 	// Add to client
// 	t, err := s.c.AddMagnet(m)
// 	if err != nil {
// 		return err
// 	}

// 	return s.addTorrent(r, t)

// }

// func (s *Service) addRoute(r string) {
// 	s.s.AddRoute(r)

// 	// Add to filesystems
// 	folder := path.Join("/", r)
// 	s.mu.Lock()
// 	defer s.mu.Unlock()
// 	_, ok := s.fss[folder]
// 	if !ok {
// 		s.fss[folder] = vfs.NewTorrentFs(s.readTimeout)
// 	}
// }

// func (s *Service) addTorrent(r string, t *torrent.Torrent) error {
// 	// only get info if name is not available
// 	if t.Info() == nil {
// 		s.log.Info().Str("hash", t.InfoHash().String()).Msg("getting torrent info")
// 		select {
// 		case <-time.After(time.Duration(s.addTimeout) * time.Second):
// 			s.log.Error().Str("hash", t.InfoHash().String()).Msg("timeout getting torrent info")
// 			return errors.New("timeout getting torrent info")
// 		case <-t.GotInfo():
// 			s.log.Info().Str("hash", t.InfoHash().String()).Msg("obtained torrent info")
// 		}

// 	}

// 	// Add to stats
// 	s.s.Add(r, t)

// 	// Add to filesystems
// 	folder := path.Join("/", r)
// 	s.mu.Lock()
// 	defer s.mu.Unlock()

// 	tfs, ok := s.fss[folder].(*vfs.TorrentFs)
// 	if !ok {
// 		return errors.New("error adding torrent to filesystem")
// 	}

// 	tfs.AddTorrent(t)
// 	s.log.Info().Str("name", t.Info().Name).Str("route", r).Msg("torrent added")

// 	return nil
// }

// func (s *Service) RemoveFromHash(r, h string) error {
// 	// Remove from db
// 	deleted, err := s.db.RemoveFromHash(r, h)
// 	if err != nil {
// 		return err
// 	}

// 	if !deleted {
// 		return fmt.Errorf("element with hash %v on route %v cannot be removed", h, r)
// 	}

// 	// Remove from stats
// 	s.s.Del(r, h)

// 	// Remove from fs
// 	folder := path.Join("/", r)

// 	tfs, ok := s.fss[folder].(*vfs.TorrentFs)
// 	if !ok {
// 		return errors.New("error removing torrent from filesystem")
// 	}

// 	tfs.RemoveTorrent(h)

// 	// Remove from client
// 	var mh metainfo.Hash
// 	if err := mh.FromHexString(h); err != nil {
// 		return err
// 	}

// 	t, ok := s.c.Torrent(metainfo.NewHashFromHex(h))
// 	if ok {
// 		t.Drop()
// 	}

// 	return nil
// }
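A rough sketch of how the reworked service is presumably used by the storage layer (the surrounding setup and the .torrent path are assumptions; the NewService and NewTorrentFs signatures come from the diff above):

	svc := NewService(client, 60, 30)          // addTimeout and readTimeout are in seconds
	f, err := rootFS.Open("/example.torrent")  // any vfs.File holding torrent metadata (assumed)
	if err != nil {
		panic(err)
	}
	tfs, err := svc.NewTorrentFs(f)
	if err != nil {
		panic(err)
	}
	entries, _ := tfs.ReadDir("/") // browse the torrent like a directory
	_ = entries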
src/host/torrent/storage.go (new file, 306 lines)
@@ -0,0 +1,306 @@
package torrent

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"git.kmsign.ru/royalcat/tstor/src/config"
	"github.com/anacrolix/missinggo"
	"github.com/anacrolix/torrent"
	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/mmap_span"
	"github.com/anacrolix/torrent/storage"
	"github.com/edsrzf/mmap-go"
)

type Torrent struct {
	client *torrent.Client
	data   storage.ClientImplCloser
	pc     storage.PieceCompletion
}

func SetupStorage(cfg config.TorrentClient) (storage.ClientImplCloser, storage.PieceCompletion, error) {
	pcp := filepath.Join(cfg.DataFolder, "piece-completion")
	if err := os.MkdirAll(pcp, 0744); err != nil {
		return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
	}
	pc, err := storage.NewBoltPieceCompletion(pcp)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating servers piece completion: %w", err)
	}

	// pc, err := NewBadgerPieceCompletion(pcp)
	// if err != nil {
	// 	return nil, nil, fmt.Errorf("error creating servers piece completion: %w", err)
	// }

	// TODO implement cache/storage switching
	// cacheDir := filepath.Join(tcfg.DataFolder, "cache")
	// if err := os.MkdirAll(cacheDir, 0744); err != nil {
	// 	return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
	// }
	// fc, err := filecache.NewCache(cacheDir)
	// if err != nil {
	// 	return nil, nil, fmt.Errorf("error creating cache: %w", err)
	// }
	// log.Info().Msg(fmt.Sprintf("setting cache size to %d MB", 1024))
	// fc.SetCapacity(1024 * 1024 * 1024)

	// rp := storage.NewResourcePieces(fc.AsResourceProvider())
	// st := &stc{rp}

	filesDir := filepath.Join(cfg.DataFolder, "files")
	if err := os.MkdirAll(pcp, 0744); err != nil {
		return nil, nil, fmt.Errorf("error creating piece completion folder: %w", err)
	}

	// st := storage.NewMMapWithCompletion(filesDir, pc)
	st := storage.NewFileOpts(storage.NewFileClientOpts{
		ClientBaseDir:   filesDir,
		PieceCompletion: pc,
	})

	return st, pc, nil
}

func (s Torrent) Remove(f *torrent.File) error {

	return nil
}

// type dupePieces struct {
// }

// func (s Torrent) dedupe(f1, f2 *os.File) error {
// 	for _, t := range s.client.Torrents() {
// 		for i := 0; i < t.NumPieces(); i++ {
// 			p := t.Piece(i)
// 			p.Info().Hash()
// 		}
// 	}

// 	// https://go-review.googlesource.com/c/sys/+/284352/10/unix/syscall_linux_test.go#856
// 	// dedupe := unix.FileDedupeRange{
// 	// 	Src_offset: uint64(0),
// 	// 	Src_length: uint64(4096),
// 	// 	Info: []unix.FileDedupeRangeInfo{
// 	// 		unix.FileDedupeRangeInfo{
// 	// 			Dest_fd:     int64(f2.Fd()),
// 	// 			Dest_offset: uint64(0),
// 	// 		},
// 	// 		unix.FileDedupeRangeInfo{
// 	// 			Dest_fd:     int64(f2.Fd()),
// 	// 			Dest_offset: uint64(4096),
// 	// 		},
// 	// 	}}
// 	// err := unix.IoctlFileDedupeRange(int(f1.Fd()), &dedupe)
// 	// if err == unix.EOPNOTSUPP || err == unix.EINVAL {
// 	// 	t.Skip("deduplication not supported on this filesystem")
// 	// } else if err != nil {
// 	// 	t.Fatal(err)
// 	// }

// 	return nil
// }

type mmapClientImpl struct {
	baseDir string
	pc      storage.PieceCompletion
}

func NewMMapWithCompletion(baseDir string, completion storage.PieceCompletion) *mmapClientImpl {
	return &mmapClientImpl{
		baseDir: baseDir,
		pc:      completion,
	}
}

func (s *mmapClientImpl) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (_ storage.TorrentImpl, err error) {
	t, err := newMMapTorrent(info, infoHash, s.baseDir, s.pc)
	if err != nil {
		return storage.TorrentImpl{}, err
	}
	return storage.TorrentImpl{Piece: t.Piece, Close: t.Close, Flush: t.Flush}, nil
}

func (s *mmapClientImpl) Close() error {
	return s.pc.Close()
}

func newMMapTorrent(md *metainfo.Info, infoHash metainfo.Hash, location string, pc storage.PieceCompletionGetSetter) (*mmapTorrent, error) {
	span := &mmap_span.MMapSpan{}
	basePath, err := storage.ToSafeFilePath(md.Name)
	if err != nil {
		return nil, err
	}
	basePath = filepath.Join(location, basePath)

	for _, miFile := range md.UpvertedFiles() {
		var safeName string
		safeName, err = storage.ToSafeFilePath(miFile.Path...)
		if err != nil {
			return nil, err
		}
		fileName := filepath.Join(basePath, safeName)
		var mm FileMapping
		mm, err = mmapFile(fileName, miFile.Length)
		if err != nil {
			err = fmt.Errorf("file %q: %s", miFile.DisplayPath(md), err)
			return nil, err
		}
		span.Append(mm)
	}
	span.InitIndex()

	return &mmapTorrent{
		infoHash: infoHash,
		span:     span,
		pc:       pc,
	}, nil
}

type mmapTorrent struct {
	infoHash metainfo.Hash
	span     *mmap_span.MMapSpan
	pc       storage.PieceCompletionGetSetter
}

func (ts *mmapTorrent) Piece(p metainfo.Piece) storage.PieceImpl {
	return mmapPiece{
		pc:       ts.pc,
		p:        p,
		ih:       ts.infoHash,
		ReaderAt: io.NewSectionReader(ts.span, p.Offset(), p.Length()),
		WriterAt: missinggo.NewSectionWriter(ts.span, p.Offset(), p.Length()),
	}
}

func (ts *mmapTorrent) Close() error {
	errs := ts.span.Close()
	if len(errs) > 0 {
		return errs[0]
	}
	return nil
}

func (ts *mmapTorrent) Flush() error {
	errs := ts.span.Flush()
	if len(errs) > 0 {
		return errs[0]
	}
	return nil
}

type mmapPiece struct {
	pc storage.PieceCompletionGetSetter
	p  metainfo.Piece
	ih metainfo.Hash
	io.ReaderAt
	io.WriterAt
}

func (me mmapPiece) pieceKey() metainfo.PieceKey {
	return metainfo.PieceKey{InfoHash: me.ih, Index: me.p.Index()}
}

func (sp mmapPiece) Completion() storage.Completion {
	c, err := sp.pc.Get(sp.pieceKey())
	if err != nil {
		panic(err)
	}
	return c
}

func (sp mmapPiece) MarkComplete() error {
	return sp.pc.Set(sp.pieceKey(), true)
}

func (sp mmapPiece) MarkNotComplete() error {
	return sp.pc.Set(sp.pieceKey(), false)
}

func mmapFile(name string, size int64) (_ FileMapping, err error) {
	dir := filepath.Dir(name)
	err = os.MkdirAll(dir, 0o750)
	if err != nil {
		return nil, fmt.Errorf("making directory %q: %s", dir, err)
	}
	var file *os.File
	file, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o666)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			file.Close()
		}
	}()
	var fi os.FileInfo
	fi, err = file.Stat()
	if err != nil {
		return nil, err
	}
	if fi.Size() < size {
		// I think this is necessary on HFS+. Maybe Linux will SIGBUS too if
		// you overmap a file but I'm not sure.
		err = file.Truncate(size)
		if err != nil {
			return nil, err
		}
	}
	return func() (ret mmapWithFile, err error) {
		ret.f = file
		if size == 0 {
			// Can't mmap() regions with length 0.
			return
		}
		intLen := int(size)
		if int64(intLen) != size {
			err = errors.New("size too large for system")
			return
		}
		ret.mmap, err = mmap.MapRegion(file, intLen, mmap.RDWR, 0, 0)
		if err != nil {
			err = fmt.Errorf("error mapping region: %s", err)
			return
		}
		if int64(len(ret.mmap)) != size {
			panic(len(ret.mmap))
		}
		return
	}()
}

type FileMapping = mmap_span.Mmap

// Handles closing the mmap's file handle (needed for Windows). Could be implemented differently by
// OS.
type mmapWithFile struct {
	f    *os.File
	mmap mmap.MMap
}

func (m mmapWithFile) Flush() error {
	return m.mmap.Flush()
}

func (m mmapWithFile) Unmap() (err error) {
	if m.mmap != nil {
		err = m.mmap.Unmap()
	}
	fileErr := m.f.Close()
	if err == nil {
		err = fileErr
	}
	return
}

func (m mmapWithFile) Bytes() []byte {
	if m.mmap == nil {
		return nil
	}
	return m.mmap
}
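For context, a short sketch of plugging the mmap-backed storage into an anacrolix torrent client (the data directory and the pairing with the badger completion are assumptions; both constructors appear in this commit):

	pc, err := NewBadgerPieceCompletion(filepath.Join(dataDir, "piece-completion"))
	if err != nil {
		panic(err)
	}
	st := NewMMapWithCompletion(filepath.Join(dataDir, "files"), pc)

	cfg := torrent.NewDefaultClientConfig()
	cfg.DefaultStorage = st // mmapClientImpl satisfies storage.ClientImpl
	client, err := torrent.NewClient(cfg)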
@@ -3,8 +3,11 @@ package vfs
import (
	"archive/zip"
	"io"
	"io/fs"
	"os"
	"path"
	"path/filepath"
	"strings"
	"sync"

	"git.kmsign.ru/royalcat/tstor/src/iio"
@@ -46,6 +49,8 @@ func NewArchive(r iio.Reader, size int64, loader ArchiveLoader) *archive {
	}
}

var _ Filesystem = &archive{}

func (a *archive) Open(filename string) (File, error) {
	files, err := a.files()
	if err != nil {
@@ -55,28 +60,56 @@ func (a *archive) Open(filename string) (File, error) {
	return getFile(files, filename)
}

func (fs *archive) ReadDir(path string) (map[string]File, error) {
func (fs *archive) ReadDir(path string) ([]fs.DirEntry, error) {
	files, err := fs.files()
	if err != nil {
		return nil, err
	}

	return listFilesInDir(files, path)
	return listDirFromFiles(files, path)
}

// Stat implements Filesystem.
func (afs *archive) Stat(filename string) (fs.FileInfo, error) {
	files, err := afs.files()
	if err != nil {
		return nil, err
	}

	if file, ok := files[filename]; ok {
		return newFileInfo(path.Base(filename), file.Size()), nil
	}

	for p, _ := range files {
		if strings.HasPrefix(p, filename) {
			return newDirInfo(path.Base(filename)), nil
		}
	}

	return nil, ErrNotExist

}

var _ File = &archiveFile{}

func NewArchiveFile(readerFunc func() (iio.Reader, error), len int64) *archiveFile {
func NewArchiveFile(name string, readerFunc func() (iio.Reader, error), size int64) *archiveFile {
	return &archiveFile{
		name:       name,
		readerFunc: readerFunc,
		len:  len,
		size: size,
	}
}

type archiveFile struct {
	name string

	readerFunc func() (iio.Reader, error)
	reader     iio.Reader
	len  int64
	size int64
}

func (d *archiveFile) Stat() (fs.FileInfo, error) {
	return newFileInfo(d.name, d.size), nil
}

func (d *archiveFile) load() error {
@@ -94,7 +127,7 @@ func (d *archiveFile) load() error {
}

func (d *archiveFile) Size() int64 {
	return d.len
	return d.size
}

func (d *archiveFile) IsDir() bool {
@@ -151,7 +184,7 @@ func ZipLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {
		}

		n := filepath.Join(string(os.PathSeparator), f.Name)
		af := NewArchiveFile(rf, f.FileInfo().Size())
		af := NewArchiveFile(f.Name, rf, f.FileInfo().Size())

		out[n] = af
	}
@@ -183,7 +216,7 @@ func SevenZipLoader(reader iio.Reader, size int64) (map[string]*archiveFile, err
			return iio.NewDiskTeeReader(zr)
		}

		af := NewArchiveFile(rf, f.FileInfo().Size())
		af := NewArchiveFile(f.Name, rf, f.FileInfo().Size())
		n := filepath.Join(string(os.PathSeparator), f.Name)

		out[n] = af
@@ -216,7 +249,7 @@ func RarLoader(reader iio.Reader, size int64) (map[string]*archiveFile, error) {

		n := filepath.Join(string(os.PathSeparator), header.Name)

		af := NewArchiveFile(rf, header.UnPackedSize)
		af := NewArchiveFile(header.Name, rf, header.UnPackedSize)

		out[n] = af
	}
@@ -16,18 +16,21 @@ func TestZipFilesystem(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	zReader, len := createTestZip(require)
	zReader, size := createTestZip(require)

	zfs := NewArchive(zReader, len, ZipLoader)
	zfs := NewArchive(zReader, size, ZipLoader)

	files, err := zfs.ReadDir("/path/to/test/file")
	require.NoError(err)

	require.Len(files, 1)
	f := files["1.txt"]
	require.NotNil(f)
	e := files[0]
	require.Equal("1.txt", e.Name())
	require.NotNil(e)

	out := make([]byte, 11)
	f, err := zfs.Open("/path/to/test/file/1.txt")
	require.NoError(err)
	n, err := f.Read(out)
	require.Equal(io.EOF, err)
	require.Equal(11, n)
@@ -1,26 +1,43 @@
package vfs

var _ File = &Dir{}
import (
	"io/fs"
	"path"
)

type Dir struct {
var _ File = &dir{}

func NewDir(name string) File {
	return &dir{
		name: path.Base(name),
	}
}

func (d *Dir) Size() int64 {
type dir struct {
	name string
}

// Info implements File.
func (d *dir) Stat() (fs.FileInfo, error) {
	return newDirInfo(d.name), nil
}

func (d *dir) Size() int64 {
	return 0
}

func (d *Dir) IsDir() bool {
func (d *dir) IsDir() bool {
	return true
}

func (d *Dir) Close() error {
func (d *dir) Close() error {
	return nil
}

func (d *Dir) Read(p []byte) (n int, err error) {
func (d *dir) Read(p []byte) (n int, err error) {
	return 0, nil
}

func (d *Dir) ReadAt(p []byte, off int64) (n int, err error) {
func (d *dir) ReadAt(p []byte, off int64) (n int, err error) {
	return 0, nil
}
@@ -1,7 +1,9 @@
package vfs

import (
	"os"
	"errors"
	"io/fs"
	"path"
	"time"

	"git.kmsign.ru/royalcat/tstor/src/iio"
@@ -10,10 +12,13 @@ import (
type File interface {
	IsDir() bool
	Size() int64
	Stat() (fs.FileInfo, error)

	iio.Reader
}

var ErrNotImplemented = errors.New("not implemented")

type Filesystem interface {
	// Open opens the named file for reading. If successful, methods on the
	// returned file can be used for reading; the associated file descriptor has
@@ -22,23 +27,50 @@ type Filesystem interface {

	// ReadDir reads the directory named by dirname and returns a list of
	// directory entries.
	ReadDir(path string) (map[string]File, error)
	ReadDir(path string) ([]fs.DirEntry, error)

	Stat(filename string) (fs.FileInfo, error)
}

const defaultMode = fs.FileMode(0555)

type fileInfo struct {
	name  string
	size  int64
	isDir bool
}

func NewFileInfo(name string, size int64, isDir bool) *fileInfo {
var _ fs.FileInfo = &fileInfo{}
var _ fs.DirEntry = &fileInfo{}

func newDirInfo(name string) *fileInfo {
	return &fileInfo{
		name:  name,
		size:  size,
		isDir: isDir,
		name:  path.Base(name),
		size:  0,
		isDir: true,
	}
}

func newFileInfo(name string, size int64) *fileInfo {
	return &fileInfo{
		name:  path.Base(name),
		size:  size,
		isDir: false,
	}
}

func (fi *fileInfo) Info() (fs.FileInfo, error) {
	return fi, nil
}

func (fi *fileInfo) Type() fs.FileMode {
	if fi.isDir {
		return fs.ModeDir
	}

	return 0
}

func (fi *fileInfo) Name() string {
	return fi.name
}
@@ -47,17 +79,17 @@ func (fi *fileInfo) Size() int64 {
	return fi.size
}

func (fi *fileInfo) Mode() os.FileMode {
func (fi *fileInfo) Mode() fs.FileMode {
	if fi.isDir {
		return 0555 | os.ModeDir
		return defaultMode | fs.ModeDir
	}

	return 0555
	return defaultMode
}

func (fi *fileInfo) ModTime() time.Time {
	// TODO fix it
	return time.Now()
	return time.Time{}
}

func (fi *fileInfo) IsDir() bool {
@@ -12,13 +12,32 @@ func TestFileinfo(t *testing.T) {

	require := require.New(t)

	fi := NewFileInfo("name", 42, false)
	fi := newFileInfo("abc/name", 42)

	require.Equal(fi.IsDir(), false)
	require.Equal(fi.Name(), "name")
	require.Equal(fi.Size(), int64(42))
	require.Equal("name", fi.Name())
	require.False(fi.IsDir())
	require.Equal(int64(42), fi.Size())
	require.NotNil(fi.ModTime())
	require.Equal(fi.Mode(), fs.FileMode(0555))
	require.Equal(fi.Sys(), nil)
	require.Zero(fi.Type() & fs.ModeDir)
	require.Zero(fi.Mode() & fs.ModeDir)
	require.Equal(fs.FileMode(0555), fi.Mode())
	require.Equal(nil, fi.Sys())
}

func TestDirInfo(t *testing.T) {
	t.Parallel()

	require := require.New(t)

	fi := newDirInfo("abc/name")

	require.True(fi.IsDir())
	require.Equal("name", fi.Name())
	require.Equal(int64(0), fi.Size())
	require.NotNil(fi.ModTime())
	require.NotZero(fi.Type() & fs.ModeDir)
	require.NotZero(fi.Mode() & fs.ModeDir)
	require.Equal(defaultMode|fs.ModeDir, fi.Mode())
	require.Equal(nil, fi.Sys())

}
@@ -2,6 +2,8 @@ package vfs

import (
	"bytes"
	"io/fs"
	"path"
)

var _ Filesystem = &MemoryFs{}
@@ -20,22 +22,37 @@ func (m *MemoryFs) Open(filename string) (File, error) {
	return getFile(m.files, filename)
}

func (fs *MemoryFs) ReadDir(path string) (map[string]File, error) {
	return listFilesInDir(fs.files, path)
func (fs *MemoryFs) ReadDir(path string) ([]fs.DirEntry, error) {
	return listDirFromFiles(fs.files, path)
}

// Stat implements Filesystem.
func (mfs *MemoryFs) Stat(filename string) (fs.FileInfo, error) {
	file, ok := mfs.files[filename]
	if !ok {
		return nil, ErrNotExist
	}
	return newFileInfo(path.Base(filename), file.Size()), nil
}

var _ File = &MemoryFile{}

type MemoryFile struct {
	name string
	*bytes.Reader
}

func NewMemoryFile(data []byte) *MemoryFile {
func NewMemoryFile(name string, data []byte) *MemoryFile {
	return &MemoryFile{
		name:   name,
		Reader: bytes.NewReader(data),
	}
}

func (d *MemoryFile) Stat() (fs.FileInfo, error) {
	return newFileInfo(d.name, int64(d.Reader.Len())), nil
}

func (d *MemoryFile) Size() int64 {
	return int64(d.Reader.Len())
}
@@ -13,7 +13,7 @@ func TestMemory(t *testing.T) {
	testData := "Hello"

	c := NewMemoryFS(map[string]*MemoryFile{
		"/dir/here": NewMemoryFile([]byte(testData)),
		"/dir/here": NewMemoryFile("here", []byte(testData)),
	})

	// fss := map[string]Filesystem{
@@ -32,7 +32,7 @@ func TestMemory(t *testing.T) {
	data := make([]byte, 5)
	n, err := f.Read(data)
	require.NoError(err)
	require.Equal(n, 5)
	require.Equal(5, n)
	require.Equal(string(data), testData)

	files, err := c.ReadDir("/")
@@ -11,10 +11,19 @@ type OsFS struct {
	hostDir string
}

// Stat implements Filesystem.
func (fs *OsFS) Stat(filename string) (fs.FileInfo, error) {
	if path.Clean(filename) == Separator {
		return newDirInfo(Separator), nil
	}

	return os.Stat(path.Join(fs.hostDir, filename))
}

// Open implements Filesystem.
func (fs *OsFS) Open(filename string) (File, error) {
	if path.Clean(filename) == Separator {
		return &Dir{}, nil
		return NewDir(filename), nil
	}

	osfile, err := os.Open(path.Join(fs.hostDir, filename))
@@ -25,21 +34,9 @@ func (fs *OsFS) Open(filename string) (File, error) {
}

// ReadDir implements Filesystem.
func (o *OsFS) ReadDir(dir string) (map[string]File, error) {
func (o *OsFS) ReadDir(dir string) ([]fs.DirEntry, error) {
	dir = path.Join(o.hostDir, dir)
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	out := map[string]File{}
	for _, e := range entries {
		if e.IsDir() {
			out[e.Name()] = &Dir{}
		} else {
			out[e.Name()] = NewLazyOsFile(path.Join(dir, e.Name()))
		}
	}
	return out, nil
	return os.ReadDir(dir)
}

func NewOsFs(osDir string) *OsFS {
@@ -60,6 +57,11 @@ func NewOsFile(f *os.File) *OsFile {

var _ File = &OsFile{}

// Info implements File.
func (f *OsFile) Info() (fs.FileInfo, error) {
	return f.f.Stat()
}

// Close implements File.
func (f *OsFile) Close() error {
	return f.f.Close()
@@ -101,6 +103,9 @@ type LazyOsFile struct {
	m    sync.Mutex
	path string
	file *os.File

	// cached field
	info fs.FileInfo
}

func NewLazyOsFile(path string) *LazyOsFile {
@@ -127,25 +132,49 @@ func (f *LazyOsFile) open() error {

// Close implements File.
func (f *LazyOsFile) Close() error {
	if f.file == nil {
		return nil
	}
	return f.file.Close()
}

// Read implements File.
func (f *LazyOsFile) Read(p []byte) (n int, err error) {
	err = f.open()
	if err != nil {
		return 0, err
	}
	return f.file.Read(p)
}

// ReadAt implements File.
func (f *LazyOsFile) ReadAt(p []byte, off int64) (n int, err error) {
	err = f.open()
	if err != nil {
		return 0, err
	}
	return f.file.ReadAt(p, off)
}

func (f *LazyOsFile) Stat() (fs.FileInfo, error) {
	if f.file == nil {
		return os.Stat(f.path)
	} else {
		return f.file.Stat()
	f.m.Lock()
	if f.info == nil {
		if f.file == nil {
			info, err := os.Stat(f.path)
			if err != nil {
				return nil, err
			}
			f.info = info
		} else {
			info, err := f.file.Stat()
			if err != nil {
				return nil, err
			}
			f.info = info
		}
	}
	f.m.Unlock()
	return f.info, nil
}

// Size implements File.
@@ -3,27 +3,27 @@ package vfs
import (
	"fmt"
	"io/fs"
	"path"
	"slices"
	"strings"
	"sync"
)

type ResolveFS struct {
	osDir string
	osFS  *OsFS
	rootFS   Filesystem
	resolver *resolver
}

func NewResolveFS(osDir string, factories map[string]FsFactory) *ResolveFS {
func NewResolveFS(rootFs Filesystem, factories map[string]FsFactory) *ResolveFS {
	return &ResolveFS{
		osDir: osDir,
		osFS:  NewOsFs(osDir),
		rootFS:   rootFs,
		resolver: newResolver(factories),
	}
}

// Open implements Filesystem.
func (r *ResolveFS) Open(filename string) (File, error) {
	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(filename, r.osFS.Open)
	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(filename, r.rootFS.Open)
	if err != nil {
		return nil, err
	}
@@ -31,12 +31,12 @@ func (r *ResolveFS) Open(filename string) (File, error) {
		return nestedFs.Open(nestedFsPath)
	}

	return r.osFS.Open(fsPath)
	return r.rootFS.Open(fsPath)
}

// ReadDir implements Filesystem.
func (r *ResolveFS) ReadDir(dir string) (map[string]File, error) {
	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(dir, r.osFS.Open)
func (r *ResolveFS) ReadDir(dir string) ([]fs.DirEntry, error) {
	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(dir, r.rootFS.Open)
	if err != nil {
		return nil, err
	}
@@ -44,7 +44,32 @@ func (r *ResolveFS) ReadDir(dir string) (map[string]File, error) {
		return nestedFs.ReadDir(nestedFsPath)
	}

	return r.osFS.ReadDir(fsPath)
	entries, err := r.rootFS.ReadDir(fsPath)
	if err != nil {
		return nil, err
	}
	out := make([]fs.DirEntry, 0, len(entries))
	for _, e := range entries {
		if r.resolver.isNestedFs(e.Name()) {
			out = append(out, newDirInfo(e.Name()))
		} else {
			out = append(out, e)
		}
	}
	return out, nil
}

// Stat implements Filesystem.
func (r *ResolveFS) Stat(filename string) (fs.FileInfo, error) {
	fsPath, nestedFs, nestedFsPath, err := r.resolver.resolvePath(filename, r.rootFS.Open)
	if err != nil {
		return nil, err
	}
	if nestedFs != nil {
		return nestedFs.Stat(nestedFsPath)
	}

	return r.rootFS.Stat(fsPath)
}

var _ Filesystem = &ResolveFS{}
@@ -69,8 +94,18 @@ type resolver struct {

type openFile func(path string) (File, error)

func (r *resolver) isNestedFs(f string) bool {
	for ext := range r.factories {
		if strings.HasSuffix(f, ext) {
			return true
		}
	}
	return true
}

// open requeue raw open, without resolver call
func (r *resolver) resolvePath(name string, rawOpen openFile) (fsPath string, nestedFs Filesystem, nestedFsPath string, err error) {
	name = path.Clean(name)
	name = strings.TrimPrefix(name, Separator)
	parts := strings.Split(name, Separator)

@@ -89,11 +124,12 @@ PARTS_LOOP:
	}

	if nestOn == -1 {
		return clean(name), nil, "", nil
		return AbsPath(name), nil, "", nil
	}

	fsPath = clean(strings.Join(parts[:nestOn], Separator))
	nestedFsPath = clean(strings.Join(parts[nestOn:], Separator))
	fsPath = AbsPath(path.Join(parts[:nestOn]...))

	nestedFsPath = AbsPath(path.Join(parts[nestOn:]...))

	// we dont need lock until now
	// it must be before fsmap read to exclude race condition:
@@ -123,9 +159,8 @@ PARTS_LOOP:
var ErrNotExist = fs.ErrNotExist

func getFile[F File](m map[string]F, name string) (File, error) {
	name = clean(name)
	if name == Separator {
		return &Dir{}, nil
		return &dir{}, nil
	}

	f, ok := m[name]
@@ -135,27 +170,30 @@ func getFile[F File](m map[string]F, name string) (File, error) {

	for p := range m {
		if strings.HasPrefix(p, name) {
			return &Dir{}, nil
			return &dir{}, nil
		}
	}

	return nil, ErrNotExist
}

func listFilesInDir[F File](m map[string]F, name string) (map[string]File, error) {
	name = clean(name)

	out := map[string]File{}
func listDirFromFiles[F File](m map[string]F, name string) ([]fs.DirEntry, error) {
	out := make([]fs.DirEntry, 0, len(m))
	name = AddTrailSlash(name)
	for p, f := range m {
		if strings.HasPrefix(p, name) {
			parts := strings.Split(trimRelPath(p, name), Separator)
			if len(parts) == 1 {
				out[parts[0]] = f
				out = append(out, newFileInfo(parts[0], f.Size()))
			} else {
				out[parts[0]] = &Dir{}
				out = append(out, newDirInfo(parts[0]))
			}

		}
	}
	out = slices.CompactFunc(out, func(de1, de2 fs.DirEntry) bool {
		return de1.Name() == de2.Name()
	})

	return out, nil
}
@@ -1,13 +1,21 @@
package vfs

import (
	"io/fs"
	"os"
	"path"
	"testing"

	"github.com/stretchr/testify/require"
)

type Dummy struct {
	name string
}

// Stat implements File.
func (d *Dummy) Stat() (fs.FileInfo, error) {
	return newFileInfo(d.name, 0), nil
}

func (d *Dummy) Size() int64 {
@@ -35,15 +43,20 @@ var _ File = &Dummy{}
type DummyFs struct {
}

// Stat implements Filesystem.
func (*DummyFs) Stat(filename string) (fs.FileInfo, error) {
	return newFileInfo(path.Base(filename), 0), nil // TODO
}

func (d *DummyFs) Open(filename string) (File, error) {
	return &Dummy{}, nil
}

func (d *DummyFs) ReadDir(path string) (map[string]File, error) {
func (d *DummyFs) ReadDir(path string) ([]fs.DirEntry, error) {
	if path == "/dir/here" {
		return map[string]File{
			"file1.txt": &Dummy{},
			"file2.txt": &Dummy{},
		return []fs.DirEntry{
			newFileInfo("file1.txt", 0),
			newFileInfo("file2.txt", 0),
		}, nil
	}

@@ -63,7 +76,7 @@ func TestResolver(t *testing.T) {
		require.Equal("/f1.rar", path)
		return &Dummy{}, nil
	})
	require.Nil(err)
	require.NoError(err)
	require.Equal("/f1.rar", fsPath)
	require.Equal("/f2.rar", nestedFsPath)
	require.IsType(&archive{}, nestedFs)
@@ -76,7 +89,7 @@ func TestResolver(t *testing.T) {
		require.Equal("/", path)
		return &Dummy{}, nil
	})
	require.Nil(err)
	require.NoError(err)
	require.Nil(nestedFs)
	require.Equal("/", fsPath)
	require.Equal("", nestedFsPath)
@@ -90,7 +103,7 @@ func TestResolver(t *testing.T) {
		require.Equal("/", path)
		return &Dummy{}, nil
	})
	require.Nil(err)
	require.NoError(err)
	require.Nil(nestedFs)
	require.Equal("/", fsPath)
	require.Equal("", nestedFsPath)
@@ -103,7 +116,7 @@ func TestResolver(t *testing.T) {
		require.Equal("/f1.rar", path)
		return &Dummy{}, nil
	})
	require.Nil(err)
	require.NoError(err)
	require.Equal("/f1.rar", fsPath)
	require.Equal("/", nestedFsPath)
	require.IsType(&archive{}, nestedFs)
@@ -116,7 +129,7 @@ func TestResolver(t *testing.T) {
		require.Equal("/test1/f1.rar", path)
		return &Dummy{}, nil
	})
	require.Nil(err)
	require.NoError(err)
	require.IsType(&archive{}, nestedFs)
	require.Equal("/test1/f1.rar", fsPath)
	require.Equal("/", nestedFsPath)
@@ -150,23 +163,43 @@ func TestFiles(t *testing.T) {
	require := require.New(t)

	files := map[string]*Dummy{
		"/test/file.txt": &Dummy{},
		"/test/file.txt":  &Dummy{},
		"/test/file2.txt": &Dummy{},
		"/test1/file.txt": &Dummy{},
	}
	{
		file, err := getFile(files, "/test")
		require.Nil(err)
		require.Equal(&Dir{}, file)
		require.NoError(err)
		require.Equal(&dir{}, file)
	}
	{
		file, err := getFile(files, "/test/file.txt")
		require.Nil(err)
		require.NoError(err)
		require.Equal(&Dummy{}, file)
	}

	{
		out, err := listFilesInDir(files, "/test")
		require.Nil(err)
		require.Contains(out, "file.txt")
		require.Equal(&Dummy{}, out["file.txt"])
		out, err := listDirFromFiles(files, "/test")
		require.NoError(err)
		require.Len(out, 2)
		require.Equal("file.txt", out[0].Name())
		require.Equal("file2.txt", out[1].Name())
		require.False(out[0].IsDir())
		require.False(out[1].IsDir())
	}
	{
		out, err := listDirFromFiles(files, "/test1")
		require.NoError(err)
		require.Len(out, 1)
		require.Equal("file.txt", out[0].Name())
		require.False(out[0].IsDir())
	}
	{
		out, err := listDirFromFiles(files, "/")
		require.NoError(err)
		require.Len(out, 2)
		require.Equal("test", out[0].Name())
		require.Equal("test1", out[1].Name())
		require.True(out[0].IsDir())
		require.True(out[1].IsDir())
	}
}
@@ -3,6 +3,8 @@ package vfs
import (
	"context"
	"io"
	"io/fs"
	"path"
	"sync"
	"time"

@@ -14,10 +16,14 @@ import (
var _ Filesystem = &TorrentFs{}

type TorrentFs struct {
	mu sync.RWMutex
	t  *torrent.Torrent
	mu sync.Mutex
	t  *torrent.Torrent

	readTimeout int

	//cache
	filesCache map[string]*torrentFile

	resolver *resolver
}

@@ -30,22 +36,24 @@ func NewTorrentFs(t *torrent.Torrent, readTimeout int) *TorrentFs {
}

func (fs *TorrentFs) files() map[string]*torrentFile {
	files := make(map[string]*torrentFile)
	<-fs.t.GotInfo()
	for _, file := range fs.t.Files() {
		if file.Priority() == torrent.PiecePriorityNone {
			continue
		}

		p := clean(file.Path())
		files[p] = &torrentFile{
			readerFunc: file.NewReader,
			len:        file.Length(),
			timeout:    fs.readTimeout,
	if fs.filesCache == nil {
		fs.mu.Lock()
		<-fs.t.GotInfo()
		files := fs.t.Files()
		fs.filesCache = make(map[string]*torrentFile)
		for _, file := range files {
			p := AbsPath(file.Path())
			fs.filesCache[p] = &torrentFile{
				name:       path.Base(p),
				readerFunc: file.NewReader,
				len:        file.Length(),
				timeout:    fs.readTimeout,
			}
		}
		fs.mu.Unlock()
	}

	return files
	return fs.filesCache
}

func (fs *TorrentFs) rawOpen(path string) (File, error) {
@@ -53,6 +61,36 @@ func (fs *TorrentFs) rawOpen(path string) (File, error) {
	return file, err
}

func (fs *TorrentFs) rawStat(filename string) (fs.FileInfo, error) {
	file, err := getFile(fs.files(), filename)
	if err != nil {
		return nil, err
	}
	if file.IsDir() {
		return newDirInfo(path.Base(filename)), nil
	} else {
		return newFileInfo(path.Base(filename), file.Size()), nil
	}

}

// Stat implements Filesystem.
func (fs *TorrentFs) Stat(filename string) (fs.FileInfo, error) {
	if filename == Separator {
		return newDirInfo(filename), nil
	}

	fsPath, nestedFs, nestedFsPath, err := fs.resolver.resolvePath(filename, fs.rawOpen)
	if err != nil {
		return nil, err
	}
	if nestedFs != nil {
		return nestedFs.Stat(nestedFsPath)
	}

	return fs.rawStat(fsPath)
}

func (fs *TorrentFs) Open(filename string) (File, error) {
	fsPath, nestedFs, nestedFsPath, err := fs.resolver.resolvePath(filename, fs.rawOpen)
	if err != nil {
@@ -65,7 +103,7 @@ func (fs *TorrentFs) Open(filename string) (File, error) {
	return fs.rawOpen(fsPath)
}

func (fs *TorrentFs) ReadDir(name string) (map[string]File, error) {
func (fs *TorrentFs) ReadDir(name string) ([]fs.DirEntry, error) {
	fsPath, nestedFs, nestedFsPath, err := fs.resolver.resolvePath(name, fs.rawOpen)
	if err != nil {
		return nil, err
@@ -74,7 +112,7 @@ func (fs *TorrentFs) ReadDir(name string) (map[string]File, error) {
		return nestedFs.ReadDir(nestedFsPath)
	}

	return listFilesInDir(fs.files(), fsPath)
	return listDirFromFiles(fs.files(), fsPath)
}

func (fs *TorrentFs) Unlink(name string) error {
@@ -144,12 +182,18 @@ func (rw *readAtWrapper) Close() error {
var _ File = &torrentFile{}

type torrentFile struct {
	name string

	readerFunc func() torrent.Reader
	reader     reader
	len        int64
	timeout    int
}

func (d *torrentFile) Stat() (fs.FileInfo, error) {
	return newFileInfo(d.name, d.len), nil
}

func (d *torrentFile) load() {
	if d.reader != nil {
		return
@@ -1,14 +1,25 @@
package vfs

import (
	"path"
	"strings"
)
import "strings"

func trimRelPath(p, t string) string {
	return strings.Trim(strings.TrimPrefix(p, t), "/")
}

func clean(p string) string {
	return path.Clean(Separator + strings.ReplaceAll(p, "\\", "/"))
// func clean(p string) string {
// 	return path.Clean(Separator + strings.ReplaceAll(p, "\\", "/"))
// }

func AbsPath(p string) string {
	if p == "" || p[0] != '/' {
		return Separator + p
	}
	return p
}

func AddTrailSlash(p string) string {
	if p == "" || p[len(p)-1] != '/' {
		return p + Separator
	}
	return p
}
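The two new path helpers are small pure functions; a quick sanity check of their behavior (values follow directly from the code above, with Separator assumed to be "/"):

	AbsPath("a/b")        // "/a/b"
	AbsPath("/a/b")       // "/a/b"
	AddTrailSlash("/a/b") // "/a/b/"
	AddTrailSlash("/a/")  // "/a/"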
@@ -8,6 +8,7 @@ import (
	"git.kmsign.ru/royalcat/tstor/src/config"
	"git.kmsign.ru/royalcat/tstor/src/host/torrent"
	"github.com/anacrolix/missinggo/v2/filecache"
	"github.com/gin-contrib/pprof"
	"github.com/gin-gonic/gin"
	"github.com/rs/zerolog/log"
	"github.com/shurcooL/httpfs/html/vfstemplate"
@@ -19,6 +20,7 @@ func New(fc *filecache.Cache, ss *torrent.Stats, s *torrent.Service, logPath str
	r.Use(gin.Recovery())
	r.Use(gin.ErrorLogger())
	r.Use(Logger())
	pprof.Register(r)

	r.GET("/assets/*filepath", func(c *gin.Context) {
		c.FileFromFS(c.Request.URL.Path, http.FS(tstor.Assets))
@@ -16,7 +16,7 @@ func TestSeekerWrapper(t *testing.T) {

	require := require.New(t)

	mf := vfs.NewMemoryFile(testData)
	mf := vfs.NewMemoryFile("text.txt", testData)

	r := iio.NewSeekerWrapper(mf, mf.Size())
	defer r.Close()
@@ -174,13 +174,13 @@ func (fh *fileHandler) ListDir(path string) ([]string, error) {
	fh.mu.RLock()
	defer fh.mu.RUnlock()

	var out []string
	files, err := fh.fs.ReadDir(path)
	if err != nil {
		return nil, err
	}
	for p := range files {
		out = append(out, p)
	out := make([]string, 0, len(files))
	for _, p := range files {
		out = append(out, p.Name())
	}

	return out, nil
@@ -25,7 +25,7 @@ func TestHandler(t *testing.T) {
	h := NewHandler(false, p)

	mem := vfs.NewMemoryFS(map[string]*vfs.MemoryFile{
		"/test.txt": vfs.NewMemoryFile([]byte("test")),
		"/test.txt": vfs.NewMemoryFile("test.txt", []byte("test")),
	})

	err := h.Mount(mem)
@@ -52,7 +52,7 @@ func TestHandlerDriveLetter(t *testing.T) {
	h := NewHandler(false, p)

	mem := vfs.NewMemoryFS(map[string]*vfs.MemoryFile{
		"/test.txt": vfs.NewMemoryFile([]byte("test")),
		"/test.txt": vfs.NewMemoryFile("test.txt", []byte("test")),
	})

	err := h.Mount(mem)
@@ -21,32 +21,37 @@ func NewHTTPFS(fs vfs.Filesystem) *HTTPFS {
	return &HTTPFS{fs: fs}
}

func (fs *HTTPFS) Open(name string) (http.File, error) {
	f, err := fs.fs.Open(name)
func (hfs *HTTPFS) Open(name string) (http.File, error) {
	f, err := hfs.fs.Open(name)
	if err != nil {
		return nil, err
	}

	fi := vfs.NewFileInfo(name, f.Size(), f.IsDir())

	// TODO make this lazy
	fis, err := fs.filesToFileInfo(name)
	if err != nil {
		return nil, err
	var fis []fs.FileInfo
	if f.IsDir() {
		// TODO make this lazy
		fis, err = hfs.filesToFileInfo(name)
		if err != nil {
			return nil, err
		}
	}

	return newHTTPFile(f, fis, fi), nil
	return newHTTPFile(f, fis), nil
}

func (fs *HTTPFS) filesToFileInfo(path string) ([]fs.FileInfo, error) {
	files, err := fs.fs.ReadDir(path)
func (hfs *HTTPFS) filesToFileInfo(name string) ([]fs.FileInfo, error) {
	files, err := hfs.fs.ReadDir(name)
	if err != nil {
		return nil, err
	}

	var out []os.FileInfo
	for n, f := range files {
		out = append(out, vfs.NewFileInfo(n, f.Size(), f.IsDir()))
	out := make([]os.FileInfo, 0, len(files))
	for _, f := range files {
		info, err := f.Info()
		if err != nil {
			return nil, err
		}
		out = append(out, info)
	}

	return out, nil
@@ -55,33 +60,32 @@ func (fs *HTTPFS) filesToFileInfo(path string) ([]fs.FileInfo, error) {
var _ http.File = &httpFile{}

type httpFile struct {
	f vfs.File

	iio.ReaderSeeker

	mu sync.Mutex
	// dirPos is protected by mu.
	dirPos     int
	dirContent []os.FileInfo

	fi fs.FileInfo
}

func newHTTPFile(f vfs.File, fis []fs.FileInfo, fi fs.FileInfo) *httpFile {
func newHTTPFile(f vfs.File, dirContent []os.FileInfo) *httpFile {
	return &httpFile{
		dirContent: fis,
		fi:         fi,

		f:            f,
		dirContent:   dirContent,
		ReaderSeeker: iio.NewSeekerWrapper(f, f.Size()),
	}
}

func (f *httpFile) Readdir(count int) ([]fs.FileInfo, error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	if !f.fi.IsDir() {
	if !f.f.IsDir() {
		return nil, os.ErrInvalid
	}

	f.mu.Lock()
	defer f.mu.Unlock()

	old := f.dirPos
	if old >= len(f.dirContent) {
		// The os.File Readdir docs say that at the end of a directory,
@@ -105,5 +109,5 @@ func (f *httpFile) Readdir(count int) ([]fs.FileInfo, error) {
}

func (f *httpFile) Stat() (fs.FileInfo, error) {
	return f.fi, nil
	return f.f.Stat()
}
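Since Open returns an http.File, *HTTPFS satisfies http.FileSystem and can be handed straight to net/http. A minimal sketch of such wiring (the function name and address handling are assumptions, not project code):

package main

import "net/http"

// serveFS exposes any http.FileSystem (such as the *HTTPFS above) over HTTP.
// Illustrative only; the listen address is a placeholder.
func serveFS(fsys http.FileSystem, addr string) error {
	return http.ListenAndServe(addr, http.FileServer(fsys))
}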
16  src/mounts/nfs/handler.go  Normal file
@@ -0,0 +1,16 @@
package nfs

import (
	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
	nfs "github.com/willscott/go-nfs"
	nfshelper "github.com/willscott/go-nfs/helpers"
)

func NewNFSv3Handler(fs vfs.Filesystem) (nfs.Handler, error) {
	bfs := &billyFsWrapper{fs: fs}
	handler := nfshelper.NewNullAuthHandler(bfs)
	cacheHelper := nfshelper.NewCachingHandler(handler, 1024*16)
	// cacheHelper := NewCachingHandler(handler)

	return cacheHelper, nil
}
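A minimal sketch of how this handler could be served: go-nfs exposes Serve over a net.Listener, and the handler would come from NewNFSv3Handler above. The listen address is a placeholder, not taken from the project:

package main

import (
	"log"
	"net"

	nfs "github.com/willscott/go-nfs"
)

// serveNFS serves an nfs.Handler on the given TCP address.
// Illustrative only; error handling is reduced for brevity.
func serveNFS(handler nfs.Handler, addr string) error {
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	log.Printf("NFSv3 server listening on %s", listener.Addr())
	return nfs.Serve(listener, handler)
}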
190  src/mounts/nfs/wrapper-v3.go  Normal file
@@ -0,0 +1,190 @@
package nfs

import (
	"io/fs"
	"path/filepath"

	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
	"github.com/go-git/go-billy/v5"
)

type billyFsWrapper struct {
	fs vfs.Filesystem
}

var _ billy.Filesystem = (*billyFsWrapper)(nil)
var _ billy.Dir = (*billyFsWrapper)(nil)

// Chroot implements billy.Filesystem.
func (*billyFsWrapper) Chroot(path string) (billy.Filesystem, error) {
	return nil, billy.ErrNotSupported
}

// Create implements billy.Filesystem.
func (*billyFsWrapper) Create(filename string) (billy.File, error) {
	return nil, billy.ErrNotSupported
}

// Join implements billy.Filesystem.
func (*billyFsWrapper) Join(elem ...string) string {
	return filepath.Join(elem...)
}

// Lstat implements billy.Filesystem.
func (fs *billyFsWrapper) Lstat(filename string) (fs.FileInfo, error) {
	info, err := fs.fs.Stat(filename)
	if err != nil {
		return nil, billyErr(err)
	}
	return info, nil
}

// MkdirAll implements billy.Filesystem.
func (*billyFsWrapper) MkdirAll(filename string, perm fs.FileMode) error {
	return billy.ErrNotSupported
}

// Open implements billy.Filesystem.
func (f *billyFsWrapper) Open(filename string) (billy.File, error) {
	file, err := f.fs.Open(filename)
	if err != nil {
		return nil, billyErr(err)
	}
	return &billyFile{
		name: filename,
		file: file,
	}, nil
}

// OpenFile implements billy.Filesystem.
func (f *billyFsWrapper) OpenFile(filename string, flag int, perm fs.FileMode) (billy.File, error) {
	file, err := f.fs.Open(filename)
	if err != nil {
		return nil, billyErr(err)
	}
	return &billyFile{
		name: filename,
		file: file,
	}, nil
}

// ReadDir implements billy.Filesystem.
func (bfs *billyFsWrapper) ReadDir(path string) ([]fs.FileInfo, error) {
	ffs, err := bfs.fs.ReadDir(path)
	if err != nil {
		return nil, billyErr(err)
	}

	out := make([]fs.FileInfo, 0, len(ffs))
	for _, v := range ffs {
		if info, ok := v.(fs.FileInfo); ok {
			out = append(out, info)
		} else {
			info, err := v.Info()
			if err != nil {
				return nil, err
			}
			out = append(out, info)
		}
	}
	return out, nil
}

// Readlink implements billy.Filesystem.
func (*billyFsWrapper) Readlink(link string) (string, error) {
	return "", billy.ErrNotSupported
}

// Remove implements billy.Filesystem.
func (*billyFsWrapper) Remove(filename string) error {
	return billy.ErrNotSupported
}

// Rename implements billy.Filesystem.
func (*billyFsWrapper) Rename(oldpath string, newpath string) error {
	return billy.ErrNotSupported
}

// Root implements billy.Filesystem.
func (*billyFsWrapper) Root() string {
	return "/"
}

// Stat implements billy.Filesystem.
func (f *billyFsWrapper) Stat(filename string) (fs.FileInfo, error) {
	info, err := f.fs.Stat(filename)
	if err != nil {
		return nil, billyErr(err)
	}
	return info, nil
}

// Symlink implements billy.Filesystem.
func (*billyFsWrapper) Symlink(target string, link string) error {
	return billyErr(vfs.ErrNotImplemented)
}

// TempFile implements billy.Filesystem.
func (*billyFsWrapper) TempFile(dir string, prefix string) (billy.File, error) {
	return nil, billyErr(vfs.ErrNotImplemented)
}

type billyFile struct {
	name string
	file vfs.File
}

var _ billy.File = (*billyFile)(nil)
// Close implements billy.File.
func (f *billyFile) Close() error {
	return f.file.Close()
}

// Name implements billy.File.
func (f *billyFile) Name() string {
	return f.name
}

// Read implements billy.File.
func (f *billyFile) Read(p []byte) (n int, err error) {
	return f.file.Read(p)
}

// ReadAt implements billy.File.
func (f *billyFile) ReadAt(p []byte, off int64) (n int, err error) {
	return f.file.ReadAt(p, off)
}

// Seek implements billy.File.
func (*billyFile) Seek(offset int64, whence int) (int64, error) {
	return 0, billyErr(vfs.ErrNotImplemented)
}

// Truncate implements billy.File.
func (*billyFile) Truncate(size int64) error {
	return billyErr(vfs.ErrNotImplemented)
}

// Write implements billy.File.
func (*billyFile) Write(p []byte) (n int, err error) {
	return 0, billyErr(vfs.ErrNotImplemented)
}

// Lock implements billy.File.
func (*billyFile) Lock() error {
	return nil // TODO
}

// Unlock implements billy.File.
func (*billyFile) Unlock() error {
	return nil // TODO
}

func billyErr(err error) error {
	if err == vfs.ErrNotImplemented {
		return billy.ErrNotSupported
	}
	return err
}
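The wrapper is effectively read-only: every mutating operation returns billy.ErrNotSupported or maps the vfs not-implemented error, so NFS clients can browse and read but not write. For illustration, a self-contained sketch that reads a file through the wrapper using only the Open/Read/Close methods defined above (the function and buffer size are assumptions, not project code):

package nfs

import (
	"io"

	"github.com/go-git/go-billy/v5"
)

// readAll drains a file opened through a billy.Filesystem such as
// *billyFsWrapper. Illustrative only; the buffer size is arbitrary.
func readAll(bfs billy.Filesystem, name string) ([]byte, error) {
	f, err := bfs.Open(name)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var out []byte
	buf := make([]byte, 32*1024)
	for {
		n, err := f.Read(buf)
		out = append(out, buf[:n]...)
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return nil, err
		}
	}
}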
164  src/mounts/nfs/wrapper-v4.go  Normal file
@@ -0,0 +1,164 @@
package nfs

// import (
// 	"io/fs"

// 	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
// 	nfsfs "github.com/smallfz/libnfs-go/fs"
// )

// type nfsFsWrapper struct {
// 	fs vfs.Filesystem
// }

// var _ nfsfs.FS = (*nfsFsWrapper)(nil)

// // Attributes implements fs.FS.
// func (*nfsFsWrapper) Attributes() *nfsfs.Attributes {
// 	return &nfsfs.Attributes{
// 		LinkSupport:     true,
// 		SymlinkSupport:  false, // unsupported
// 		ChownRestricted: true,  // unsupported
// 		MaxName:         255,   // common value
// 		NoTrunc:         false,
// 	}
// }

// // Stat implements fs.FS.
// func (*nfsFsWrapper) Stat(string) (nfsfs.FileInfo, error) {
// 	panic("unimplemented")
// }

// // Chmod implements fs.FS.
// func (*nfsFsWrapper) Chmod(string, fs.FileMode) error {
// 	panic("unimplemented")
// }

// // Chown implements fs.FS.
// func (*nfsFsWrapper) Chown(string, int, int) error {
// 	panic("unimplemented")
// }

// // GetFileId implements fs.FS.
// func (*nfsFsWrapper) GetFileId(nfsfs.FileInfo) uint64 {
// 	panic("unimplemented")
// }

// // GetHandle implements fs.FS.
// func (*nfsFsWrapper) GetHandle(nfsfs.FileInfo) ([]byte, error) {
// 	panic("unimplemented")
// }

// // GetRootHandle implements fs.FS.
// func (*nfsFsWrapper) GetRootHandle() []byte {
// 	panic("unimplemented")
// }

// // Link implements fs.FS.
// func (*nfsFsWrapper) Link(string, string) error {
// 	panic("unimplemented")
// }

// // MkdirAll implements fs.FS.
// func (*nfsFsWrapper) MkdirAll(string, fs.FileMode) error {
// 	panic("unimplemented")
// }

// // Open implements fs.FS.
// func (w *nfsFsWrapper) Open(name string) (nfsfs.File, error) {
// 	f, err := w.fs.Open(name)
// 	if err != nil {
// 		return nil, nfsErr(err)
// 	}
// }

// // OpenFile implements fs.FS.
// func (w *nfsFsWrapper) OpenFile(string, int, fs.FileMode) (nfsfs.File, error) {
// 	panic("unimplemented")
// }

// // Readlink implements fs.FS.
// func (*nfsFsWrapper) Readlink(string) (string, error) {
// 	panic("unimplemented")
// }

// // Remove implements fs.FS.
// func (*nfsFsWrapper) Remove(string) error {
// 	panic("unimplemented")
// }

// // Rename implements fs.FS.
// func (*nfsFsWrapper) Rename(string, string) error {
// 	panic("unimplemented")
// }

// // ResolveHandle implements fs.FS.
// func (*nfsFsWrapper) ResolveHandle([]byte) (string, error) {
// 	panic("unimplemented")
// }

// // Symlink implements fs.FS.
// func (*nfsFsWrapper) Symlink(string, string) error {
// 	return NotImplementedError
// }

// var NotImplementedError = vfs.NotImplemented

// func nfsErr(err error) error {
// 	if err == vfs.NotImplemented {
// 		return NotImplementedError
// 	}
// 	return err
// }

// type nfsFile struct {
// 	name string
// 	f    vfs.File
// }

// // Close implements fs.File.
// func (f *nfsFile) Close() error {
// 	return f.f.Close()
// }

// // Name implements fs.File.
// func (f *nfsFile) Name() string {
// 	return f.name
// }

// // Read implements fs.File.
// func (f *nfsFile) Read(p []byte) (n int, err error) {
// 	return f.f.Read(p)
// }

// // Readdir implements fs.File.
// func (f *nfsFile) Readdir(int) ([]nfsfs.FileInfo, error) {
// 	f.f.IsDir()
// }

// // Seek implements fs.File.
// func (*nfsFile) Seek(offset int64, whence int) (int64, error) {
// 	panic("unimplemented")
// }

// // Stat implements fs.File.
// func (*nfsFile) Stat() (nfsfs.FileInfo, error) {
// 	panic("unimplemented")
// }

// // Sync implements fs.File.
// func (*nfsFile) Sync() error {
// 	panic("unimplemented")
// }

// // Truncate implements fs.File.
// func (*nfsFile) Truncate() error {
// 	panic("unimplemented")
// }

// // Write implements fs.File.
// func (*nfsFile) Write(p []byte) (n int, err error) {
// 	panic("unimplemented")
// }

// var _ nfsfs.File = (*nfsFile)(nil)
@@ -3,8 +3,9 @@ package webdav
import (
	"context"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"path"
	"sync"
	"time"
@@ -24,30 +25,25 @@ func newFS(fs vfs.Filesystem) *WebDAV {
}

func (wd *WebDAV) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (webdav.File, error) {
	p := "/" + name
	name = vfs.AbsPath(name)

	// TODO handle flag and permissions
	f, err := wd.lookupFile(p)
	f, err := wd.lookupFile(name)
	if err != nil {
		return nil, err
	}

	wdf := newFile(filepath.Base(p), f, func() ([]os.FileInfo, error) {
		return wd.listDir(p)
	wdf := newFile(path.Base(name), f, func() ([]fs.FileInfo, error) {
		return wd.listDir(name)
	})
	return wdf, nil
}

func (wd *WebDAV) Stat(ctx context.Context, name string) (os.FileInfo, error) {
	p := "/" + name
	f, err := wd.lookupFile(p)
	if err != nil {
		return nil, err
	}
	fi := newFileInfo(name, f.Size(), f.IsDir())
	return fi, nil
func (wd *WebDAV) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
	return wd.fs.Stat(vfs.AbsPath(name))
}

func (wd *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
func (wd *WebDAV) Mkdir(ctx context.Context, name string, perm fs.FileMode) error {
	return webdav.ErrNotImplemented
}
@@ -59,8 +55,8 @@ func (wd *WebDAV) Rename(ctx context.Context, oldName, newName string) error {
	return webdav.ErrNotImplemented
}

func (wd *WebDAV) lookupFile(path string) (vfs.File, error) {
	return wd.fs.Open(path)
func (wd *WebDAV) lookupFile(name string) (vfs.File, error) {
	return wd.fs.Open(path.Clean(name))
}

func (wd *WebDAV) listDir(path string) ([]os.FileInfo, error) {
@@ -69,9 +65,13 @@ func (wd *WebDAV) listDir(path string) ([]os.FileInfo, error) {
		return nil, err
	}

	var out []os.FileInfo
	for n, f := range files {
		out = append(out, newFileInfo(n, f.Size(), f.IsDir()))
	out := make([]os.FileInfo, 0, len(files))
	for _, f := range files {
		info, err := f.Info()
		if err != nil {
			return nil, err
		}
		out = append(out, info)
	}

	return out, nil
@@ -13,16 +13,17 @@ import (
)

func TestWebDAVFilesystem(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	require := require.New(t)

	mfs := vfs.NewMemoryFS(map[string]*vfs.MemoryFile{
		"/folder/file.txt": vfs.NewMemoryFile([]byte("test file content.")),
		"/folder/file.txt": vfs.NewMemoryFile("file.txt", []byte("test file content.")),
	})

	wfs := newFS(mfs)

	dir, err := wfs.OpenFile(context.Background(), "/", 0, 0)
	dir, err := wfs.OpenFile(ctx, "/", 0, 0)
	require.NoError(err)

	fi, err := dir.Readdir(0)
@@ -30,7 +31,7 @@ func TestWebDAVFilesystem(t *testing.T) {
	require.Len(fi, 1)
	require.Equal("folder", fi[0].Name())

	file, err := wfs.OpenFile(context.Background(), "/folder/file.txt", 0, 0)
	file, err := wfs.OpenFile(ctx, "/folder/file.txt", 0, 0)
	require.NoError(err)
	_, err = file.Readdir(0)
	require.ErrorIs(err, os.ErrInvalid)
@@ -57,7 +58,7 @@ func TestWebDAVFilesystem(t *testing.T) {
	fInfo, err := wfs.Stat(context.Background(), "/folder/file.txt")
	require.NoError(err)
	require.Equal("/folder/file.txt", fInfo.Name())
	require.Equal(false, fInfo.IsDir())
	require.False(fInfo.IsDir())
	require.Equal(int64(18), fInfo.Size())
}
@@ -67,7 +68,7 @@ func TestErrNotImplemented(t *testing.T) {
	require := require.New(t)

	mfs := vfs.NewMemoryFS(map[string]*vfs.MemoryFile{
		"/folder/file.txt": vfs.NewMemoryFile([]byte("test file content.")),
		"/folder/file.txt": vfs.NewMemoryFile("file.txt", []byte("test file content.")),
	})

	wfs := newFS(mfs)
@@ -9,11 +9,12 @@ import (
)

func NewWebDAVServer(fs vfs.Filesystem, port int, user, pass string) error {
	log.Info().Str("host", fmt.Sprintf("0.0.0.0:%d", port)).Msg("starting webDAV server")

	srv := newHandler(fs)

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
	serveMux := http.NewServeMux()

	serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		username, password, _ := r.BasicAuth()
		if username == user && password == pass {
			srv.ServeHTTP(w, r)
@@ -22,8 +23,16 @@ func NewWebDAVServer(fs vfs.Filesystem, port int, user, pass string) error {

		w.Header().Set("WWW-Authenticate", `Basic realm="BASIC WebDAV REALM"`)
		w.WriteHeader(401)
		w.Write([]byte("401 Unauthorized\n"))
		_, _ = w.Write([]byte("401 Unauthorized\n"))
	})

	return http.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", port), nil)
	//nolint:exhaustruct
	httpServer := &http.Server{
		Addr:    fmt.Sprintf("0.0.0.0:%d", port),
		Handler: serveMux,
	}

	log.Info().Str("host", httpServer.Addr).Msg("starting webDAV server")

	return httpServer.ListenAndServe()
}
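newHandler itself is not part of this diff; given the golang.org/x/net/webdav types used above (webdav.File, webdav.ErrNotImplemented), it would plausibly be a thin wrapper like the sketch below. The function name and the in-memory LockSystem are assumptions, not the project's actual code:

package webdav

import (
	"net/http"

	"git.kmsign.ru/royalcat/tstor/src/host/vfs"
	"golang.org/x/net/webdav"
)

// newHandlerSketch is illustrative only: it wraps the *WebDAV filesystem
// defined above in x/net/webdav's Handler so it can serve HTTP requests.
func newHandlerSketch(fs vfs.Filesystem) http.Handler {
	return &webdav.Handler{
		FileSystem: newFS(fs),
		LockSystem: webdav.NewMemLS(),
	}
}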