small refactor*

royalcat 2025-03-22 08:49:14 +04:00
parent b6b541e050
commit 24a4d30275
232 changed files with 2164 additions and 1906 deletions

@@ -0,0 +1,71 @@
package ioutils

import (
	"context"
	"errors"
	"io"
	"sync"

	"github.com/royalcat/ctxio"
)
type FileReader interface {
	ctxio.ReaderAt
	ctxio.Reader
	ctxio.Closer
}
type CacheReader struct {
	m sync.Mutex

	// to is the number of bytes already pulled from the source reader,
	// i.e. how much data is buffered in fr.
	to int64
	fr *FileBuffer
	tr ctxio.Reader
}

var _ FileReader = (*CacheReader)(nil)

// NewCacheReader wraps a sequential reader and caches everything it reads
// in memory, making random access via ReadAt possible.
func NewCacheReader(r ctxio.Reader) (FileReader, error) {
	fr := NewFileBuffer(nil)
	tr := ctxio.TeeReader(r, fr)
	return &CacheReader{fr: fr, tr: tr}, nil
}
func (dtr *CacheReader) ReadAt(ctx context.Context, p []byte, off int64) (int, error) {
	dtr.m.Lock()
	defer dtr.m.Unlock()
	tb := off + int64(len(p))
	// Make sure at least tb bytes have been buffered before serving the read;
	// the bytes pulled through the tee reader are discarded here but land in fr.
	if tb > dtr.to {
		w, err := ctxio.CopyN(ctx, ctxio.Discard, dtr.tr, tb-dtr.to)
		dtr.to += w
		if err != nil && err != io.EOF {
			return 0, err
		}
	}
	return dtr.fr.ReadAt(ctx, p, off)
}
func (dtr *CacheReader) Read(ctx context.Context, p []byte) (n int, err error) {
	dtr.m.Lock()
	defer dtr.m.Unlock()
	// Read directly through the tee reader so the data is cached in fr as a side effect.
	n, err = dtr.tr.Read(ctx, p)
	dtr.to += int64(n)
	return
}
func (dtr *CacheReader) Close(ctx context.Context) error {
	frcloser := dtr.fr.Close(ctx)

	var closeerr error
	if rc, ok := dtr.tr.(ctxio.ReadCloser); ok {
		closeerr = rc.Close(ctx)
	}

	return errors.Join(frcloser, closeerr)
}
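
A minimal usage sketch for CacheReader, assuming only the code above; the module import path is hypothetical, and a FileBuffer stands in for the sequential source reader purely to keep the example self-contained.

package main

import (
	"context"
	"fmt"

	"example.com/tstor/pkg/ioutils" // hypothetical import path for the package above
)

func main() {
	ctx := context.Background()

	// Any ctxio.Reader can be the source; a FileBuffer is used here only for illustration.
	src := ioutils.NewFileBuffer([]byte("hello, cached world"))

	cr, err := ioutils.NewCacheReader(src)
	if err != nil {
		panic(err)
	}
	defer cr.Close(ctx)

	// ReadAt past the buffered position makes CacheReader pull and cache
	// bytes from the source until off+len(p) is covered.
	p := make([]byte, 5)
	n, err := cr.ReadAt(ctx, p, 7)
	fmt.Println(n, err, string(p[:n])) // 5 <nil> cache
}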

@@ -0,0 +1,72 @@
package ioutils

import (
	"context"
	"io"
	"os"
	"path/filepath"
	"sync"

	"github.com/royalcat/ctxio"
)
type DiskCacheReader struct {
	m sync.Mutex

	// to is the number of bytes already pulled from the source reader,
	// i.e. how much data is buffered in the temporary file fr.
	to int64
	fr *os.File
	tr ctxio.Reader
}

var _ ctxio.ReaderAt = (*DiskCacheReader)(nil)
var _ ctxio.Reader = (*DiskCacheReader)(nil)
var _ ctxio.Closer = (*DiskCacheReader)(nil)

// NewDiskCacheReader wraps a sequential reader and caches everything it reads
// in a temporary file, making random access via ReadAt possible.
func NewDiskCacheReader(r ctxio.Reader) (*DiskCacheReader, error) {
	tempDir, err := os.MkdirTemp("/tmp", "tstor")
	if err != nil {
		return nil, err
	}
	fr, err := os.CreateTemp(tempDir, "dtb_tmp")
	if err != nil {
		return nil, err
	}
	tr := ctxio.TeeReader(r, ctxio.WrapIoWriter(fr))
	return &DiskCacheReader{fr: fr, tr: tr}, nil
}
func (dtr *DiskCacheReader) ReadAt(ctx context.Context, p []byte, off int64) (int, error) {
	dtr.m.Lock()
	defer dtr.m.Unlock()
	tb := off + int64(len(p))
	// Make sure the temp file holds at least tb bytes before serving the read.
	if tb > dtr.to {
		w, err := ctxio.CopyN(ctx, ctxio.Discard, dtr.tr, tb-dtr.to)
		dtr.to += w
		if err != nil && err != io.EOF {
			return 0, err
		}
	}
	return dtr.fr.ReadAt(p, off)
}
func (dtr *DiskCacheReader) Read(ctx context.Context, p []byte) (n int, err error) {
	dtr.m.Lock()
	defer dtr.m.Unlock()
	// Read directly through the tee reader so the data is cached on disk as a side effect.
	n, err = dtr.tr.Read(ctx, p)
	dtr.to += int64(n)
	return
}
func (dtr *DiskCacheReader) Close(ctx context.Context) error {
	if err := dtr.fr.Close(); err != nil {
		return err
	}
	if err := os.Remove(dtr.fr.Name()); err != nil {
		return err
	}
	// Also remove the per-reader temp directory created in NewDiskCacheReader.
	return os.Remove(filepath.Dir(dtr.fr.Name()))
}
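
A similar sketch for DiskCacheReader, under the same assumptions (hypothetical import path, FileBuffer as the stand-in source); the main difference is that the cache lives in a temp file that Close removes.

package main

import (
	"context"
	"fmt"

	"example.com/tstor/pkg/ioutils" // hypothetical import path for the package above
)

func main() {
	ctx := context.Background()

	src := ioutils.NewFileBuffer([]byte("spilled to a temp file")) // stand-in ctxio.Reader

	dcr, err := ioutils.NewDiskCacheReader(src)
	if err != nil {
		panic(err)
	}
	// Close also deletes the temporary file backing the cache.
	defer dcr.Close(ctx)

	p := make([]byte, 4)
	n, err := dcr.ReadAt(ctx, p, 13)
	fmt.Println(n, err, string(p[:n])) // 4 <nil> temp
}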

@@ -0,0 +1,205 @@
package ioutils

import (
	"bytes"
	"context"
	"errors"
	"io"
	"os"
	"sync"

	"github.com/royalcat/ctxio"
)
// FileBuffer implements the common file interfaces over an in-memory buffer.
// Its main purpose is to serve as an in-memory replacement for a file.
type FileBuffer struct {
	// buff is the backing buffer
	buff *bytes.Buffer
	// index is the current read/write position in the buffer
	index int64

	isClosed bool
	mu       sync.RWMutex
}

var _ FileReader = (*FileBuffer)(nil)
var _ ctxio.Writer = (*FileBuffer)(nil)

// NewFileBuffer returns a new Buffer populated with b.
func NewFileBuffer(b []byte) *FileBuffer {
	return &FileBuffer{buff: bytes.NewBuffer(b)}
}
// NewFileBufferFromReader is a convenience constructor that returns a new
// populated Buffer whose contents are sourced from the supplied ctxio.Reader
// by loading it entirely into memory.
func NewFileBufferFromReader(ctx context.Context, reader ctxio.Reader) (*FileBuffer, error) {
	data, err := ctxio.ReadAll(ctx, reader)
	if err != nil {
		return nil, err
	}
	return NewFileBuffer(data), nil
}

// NewFileBufferFromIoReader is a convenience constructor that returns a new
// populated Buffer whose contents are sourced from the supplied io.Reader
// by loading it entirely into memory.
func NewFileBufferFromIoReader(reader io.Reader) (*FileBuffer, error) {
	data, err := io.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	return NewFileBuffer(data), nil
}
// Bytes returns the bytes available until the end of the buffer.
func (f *FileBuffer) Bytes() []byte {
	f.mu.RLock()
	defer f.mu.RUnlock()
	if f.isClosed || f.index >= int64(f.buff.Len()) {
		return []byte{}
	}
	return bytes.Clone(f.buff.Bytes()[f.index:])
}

// String implements the Stringer interface
func (f *FileBuffer) String() string {
	f.mu.RLock()
	defer f.mu.RUnlock()
	if f.isClosed || f.index >= int64(f.buff.Len()) {
		return ""
	}
	return string(f.buff.Bytes()[f.index:])
}
// Read implements io.Reader https://golang.org/pkg/io/#Reader
// Read reads up to len(p) bytes into p. It returns the number of bytes read (0 <= n <= len(p))
// and any error encountered. Even if Read returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes, Read conventionally
// returns what is available instead of waiting for more.
// When Read encounters an error or end-of-file condition after successfully reading n > 0 bytes,
// it returns the number of bytes read. It may return the (non-nil) error from the same call or
// return the error (and n == 0) from a subsequent call. An instance of this general case is
// that a Reader returning a non-zero number of bytes at the end of the input stream may return
// either err == EOF or err == nil. The next Read should return 0, EOF.
func (f *FileBuffer) Read(ctx context.Context, b []byte) (n int, err error) {
	// Read advances f.index, so it needs the write lock.
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.isClosed {
		return 0, os.ErrClosed
	}
	if len(b) == 0 {
		return 0, nil
	}
	if f.index >= int64(f.buff.Len()) {
		return 0, io.EOF
	}
	n, err = bytes.NewBuffer(f.buff.Bytes()[f.index:]).Read(b)
	f.index += int64(n)
	return n, err
}
// ReadAt implements io.ReaderAt https://golang.org/pkg/io/#ReaderAt
// ReadAt reads len(p) bytes into p starting at offset off in the underlying input source.
// It returns the number of bytes read (0 <= n <= len(p)) and any error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error explaining why more bytes were not returned.
// In this respect, ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch space during the call.
// If some data is available but not len(p) bytes, ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the input source,
// ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset,
// ReadAt should not affect nor be affected by the underlying seek offset.
// Clients of ReadAt can execute parallel ReadAt calls on the same input source.
func (f *FileBuffer) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
	f.mu.RLock()
	defer f.mu.RUnlock()
	if f.isClosed {
		return 0, os.ErrClosed
	}
	if off < 0 {
		return 0, errors.New("filebuffer.ReadAt: negative offset")
	}
	reqLen := len(p)
	buffLen := int64(f.buff.Len())
	if off >= buffLen {
		return 0, io.EOF
	}
	n = copy(p, f.buff.Bytes()[off:])
	if n < reqLen {
		err = io.EOF
	}
	return n, err
}
// Write implements io.Writer https://golang.org/pkg/io/#Writer
// by appending the passed bytes to the buffer unless the buffer is closed or the index is negative.
func (f *FileBuffer) Write(ctx context.Context, p []byte) (n int, err error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.isClosed {
		return 0, os.ErrClosed
	}
	if f.index < 0 {
		return 0, io.EOF
	}
	// We might have rewound; drop everything past the current index so the
	// write appends at the cursor position.
	idx := int(f.index)
	if idx < f.buff.Len() {
		f.buff.Truncate(idx)
	}
	n, err = f.buff.Write(p)
	f.index += int64(n)
	return n, err
}
// Seek implements io.Seeker https://golang.org/pkg/io/#Seeker
func (f *FileBuffer) Seek(offset int64, whence int) (idx int64, err error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.isClosed {
		return 0, os.ErrClosed
	}
	var abs int64
	switch whence {
	case io.SeekStart:
		abs = offset
	case io.SeekCurrent:
		abs = f.index + offset
	case io.SeekEnd:
		abs = int64(f.buff.Len()) + offset
	default:
		return 0, errors.New("filebuffer.Seek: invalid whence")
	}
	if abs < 0 {
		return 0, errors.New("filebuffer.Seek: negative position")
	}
	f.index = abs
	return abs, nil
}
// Close implements io.Closer https://golang.org/pkg/io/#Closer
// It closes the buffer, rendering it unusable for I/O. It returns an error, if any.
func (f *FileBuffer) Close(ctx context.Context) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.isClosed = true
	f.buff = nil
	return nil
}
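
A short sketch of FileBuffer acting as an in-memory file, again with a hypothetical import path: write, seek back, read.

package main

import (
	"context"
	"fmt"
	"io"

	"example.com/tstor/pkg/ioutils" // hypothetical import path for the package above
)

func main() {
	ctx := context.Background()

	fb := ioutils.NewFileBuffer(nil)

	// Write appends at the current index, like a file cursor.
	if _, err := fb.Write(ctx, []byte("in-memory file")); err != nil {
		panic(err)
	}

	// Rewind, then read back through the context-aware Reader.
	if _, err := fb.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}

	p := make([]byte, 9)
	n, _ := fb.Read(ctx, p)
	fmt.Println(string(p[:n])) // in-memory

	_ = fb.Close(ctx)
}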

@@ -0,0 +1,49 @@
package ioutils

import (
	"context"
	"sync"

	"github.com/royalcat/ctxio"
)

// ReaderReaderAtWrapper adapts a ctxio.ReaderAt into a sequential ctxio.Reader
// by tracking its own read offset.
type ReaderReaderAtWrapper struct {
	mu     sync.Mutex
	rat    ctxio.ReaderAt
	offset int64
}

func NewReaderReaderAtWrapper(rat ctxio.ReaderAt) *ReaderReaderAtWrapper {
	return &ReaderReaderAtWrapper{
		rat: rat,
	}
}

var _ ctxio.Reader = (*ReaderReaderAtWrapper)(nil)
var _ ctxio.ReaderAt = (*ReaderReaderAtWrapper)(nil)
var _ ctxio.Closer = (*ReaderReaderAtWrapper)(nil)
// Read implements Reader.
func (r *ReaderReaderAtWrapper) Read(ctx context.Context, p []byte) (n int, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	n, err = r.rat.ReadAt(ctx, p, r.offset)
	r.offset += int64(n)
	return n, err
}

// ReadAt implements ReaderAt.
func (r *ReaderReaderAtWrapper) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
	return r.rat.ReadAt(ctx, p, off)
}

// Close implements Closer.
func (r *ReaderReaderAtWrapper) Close(ctx context.Context) error {
	if c, ok := r.rat.(ctxio.Closer); ok {
		return c.Close(ctx)
	}
	return nil
}
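
A sketch of ReaderReaderAtWrapper turning random-access reads into a sequential stream (hypothetical import path; a FileBuffer stands in for the ctxio.ReaderAt).

package main

import (
	"context"
	"fmt"

	"github.com/royalcat/ctxio"

	"example.com/tstor/pkg/ioutils" // hypothetical import path for the package above
)

func main() {
	ctx := context.Background()

	var rat ctxio.ReaderAt = ioutils.NewFileBuffer([]byte("abcdef"))
	r := ioutils.NewReaderReaderAtWrapper(rat)
	defer r.Close(ctx)

	// Sequential reads advance the wrapper's internal offset...
	p := make([]byte, 3)
	n, _ := r.Read(ctx, p)
	fmt.Println(string(p[:n])) // abc
	n, _ = r.Read(ctx, p)
	fmt.Println(string(p[:n])) // def

	// ...while ReadAt remains position-independent.
	n, _ = r.ReadAt(ctx, p, 1)
	fmt.Println(string(p[:n])) // bcd
}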

@@ -0,0 +1,104 @@
package ioutils

import (
	"context"
	"errors"
	"io"
	"sync"

	"github.com/royalcat/ctxio"
)
// ioSeekerWrapper adapts a ctxio.ReaderAt of a known size into a plain
// io.ReadSeeker by pinning a context and tracking a position.
type ioSeekerWrapper struct {
	ctx context.Context

	mu   sync.Mutex
	pos  int64
	size int64
	r    ctxio.ReaderAt
}

func WrapIoReadSeeker(ctx context.Context, r ctxio.ReaderAt, size int64) io.ReadSeeker {
	return &ioSeekerWrapper{
		ctx:  ctx,
		r:    r,
		size: size,
	}
}

func (r *ioSeekerWrapper) Seek(offset int64, whence int) (int64, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var pos int64
	switch whence {
	case io.SeekStart:
		pos = offset
	case io.SeekCurrent:
		pos = r.pos + offset
	case io.SeekEnd:
		pos = r.size + offset
	default:
		return 0, errors.New("ioutils: Seek: invalid whence")
	}
	if pos < 0 {
		return 0, errors.New("ioutils: Seek: negative position")
	}
	r.pos = pos
	return r.pos, nil
}
func (r *ioSeekerWrapper) Read(p []byte) (int, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	n, err := r.r.ReadAt(r.ctx, p, r.pos)
	r.pos += int64(n)
	return n, err
}
var _ io.ReadSeekCloser = (*ioSeekerCloserWrapper)(nil)

type ioSeekerCloserWrapper struct {
	ctx context.Context

	mu   sync.Mutex
	pos  int64
	size int64
	r    FileReader
}

// IoReadSeekCloserWrapper is like WrapIoReadSeeker, but it also forwards
// Close to the wrapped FileReader.
func IoReadSeekCloserWrapper(ctx context.Context, r FileReader, size int64) io.ReadSeekCloser {
	return &ioSeekerCloserWrapper{
		ctx:  ctx,
		r:    r,
		size: size,
	}
}
func (r *ioSeekerCloserWrapper) Seek(offset int64, whence int) (int64, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var pos int64
	switch whence {
	case io.SeekStart:
		pos = offset
	case io.SeekCurrent:
		pos = r.pos + offset
	case io.SeekEnd:
		pos = r.size + offset
	default:
		return 0, errors.New("ioutils: Seek: invalid whence")
	}
	if pos < 0 {
		return 0, errors.New("ioutils: Seek: negative position")
	}
	r.pos = pos
	return r.pos, nil
}
func (r *ioSeekerCloserWrapper) Read(p []byte) (int, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	n, err := r.r.ReadAt(r.ctx, p, r.pos)
	r.pos += int64(n)
	return n, err
}

// Close implements io.ReadSeekCloser.
func (r *ioSeekerCloserWrapper) Close() error {
	return r.r.Close(r.ctx)
}
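
Finally, a sketch of WrapIoReadSeeker bridging a ctxio.ReaderAt into a plain io.ReadSeeker for context-unaware callers (hypothetical import path; a FileBuffer serves as the stand-in ReaderAt).

package main

import (
	"context"
	"fmt"
	"io"

	"example.com/tstor/pkg/ioutils" // hypothetical import path for the package above
)

func main() {
	ctx := context.Background()

	content := []byte("adapted for io")
	fb := ioutils.NewFileBuffer(content) // any ctxio.ReaderAt with a known size

	// The wrapper pins the context and size, exposing a plain io.ReadSeeker.
	rs := ioutils.WrapIoReadSeeker(ctx, fb, int64(len(content)))

	if _, err := rs.Seek(8, io.SeekStart); err != nil {
		panic(err)
	}
	data, err := io.ReadAll(rs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // for io
}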