package archive

import (
	"context"
	"errors"
	"fmt"
	"io"
	"sync"

	"git.kmsign.ru/royalcat/tstor/server/src/vfs"
	"github.com/hashicorp/golang-lru/arc/v2"
	"github.com/royalcat/ctxio"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// TODO: rework the cache into a daemon

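// A shared ARC cache of fixed-size blocks allows random access on top of
// sequential archive readers: cached blocks are returned directly, and only
// offsets behind the current reader position force the file to be reopened.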
const blockSize = 1024 * 16              // 16KB
const cacheSize = 1024 * 1024 * 1024 * 4 // 4GB of total usage
const defaultBlockCount = cacheSize / blockSize

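// archiveFileReaderFactory opens a new sequential reader for a file inside an
// archive, positioned at the start of the file.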
type archiveFileReaderFactory func(ctx context.Context) (ctxio.ReadCloser, error)

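// archiveFileIndex identifies a single file inside an archive by the archive
// hash and the file name within it.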
type archiveFileIndex struct {
	archiveHash vfs.Hash
	filename    string
}

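// blockIndex addresses one cached block: the file it belongs to and the
// block-aligned offset of the block within that file.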
type blockIndex struct {
	fileIndex archiveFileIndex
	off       int64
}

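// block is one fixed-size chunk of file data; len is the number of valid bytes
// in data and is smaller than blockSize only for the last block of a file.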
type block struct {
	data [blockSize]byte
	len  int
}

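// newRandomReaderFromLinear wraps a sequential (linear) reader factory into a
// ctxio.ReaderAt. Blocks already read are served from blockCache; anything else
// is read forward from the underlying reader. A hypothetical caller might look
// like this (cache construction and identifiers are illustrative, not part of
// this package):
//
//	cache, _ := arc.NewARC[blockIndex, block](defaultBlockCount)
//	r := newRandomReaderFromLinear(cache, idx, fileSize, openFile)
//	n, err := r.ReadAt(ctx, buf, 4096)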
func newRandomReaderFromLinear(blockCache *arc.ARCCache[blockIndex, block], index archiveFileIndex, size int64, readerFactory archiveFileReaderFactory) *randomReaderFromLinear {
	return &randomReaderFromLinear{
		blockCache:    blockCache,
		index:         index,
		size:          size,
		readerFactory: readerFactory,
	}
}

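// randomReaderFromLinear adapts a strictly sequential reader to random access.
// Reads behind the highest offset reached so far are served from the shared
// block cache; reads past it advance the underlying reader, caching every
// block along the way. Seeking backwards past the cache forces the file to be
// reopened via readerFactory and re-read from the start.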
type randomReaderFromLinear struct {
	blockCache *arc.ARCCache[blockIndex, block]

	index         archiveFileIndex         // identity of the file inside the archive
	readerFactory archiveFileReaderFactory // opens a fresh sequential reader at the start of the file
	reader        ctxio.ReadCloser         // current sequential reader, nil until first use
	readen        int64                    // bytes consumed from the current reader, i.e. its offset in the file
	readerMutex   sync.Mutex               // guards reader and readen
	size          int64                    // total file size in bytes
	closed        bool                     // set by Close; subsequent reads fail
}

var _ ctxio.ReaderAt = (*randomReaderFromLinear)(nil)
var _ ctxio.Closer = (*randomReaderFromLinear)(nil)

// ReadAt implements ctxio.ReaderAt. A single call reads from at most one
// cached block, so it may return fewer bytes than len(p) when the requested
// range crosses a block boundary.
func (a *randomReaderFromLinear) ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) {
	ctx, span := tracer.Start(ctx, "archive.RandomReader.ReadAt")
	defer span.End()

	if a.closed {
		return 0, errors.New("reader is closed")
	}

	if off >= a.size {
		return 0, ctxio.EOF
	}

	alignedOff := (off / blockSize) * blockSize // start of the block the requested offset falls into
	bI := blockIndex{fileIndex: a.index, off: alignedOff}

	block, err := a.readBlock(ctx, bI)
	if err != nil && err != ctxio.EOF {
		return 0, err
	}

	if off-alignedOff >= int64(block.len) {
		return 0, ctxio.EOF
	}

	// copy from the requested offset up to the end of the block; err may still be EOF for the last block
	return copy(p, block.data[off-alignedOff:block.len]), err
}

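// readBlock returns the block at the block-aligned offset bI.off. If it is not
// cached, the underlying reader is advanced (or reopened, when the target is
// behind the current position) and every block read on the way is cached.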
func (a *randomReaderFromLinear) readBlock(ctx context.Context, bI blockIndex) (block, error) {
	ctx, span := tracer.Start(ctx, "archive.RandomReader.readBlock")
	defer span.End()

	// check block in cache before locking
	if b, ok := a.blockCache.Get(bI); ok && b.len != 0 {
		return b, nil
	}

	a.readerMutex.Lock()
	defer a.readerMutex.Unlock()

	if b, ok := a.blockCache.Get(bI); ok && b.len != 0 { // check again, maybe another goroutine already read this block
		return b, nil
	}

	if a.reader == nil || a.readen > bI.off {
		span.AddEvent("reader not valid, creating new reader", trace.WithAttributes(
			attribute.Bool("reader_initialized", a.reader != nil),
			attribute.Int64("readen", a.readen),
			attribute.Int64("target_offset", bI.off),
		))

		if a.reader != nil {
			if err := a.reader.Close(ctx); err != nil {
				return block{}, fmt.Errorf("failed to close previous reader: %w", err)
			}
		}

		var err error
		a.reader, err = a.readerFactory(context.TODO())
		if err != nil {
			return block{}, err
		}
		a.readen = 0
	}

	// read forward block by block, caching everything until the requested block is reached
	for off := a.readen; off <= bI.off; off += blockSize {
		// TODO sync.Pool ?
		buf := [blockSize]byte{}

		// fill the whole block: a single Read may return fewer bytes than asked
		// for, and short blocks in the middle of the file would misalign the cache
		n := 0
		var err error
		for n < blockSize && err == nil {
			var nn int
			nn, err = a.reader.Read(ctx, buf[n:])
			n += nn
		}
		if err != nil && err != ctxio.EOF {
			return block{}, err
		}
		a.readen += int64(n)

		if n == 0 {
			return block{}, io.EOF
		}

		a.blockCache.Add(blockIndex{bI.fileIndex, off}, block{len: n, data: buf})

		if off == bI.off {
			return block{len: n, data: buf}, err
		}

		if n < blockSize && errors.Is(err, ctxio.EOF) {
			// end of file reached before the requested block
			return block{}, err
		}
	}

	return block{}, io.EOF
}

// Close implements ctxio.Closer. It closes the underlying reader and evicts
// all blocks belonging to this file from the shared cache.
func (a *randomReaderFromLinear) Close(ctx context.Context) error {
	// take the reader mutex so Close does not race with an in-flight readBlock
	a.readerMutex.Lock()
	defer a.readerMutex.Unlock()

	if a.closed {
		return nil
	}
	a.closed = true

	var errs []error

	if a.reader != nil {
		errs = append(errs, a.reader.Close(ctx))
	}

	for _, key := range a.blockCache.Keys() {
		if key.fileIndex == a.index {
			a.blockCache.Remove(key)
		}
	}

	return errors.Join(errs...)
}