node build fixed

ra_ma
2025-09-20 14:08:38 +01:00
parent c6ebbe069d
commit 3d298fa434
1516 changed files with 535727 additions and 2 deletions


@@ -0,0 +1,388 @@
package httputil
import (
"context"
"errors"
"io"
"os"
"sync"
"time"
"github.com/rs/zerolog"
)
type piece struct {
start int64
end int64
}
// FileStream saves an HTTP file being streamed to disk.
// It allows multiple readers to read the file concurrently.
// It is fed the stream from the HTTP response body and simultaneously writes
// it to disk and to the HTTP writer.
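//
// A minimal usage sketch (hypothetical caller: resp, w, ctx, and logger are
// assumptions, not part of this package):
//
//	fs, err := NewFileStream(ctx, logger, resp.ContentLength)
//	if err != nil { /* handle */ }
//	defer fs.Close()
//	go func() { _ = fs.WriteAndFlush(resp.Body, w, 0) }() // feed the stream once
//	r, _ := fs.NewReader()                                // extra concurrent reader
//	defer r.Close()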
type FileStream struct {
length int64
file *os.File
closed bool
mu sync.Mutex
pieces map[int64]*piece
readers []FileStreamReader
readersMu sync.Mutex
ctx context.Context
cancel context.CancelFunc
logger *zerolog.Logger
}
type FileStreamReader interface {
io.ReadSeekCloser
}
// NewFileStream creates a new FileStream backed by a temporary file
func NewFileStream(ctx context.Context, logger *zerolog.Logger, contentLength int64) (*FileStream, error) {
file, err := os.CreateTemp("", "filestream_*.tmp")
if err != nil {
return nil, err
}
// Pre-allocate the file to the expected content length
if contentLength > 0 {
if err := file.Truncate(contentLength); err != nil {
_ = file.Close()
_ = os.Remove(file.Name())
return nil, err
}
}
ctx, cancel := context.WithCancel(ctx)
return &FileStream{
file: file,
ctx: ctx,
cancel: cancel,
logger: logger,
pieces: make(map[int64]*piece),
length: contentLength,
}, nil
}
// WriteAndFlush copies src to the file at the given offset while
// simultaneously writing and flushing each chunk to the HTTP writer dst
func (fs *FileStream) WriteAndFlush(src io.Reader, dst io.Writer, offset int64) error {
fs.mu.Lock()
if fs.closed {
fs.mu.Unlock()
return io.ErrClosedPipe
}
fs.mu.Unlock()
buffer := make([]byte, 32*1024) // 32KB buffer
currentOffset := offset
for {
select {
case <-fs.ctx.Done():
return fs.ctx.Err()
default:
}
n, readErr := src.Read(buffer)
if n > 0 {
// Write to file
fs.mu.Lock()
if !fs.closed {
if _, err := fs.file.WriteAt(buffer[:n], currentOffset); err != nil {
fs.mu.Unlock()
return err
}
// Update pieces map
pieceEnd := currentOffset + int64(n) - 1
fs.updatePieces(currentOffset, pieceEnd)
}
fs.mu.Unlock()
// Write to HTTP response
if _, err := dst.Write(buffer[:n]); err != nil {
return err
}
// Flush if possible
if flusher, ok := dst.(interface{ Flush() }); ok {
flusher.Flush()
}
currentOffset += int64(n)
}
if readErr != nil {
if readErr == io.EOF {
break
}
return readErr
}
}
// Sync file to ensure data is written
fs.mu.Lock()
if !fs.closed {
_ = fs.file.Sync()
}
fs.mu.Unlock()
return nil
}
// updatePieces merges the new piece with existing pieces
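// For example, with existing pieces {0-99} and {200-299}, a call with
// start=100, end=199 bridges the gap, leaving the single piece {0-299};
// a call with start=400, end=499 would instead add a separate piece.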
func (fs *FileStream) updatePieces(start, end int64) {
newPiece := &piece{start: start, end: end}
// Find overlapping or adjacent pieces
var toMerge []*piece
var toDelete []int64
for key, p := range fs.pieces {
if p.start <= end+1 && p.end >= start-1 {
toMerge = append(toMerge, p)
toDelete = append(toDelete, key)
}
}
// Merge all overlapping pieces
for _, p := range toMerge {
if p.start < newPiece.start {
newPiece.start = p.start
}
if p.end > newPiece.end {
newPiece.end = p.end
}
}
// Delete old pieces
for _, key := range toDelete {
delete(fs.pieces, key)
}
// Add the merged piece
fs.pieces[newPiece.start] = newPiece
}
// isRangeAvailable checks if a given range is completely downloaded
func (fs *FileStream) isRangeAvailable(start, end int64) bool {
for _, p := range fs.pieces {
if p.start <= start && p.end >= end {
return true
}
}
return false
}
// NewReader creates a new FileStreamReader for concurrent reading
func (fs *FileStream) NewReader() (FileStreamReader, error) {
fs.mu.Lock()
defer fs.mu.Unlock()
if fs.closed {
return nil, io.ErrClosedPipe
}
reader := &fileStreamReader{
fs: fs,
file: fs.file,
offset: 0,
}
fs.readersMu.Lock()
fs.readers = append(fs.readers, reader)
fs.readersMu.Unlock()
return reader, nil
}
// Close closes the FileStream and cleans up resources
func (fs *FileStream) Close() error {
fs.mu.Lock()
defer fs.mu.Unlock()
if fs.closed {
return nil
}
fs.closed = true
fs.cancel()
// Close all readers asynchronously; reader.Close re-acquires readersMu, so
// calling it inline while holding the lock below would deadlock
fs.readersMu.Lock()
for _, reader := range fs.readers {
go reader.Close()
}
fs.readers = nil
fs.readersMu.Unlock()
// Remove the temp file and close
fileName := fs.file.Name()
_ = fs.file.Close()
_ = os.Remove(fileName)
return nil
}
// Length returns the expected total length of the stream
func (fs *FileStream) Length() int64 {
return fs.length
}
// fileStreamReader implements the FileStreamReader interface
type fileStreamReader struct {
fs *FileStream
file *os.File
offset int64
closed bool
mu sync.Mutex
}
// Read reads data from the file stream, blocking if data is not yet available
func (r *fileStreamReader) Read(p []byte) (int, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.closed {
return 0, io.ErrClosedPipe
}
readSize := int64(len(p))
readEnd := r.offset + readSize - 1
if readEnd >= r.fs.length {
readEnd = r.fs.length - 1
readSize = r.fs.length - r.offset
if readSize <= 0 {
return 0, io.EOF
}
}
for {
select {
case <-r.fs.ctx.Done():
return 0, r.fs.ctx.Err()
default:
}
r.fs.mu.Lock()
streamClosed := r.fs.closed
// Check if the range we want to read is available
available := r.fs.isRangeAvailable(r.offset, readEnd)
// If not fully available, check what we can read
var actualReadSize int64 = readSize
if !available {
// Find the largest available chunk starting from our offset
var maxRead int64 = 0
for _, piece := range r.fs.pieces {
if piece.start <= r.offset && piece.end >= r.offset {
chunkEnd := piece.end
if chunkEnd >= readEnd {
maxRead = readSize
} else {
maxRead = chunkEnd - r.offset + 1
}
break
}
}
actualReadSize = maxRead
}
r.fs.mu.Unlock()
// If we have some data to read, or if stream is closed, attempt the read
if available || actualReadSize > 0 || streamClosed {
var n int
var err error
if actualReadSize > 0 {
n, err = r.file.ReadAt(p[:actualReadSize], r.offset)
} else if streamClosed {
// Stream is closed and no piece covers this offset; the temp file may
// already be closed, so report EOF instead of attempting a read
return 0, io.EOF
}
if n > 0 {
r.offset += int64(n)
}
// If we read less than requested and stream is closed, return EOF
if n < len(p) && streamClosed && r.offset >= r.fs.length {
if err == nil {
err = io.EOF
}
}
// If no data was read and stream is closed, return EOF
if n == 0 && streamClosed {
return 0, io.EOF
}
// Return what we got, even if it's 0 bytes (this prevents hanging)
return n, err
}
// Wait a bit before checking again
r.mu.Unlock()
select {
case <-r.fs.ctx.Done():
r.mu.Lock()
return 0, r.fs.ctx.Err()
case <-time.After(10 * time.Millisecond):
r.mu.Lock()
}
}
}
// Seek sets the offset for the next Read
func (r *fileStreamReader) Seek(offset int64, whence int) (int64, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.closed {
return 0, io.ErrClosedPipe
}
var newOffset int64
switch whence {
case io.SeekStart:
newOffset = offset
case io.SeekCurrent:
newOffset = r.offset + offset
case io.SeekEnd:
r.fs.mu.Lock()
newOffset = r.fs.length + offset
r.fs.mu.Unlock()
default:
return 0, errors.New("invalid whence")
}
// Reject negative positions instead of silently clamping, per io.Seeker
if newOffset < 0 {
return 0, errors.New("httputil: negative seek position")
}
r.offset = newOffset
return r.offset, nil
}
// Close closes the reader
func (r *fileStreamReader) Close() error {
r.mu.Lock()
defer r.mu.Unlock()
if r.closed {
return nil
}
r.closed = true
r.fs.readersMu.Lock()
for i, reader := range r.fs.readers {
if reader == r {
r.fs.readers = append(r.fs.readers[:i], r.fs.readers[i+1:]...)
break
}
}
r.fs.readersMu.Unlock()
return nil
}


@@ -0,0 +1,106 @@
package httputil
import (
"errors"
"fmt"
"net/textproto"
"strconv"
"strings"
)
// Range specifies the byte range to be sent to the client.
type Range struct {
Start int64
Length int64
}
// ContentRange returns Content-Range header value.
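// For example, Range{Start: 0, Length: 500}.ContentRange(1234) returns
// "bytes 0-499/1234".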
func (r Range) ContentRange(size int64) string {
return fmt.Sprintf("bytes %d-%d/%d", r.Start, r.Start+r.Length-1, size)
}
var (
// ErrNoOverlap is returned by ParseRange if first-byte-pos of
// all of the byte-range-spec values is greater than the content size.
ErrNoOverlap = errors.New("invalid range: failed to overlap")
// ErrInvalid is returned by ParseRange on invalid input.
ErrInvalid = errors.New("invalid range")
)
// ParseRange parses a Range header string as per RFC 7233.
// ErrNoOverlap is returned if none of the ranges overlap.
// ErrInvalid is returned if s is an invalid range.
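//
// For example, for a 1000-byte resource:
//
//	ParseRange("bytes=0-499,900-", 1000)
//
// returns []Range{{Start: 0, Length: 500}, {Start: 900, Length: 100}}.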
func ParseRange(s string, size int64) ([]Range, error) {
if s == "" {
return nil, nil // header not present
}
const b = "bytes="
if !strings.HasPrefix(s, b) {
return nil, ErrInvalid
}
var ranges []Range
noOverlap := false
for _, ra := range strings.Split(s[len(b):], ",") {
ra = textproto.TrimString(ra)
if ra == "" {
continue
}
i := strings.Index(ra, "-")
if i < 0 {
return nil, ErrInvalid
}
start, end := textproto.TrimString(ra[:i]), textproto.TrimString(ra[i+1:])
var r Range
if start == "" {
// If no start is specified, end specifies the
// range start relative to the end of the file,
// and we are dealing with <suffix-length>
// which has to be a non-negative integer as per
// RFC 7233 Section 2.1 "Byte-Ranges".
if end == "" || end[0] == '-' {
return nil, ErrInvalid
}
i, err := strconv.ParseInt(end, 10, 64)
if i < 0 || err != nil {
return nil, ErrInvalid
}
if i > size {
i = size
}
r.Start = size - i
r.Length = size - r.Start
} else {
i, err := strconv.ParseInt(start, 10, 64)
if err != nil || i < 0 {
return nil, ErrInvalid
}
if i >= size {
// If the range begins after the size of the content,
// then it does not overlap.
noOverlap = true
continue
}
r.Start = i
if end == "" {
// If no end is specified, range extends to end of the file.
r.Length = size - r.Start
} else {
i, err := strconv.ParseInt(end, 10, 64)
if err != nil || r.Start > i {
return nil, ErrInvalid
}
if i >= size {
i = size - 1
}
r.Length = i - r.Start + 1
}
}
ranges = append(ranges, r)
}
if noOverlap && len(ranges) == 0 {
// The specified ranges did not overlap with the content.
return nil, ErrNoOverlap
}
return ranges, nil
}


@@ -0,0 +1,289 @@
package httputil
// Original source: https://github.com/jfbus/httprs/tree/master
/*
Package httprs provides a ReadSeeker for http.Response.Body.
Usage:
resp, err := http.Get(url)
rs := httprs.NewHttpReadSeeker(resp)
defer rs.Close()
io.ReadFull(rs, buf) // reads the first bytes from the response body
rs.Seek(1024, 0) // moves the position, but does no range request
io.ReadFull(rs, buf) // does a range request and reads from the response body
To use a specific http.Client for the additional range requests:
rs := httprs.NewHttpReadSeeker(resp).WithClient(client)
*/
import (
"fmt"
"io"
"net/http"
"seanime/internal/util/limiter"
"strconv"
"strings"
"sync"
)
// HttpReadSeeker implements io.ReadSeeker for HTTP responses
// It allows seeking within an HTTP response by using HTTP Range requests
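// For example, after Seek(1024, io.SeekStart) the current body is closed and
// no request is sent; the next Read issues a new request with the header
// "Range: bytes=1024-" and continues reading from there.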
type HttpReadSeeker struct {
url string // The URL of the resource
client *http.Client // HTTP client to use for requests
resp *http.Response // Current response
offset int64 // Current offset in the resource
size int64 // Size of the resource, -1 if unknown
readBuf []byte // Buffer for reading
readOffset int // Current offset in readBuf
mu sync.Mutex // Mutex for thread safety
rateLimiter *limiter.Limiter // Optional rate limiter set via WithRateLimiter; not applied automatically by this type
}
// NewHttpReadSeeker creates a new HttpReadSeeker from an http.Response
func NewHttpReadSeeker(resp *http.Response) *HttpReadSeeker {
url := ""
if resp.Request != nil {
url = resp.Request.URL.String()
}
size := int64(-1)
if resp.ContentLength > 0 {
size = resp.ContentLength
}
return &HttpReadSeeker{
url: url,
client: http.DefaultClient,
resp: resp,
offset: 0,
size: size,
readBuf: nil,
readOffset: 0,
}
}
// NewHttpReadSeekerFromURL issues a GET request to url and wraps the
// response in a HttpReadSeeker
func NewHttpReadSeekerFromURL(url string) (*HttpReadSeeker, error) {
resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("httprs: failed to get URL %s: %w", url, err)
}
return NewHttpReadSeeker(resp), nil
}
// Read implements io.Reader
func (hrs *HttpReadSeeker) Read(p []byte) (n int, err error) {
hrs.mu.Lock()
defer hrs.mu.Unlock()
// If we have buffered data, read from it first
if hrs.readBuf != nil && hrs.readOffset < len(hrs.readBuf) {
n = copy(p, hrs.readBuf[hrs.readOffset:])
hrs.readOffset += n
hrs.offset += int64(n)
// Clear buffer if we've read it all
if hrs.readOffset >= len(hrs.readBuf) {
hrs.readBuf = nil
hrs.readOffset = 0
}
return n, nil
}
// If we don't have a response or it's been closed, get a new one
if hrs.resp == nil {
if err := hrs.makeRangeRequest(); err != nil {
return 0, err
}
}
// Read from the response body
n, err = hrs.resp.Body.Read(p)
hrs.offset += int64(n)
return n, err
}
// Seek implements io.Seeker
func (hrs *HttpReadSeeker) Seek(offset int64, whence int) (int64, error) {
hrs.mu.Lock()
defer hrs.mu.Unlock()
var newOffset int64
switch whence {
case io.SeekStart:
newOffset = offset
case io.SeekCurrent:
newOffset = hrs.offset + offset
case io.SeekEnd:
if hrs.size < 0 {
// If we don't know the size, we need to determine it
if err := hrs.determineSize(); err != nil {
return hrs.offset, err
}
}
newOffset = hrs.size + offset
default:
return hrs.offset, fmt.Errorf("httprs: invalid whence %d", whence)
}
if newOffset < 0 {
return hrs.offset, fmt.Errorf("httprs: negative position")
}
// If we're just moving the offset without reading, we can skip the request
// We'll make a new request when Read is called
if hrs.resp != nil {
hrs.resp.Body.Close()
hrs.resp = nil
}
hrs.offset = newOffset
hrs.readBuf = nil
hrs.readOffset = 0
return hrs.offset, nil
}
// Close closes the underlying response body
func (hrs *HttpReadSeeker) Close() error {
hrs.mu.Lock()
defer hrs.mu.Unlock()
if hrs.resp != nil {
err := hrs.resp.Body.Close()
hrs.resp = nil
return err
}
return nil
}
// makeRangeRequest makes a new HTTP request with the Range header
func (hrs *HttpReadSeeker) makeRangeRequest() error {
req, err := http.NewRequest("GET", hrs.url, nil)
if err != nil {
return err
}
// Set Range header from current offset
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", hrs.offset))
// Make the request
resp, err := hrs.client.Do(req)
if err != nil {
return err
}
// Check if the server supports range requests
if resp.StatusCode != http.StatusPartialContent && hrs.offset > 0 {
resp.Body.Close()
return fmt.Errorf("httprs: server does not support range requests")
}
// Update our response and offset
if hrs.resp != nil {
hrs.resp.Body.Close()
}
hrs.resp = resp
// Update the size if we get it from Content-Range
if contentRange := resp.Header.Get("Content-Range"); contentRange != "" {
// Format: bytes <start>-<end>/<size>
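// e.g. "bytes 1024-2047/4096" gives a total size of 4096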
parts := strings.Split(contentRange, "/")
if len(parts) > 1 && parts[1] != "*" {
if size, err := strconv.ParseInt(parts[1], 10, 64); err == nil {
hrs.size = size
}
}
} else if resp.ContentLength > 0 {
// If we don't have a Content-Range header but we do have Content-Length,
// then the size is the current offset plus the content length
hrs.size = hrs.offset + resp.ContentLength
}
return nil
}
// determineSize makes a HEAD request to determine the size of the resource
func (hrs *HttpReadSeeker) determineSize() error {
req, err := http.NewRequest("HEAD", hrs.url, nil)
if err != nil {
return err
}
resp, err := hrs.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.ContentLength > 0 {
hrs.size = resp.ContentLength
} else {
// If we still don't know the size, return an error
return fmt.Errorf("httprs: unable to determine resource size")
}
return nil
}
// ReadAt implements io.ReaderAt by seeking to off, reading, and restoring
// the previous offset. Because it repositions the stream, it must not be
// called concurrently with Read or Seek
func (hrs *HttpReadSeeker) ReadAt(p []byte, off int64) (n int, err error) {
// Save current offset
currentOffset := hrs.offset
// Seek to the requested offset
if _, err := hrs.Seek(off, io.SeekStart); err != nil {
return 0, err
}
// io.ReaderAt requires len(p) bytes unless an error explains the shortfall,
// so keep reading until the buffer is full or an error occurs
for n < len(p) && err == nil {
var nn int
nn, err = hrs.Read(p[n:])
n += nn
}
if n == len(p) && err == io.EOF {
err = nil
}
// Restore the original offset
if _, seekErr := hrs.Seek(currentOffset, io.SeekStart); seekErr != nil && err == nil {
err = seekErr
}
return n, err
}
// Size returns the size of the resource, or -1 if unknown
func (hrs *HttpReadSeeker) Size() int64 {
hrs.mu.Lock()
defer hrs.mu.Unlock()
if hrs.size < 0 {
// Try to determine the size
_ = hrs.determineSize()
}
return hrs.size
}
// WithClient sets the http.Client used for subsequent range requests and
// returns the receiver for chaining
func (hrs *HttpReadSeeker) WithClient(client *http.Client) *HttpReadSeeker {
hrs.mu.Lock()
defer hrs.mu.Unlock()
hrs.client = client
return hrs
}
// WithRateLimiter sets an optional rate limiter and returns the receiver for chaining
func (hrs *HttpReadSeeker) WithRateLimiter(rl *limiter.Limiter) *HttpReadSeeker {
hrs.mu.Lock()
defer hrs.mu.Unlock()
hrs.rateLimiter = rl
return hrs
}