node build fixed
This commit is contained in:
2
seanime-2.9.10/internal/mediastream/transcoder/README.md
Normal file
2
seanime-2.9.10/internal/mediastream/transcoder/README.md
Normal file
@@ -0,0 +1,2 @@
|
||||
The transcoder implementation was adapted from [zoriya/Kyoo](https://github.com/zoriya/Kyoo/tree/master/transcoder),
|
||||
licensed under GPL-3.0.
|
||||
@@ -0,0 +1,44 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// AudioStream transcodes a single audio track of a media file into HLS
// segments. It embeds Stream, which supplies the shared segment-scheduling
// machinery; AudioStream only provides the audio-specific ffmpeg arguments,
// output paths, and flags.
type AudioStream struct {
	Stream
	index    int32 // index of the audio track inside the source file (maps to ffmpeg's 0:a:<index>)
	logger   *zerolog.Logger
	settings *Settings
}
|
||||
|
||||
// NewAudioStream creates a new AudioStream for a file, at a given audio index.
|
||||
func NewAudioStream(file *FileStream, idx int32, logger *zerolog.Logger, settings *Settings) *AudioStream {
|
||||
logger.Trace().Str("file", filepath.Base(file.Path)).Int32("idx", idx).Msgf("trancoder: Creating audio stream")
|
||||
ret := new(AudioStream)
|
||||
ret.index = idx
|
||||
ret.logger = logger
|
||||
ret.settings = settings
|
||||
NewStream(fmt.Sprintf("audio %d", idx), file, ret, &ret.Stream, settings, logger)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (as *AudioStream) getOutPath(encoderId int) string {
|
||||
return filepath.Join(as.file.Out, fmt.Sprintf("segment-a%d-%d-%%d.ts", as.index, encoderId))
|
||||
}
|
||||
|
||||
func (as *AudioStream) getFlags() Flags {
|
||||
return AudioF
|
||||
}
|
||||
|
||||
func (as *AudioStream) getTranscodeArgs(segments string) []string {
|
||||
return []string{
|
||||
"-map", fmt.Sprintf("0:a:%d", as.index),
|
||||
"-c:a", "aac",
|
||||
// TODO: Support 5.1 audio streams.
|
||||
"-ac", "2",
|
||||
// TODO: Support multi audio qualities.
|
||||
"-b:a", "128k",
|
||||
}
|
||||
}
|
||||
261
seanime-2.9.10/internal/mediastream/transcoder/filestream.go
Normal file
261
seanime-2.9.10/internal/mediastream/transcoder/filestream.go
Normal file
@@ -0,0 +1,261 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"seanime/internal/mediastream/videofile"
|
||||
"seanime/internal/util/result"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// FileStream represents a stream of file data.
// It holds the keyframes, media information, video streams, and audio streams
// for one media file, and lazily creates per-quality / per-track transcoding
// streams on demand.
type FileStream struct {
	ready     sync.WaitGroup                     // Signals completion of the async keyframe extraction started in NewFileStream.
	err       error                              // An error that might occur during processing.
	Path      string                             // The path of the source media file.
	Out       string                             // The output directory (StreamDir/<sha>) where segments are written.
	Keyframes *Keyframe                          // The keyframes of the video; populated asynchronously.
	Info      *videofile.MediaInfo               // The media information of the file.
	videos    *result.Map[Quality, *VideoStream] // Lazily-created video streams, keyed by quality.
	audios    *result.Map[int32, *AudioStream]   // Lazily-created audio streams, keyed by track index.
	logger    *zerolog.Logger
	settings  *Settings
}
|
||||
|
||||
// NewFileStream creates a new FileStream.
// Keyframe extraction is started in a background goroutine; ret.Keyframes is
// only guaranteed to be set once ret.ready has been waited on.
func NewFileStream(
	path string,
	sha string, // content hash of the file; used as the output subdirectory and cache key
	mediaInfo *videofile.MediaInfo,
	settings *Settings,
	logger *zerolog.Logger,
) *FileStream {
	ret := &FileStream{
		Path:     path,
		Out:      filepath.Join(settings.StreamDir, sha),
		videos:   result.NewResultMap[Quality, *VideoStream](),
		audios:   result.NewResultMap[int32, *AudioStream](),
		logger:   logger,
		settings: settings,
		Info:     mediaInfo,
	}

	// Extract keyframes asynchronously; GetKeyframes itself blocks until at
	// least the first batch of keyframes is available (or the cache is hit).
	ret.ready.Add(1)
	go func() {
		defer ret.ready.Done()
		ret.Keyframes = GetKeyframes(path, sha, logger, settings)
	}()

	return ret
}
|
||||
|
||||
// Kill stops all streams.
|
||||
func (fs *FileStream) Kill() {
|
||||
fs.videos.Range(func(_ Quality, s *VideoStream) bool {
|
||||
s.Kill()
|
||||
return true
|
||||
})
|
||||
fs.audios.Range(func(_ int32, s *AudioStream) bool {
|
||||
s.Kill()
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// Destroy stops all streams and removes the output directory.
func (fs *FileStream) Destroy() {
	fs.logger.Debug().Msg("filestream: Destroying streams")
	fs.Kill()
	// Best-effort cleanup; errors (e.g. directory already gone) are ignored.
	_ = os.RemoveAll(fs.Out)
}
|
||||
|
||||
// GetMaster generates the HLS master playlist for this file.
// It advertises one transmuxed ("original") variant that mirrors the source
// codec/bitrate, one transcoded variant per applicable quality tier, and one
// EXT-X-MEDIA entry per audio track.
func (fs *FileStream) GetMaster() string {
	master := "#EXTM3U\n"
	if fs.Info.Video != nil {
		// Pick the smallest quality tier that covers the source's height or
		// bitrate; used to cap the advertised bandwidth of the original variant.
		// NOTE(review): if the source exceeds every tier, transmuxQuality stays
		// the zero value "" — Height()/AverageBitrate() on it would panic; verify
		// sources above 8k cannot occur here.
		var transmuxQuality Quality
		for _, quality := range Qualities {
			if quality.Height() >= fs.Info.Video.Quality.Height() || quality.AverageBitrate() >= fs.Info.Video.Bitrate {
				transmuxQuality = quality
				break
			}
		}
		{
			// Variant entry for the untranscoded ("original") stream.
			bitrate := float64(fs.Info.Video.Bitrate)
			master += "#EXT-X-STREAM-INF:"
			master += fmt.Sprintf("AVERAGE-BANDWIDTH=%d,", int(math.Min(bitrate*0.8, float64(transmuxQuality.AverageBitrate()))))
			master += fmt.Sprintf("BANDWIDTH=%d,", int(math.Min(bitrate, float64(transmuxQuality.MaxBitrate()))))
			master += fmt.Sprintf("RESOLUTION=%dx%d,", fs.Info.Video.Width, fs.Info.Video.Height)
			if fs.Info.Video.MimeCodec != nil {
				master += fmt.Sprintf("CODECS=\"%s\",", *fs.Info.Video.MimeCodec)
			}
			master += "AUDIO=\"audio\","
			master += "CLOSED-CAPTIONS=NONE\n"
			master += fmt.Sprintf("./%s/index.m3u8\n", Original)
		}
		aspectRatio := float32(fs.Info.Video.Width) / float32(fs.Info.Video.Height)
		// codec is the prefix + the level, the level is not part of the codec we want to compare for the same_codec check bellow
		transmuxPrefix := "avc1.6400"
		transmuxCodec := transmuxPrefix + "28"

		for _, quality := range Qualities {
			// Skip tiers at the source's own height when the source is already
			// the same H.264 profile (the original variant covers that case).
			sameCodec := fs.Info.Video.MimeCodec != nil && strings.HasPrefix(*fs.Info.Video.MimeCodec, transmuxPrefix)
			includeLvl := quality.Height() < fs.Info.Video.Quality.Height() || (quality.Height() == fs.Info.Video.Quality.Height() && !sameCodec)

			if includeLvl {
				master += "#EXT-X-STREAM-INF:"
				master += fmt.Sprintf("AVERAGE-BANDWIDTH=%d,", quality.AverageBitrate())
				master += fmt.Sprintf("BANDWIDTH=%d,", quality.MaxBitrate())
				// Width derived from the source aspect ratio, rounded to nearest int.
				master += fmt.Sprintf("RESOLUTION=%dx%d,", int(aspectRatio*float32(quality.Height())+0.5), quality.Height())
				master += fmt.Sprintf("CODECS=\"%s\",", transmuxCodec)
				master += "AUDIO=\"audio\","
				master += "CLOSED-CAPTIONS=NONE\n"
				master += fmt.Sprintf("./%s/index.m3u8\n", quality)
			}
		}

		//for _, quality := range Qualities {
		//	if quality.Height() < fs.Info.Video.Quality.Height() && quality.AverageBitrate() < fs.Info.Video.Bitrate {
		//		master += "#EXT-X-STREAM-INF:"
		//		master += fmt.Sprintf("AVERAGE-BANDWIDTH=%d,", quality.AverageBitrate())
		//		master += fmt.Sprintf("BANDWIDTH=%d,", quality.MaxBitrate())
		//		master += fmt.Sprintf("RESOLUTION=%dx%d,", int(aspectRatio*float32(quality.Height())+0.5), quality.Height())
		//		master += "CODECS=\"avc1.640028\","
		//		master += "AUDIO=\"audio\","
		//		master += "CLOSED-CAPTIONS=NONE\n"
		//		master += fmt.Sprintf("./%s/index.m3u8\n", quality)
		//	}
		//}
	}
	// One alternative-rendition entry per audio track, all in the "audio" group.
	for _, audio := range fs.Info.Audios {
		master += "#EXT-X-MEDIA:TYPE=AUDIO,"
		master += "GROUP-ID=\"audio\","
		if audio.Language != nil {
			master += fmt.Sprintf("LANGUAGE=\"%s\",", *audio.Language)
		}
		// NAME preference: explicit title, then language, then a generated label.
		if audio.Title != nil {
			master += fmt.Sprintf("NAME=\"%s\",", *audio.Title)
		} else if audio.Language != nil {
			master += fmt.Sprintf("NAME=\"%s\",", *audio.Language)
		} else {
			master += fmt.Sprintf("NAME=\"Audio %d\",", audio.Index)
		}
		if audio.IsDefault {
			master += "DEFAULT=YES,"
		}
		master += "CHANNELS=\"2\","
		master += fmt.Sprintf("URI=\"./audio/%d/index.m3u8\"\n", audio.Index)
	}
	return master
}
|
||||
|
||||
// GetVideoIndex gets the index of a video stream of a specific quality.
|
||||
func (fs *FileStream) GetVideoIndex(quality Quality) (string, error) {
|
||||
stream := fs.getVideoStream(quality)
|
||||
return stream.GetIndex()
|
||||
}
|
||||
|
||||
// getVideoStream gets a video stream of a specific quality.
|
||||
// It creates a new stream if it does not exist.
|
||||
func (fs *FileStream) getVideoStream(quality Quality) *VideoStream {
|
||||
stream, _ := fs.videos.GetOrSet(quality, func() (*VideoStream, error) {
|
||||
return NewVideoStream(fs, quality, fs.logger, fs.settings), nil
|
||||
})
|
||||
return stream
|
||||
}
|
||||
|
||||
// GetVideoSegment gets a segment of a video stream of a specific quality.
|
||||
//func (fs *FileStream) GetVideoSegment(quality Quality, segment int32) (string, error) {
|
||||
// stream := fs.getVideoStream(quality)
|
||||
// return stream.GetSegment(segment)
|
||||
//}
|
||||
|
||||
// GetVideoSegment gets a segment of a video stream of a specific quality.
// The retrieval runs in a goroutine and is abandoned after a 30s timeout.
// NOTE(review): on timeout the goroutine keeps running (GetSegment has no
// cancellation hook), so the underlying work is leaked, not stopped — confirm
// this is acceptable.
func (fs *FileStream) GetVideoSegment(quality Quality, segment int32) (string, error) {
	streamLogger.Debug().Msgf("filestream: Retrieving video segment %d (%s)", segment, quality)
	// Debug
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	debugStreamRequest(fmt.Sprintf("video %s, segment %d", quality, segment), ctx)

	//stream := fs.getVideoStream(quality)
	//return stream.GetSegment(segment)

	// Channel to signal completion
	done := make(chan struct{})

	var ret string
	var err error

	// Execute the retrieval operation in a goroutine.
	// ret/err are only read after <-done, so there is no data race on the
	// success path.
	go func() {
		defer close(done)
		stream := fs.getVideoStream(quality)
		ret, err = stream.GetSegment(segment)
	}()

	// Wait for either the operation to complete or the timeout to occur
	select {
	case <-done:
		return ret, err
	case <-ctx.Done():
		return "", fmt.Errorf("filestream: timeout while retrieving video segment %d (%s)", segment, quality)
	}
}
|
||||
|
||||
// GetAudioIndex gets the index of an audio stream of a specific index.
|
||||
func (fs *FileStream) GetAudioIndex(audio int32) (string, error) {
|
||||
stream := fs.getAudioStream(audio)
|
||||
return stream.GetIndex()
|
||||
}
|
||||
|
||||
// GetAudioSegment gets a segment of an audio stream of a specific index.
|
||||
func (fs *FileStream) GetAudioSegment(audio int32, segment int32) (string, error) {
|
||||
streamLogger.Debug().Msgf("filestream: Retrieving audio %d segment %d", audio, segment)
|
||||
// Debug
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
debugStreamRequest(fmt.Sprintf("audio %d, segment %d", audio, segment), ctx)
|
||||
|
||||
stream := fs.getAudioStream(audio)
|
||||
return stream.GetSegment(segment)
|
||||
}
|
||||
|
||||
// getAudioStream gets an audio stream of a specific index.
|
||||
// It creates a new stream if it does not exist.
|
||||
func (fs *FileStream) getAudioStream(audio int32) *AudioStream {
|
||||
stream, _ := fs.audios.GetOrSet(audio, func() (*AudioStream, error) {
|
||||
return NewAudioStream(fs, audio, fs.logger, fs.settings), nil
|
||||
})
|
||||
return stream
|
||||
}
|
||||
|
||||
func debugStreamRequest(text string, ctx context.Context) {
|
||||
//ctx, cancel := context.WithCancel(context.Background())
|
||||
//defer cancel()
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
ticker := time.NewTicker(2 * time.Second)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
ticker.Stop()
|
||||
return
|
||||
case <-ticker.C:
|
||||
if debugStream {
|
||||
time.Sleep(2 * time.Second)
|
||||
streamLogger.Debug().Msgf("t: %s has been running for %.2f", text, time.Since(start).Seconds())
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
149
seanime-2.9.10/internal/mediastream/transcoder/hwaccel.go
Normal file
149
seanime-2.9.10/internal/mediastream/transcoder/hwaccel.go
Normal file
@@ -0,0 +1,149 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
type (
	// HwAccelOptions selects a hardware-acceleration backend for ffmpeg.
	HwAccelOptions struct {
		Kind           string // backend name: "nvidia", "vaapi", "qsv"/"intel", "videotoolbox", "custom", or ""/"auto"/"cpu"/"none" for software
		Preset         string // encoder preset (e.g. "fast"); unused by backends without presets (vaapi)
		CustomSettings string // JSON-encoded HwAccelSettings, honored only when Kind == "custom"
	}
)
|
||||
|
||||
// GetHardwareAccelSettings resolves HwAccelOptions into the concrete ffmpeg
// decode/encode flags and scale filter for the selected backend.
// Unknown or empty kinds fall back to software ("disabled"); an unparseable
// "custom" configuration also falls back to software. An unrecognized
// non-empty name is fatal.
func GetHardwareAccelSettings(opts HwAccelOptions) HwAccelSettings {
	name := opts.Kind
	// Normalize all "no acceleration" spellings to the single "disabled" case.
	if name == "" || name == "auto" || name == "cpu" || name == "none" {
		name = "disabled"
	}
	streamLogger.Debug().Msgf("transcoder: Hardware acceleration: %s", name)

	var customHwAccelSettings HwAccelSettings
	if opts.CustomSettings != "" && name == "custom" {
		err := json.Unmarshal([]byte(opts.CustomSettings), &customHwAccelSettings)
		if err != nil {
			streamLogger.Error().Err(err).Msg("transcoder: Failed to parse custom hardware acceleration settings, falling back to CPU")
			name = "disabled"
		}
		customHwAccelSettings.Name = "custom"
	} else if opts.CustomSettings == "" && name == "custom" {
		// "custom" without settings is meaningless; fall back to software.
		name = "disabled"
	}

	defaultOSDevice := "/dev/dri/renderD128"
	switch runtime.GOOS {
	case "windows":
		defaultOSDevice = "auto"
	}

	// superfast or ultrafast would produce heavy files, so opt for "fast" by default.
	// vaapi does not have any presets so this flag is unused for vaapi hwaccel.
	preset := opts.Preset

	switch name {
	case "disabled":
		return HwAccelSettings{
			Name:        "disabled",
			DecodeFlags: []string{},
			EncodeFlags: []string{
				"-c:v", "libx264",
				"-preset", preset,
				// sc_threshold is a scene detection mechanism used to create a keyframe when the scene changes
				// this is on by default and inserts keyframes where we don't want to (it also breaks force_key_frames)
				// we disable it to prevents whole scenes from being removed due to the -f segment failing to find the corresponding keyframe
				"-sc_threshold", "0",
				// force 8bits output (by default it keeps the same as the source but 10bits is not playable on some devices)
				"-pix_fmt", "yuv420p",
			},
			// we could put :force_original_aspect_ratio=decrease:force_divisible_by=2 here but we already calculate a correct width and
			// aspect ratio in our code so there is no need.
			ScaleFilter:   "scale=%d:%d",
			WithForcedIdr: true,
		}
	case "vaapi":
		return HwAccelSettings{
			Name: name,
			DecodeFlags: []string{
				"-hwaccel", "vaapi",
				"-hwaccel_device", GetEnvOr("SEANIME_TRANSCODER_VAAPI_RENDERER", defaultOSDevice),
				"-hwaccel_output_format", "vaapi",
			},
			EncodeFlags: []string{
				// h264_vaapi does not have any preset or scenecut flags.
				"-c:v", "h264_vaapi",
			},
			// if the hardware decoder could not work and fallback to soft decode, we need to instruct ffmpeg to
			// upload back frames to gpu space (after converting them)
			// see https://trac.ffmpeg.org/wiki/Hardware/VAAPI#Encoding for more info
			// we also need to force the format to be nv12 since 10bits is not supported via hwaccel.
			// this filter is equivalent to this pseudocode:
			// if (vaapi) {
			//   hwupload, passthrough, keep vaapi as is
			//   convert whatever to nv12 on GPU
			// } else {
			//   convert whatever to nv12 on CPU
			//   hwupload to vaapi(nv12)
			//   convert whatever to nv12 on GPU // scale_vaapi doesn't support passthrough option, so it has to make a copy
			// }
			// See https://www.reddit.com/r/ffmpeg/comments/1bqn60w/hardware_accelerated_decoding_without_hwdownload/ for more info
			ScaleFilter:   "format=nv12|vaapi,hwupload,scale_vaapi=%d:%d:format=nv12",
			WithForcedIdr: true,
		}
	case "qsv", "intel":
		return HwAccelSettings{
			Name: name,
			DecodeFlags: []string{
				"-hwaccel", "qsv",
				"-qsv_device", GetEnvOr("SEANIME_TRANSCODER_QSV_RENDERER", defaultOSDevice),
				"-hwaccel_output_format", "qsv",
			},
			EncodeFlags: []string{
				"-c:v", "h264_qsv",
				"-preset", preset,
			},
			// see note on ScaleFilter of the vaapi HwAccel, this is the same filter but adapted to qsv
			ScaleFilter:   "format=nv12|qsv,hwupload,scale_qsv=%d:%d:format=nv12",
			WithForcedIdr: true,
		}
	case "nvidia":
		return HwAccelSettings{
			Name: "nvidia",
			DecodeFlags: []string{
				"-hwaccel", "cuda",
				// this flag prevents data to go from gpu space to cpu space
				// it forces the whole dec/enc to be on the gpu. We want that.
				"-hwaccel_output_format", "cuda",
			},
			EncodeFlags: []string{
				"-c:v", "h264_nvenc",
				"-preset", preset,
				// the exivalent of -sc_threshold on nvidia.
				"-no-scenecut", "1",
			},
			// see note on ScaleFilter of the vaapi HwAccel, this is the same filter but adapted to cuda
			ScaleFilter:   "format=nv12|cuda,hwupload,scale_cuda=%d:%d:format=nv12",
			WithForcedIdr: true,
		}
	case "videotoolbox":
		return HwAccelSettings{
			Name: "videotoolbox",
			DecodeFlags: []string{
				"-hwaccel", "videotoolbox",
			},
			EncodeFlags: []string{
				"-c:v", "h264_videotoolbox",
				"-profile:v", "main",
			},
			ScaleFilter:   "scale=%d:%d",
			WithForcedIdr: true,
		}
	case "custom":
		return customHwAccelSettings
	default:
		// Fatal terminates the process; the panic below only documents
		// unreachability for the compiler.
		streamLogger.Fatal().Msgf("No hardware accelerator named: %s", name)
		panic("unreachable")
	}
}
|
||||
212
seanime-2.9.10/internal/mediastream/transcoder/keyframes.go
Normal file
212
seanime-2.9.10/internal/mediastream/transcoder/keyframes.go
Normal file
@@ -0,0 +1,212 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"path/filepath"
|
||||
"seanime/internal/mediastream/videofile"
|
||||
"seanime/internal/util"
|
||||
"seanime/internal/util/result"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Keyframe holds the keyframe timestamps (in seconds) of one media file,
// identified by its content hash. Keyframes is appended to incrementally
// while ffprobe is still running; IsDone flips to true once extraction
// finishes. Access is synchronized through info.
type Keyframe struct {
	Sha       string    // content hash of the media file
	Keyframes []float64 // keyframe timestamps in seconds, ascending
	IsDone    bool      // true once extraction has completed
	info      *KeyframeInfo
}
|
||||
// KeyframeInfo carries the synchronization state of a Keyframe:
// a RWMutex guarding Keyframes/listeners, a WaitGroup released once the
// first batch of keyframes is available, and listeners notified on every
// append.
type KeyframeInfo struct {
	mutex     sync.RWMutex
	ready     sync.WaitGroup // Done() exactly once, when the first keyframes arrive
	listeners []func(keyframes []float64)
}
|
||||
|
||||
// Get returns the timestamp (in seconds) of the keyframe at idx.
// Panics if idx is out of range; the deferred unlock keeps the lock
// released even in that case.
func (kf *Keyframe) Get(idx int32) float64 {
	kf.info.mutex.RLock()
	defer kf.info.mutex.RUnlock()
	return kf.Keyframes[idx]
}
|
||||
|
||||
func (kf *Keyframe) Slice(start int32, end int32) []float64 {
|
||||
if end <= start {
|
||||
return []float64{}
|
||||
}
|
||||
kf.info.mutex.RLock()
|
||||
defer kf.info.mutex.RUnlock()
|
||||
ref := kf.Keyframes[start:end]
|
||||
ret := make([]float64, end-start)
|
||||
copy(ret, ref)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Length returns the number of keyframes collected so far and whether
// extraction has finished (i.e. whether the count is final).
func (kf *Keyframe) Length() (int32, bool) {
	kf.info.mutex.RLock()
	defer kf.info.mutex.RUnlock()
	return int32(len(kf.Keyframes)), kf.IsDone
}
|
||||
|
||||
// add appends a batch of keyframe timestamps and notifies every registered
// listener with the full, updated slice.
// Listeners run while the write lock is held: they must not call back into
// methods that take kf.info.mutex, or they will deadlock.
func (kf *Keyframe) add(values []float64) {
	kf.info.mutex.Lock()
	defer kf.info.mutex.Unlock()
	kf.Keyframes = append(kf.Keyframes, values...)
	for _, listener := range kf.info.listeners {
		listener(kf.Keyframes)
	}
}
|
||||
|
||||
// AddListener registers a callback invoked (under the write lock) every time
// new keyframes are appended. The callback receives the full keyframe slice
// and must not retain or mutate it.
func (kf *Keyframe) AddListener(callback func(keyframes []float64)) {
	kf.info.mutex.Lock()
	defer kf.info.mutex.Unlock()
	kf.info.listeners = append(kf.info.listeners, callback)
}
|
||||
|
||||
var keyframes = result.NewResultMap[string, *Keyframe]()
|
||||
|
||||
// GetKeyframes returns the (cached) keyframes for the file at path, starting
// extraction on first request. It first tries the on-disk keyframes.json
// cache, then falls back to running ffprobe in the background.
// The call blocks until at least the first batch of keyframes is available
// (info.ready), but not until extraction is complete.
func GetKeyframes(
	path string,
	hash string,
	logger *zerolog.Logger,
	settings *Settings,
) *Keyframe {
	ret, _ := keyframes.GetOrSet(hash, func() (*Keyframe, error) {
		kf := &Keyframe{
			Sha:    hash,
			IsDone: false,
			info:   &KeyframeInfo{},
		}
		// ready is Done()'d exactly once: either on cache hit below, or by
		// getKeyframes when the first batch arrives.
		kf.info.ready.Add(1)
		go func() {
			keyframesPath := filepath.Join(settings.StreamDir, hash, "keyframes.json")
			if err := getSavedInfo(keyframesPath, kf); err == nil {
				logger.Trace().Msgf("transcoder: Keyframes Cache HIT")
				kf.info.ready.Done()
				return
			}

			// Cache miss: run ffprobe; persist the result on success.
			// NOTE(review): an extraction error is silently dropped here —
			// ready is released by getKeyframes' own bookkeeping, but callers
			// never learn extraction failed; confirm this is intended.
			err := getKeyframes(settings.FfprobePath, path, kf, hash, logger)
			if err == nil {
				saveInfo(keyframesPath, kf)
			}
		}()
		return kf, nil
	})
	// Block until the first keyframes (or the cached set) are available.
	ret.info.ready.Wait()
	return ret
}
|
||||
|
||||
// getKeyframes streams ffprobe packet output for the first video track and
// incrementally feeds keyframe timestamps into kf, releasing kf.info.ready
// after the first batch. Falls back to synthetic 2-second keyframes when
// fewer than two real ones are found (e.g. audio files with a poster image).
// NOTE(review): scanner.Err() and cmd.Wait() are never checked/called — a
// truncated ffprobe run is indistinguishable from success, and the child
// process is never reaped; confirm util.NewCmd handles reaping.
func getKeyframes(ffprobePath string, path string, kf *Keyframe, hash string, logger *zerolog.Logger) error {
	defer printExecTime(logger, "ffprobe analysis for %s", path)()
	// Execute ffprobe to retrieve all IFrames. IFrames are specific points in the video we can divide it into segments.
	// We instruct ffprobe to return the timestamp and flags of each frame.
	// Although it's possible to request ffprobe to return only i-frames (keyframes) using the -skip_frame nokey option, this approach is highly inefficient.
	// The inefficiency arises because when this option is used, ffmpeg processes every single frame, which significantly slows down the operation.
	cmd := util.NewCmd(
		"ffprobe",
		"-loglevel", "error",
		"-select_streams", "v:0",
		"-show_entries", "packet=pts_time,flags",
		"-of", "csv=print_section=0",
		path,
	)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	err = cmd.Start()
	if err != nil {
		return err
	}

	scanner := bufio.NewScanner(stdout)

	// Batch timestamps: flush the first 100 quickly so playback can start,
	// then in larger batches of 500 to reduce lock/listener churn.
	ret := make([]float64, 0, 1000)
	max := 100
	done := 0
	for scanner.Scan() {
		frame := scanner.Text()
		if frame == "" {
			continue
		}

		// Each line is "pts_time,flags" (csv, no section header).
		x := strings.Split(frame, ",")
		pts, flags := x[0], x[1]

		// if no video track
		if pts == "N/A" {
			break
		}

		// Only take keyframes
		if flags[0] != 'K' {
			continue
		}

		fpts, err := strconv.ParseFloat(pts, 64)
		if err != nil {
			return err
		}

		// Previously, the aim was to save only those keyframes that had a minimum gap of 3 seconds between them.
		// This was to avoid creating segments as short as 0.2 seconds.
		// However, there were instances where the -f segment muxer would ignore the specified segment time and choose a random keyframe to cut at.
		// To counter this, treat every keyframe as a potential segment.
		//if done == 0 && len(ret) == 0 {
		//
		//	// There are instances where videos may not start exactly at 0:00. This needs to be considered,
		//	// and we should only include keyframes that occur after the video's start time. If not done so,
		//	// it can lead to a discrepancy in our segment count and potentially duplicate the same segment in the stream.
		//
		//	// For simplicity in code comprehension, we designate 0 as the initial keyframe, even though it's not genuine.
		//	// This value is never actually passed to ffmpeg.
		//	ret = append(ret, 0)
		//	continue
		//}
		ret = append(ret, fpts)

		if len(ret) == max {
			kf.add(ret)
			// First flush unblocks GetKeyframes' ready.Wait().
			if done == 0 {
				kf.info.ready.Done()
			} else if done >= 500 {
				max = 500
			}
			done += max
			// clear the array without reallocing it
			ret = ret[:0]
		}
	}

	// If there is less than 2 (i.e. equals 0 or 1 (it happens for audio files with poster))
	if len(ret) < 2 {
		dummy, err := getDummyKeyframes(ffprobePath, path, hash)
		if err != nil {
			return err
		}
		ret = dummy
	}

	kf.add(ret)
	// If no batch was ever flushed above, release ready here instead.
	if done == 0 {
		kf.info.ready.Done()
	}
	kf.IsDone = true
	return nil
}
|
||||
|
||||
func getDummyKeyframes(ffprobePath string, path string, sha string) ([]float64, error) {
|
||||
dummyKeyframeDuration := float64(2)
|
||||
info, err := videofile.FfprobeGetInfo(ffprobePath, path, sha)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
segmentCount := int((float64(info.Duration) / dummyKeyframeDuration) + 1)
|
||||
ret := make([]float64, segmentCount)
|
||||
for segmentIndex := 0; segmentIndex < segmentCount; segmentIndex += 1 {
|
||||
ret[segmentIndex] = float64(segmentIndex) * dummyKeyframeDuration
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
121
seanime-2.9.10/internal/mediastream/transcoder/quality.go
Normal file
121
seanime-2.9.10/internal/mediastream/transcoder/quality.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Quality is a named video quality tier (e.g. "720p"), used as a map key and
// as the path component of per-quality playlists.
type Quality string

const (
	P240     Quality = "240p"
	P360     Quality = "360p"
	P480     Quality = "480p"
	P720     Quality = "720p"
	P1080    Quality = "1080p"
	P1440    Quality = "1440p"
	P4k      Quality = "4k"
	P8k      Quality = "8k"
	Original Quality = "original" // the untranscoded source; handled specially everywhere
)
|
||||
|
||||
// Qualities lists all transcodable quality tiers in ascending order of height.
// Original is not included in this list because it is a special case.
var Qualities = []Quality{P240, P360, P480, P720, P1080, P1440, P4k, P8k}
|
||||
|
||||
func QualityFromString(str string) (Quality, error) {
|
||||
if str == string(Original) {
|
||||
return Original, nil
|
||||
}
|
||||
|
||||
qualities := Qualities
|
||||
for _, quality := range qualities {
|
||||
if string(quality) == str {
|
||||
return quality, nil
|
||||
}
|
||||
}
|
||||
return Original, errors.New("invalid quality string")
|
||||
}
|
||||
|
||||
// AverageBitrate
|
||||
// Note: Not accurate
|
||||
func (q Quality) AverageBitrate() uint32 {
|
||||
switch q {
|
||||
case P240:
|
||||
return 400_000
|
||||
case P360:
|
||||
return 800_000
|
||||
case P480:
|
||||
return 1_200_000
|
||||
case P720:
|
||||
return 2_400_000
|
||||
case P1080:
|
||||
return 4_800_000
|
||||
case P1440:
|
||||
return 9_600_000
|
||||
case P4k:
|
||||
return 16_000_000
|
||||
case P8k:
|
||||
return 28_000_000
|
||||
case Original:
|
||||
panic("Original quality must be handled specially")
|
||||
}
|
||||
panic("Invalid quality value")
|
||||
}
|
||||
|
||||
func (q Quality) MaxBitrate() uint32 {
|
||||
switch q {
|
||||
case P240:
|
||||
return 700_000
|
||||
case P360:
|
||||
return 1_400_000
|
||||
case P480:
|
||||
return 2_100_000
|
||||
case P720:
|
||||
return 4_000_000
|
||||
case P1080:
|
||||
return 8_000_000
|
||||
case P1440:
|
||||
return 12_000_000
|
||||
case P4k:
|
||||
return 28_000_000
|
||||
case P8k:
|
||||
return 40_000_000
|
||||
case Original:
|
||||
panic("Original quality must be handled specially")
|
||||
}
|
||||
panic("Invalid quality value")
|
||||
}
|
||||
|
||||
func (q Quality) Height() uint32 {
|
||||
switch q {
|
||||
case P240:
|
||||
return 240
|
||||
case P360:
|
||||
return 360
|
||||
case P480:
|
||||
return 480
|
||||
case P720:
|
||||
return 720
|
||||
case P1080:
|
||||
return 1080
|
||||
case P1440:
|
||||
return 1440
|
||||
case P4k:
|
||||
return 2160
|
||||
case P8k:
|
||||
return 4320
|
||||
case Original:
|
||||
panic("Original quality must be handled specially")
|
||||
}
|
||||
panic("Invalid quality value")
|
||||
}
|
||||
|
||||
func QualityFromHeight(height uint32) Quality {
|
||||
qualities := Qualities
|
||||
for _, quality := range qualities {
|
||||
if quality.Height() >= height {
|
||||
return quality
|
||||
}
|
||||
}
|
||||
return P240
|
||||
}
|
||||
19
seanime-2.9.10/internal/mediastream/transcoder/settings.go
Normal file
19
seanime-2.9.10/internal/mediastream/transcoder/settings.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package transcoder
|
||||
|
||||
import "os"
|
||||
|
||||
// GetEnvOr returns the value of the environment variable env, or def when
// the variable is unset or empty.
func GetEnvOr(env string, def string) string {
	if value := os.Getenv(env); value != "" {
		return value
	}
	return def
}
|
||||
|
||||
// HwAccelSettings holds the resolved ffmpeg flags for one hardware
// acceleration backend (see GetHardwareAccelSettings).
type HwAccelSettings struct {
	Name        string   `json:"name"`        // backend name, e.g. "nvidia", "disabled", "custom"
	DecodeFlags []string `json:"decodeFlags"` // flags placed before the input (-hwaccel, device selection)
	EncodeFlags []string `json:"encodeFlags"` // encoder selection and tuning flags
	ScaleFilter string   `json:"scaleFilter"` // scale filter template with two %d placeholders (width, height)
	// NOTE(review): the json tag "removeForcedIdr" contradicts the field name
	// WithForcedIdr — custom configs must use the tag name; confirm whether the
	// tag or the field is the intended spelling before touching either.
	WithForcedIdr bool `json:"removeForcedIdr"`
}
|
||||
667
seanime-2.9.10/internal/mediastream/transcoder/stream.go
Normal file
667
seanime-2.9.10/internal/mediastream/transcoder/stream.go
Normal file
@@ -0,0 +1,667 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"seanime/internal/util"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/samber/lo"
|
||||
lop "github.com/samber/lo/parallel"
|
||||
)
|
||||
|
||||
// Flags is a bitmask describing what a stream carries and how it is produced.
type Flags int32

const (
	AudioF Flags = 1 << 0 // stream carries audio
	VideoF Flags = 1 << 1 // stream carries video
	// NOTE(review): bit 1<<2 is skipped — possibly a removed flag; confirm
	// before reusing that bit.
	Transmux Flags = 1 << 3 // stream is remuxed without re-encoding
)
|
||||
|
||||
// StreamHandle is implemented by the concrete stream kinds (AudioStream,
// VideoStream) to supply the parts of the transcode pipeline that differ
// per media type; Stream provides everything else.
type StreamHandle interface {
	// getTranscodeArgs returns the ffmpeg codec/filter arguments for this kind.
	getTranscodeArgs(segments string) []string
	// getOutPath returns the segment filename template for the given encoder.
	getOutPath(encoderId int) string
	// getFlags reports what the stream carries (audio/video/transmux).
	getFlags() Flags
}
|
||||
|
||||
// Stream is the shared engine behind AudioStream/VideoStream: it tracks
// which segments exist, which encoder "heads" are producing them, and
// coordinates waiting for segments to become ready.
type Stream struct {
	kind     string       // human-readable label, e.g. "audio 0", used in logs
	handle   StreamHandle // kind-specific behavior (args, paths, flags)
	file     *FileStream  // owning file
	segments []Segment    // one entry per keyframe interval; grows as keyframes arrive
	heads    []Head       // currently running ffmpeg encoder processes
	// the lock used for the heads
	//lock sync.RWMutex

	segmentsLock sync.RWMutex // guards segments
	headsLock    sync.RWMutex // guards heads

	logger   *zerolog.Logger
	settings *Settings
	killCh   chan struct{} // closed to interrupt waiters when the stream is killed
	ctx      context.Context
	cancel   context.CancelFunc
}
|
||||
|
||||
// Segment tracks the readiness of one HLS segment.
type Segment struct {
	// channel open if the segment is not ready. closed if ready.
	// one can check if segment 1 is open by doing:
	//
	// ts.isSegmentReady(1).
	//
	// You can also wait for it to be ready (non-blocking if already ready) by doing:
	// <-ts.segments[i]
	channel chan struct{}
	encoder int // id of the encoder head that produced (or is producing) this segment
}
|
||||
|
||||
// Head is one running ffmpeg encoder process, working forward from a start
// segment toward an end segment.
type Head struct {
	segment int32          // first segment this head was started at
	end     int32          // segment index this head will stop before
	command *exec.Cmd      // the ffmpeg process; nil for a deleted head
	stdin   io.WriteCloser // ffmpeg's stdin, used to request a graceful quit
}
|
||||
|
||||
// DeletedHead is the sentinel stored in Stream.heads for a slot whose
// encoder has been stopped and removed (indices are kept stable).
var DeletedHead = Head{
	segment: -1,
	end:     -1,
	command: nil,
}

// streamLogger is the package-wide logger used by the stream machinery.
var streamLogger = util.NewLogger()
|
||||
|
||||
// NewStream initializes ret in place as a Stream of the given kind for file.
// It pre-allocates one Segment per currently-known keyframe and, while
// keyframe extraction is still running, registers a listener that grows the
// segment list as new keyframes arrive.
func NewStream(
	kind string,
	file *FileStream,
	handle StreamHandle,
	ret *Stream, // initialized in place (it is embedded in the caller's struct)
	settings *Settings,
	logger *zerolog.Logger,
) {
	ret.kind = kind
	ret.handle = handle
	ret.file = file
	ret.heads = make([]Head, 0)
	ret.settings = settings
	ret.logger = logger
	ret.killCh = make(chan struct{})
	ret.ctx, ret.cancel = context.WithCancel(context.Background())

	// Reserve capacity up front (min 2000) so the listener below can usually
	// re-slice instead of reallocating.
	length, isDone := file.Keyframes.Length()
	ret.segments = make([]Segment, length, max(length, 2000))
	for seg := range ret.segments {
		ret.segments[seg].channel = make(chan struct{})
	}

	if !isDone {
		// Grow the segment list as more keyframes are discovered. The listener
		// runs under the Keyframe write lock; it only touches ret.segments.
		file.Keyframes.AddListener(func(keyframes []float64) {
			ret.segmentsLock.Lock()
			defer ret.segmentsLock.Unlock()
			oldLength := len(ret.segments)
			if cap(ret.segments) > len(keyframes) {
				// Enough capacity: just extend the visible length.
				ret.segments = ret.segments[:len(keyframes)]
			} else {
				ret.segments = append(ret.segments, make([]Segment, len(keyframes)-oldLength)...)
			}
			// New entries need their readiness channels created.
			for seg := oldLength; seg < len(keyframes); seg++ {
				ret.segments[seg].channel = make(chan struct{})
			}
		})
	}
}
|
||||
|
||||
// GetIndex renders the HLS media playlist for this stream from the keyframes
// known so far. While keyframe extraction is ongoing the playlist is an
// open-ended EVENT playlist; once extraction is done the final segment and
// EXT-X-ENDLIST are appended.
func (ts *Stream) GetIndex() (string, error) {
	// playlist type is event since we can append to the list if Keyframe.IsDone is false.
	// start time offset makes the stream start at 0s instead of ~3segments from the end (requires version 6 of hls)
	index := `#EXTM3U
#EXT-X-VERSION:6
#EXT-X-PLAYLIST-TYPE:EVENT
#EXT-X-START:TIME-OFFSET=0
#EXT-X-TARGETDURATION:4
#EXT-X-MEDIA-SEQUENCE:0
#EXT-X-INDEPENDENT-SEGMENTS
`
	length, isDone := ts.file.Keyframes.Length()

	// Each segment spans from one keyframe to the next.
	for segment := int32(0); segment < length-1; segment++ {
		index += fmt.Sprintf("#EXTINF:%.6f\n", ts.file.Keyframes.Get(segment+1)-ts.file.Keyframes.Get(segment))
		index += fmt.Sprintf("segment-%d.ts\n", segment)
	}
	// do not forget to add the last segment between the last keyframe and the end of the file
	// if the keyframes extraction is not done, do not bother to add it, it will be retrived on the next index retrival
	if isDone {
		index += fmt.Sprintf("#EXTINF:%.6f\n", float64(ts.file.Info.Duration)-ts.file.Keyframes.Get(length-1))
		index += fmt.Sprintf("segment-%d.ts\n", length-1)
		index += `#EXT-X-ENDLIST`
	}
	return index, nil
}
|
||||
|
||||
// GetSegment returns the path to the segment and waits for it to be ready.
// If no encoder head is close enough (or none is scheduled to produce the
// segment), a new ffmpeg head is started at that segment; otherwise the call
// blocks until the segment is produced, killed, or a 25s timeout fires.
func (ts *Stream) GetSegment(segment int32) (string, error) {
	// DEVNOTE: Reset the kill channel
	// This is needed because when the segment is needed again, this channel should be open
	// NOTE(review): this write is not protected by any lock while IsKilled/KillHead
	// read and close the same channel — looks racy under -race; confirm intent.
	ts.killCh = make(chan struct{})
	if debugStream {
		streamLogger.Trace().Msgf("transcoder: Getting segment %d [GetSegment]", segment)
		defer streamLogger.Trace().Msgf("transcoder: Retrieved segment %d [GetSegment]", segment)
	}

	ts.segmentsLock.RLock()
	ts.headsLock.RLock()
	ready := ts.isSegmentReady(segment)
	// we want to calculate distance in the same lock else it can be funky
	distance := 0.
	isScheduled := false
	if !ready {
		distance = ts.getMinEncoderDistance(segment)
		// A head is "scheduled" to produce this segment if the segment falls
		// inside its [segment, end) work range.
		for _, head := range ts.heads {
			if head.segment <= segment && segment < head.end {
				isScheduled = true
				break
			}
		}
	}
	// Grab the ready channel while still under the lock; waiting happens after unlock.
	readyChan := ts.segments[segment].channel

	ts.segmentsLock.RUnlock()
	ts.headsLock.RUnlock()

	if !ready {
		// Only start a new encode if there is too big a distance between the current encoder and the segment.
		if distance > 60 || !isScheduled {
			streamLogger.Trace().Msgf("transcoder: New encoder for segment %d", segment)
			err := ts.run(segment)
			if err != nil {
				return "", err
			}
		} else {
			streamLogger.Trace().Msgf("transcoder: Awaiting segment %d - %.2fs gap", segment, distance)
		}

		select {
		// DEVNOTE: This can cause issues if the segment is called again but was "killed" beforehand
		// It's used to interrupt the waiting process but might not be needed since there's a timeout
		case <-ts.killCh:
			return "", fmt.Errorf("transcoder: Stream killed while waiting for segment %d", segment)
		case <-readyChan:
			break
		case <-time.After(25 * time.Second):
			streamLogger.Error().Msgf("transcoder: Could not retrieve %s segment %d (timeout)", ts.kind, segment)
			return "", errors.New("could not retrieve segment (timeout)")
		}
	}
	//go ts.prepareNextSegments(segment)
	ts.prepareNextSegments(segment)
	// The out path is a printf template ("segment-...-%d.ts"); fill in the segment number.
	return fmt.Sprintf(filepath.ToSlash(ts.handle.getOutPath(ts.segments[segment].encoder)), segment), nil
}
|
||||
|
||||
// prepareNextSegments will start the next segments if they are not already started.
// It looks up to 10 segments ahead of the one just served and spawns a new
// encoder head for the first segment that is neither ready nor close to an
// existing head. Video only — see comment below.
func (ts *Stream) prepareNextSegments(segment int32) {
	//if ts.IsKilled() {
	//	return
	//}
	// Audio is way cheaper to create than video, so we don't need to run them in advance
	// Running it in advance might actually slow down the video encode since less compute
	// power can be used, so we simply disable that.
	if ts.handle.getFlags()&VideoF == 0 {
		return
	}

	ts.segmentsLock.RLock()
	defer ts.segmentsLock.RUnlock()
	ts.headsLock.RLock()
	defer ts.headsLock.RUnlock()

	for i := segment + 1; i <= min(segment+10, int32(len(ts.segments)-1)); i++ {
		// If the segment is already ready, we don't need to start a new encoder.
		if ts.isSegmentReady(i) {
			continue
		}
		// only start encode for segments not planned (getMinEncoderDistance returns Inf for them)
		// or if they are 60s away (assume 5s per segments)
		if ts.getMinEncoderDistance(i) < 60+(5*float64(i-segment)) {
			continue
		}
		streamLogger.Trace().Msgf("transcoder: Creating new encoder head for future segment %d", i)
		// Start at most one new head per call, then stop scanning.
		go func() {
			_ = ts.run(i)
		}()
		return
	}
}
|
||||
|
||||
// getMinEncoderDistance returns, in seconds of media time, how far behind the
// requested segment the closest useful encoder head currently is. Heads that
// are deleted, already past the segment's timestamp, or whose work range ends
// before the segment count as infinitely far away.
func (ts *Stream) getMinEncoderDistance(segment int32) float64 {
	t := ts.file.Keyframes.Get(segment)
	distances := lop.Map(ts.heads, func(head Head, _ int) float64 {
		// ignore killed heads or heads after the current time
		if head.segment < 0 || ts.file.Keyframes.Get(head.segment) > t || segment >= head.end {
			return math.Inf(1)
		}
		return t - ts.file.Keyframes.Get(head.segment)
	})
	if len(distances) == 0 {
		return math.Inf(1)
	}
	return slices.Min(distances)
}
|
||||
|
||||
// Kill terminates every encoder head of this stream. It takes the heads write
// lock once and tombstones each head via KillHead (which also closes killCh
// and cancels the stream context; repeated closes are absorbed by KillHead's
// recover).
func (ts *Stream) Kill() {
	streamLogger.Trace().Msgf("transcoder: Killing %s stream", ts.kind)
	defer streamLogger.Trace().Msg("transcoder: Stream killed")
	ts.lockHeads()
	defer ts.unlockHeads()

	for id := range ts.heads {
		ts.KillHead(id)
	}
}
|
||||
|
||||
func (ts *Stream) IsKilled() bool {
|
||||
select {
|
||||
case <-ts.killCh:
|
||||
// if the channel returned, it means it was closed
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// KillHead
// Stream is assumed to be locked
// Terminates a single encoder head: closes the stream-wide kill channel,
// cancels the stream context, interrupts the ffmpeg process if one is
// running, and tombstones the slot with DeletedHead. The deferred recover
// absorbs the panic from closing killCh a second time (e.g. when Kill loops
// over several heads).
func (ts *Stream) KillHead(encoderId int) {
	//streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: Killing %s encoder head", ts.kind)
	defer streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: Killed %s encoder head", ts.kind)
	defer func() {
		// Swallow "close of closed channel" when killCh was already closed.
		if r := recover(); r != nil {
		}
	}()
	close(ts.killCh)
	ts.cancel()
	if ts.heads[encoderId] == DeletedHead || ts.heads[encoderId].command == nil {
		return
	}
	ts.heads[encoderId].command.Process.Signal(os.Interrupt)
	//_, _ = ts.heads[encoderId].stdin.Write([]byte("q"))
	//_ = ts.heads[encoderId].stdin.Close()

	ts.heads[encoderId] = DeletedHead
}
|
||||
|
||||
// SetIsKilled is an intentional no-op kept to satisfy callers; the kill state
// is tracked through killCh instead (see KillHead/IsKilled).
func (ts *Stream) SetIsKilled() {
}
|
||||
|
||||
//////////////////////////////
|
||||
|
||||
// Remember to lock before calling this.
|
||||
func (ts *Stream) isSegmentReady(segment int32) bool {
|
||||
select {
|
||||
case <-ts.segments[segment].channel:
|
||||
// if the channel returned, it means it was closed
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *Stream) isSegmentTranscoding(segment int32) bool {
|
||||
for _, head := range ts.heads {
|
||||
if head.segment == segment {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// toSegmentStr renders keyframe timestamps as a comma-separated list of
// fixed six-decimal values, the format ffmpeg's -segment_times expects.
func toSegmentStr(segments []float64) string {
	parts := make([]string, len(segments))
	for i, seg := range segments {
		parts[i] = fmt.Sprintf("%.6f", seg)
	}
	return strings.Join(parts, ",")
}
|
||||
|
||||
// run starts a new ffmpeg encoder head at the given segment, producing up to
// 100 segments (stopping early at the first segment that is already ready or
// being transcoded). It registers the head, builds the ffmpeg command line
// (seek window, segment split points, codec args from the handle), launches
// the process, and spawns three goroutines: one reading the segment list from
// stdout to mark segments ready, one forwarding context cancellation as a
// graceful "q" to ffmpeg, and one waiting for process exit to tombstone the
// head. Returns an error only if the process could not be set up/started.
func (ts *Stream) run(start int32) error {
	//if ts.IsKilled() {
	//	return nil
	//}
	ts.logger.Trace().Msgf("transcoder: Running %s encoder head from %d", ts.kind, start)
	// Start the transcoder up to the 100th segment (or less)
	length, isDone := ts.file.Keyframes.Length()
	end := min(start+100, length)
	// if keyframes analysis is not finished, always have a 1-segment padding
	// for the extra segment needed for precise split (look comment before -to flag)
	if !isDone {
		end -= 2
	}
	// Stop at the first finished segment
	ts.lockSegments()
	for i := start; i < end; i++ {
		if ts.isSegmentReady(i) || ts.isSegmentTranscoding(i) {
			end = i
			break
		}
	}
	if start >= end {
		// this can happen if the start segment was finished between the check
		// to call run() and the actual call.
		// since most checks are done in a RLock() instead of a Lock() this can
		// happens when two goroutines try to make the same segment ready
		ts.unlockSegments()
		return nil
	}
	ts.unlockSegments()

	// Reserve a head slot; its index doubles as the encoder id.
	ts.lockHeads()
	encoderId := len(ts.heads)
	ts.heads = append(ts.heads, Head{segment: start, end: end, command: nil})
	ts.unlockHeads()

	streamLogger.Trace().Any("eid", encoderId).Msgf(
		"transcoder: Transcoding %d-%d/%d segments for %s",
		start,
		end,
		length,
		ts.kind,
	)

	// Include both the start and end delimiter because -ss and -to are not accurate
	// Having an extra segment allows us to cut precisely the segments we want with the
	// -f segment that does cut the beginning and the end at the keyframe like asked
	startRef := float64(0)
	startSeg := start
	if start != 0 {
		// we always take on segment before the current one, for different reasons for audio/video:
		// - Audio: we need context before the starting point, without that ffmpeg doesn't know what to do and leave ~100ms of silence
		// - Video: if a segment is really short (between 20 and 100ms), the padding given in the else block bellow is not enough and
		// the previous segment is played another time. the -segment_times is way more precise, so it does not do the same with this one
		startSeg = start - 1
		if ts.handle.getFlags()&AudioF != 0 {
			startRef = ts.file.Keyframes.Get(startSeg)
		} else {
			// the param for the -ss takes the keyframe before the specified time
			// (if the specified time is a keyframe, it either takes that keyframe or the one before)
			// to prevent this weird behavior, we specify a bit after the keyframe that interest us

			// this can't be used with audio since we need to have context before the start-time
			// without this context, the cut loses a bit of audio (audio gap of ~100ms)
			if startSeg+1 == length {
				startRef = (ts.file.Keyframes.Get(startSeg) + float64(ts.file.Info.Duration)) / 2
			} else {
				startRef = (ts.file.Keyframes.Get(startSeg) + ts.file.Keyframes.Get(startSeg+1)) / 2
			}
		}
	}
	endPadding := int32(1)
	if end == length {
		endPadding = 0
	}
	segments := ts.file.Keyframes.Slice(start+1, end+endPadding)
	if len(segments) == 0 {
		// we can't leave that empty else ffmpeg errors out.
		segments = []float64{9999999}
	}

	outpath := ts.handle.getOutPath(encoderId)
	err := os.MkdirAll(filepath.Dir(outpath), 0755)
	if err != nil {
		return err
	}

	args := []string{
		"-nostats", "-hide_banner", "-loglevel", "warning",
	}

	args = append(args, ts.settings.HwAccel.DecodeFlags...)

	if startRef != 0 {
		if ts.handle.getFlags()&VideoF != 0 {
			// This is the default behavior in transmux mode and needed to force pre/post segment to work
			// This must be disabled when processing only audio because it creates gaps in audio
			args = append(args, "-noaccurate_seek")
		}
		args = append(args,
			"-ss", fmt.Sprintf("%.6f", startRef),
		)
	}
	// do not include -to if we want the file to go to the end
	if end+1 < length {
		// sometimes, the duration is shorter than expected (only during transcode it seems)
		// always include more and use the -f segment to split the file where we want
		endRef := ts.file.Keyframes.Get(end + 1)
		// it seems that the -to is confused when -ss seek before the given time (because it searches for a keyframe)
		// add back the time that would be lost otherwise
		// this only happens when -to is before -i but having -to after -i gave a bug (not sure, don't remember)
		endRef += startRef - ts.file.Keyframes.Get(startSeg)
		args = append(args,
			"-to", fmt.Sprintf("%.6f", endRef),
		)
	}
	args = append(args,
		"-i", ts.file.Path,
		// this makes behaviors consistent between soft and hardware decodes.
		// this also means that after a -ss 50, the output video will start at 50s
		"-start_at_zero",
		// for hls streams, -copyts is mandatory
		"-copyts",
		// this makes output file start at 0s instead of a random delay + the -ss value
		// this also cancel -start_at_zero weird delay.
		// this is not always respected, but generally it gives better results.
		// even when this is not respected, it does not result in a bugged experience but this is something
		// to keep in mind when debugging
		"-muxdelay", "0",
	)
	args = append(args, ts.handle.getTranscodeArgs(toSegmentStr(segments))...)
	args = append(args,
		"-f", "segment",
		// needed for rounding issues when forcing keyframes
		// recommended value is 1/(2*frame_rate), which for a 24fps is ~0.021
		// we take a little bit more than that to be extra safe but too much can be harmful
		// when segments are short (can make the video repeat itself)
		"-segment_time_delta", "0.05",
		"-segment_format", "mpegts",
		"-segment_times", toSegmentStr(lop.Map(segments, func(seg float64, _ int) float64 {
			// segment_times want durations, not timestamps so we must substract the -ss param
			// since we give a greater value to -ss to prevent wrong seeks but -segment_times
			// needs precise segments, we use the keyframe we want to seek to as a reference.
			return seg - ts.file.Keyframes.Get(startSeg)
		})),
		"-segment_list_type", "flat",
		"-segment_list", "pipe:1",
		"-segment_start_number", fmt.Sprint(start),
		outpath,
	)

	// Added logging for ffmpeg command and hardware transcoding state
	streamLogger.Trace().Msgf("transcoder: ffmpeg command: %s %s", ts.settings.FfmpegPath, strings.Join(args, " "))
	if len(ts.settings.HwAccel.DecodeFlags) > 0 {
		streamLogger.Trace().Msgf("transcoder: Hardware transcoding enabled with flags: %v", ts.settings.HwAccel.DecodeFlags)
	} else {
		streamLogger.Trace().Msg("transcoder: Hardware transcoding not enabled")
	}

	cmd := util.NewCmdCtx(context.Background(), ts.settings.FfmpegPath, args...)
	streamLogger.Trace().Msgf("transcoder: Executing ffmpeg for segments %d-%d of %s", start, end, ts.kind)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return err
	}
	var stderr strings.Builder
	cmd.Stderr = &stderr

	err = cmd.Start()
	if err != nil {
		return err
	}
	ts.lockHeads()
	ts.heads[encoderId].command = cmd
	ts.heads[encoderId].stdin = stdin
	ts.unlockHeads()

	// Reader goroutine: each line ffmpeg writes to the segment list (pipe:1)
	// names a finished segment file; parse the number and mark it ready.
	go func(stdin io.WriteCloser) {
		scanner := bufio.NewScanner(stdout)
		format := filepath.Base(outpath)
		shouldStop := false

		for scanner.Scan() {
			var segment int32
			_, _ = fmt.Sscanf(scanner.Text(), format, &segment)

			// If the segment number is less than the starting segment (start), it means it's not relevant for the current processing, so we skip it
			if segment < start {
				// This happens because we use -f segments for accurate cutting (since -ss is not)
				// check comment at beginning of function for more info
				continue
			}
			ts.lockHeads()
			ts.heads[encoderId].segment = segment
			ts.unlockHeads()
			if debugFfmpegOutput {
				streamLogger.Debug().Int("eid", encoderId).Msgf("t: \t ffmpeg finished segment %d/%d (%d-%d) of %s", segment, end, start, end, ts.kind)
			}

			ts.lockSegments()
			// If the segment is already marked as done, we can stop the ffmpeg process
			if ts.isSegmentReady(segment) {
				// the current segment is already marked as done so another process has already gone up to here.
				_, _ = stdin.Write([]byte("q"))
				_ = stdin.Close()
				//cmd.Process.Signal(os.Interrupt)
				if debugFfmpeg {
					streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: Terminated ffmpeg, segment %d is ready", segment)
				}
				shouldStop = true
			} else {
				// Mark the segment as ready
				ts.segments[segment].encoder = encoderId
				close(ts.segments[segment].channel)
				if segment == end-1 {
					// file finished, ffmpeg will finish soon on its own
					shouldStop = true
				} else if ts.isSegmentReady(segment + 1) {
					// If the next segment is already marked as done, we can stop the ffmpeg process
					_, _ = stdin.Write([]byte("q"))
					_ = stdin.Close()
					//cmd.Process.Signal(os.Interrupt)
					if debugFfmpeg {
						streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: Terminated ffmpeg, next segment %d is ready", segment)
					}
					shouldStop = true
				}
			}
			ts.unlockSegments()
			// we need this and not a return in the condition because we want to unlock
			// the lock (and can't defer since this is a loop)
			if shouldStop {
				if debugFfmpeg {
					streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: ffmpeg completed segments %d-%d/%d of %s", start, end, length, ts.kind)
				}
				return
			}
		}

		if err := scanner.Err(); err != nil {
			streamLogger.Error().Int("eid", encoderId).Err(err).Msg("transcoder: Error scanning ffmpeg output")
			return
		}
	}(stdin)

	// Listen for kill signal
	go func(stdin io.WriteCloser) {
		select {
		case <-ts.ctx.Done():
			streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: Aborting ffmpeg process for %s", ts.kind)
			_, _ = stdin.Write([]byte("q"))
			_ = stdin.Close()
			return
		}
	}(stdin)

	// Listen for process termination
	go func() {
		err := cmd.Wait()
		var exitErr *exec.ExitError
		// Check if hardware acceleration was attempted and if stderr indicates a failure to use it
		if len(ts.settings.HwAccel.DecodeFlags) > 0 {
			lowerOutput := strings.ToLower(stderr.String())
			if strings.Contains(lowerOutput, "failed") &&
				(strings.Contains(lowerOutput, "hwaccel") || strings.Contains(lowerOutput, "vaapi") || strings.Contains(lowerOutput, "cuvid") || strings.Contains(lowerOutput, "vdpau")) {
				streamLogger.Warn().Int("eid", encoderId).Msg("transcoder: ffmpeg failed to use hardware acceleration settings; falling back to CPU")
			}
		}

		// Exit code 255 is ffmpeg's code for a graceful quit via "q"/SIGINT.
		if errors.As(err, &exitErr) && exitErr.ExitCode() == 255 {
			streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: ffmpeg process was terminated")
		} else if err != nil {
			streamLogger.Error().Int("eid", encoderId).Err(fmt.Errorf("%s: %s", err, stderr.String())).Msgf("transcoder: ffmpeg process failed")
		} else {
			streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: ffmpeg process for %s exited", ts.kind)
		}

		ts.lockHeads()
		defer ts.unlockHeads()
		// we can't delete the head directly because it would invalidate the others encoderId
		ts.heads[encoderId] = DeletedHead
	}()

	return nil
}
|
||||
|
||||
// Debug switches for verbose tracing; flip at compile time.
const debugLocks = false        // log every heads/segments lock acquisition/release
const debugFfmpeg = true        // log ffmpeg process lifecycle events
const debugFfmpegOutput = false // log every segment ffmpeg reports finished
const debugStream = false       // log stream/segment retrieval timings
|
||||
|
||||
// lockHeads acquires the heads write lock, with optional trace logging to
// diagnose lock contention/deadlocks when debugLocks is enabled.
func (ts *Stream) lockHeads() {
	if debugLocks {
		streamLogger.Debug().Msg("t: Locking heads")
	}
	ts.headsLock.Lock()
	if debugLocks {
		streamLogger.Debug().Msg("t: \t\tLocked heads")
	}
}
|
||||
|
||||
// unlockHeads releases the heads write lock, mirroring lockHeads' tracing.
func (ts *Stream) unlockHeads() {
	if debugLocks {
		streamLogger.Debug().Msg("t: Unlocking heads")
	}
	ts.headsLock.Unlock()
	if debugLocks {
		streamLogger.Debug().Msg("t: \t\tUnlocked heads")
	}
}
|
||||
|
||||
// lockSegments acquires the segments write lock, with optional trace logging
// when debugLocks is enabled.
func (ts *Stream) lockSegments() {
	if debugLocks {
		streamLogger.Debug().Msg("t: Locking segments")
	}
	ts.segmentsLock.Lock()
	if debugLocks {
		streamLogger.Debug().Msg("t: \t\tLocked segments")
	}
}
|
||||
|
||||
// unlockSegments releases the segments write lock, mirroring lockSegments' tracing.
func (ts *Stream) unlockSegments() {
	if debugLocks {
		streamLogger.Debug().Msg("t: Unlocking segments")
	}
	ts.segmentsLock.Unlock()
	if debugLocks {
		streamLogger.Debug().Msg("t: \t\tUnlocked segments")
	}
}
|
||||
249
seanime-2.9.10/internal/mediastream/transcoder/tracker.go
Normal file
249
seanime-2.9.10/internal/mediastream/transcoder/tracker.go
Normal file
@@ -0,0 +1,249 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// ClientInfo is one playback report from a client, used by the Tracker to
// decide which streams/encoder heads are still in use. Routes that only know
// part of the state send -1 (audio/head) or nil (quality) for unknown fields.
type ClientInfo struct {
	client  string   // opaque client identifier
	path    string   // media file path the client is playing
	quality *Quality // requested video quality; nil if unknown/unchanged
	audio   int32    // requested audio track index; -1 if unknown
	head    int32    // segment the client last requested; -1 if unknown
}
|
||||
|
||||
// Tracker watches client activity reported through the transcoder's
// clientChan and reaps streams, audio/video sub-streams, and orphaned
// encoder heads that no client is using anymore.
type Tracker struct {
	// key: client_id
	clients map[string]ClientInfo
	// key: client_id — last time each client reported activity
	visitDate map[string]time.Time
	// key: path — last time each media path was requested
	lastUsage     map[string]time.Time
	transcoder    *Transcoder
	deletedStream chan string // paths whose streams were killed, queued for delayed destruction
	logger        *zerolog.Logger
	killCh        chan struct{} // Close channel to stop tracker
}
|
||||
|
||||
func NewTracker(t *Transcoder) *Tracker {
|
||||
ret := &Tracker{
|
||||
clients: make(map[string]ClientInfo),
|
||||
visitDate: make(map[string]time.Time),
|
||||
lastUsage: make(map[string]time.Time),
|
||||
transcoder: t,
|
||||
logger: t.logger,
|
||||
deletedStream: make(chan string, 1000),
|
||||
killCh: make(chan struct{}),
|
||||
}
|
||||
go ret.start()
|
||||
return ret
|
||||
}
|
||||
|
||||
// Stop terminates the tracker's background loop by closing its kill channel.
// Must be called at most once (a second close would panic).
func (t *Tracker) Stop() {
	close(t.killCh)
}
|
||||
|
||||
// Abs returns the absolute value of x.
// NOTE: like the original, math.MinInt32 overflows and is returned unchanged.
func Abs(x int32) int32 {
	if x >= 0 {
		return x
	}
	return -x
}
|
||||
|
||||
// start is the tracker's event loop. It handles three event sources:
// client reports (merge partial info, then reap streams the client left),
// an hourly tick (purge clients inactive for over an hour and reap their
// streams), and delayed stream-destruction requests. Runs until killCh closes
// or the transcoder's client channel is closed.
func (t *Tracker) start() {
	inactiveTime := 1 * time.Hour
	timer := time.NewTicker(inactiveTime)
	defer timer.Stop()
	for {
		select {
		case <-t.killCh:
			return
		case info, ok := <-t.transcoder.clientChan:
			if !ok {
				return
			}

			old, ok := t.clients[info.client]
			// First fixup the info. Most routes return partial infos
			if ok && old.path == info.path {
				if info.quality == nil {
					info.quality = old.quality
				}
				if info.audio == -1 {
					info.audio = old.audio
				}
				if info.head == -1 {
					info.head = old.head
				}
			}

			t.clients[info.client] = info
			t.visitDate[info.client] = time.Now()
			t.lastUsage[info.path] = time.Now()

			// now that the new info is stored and fixed, kill old streams
			if ok && old.path == info.path {
				if old.audio != info.audio && old.audio != -1 {
					t.KillAudioIfDead(old.path, old.audio)
				}
				// NOTE(review): this compares *Quality pointers, not values; the fixup
				// above reuses the old pointer when quality is unchanged, which is
				// presumably why identity comparison works here — confirm.
				if old.quality != info.quality && old.quality != nil {
					t.KillQualityIfDead(old.path, *old.quality)
				}
				// A jump of more than 100 segments likely left heads behind.
				if old.head != -1 && Abs(info.head-old.head) > 100 {
					t.KillOrphanedHeads(old.path, old.quality, old.audio)
				}
			} else if ok {
				// Client switched to a different file: the old one may be dead.
				t.KillStreamIfDead(old.path)
			}

		case <-timer.C:
			// Purge old clients
			for client, date := range t.visitDate {
				if time.Since(date) < inactiveTime {
					continue
				}

				info := t.clients[client]

				if !t.KillStreamIfDead(info.path) {
					audioCleanup := info.audio != -1 && t.KillAudioIfDead(info.path, info.audio)
					videoCleanup := info.quality != nil && t.KillQualityIfDead(info.path, *info.quality)
					if !audioCleanup || !videoCleanup {
						t.KillOrphanedHeads(info.path, info.quality, info.audio)
					}
				}

				delete(t.clients, client)
				delete(t.visitDate, client)
			}
		case path := <-t.deletedStream:
			t.DestroyStreamIfOld(path)
		}
	}
}
|
||||
|
||||
// KillStreamIfDead kills the file stream at path if no tracked client is
// still using it, and schedules the stream for destruction 4 hours later
// (DestroyStreamIfOld re-checks usage at that point). Returns true if the
// stream was killed.
func (t *Tracker) KillStreamIfDead(path string) bool {
	for _, stream := range t.clients {
		if stream.path == path {
			return false
		}
	}
	t.logger.Trace().Msgf("Killing stream %s", path)

	stream, ok := t.transcoder.streams.Get(path)
	if !ok {
		return false
	}
	stream.Kill()
	// Delayed destruction: keep the (killed) stream around for a while in
	// case the client comes back; the tracker's kill channel aborts the wait.
	go func() {
		select {
		case <-t.killCh:
			return
		case <-time.After(4 * time.Hour):
			t.deletedStream <- path
		}
		//time.Sleep(4 * time.Hour)
		//t.deletedStream <- path
	}()
	return true
}
|
||||
|
||||
// DestroyStreamIfOld removes and destroys the file stream at path, but only
// if it has not been requested for at least 4 hours (a recent request resets
// lastUsage and aborts the destruction).
func (t *Tracker) DestroyStreamIfOld(path string) {
	if time.Since(t.lastUsage[path]) < 4*time.Hour {
		return
	}
	stream, ok := t.transcoder.streams.Get(path)
	if !ok {
		return
	}
	t.transcoder.streams.Delete(path)
	stream.Destroy()
}
|
||||
|
||||
// KillAudioIfDead kills the audio sub-stream (path, audio) if no tracked
// client is still listening to it. Returns true if it was killed.
func (t *Tracker) KillAudioIfDead(path string, audio int32) bool {
	for _, stream := range t.clients {
		if stream.path == path && stream.audio == audio {
			return false
		}
	}
	t.logger.Trace().Msgf("Killing audio %d of %s", audio, path)

	stream, ok := t.transcoder.streams.Get(path)
	if !ok {
		return false
	}
	astream, aok := stream.audios.Get(audio)
	if !aok {
		return false
	}
	astream.Kill()
	return true
}
|
||||
|
||||
// KillQualityIfDead kills the video sub-stream (path, quality) if no tracked
// client is still watching that quality. Returns true if it was killed.
func (t *Tracker) KillQualityIfDead(path string, quality Quality) bool {
	for _, stream := range t.clients {
		if stream.path == path && stream.quality != nil && *stream.quality == quality {
			return false
		}
	}
	//start := time.Now()
	t.logger.Trace().Msgf("transcoder: Killing %s video stream ", quality)

	stream, ok := t.transcoder.streams.Get(path)
	if !ok {
		return false
	}
	vstream, vok := stream.videos.Get(quality)
	if !vok {
		return false
	}
	vstream.Kill()

	//t.logger.Trace().Msgf("transcoder: Killed %s video stream in %.2fs", quality, time.Since(start).Seconds())
	return true
}
|
||||
|
||||
// KillOrphanedHeads reaps encoder heads that no client is near, on the video
// sub-stream for quality (if non-nil) and the audio sub-stream for audio
// (if not -1) of the file stream at path.
func (t *Tracker) KillOrphanedHeads(path string, quality *Quality, audio int32) {
	stream, ok := t.transcoder.streams.Get(path)
	if !ok {
		return
	}

	if quality != nil {
		vstream, vok := stream.videos.Get(*quality)
		if vok {
			t.killOrphanedHeads(&vstream.Stream)
		}
	}
	if audio != -1 {
		astream, aok := stream.audios.Get(audio)
		if aok {
			t.killOrphanedHeads(&astream.Stream)
		}
	}
}
|
||||
|
||||
func (t *Tracker) killOrphanedHeads(stream *Stream) {
|
||||
stream.headsLock.RLock()
|
||||
defer stream.headsLock.RUnlock()
|
||||
|
||||
for encoderId, head := range stream.heads {
|
||||
if head == DeletedHead {
|
||||
continue
|
||||
}
|
||||
|
||||
distance := int32(99999)
|
||||
for _, info := range t.clients {
|
||||
if info.head == -1 {
|
||||
continue
|
||||
}
|
||||
distance = min(Abs(info.head-head.segment), distance)
|
||||
}
|
||||
if distance > 20 {
|
||||
t.logger.Trace().Msgf("transcoder: Killing orphaned head %d", encoderId)
|
||||
stream.KillHead(encoderId)
|
||||
}
|
||||
}
|
||||
}
|
||||
247
seanime-2.9.10/internal/mediastream/transcoder/transcoder.go
Normal file
247
seanime-2.9.10/internal/mediastream/transcoder/transcoder.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"seanime/internal/mediastream/videofile"
|
||||
"seanime/internal/util/result"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
type (
	// Transcoder owns every live FileStream and the Tracker that reaps
	// unused ones based on client activity reported through clientChan.
	Transcoder struct {
		// All file streams currently running, index is file path
		streams *result.Map[string, *FileStream]
		// Client playback reports; consumed by the tracker's event loop.
		clientChan chan ClientInfo
		tracker    *Tracker
		logger     *zerolog.Logger
		settings   Settings
	}

	// Settings is the resolved runtime configuration shared with streams.
	Settings struct {
		StreamDir   string // directory holding generated HLS segments
		HwAccel     HwAccelSettings
		FfmpegPath  string
		FfprobePath string
	}

	// NewTranscoderOptions configures NewTranscoder.
	NewTranscoderOptions struct {
		Logger                *zerolog.Logger
		HwAccelKind           string
		Preset                string
		TempOutDir            string // parent of the "streams" working directory
		FfmpegPath            string
		FfprobePath           string
		HwAccelCustomSettings string
	}
)
|
||||
|
||||
// NewTranscoder creates the stream working directory (wiping any leftovers
// from a previous run), resolves hardware-acceleration settings, and starts
// the client tracker.
func NewTranscoder(opts *NewTranscoderOptions) (*Transcoder, error) {

	// Create a directory that'll hold the stream segments if it doesn't exist
	streamDir := filepath.Join(opts.TempOutDir, "streams")
	_ = os.MkdirAll(streamDir, 0755)

	// Clear the directory containing the streams
	dir, err := os.ReadDir(streamDir)
	if err != nil {
		return nil, err
	}
	for _, d := range dir {
		// NOTE(review): path.Join on an OS path — filepath.Join would be the
		// idiomatic choice for filesystem paths; verify behavior on Windows.
		_ = os.RemoveAll(path.Join(streamDir, d.Name()))
	}

	ret := &Transcoder{
		streams:    result.NewResultMap[string, *FileStream](),
		clientChan: make(chan ClientInfo, 1000),
		logger:     opts.Logger,
		settings: Settings{
			StreamDir: streamDir,
			HwAccel: GetHardwareAccelSettings(HwAccelOptions{
				Kind:           opts.HwAccelKind,
				Preset:         opts.Preset,
				CustomSettings: opts.HwAccelCustomSettings,
			}),
			FfmpegPath:  opts.FfmpegPath,
			FfprobePath: opts.FfprobePath,
		},
	}
	ret.tracker = NewTracker(ret)

	ret.logger.Info().Msg("transcoder: Initialized")
	return ret, nil
}
|
||||
|
||||
// GetSettings returns a pointer to the transcoder's live settings.
func (t *Transcoder) GetSettings() *Settings {
	return &t.settings
}
|
||||
|
||||
// Destroy stops all streams and removes the output directory.
|
||||
// A new transcoder should be created after calling this function.
|
||||
func (t *Transcoder) Destroy() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
}
|
||||
}()
|
||||
t.tracker.Stop()
|
||||
|
||||
t.logger.Debug().Msg("transcoder: Destroying transcoder")
|
||||
for _, s := range t.streams.Values() {
|
||||
s.Destroy()
|
||||
}
|
||||
t.streams.Clear()
|
||||
//close(t.clientChan)
|
||||
t.streams = result.NewResultMap[string, *FileStream]()
|
||||
t.clientChan = make(chan ClientInfo, 10)
|
||||
t.logger.Debug().Msg("transcoder: Transcoder destroyed")
|
||||
}
|
||||
|
||||
func (t *Transcoder) getFileStream(path string, hash string, mediaInfo *videofile.MediaInfo) (*FileStream, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Getting filestream")
|
||||
defer t.logger.Trace().Msgf("transcoder: Filestream retrieved in %.2fs", time.Since(start).Seconds())
|
||||
}
|
||||
ret, _ := t.streams.GetOrSet(path, func() (*FileStream, error) {
|
||||
return NewFileStream(path, hash, mediaInfo, &t.settings, t.logger), nil
|
||||
})
|
||||
if ret == nil {
|
||||
return nil, fmt.Errorf("could not get filestream, file may not exist")
|
||||
}
|
||||
ret.ready.Wait()
|
||||
if ret.err != nil {
|
||||
t.streams.Delete(path)
|
||||
return nil, ret.err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetMaster(path string, hash string, mediaInfo *videofile.MediaInfo, client string) (string, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Retrieving master file")
|
||||
defer t.logger.Trace().Msgf("transcoder: Master file retrieved in %.2fs", time.Since(start).Seconds())
|
||||
}
|
||||
stream, err := t.getFileStream(path, hash, mediaInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
t.clientChan <- ClientInfo{
|
||||
client: client,
|
||||
path: path,
|
||||
quality: nil,
|
||||
audio: -1,
|
||||
head: -1,
|
||||
}
|
||||
return stream.GetMaster(), nil
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetVideoIndex(
|
||||
path string,
|
||||
hash string,
|
||||
mediaInfo *videofile.MediaInfo,
|
||||
quality Quality,
|
||||
client string,
|
||||
) (string, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Retrieving video index file (%s)", quality)
|
||||
defer t.logger.Trace().Msgf("transcoder: Video index file retrieved in %.2fs", time.Since(start).Seconds())
|
||||
}
|
||||
stream, err := t.getFileStream(path, hash, mediaInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
t.clientChan <- ClientInfo{
|
||||
client: client,
|
||||
path: path,
|
||||
quality: &quality,
|
||||
audio: -1,
|
||||
head: -1,
|
||||
}
|
||||
return stream.GetVideoIndex(quality)
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetAudioIndex(
|
||||
path string,
|
||||
hash string,
|
||||
mediaInfo *videofile.MediaInfo,
|
||||
audio int32,
|
||||
client string,
|
||||
) (string, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Retrieving audio index file (%d)", audio)
|
||||
defer t.logger.Trace().Msgf("transcoder: Audio index file retrieved in %.2fs", time.Since(start).Seconds())
|
||||
}
|
||||
stream, err := t.getFileStream(path, hash, mediaInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
t.clientChan <- ClientInfo{
|
||||
client: client,
|
||||
path: path,
|
||||
audio: audio,
|
||||
head: -1,
|
||||
}
|
||||
return stream.GetAudioIndex(audio)
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetVideoSegment(
|
||||
path string,
|
||||
hash string,
|
||||
mediaInfo *videofile.MediaInfo,
|
||||
quality Quality,
|
||||
segment int32,
|
||||
client string,
|
||||
) (string, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Retrieving video segment %d (%s) [GetVideoSegment]", segment, quality)
|
||||
defer t.logger.Trace().Msgf("transcoder: Video segment retrieved in %.2fs", time.Since(start).Seconds())
|
||||
}
|
||||
stream, err := t.getFileStream(path, hash, mediaInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
//t.logger.Trace().Msgf("transcoder: Sending client info, segment %d (%s) [GetVideoSegment]", segment, quality)
|
||||
t.clientChan <- ClientInfo{
|
||||
client: client,
|
||||
path: path,
|
||||
quality: &quality,
|
||||
audio: -1,
|
||||
head: segment,
|
||||
}
|
||||
//t.logger.Trace().Msgf("transcoder: Getting video segment %d (%s) [GetVideoSegment]", segment, quality)
|
||||
return stream.GetVideoSegment(quality, segment)
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetAudioSegment(
|
||||
path string,
|
||||
hash string,
|
||||
mediaInfo *videofile.MediaInfo,
|
||||
audio int32,
|
||||
segment int32,
|
||||
client string,
|
||||
) (string, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Retrieving audio segment %d (%d)", segment, audio)
|
||||
defer t.logger.Trace().Msgf("transcoder: Audio segment %d (%d) retrieved in %.2fs", segment, audio, time.Since(start).Seconds())
|
||||
}
|
||||
stream, err := t.getFileStream(path, hash, mediaInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
t.clientChan <- ClientInfo{
|
||||
client: client,
|
||||
path: path,
|
||||
audio: audio,
|
||||
head: segment,
|
||||
}
|
||||
return stream.GetAudioSegment(audio, segment)
|
||||
}
|
||||
58
seanime-2.9.10/internal/mediastream/transcoder/utils.go
Normal file
58
seanime-2.9.10/internal/mediastream/transcoder/utils.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// ParseSegment extracts the segment number from a segment file name of the
// form "segment-<n>.ts".
func ParseSegment(segment string) (int32, error) {
	var ret int32
	// Wrap the scan error so callers can see both the offending name and
	// the underlying cause instead of an opaque fixed message.
	if _, err := fmt.Sscanf(segment, "segment-%d.ts", &ret); err != nil {
		return 0, fmt.Errorf("could not parse segment %q: %w", segment, err)
	}
	return ret, nil
}
|
||||
|
||||
func getSavedInfo[T any](savePath string, mi *T) error {
|
||||
savedFile, err := os.Open(savePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
saved, err := io.ReadAll(savedFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.Unmarshal(saved, mi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func saveInfo[T any](savePath string, mi *T) error {
|
||||
content, err := json.Marshal(*mi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// create directory if it doesn't exist
|
||||
_ = os.MkdirAll(filepath.Dir(savePath), 0755)
|
||||
return os.WriteFile(savePath, content, 0666)
|
||||
}
|
||||
|
||||
func printExecTime(logger *zerolog.Logger, message string, args ...any) func() {
|
||||
msg := fmt.Sprintf(message, args...)
|
||||
start := time.Now()
|
||||
logger.Trace().Msgf("transcoder: Running %s", msg)
|
||||
|
||||
return func() {
|
||||
logger.Trace().Msgf("transcoder: %s finished in %s", msg, time.Since(start))
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,91 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// VideoStream transcodes (or transmuxes) the video track of a file at a
// fixed quality. It embeds Stream, which drives the shared segmenting logic.
type VideoStream struct {
	Stream
	// quality is the target quality tier, or Original for a plain transmux.
	quality Quality
	logger *zerolog.Logger
	settings *Settings
}
|
||||
|
||||
func NewVideoStream(file *FileStream, quality Quality, logger *zerolog.Logger, settings *Settings) *VideoStream {
|
||||
logger.Trace().Str("file", filepath.Base(file.Path)).Any("quality", quality).Msgf("transcoder: Creating video stream")
|
||||
ret := new(VideoStream)
|
||||
ret.quality = quality
|
||||
ret.logger = logger
|
||||
ret.settings = settings
|
||||
NewStream(fmt.Sprintf("video (%s)", quality), file, ret, &ret.Stream, settings, logger)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (vs *VideoStream) getFlags() Flags {
|
||||
if vs.quality == Original {
|
||||
return VideoF | Transmux
|
||||
}
|
||||
return VideoF
|
||||
}
|
||||
|
||||
func (vs *VideoStream) getOutPath(encoderId int) string {
|
||||
return filepath.Join(vs.file.Out, fmt.Sprintf("segment-%s-%d-%%d.ts", vs.quality, encoderId))
|
||||
}
|
||||
|
||||
// closestMultiple rounds n to the nearest multiple of x, never returning
// less than x itself.
func closestMultiple(n int32, x int32) int32 {
	if x > n {
		// Floor at x (e.g. keeps a computed video width of at least 2px).
		return x
	}
	rounded := n + x/2
	return rounded - rounded%x
}
|
||||
|
||||
func (vs *VideoStream) getTranscodeArgs(segments string) []string {
|
||||
args := []string{
|
||||
"-map", "0:V:0",
|
||||
}
|
||||
|
||||
if vs.quality == Original {
|
||||
args = append(args,
|
||||
"-c:v", "copy",
|
||||
)
|
||||
vs.logger.Debug().Msg("videostream: Transcoding to original quality")
|
||||
return args
|
||||
}
|
||||
|
||||
vs.logger.Debug().Interface("hwaccelArgs", vs.settings.HwAccel).Msg("videostream: Hardware Acceleration")
|
||||
|
||||
args = append(args, vs.settings.HwAccel.EncodeFlags...)
|
||||
width := int32(float64(vs.quality.Height()) / float64(vs.file.Info.Video.Height) * float64(vs.file.Info.Video.Width))
|
||||
// force a width that is a multiple of two else some apps behave badly.
|
||||
width = closestMultiple(width, 2)
|
||||
args = append(args,
|
||||
"-vf", fmt.Sprintf(vs.settings.HwAccel.ScaleFilter, width, vs.quality.Height()),
|
||||
// Even less sure but buf size are 5x the average bitrate since the average bitrate is only
|
||||
// useful for hls segments.
|
||||
"-bufsize", fmt.Sprint(vs.quality.MaxBitrate()*5),
|
||||
"-b:v", fmt.Sprint(vs.quality.AverageBitrate()),
|
||||
"-maxrate", fmt.Sprint(vs.quality.MaxBitrate()),
|
||||
)
|
||||
if vs.settings.HwAccel.WithForcedIdr {
|
||||
// Force segments to be split exactly on keyframes (only works when transcoding)
|
||||
// forced-idr is needed to force keyframes to be an idr-frame (by default it can be any i frames)
|
||||
// without this option, some hardware encoders uses others i-frames and the -f segment can't cut at them.
|
||||
args = append(args, "-forced-idr", "1")
|
||||
}
|
||||
|
||||
args = append(args,
|
||||
"-force_key_frames", segments,
|
||||
// make ffmpeg globally less buggy
|
||||
"-strict", "-2",
|
||||
)
|
||||
|
||||
vs.logger.Debug().Interface("args", args).Msgf("videostream: Transcoding to %s quality", vs.quality)
|
||||
|
||||
return args
|
||||
}
|
||||
Reference in New Issue
Block a user