node build fixed
This commit is contained in:
74
seanime-2.9.10/internal/mediastream/attachments.go
Normal file
74
seanime-2.9.10/internal/mediastream/attachments.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package mediastream
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"seanime/internal/events"
|
||||
"seanime/internal/mediastream/videofile"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
func (r *Repository) ServeEchoExtractedSubtitles(c echo.Context) error {
|
||||
|
||||
if !r.IsInitialized() {
|
||||
r.wsEventManager.SendEvent(events.MediastreamShutdownStream, "Module not initialized")
|
||||
return errors.New("module not initialized")
|
||||
}
|
||||
|
||||
if !r.TranscoderIsInitialized() {
|
||||
r.wsEventManager.SendEvent(events.MediastreamShutdownStream, "Transcoder not initialized")
|
||||
return errors.New("transcoder not initialized")
|
||||
}
|
||||
|
||||
// Get the parameter group
|
||||
subFilePath := c.Param("*")
|
||||
|
||||
// Get current media
|
||||
mediaContainer, found := r.playbackManager.currentMediaContainer.Get()
|
||||
if !found {
|
||||
return errors.New("no file has been loaded")
|
||||
}
|
||||
|
||||
retPath := videofile.GetFileSubsCacheDir(r.cacheDir, mediaContainer.Hash)
|
||||
|
||||
if retPath == "" {
|
||||
return errors.New("could not find subtitles")
|
||||
}
|
||||
|
||||
r.logger.Trace().Msgf("mediastream: Serving subtitles from %s", retPath)
|
||||
|
||||
return c.File(filepath.Join(retPath, subFilePath))
|
||||
}
|
||||
|
||||
func (r *Repository) ServeEchoExtractedAttachments(c echo.Context) error {
|
||||
if !r.IsInitialized() {
|
||||
r.wsEventManager.SendEvent(events.MediastreamShutdownStream, "Module not initialized")
|
||||
return errors.New("module not initialized")
|
||||
}
|
||||
|
||||
if !r.TranscoderIsInitialized() {
|
||||
r.wsEventManager.SendEvent(events.MediastreamShutdownStream, "Transcoder not initialized")
|
||||
return errors.New("transcoder not initialized")
|
||||
}
|
||||
|
||||
// Get the parameter group
|
||||
subFilePath := c.Param("*")
|
||||
|
||||
// Get current media
|
||||
mediaContainer, found := r.playbackManager.currentMediaContainer.Get()
|
||||
if !found {
|
||||
return errors.New("no file has been loaded")
|
||||
}
|
||||
|
||||
retPath := videofile.GetFileAttCacheDir(r.cacheDir, mediaContainer.Hash)
|
||||
|
||||
if retPath == "" {
|
||||
return errors.New("could not find subtitles")
|
||||
}
|
||||
|
||||
subFilePath, _ = url.PathUnescape(subFilePath)
|
||||
|
||||
return c.File(filepath.Join(retPath, subFilePath))
|
||||
}
|
||||
89
seanime-2.9.10/internal/mediastream/directplay.go
Normal file
89
seanime-2.9.10/internal/mediastream/directplay.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package mediastream
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"seanime/internal/events"
|
||||
"seanime/internal/util"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Direct
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// ServeEchoFile serves an arbitrary local file over HTTP, restricted to the
// given library directories.
//
// rawFilePath comes from the client and may be URL-escaped or base64-encoded.
// The request is rejected with 404 when the resolved path is not under any of
// libraryPaths — this is the guard that prevents serving files outside the
// user's library. clientId is currently unused here.
func (r *Repository) ServeEchoFile(c echo.Context, rawFilePath string, clientId string, libraryPaths []string) error {
	// Unescape the file path, ignore errors
	filePath, _ := url.PathUnescape(rawFilePath)

	// If the file path is base64 encoded, decode it
	if util.IsBase64(rawFilePath) {
		var err error
		filePath, err = util.Base64DecodeStr(rawFilePath)
		if err != nil {
			// this shouldn't happen, but just in case IsBase64 is wrong
			filePath, _ = url.PathUnescape(rawFilePath)
		}
	}

	// Make sure the file is in the library directories
	inLibrary := false
	for _, libraryPath := range libraryPaths {
		if util.IsFileUnderDir(filePath, libraryPath) {
			inLibrary = true
			break
		}
	}

	if !inLibrary {
		// Deliberately indistinguishable from "file does not exist".
		return c.NoContent(http.StatusNotFound)
	}

	r.logger.Trace().Str("filepath", filePath).Str("payload", rawFilePath).Msg("mediastream: Served file")
	// Content disposition
	// Suggest inline display with the real file name to the client.
	filename := filepath.Base(filePath)
	c.Response().Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=\"%s\"", filename))

	return c.File(filePath)
}
|
||||
|
||||
// ServeEchoDirectPlay serves the currently loaded media file without
// transcoding. HEAD requests only report the file's size and headers so the
// player can probe the stream; any other method streams the file itself.
func (r *Repository) ServeEchoDirectPlay(c echo.Context, clientId string) error {
	if !r.IsInitialized() {
		r.wsEventManager.SendEvent(events.MediastreamShutdownStream, "Module not initialized")
		return errors.New("module not initialized")
	}

	// Get current media
	mediaContainer, found := r.playbackManager.currentMediaContainer.Get()
	if !found {
		r.wsEventManager.SendEvent(events.MediastreamShutdownStream, "no file has been loaded")
		return errors.New("no file has been loaded")
	}

	if c.Request().Method == http.MethodHead {
		r.logger.Trace().Msg("mediastream: Received HEAD request for direct play")

		// Get the file size
		fileInfo, err := os.Stat(mediaContainer.Filepath)
		if err != nil {
			r.logger.Error().Msg("mediastream: Failed to get file info")
			return c.NoContent(http.StatusInternalServerError)
		}

		// Set the content length
		c.Response().Header().Set("Content-Length", fmt.Sprintf("%d", fileInfo.Size()))
		// NOTE(review): Content-Type is hard-coded to video/mp4 regardless of
		// the actual container (e.g. mkv) — confirm players tolerate this.
		c.Response().Header().Set("Content-Type", "video/mp4")
		c.Response().Header().Set("Accept-Ranges", "bytes")
		filename := filepath.Base(mediaContainer.Filepath)
		c.Response().Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=\"%s\"", filename))
		return c.NoContent(http.StatusOK)
	}

	return c.File(mediaContainer.Filepath)
}
|
||||
87
seanime-2.9.10/internal/mediastream/optimizer/optimizer.go
Normal file
87
seanime-2.9.10/internal/mediastream/optimizer/optimizer.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package optimizer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/samber/mo"
|
||||
"seanime/internal/events"
|
||||
"seanime/internal/mediastream/videofile"
|
||||
"seanime/internal/util"
|
||||
)
|
||||
|
||||
// Quality levels accepted for pre-transcoding; each maps to an ffmpeg preset
// via qualityToPreset.
const (
	QualityLow    Quality = "low"
	QualityMedium Quality = "medium"
	QualityHigh   Quality = "high"
	QualityMax    Quality = "max"
)

type (
	// Quality is the user-selected optimization quality level.
	Quality string

	// Optimizer pre-transcodes library files in the background.
	Optimizer struct {
		wsEventManager  events.WSEventManagerInterface // reports progress/events to the client
		logger          *zerolog.Logger
		libraryDir      mo.Option[string] // unset until SetLibraryDir is called
		concurrentTasks int               // max optimization tasks allowed to run at once
	}

	// NewOptimizerOptions groups the dependencies needed by NewOptimizer.
	NewOptimizerOptions struct {
		Logger         *zerolog.Logger
		WSEventManager events.WSEventManagerInterface
	}
)
|
||||
|
||||
func NewOptimizer(opts *NewOptimizerOptions) *Optimizer {
|
||||
ret := &Optimizer{
|
||||
logger: opts.Logger,
|
||||
wsEventManager: opts.WSEventManager,
|
||||
libraryDir: mo.None[string](),
|
||||
concurrentTasks: 2,
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// SetLibraryDir sets the directory optimized (pre-transcoded) files belong to;
// StartMediaOptimization refuses to run until it has been set.
func (o *Optimizer) SetLibraryDir(libraryDir string) {
	o.libraryDir = mo.Some[string](libraryDir)
}
|
||||
|
||||
/////////////
|
||||
|
||||
// StartMediaOptimizationOptions describes a request to pre-transcode a file.
type StartMediaOptimizationOptions struct {
	Filepath          string               // absolute path of the media file to optimize
	Quality           Quality              // target quality level (maps to an ffmpeg preset)
	AudioChannelIndex int                  // index of the audio track to use
	MediaInfo         *videofile.MediaInfo // probed media information for the file
}
|
||||
|
||||
// StartMediaOptimization validates a request to pre-transcode a media file.
// The actual transcoding work is not implemented yet; currently this only
// checks preconditions and returns.
//
// It returns an error when the library directory has not been set (see
// SetLibraryDir) or when no file path was provided. A panic inside this
// function is converted into an error on the named return by the deferred
// handler, so the named result must be kept.
func (o *Optimizer) StartMediaOptimization(opts *StartMediaOptimizationOptions) (err error) {
	defer util.HandlePanicInModuleWithError("mediastream/optimizer/StartMediaOptimization", &err)

	o.logger.Debug().Any("opts", opts).Msg("mediastream: Starting media optimization")

	if !o.libraryDir.IsPresent() {
		return fmt.Errorf("library directory not set")
	}

	if opts.Filepath == "" {
		return fmt.Errorf("no filepath")
	}

	return
}
|
||||
|
||||
func qualityToPreset(quality Quality) string {
|
||||
switch quality {
|
||||
case QualityLow:
|
||||
return "ultrafast"
|
||||
case QualityMedium:
|
||||
return "veryfast"
|
||||
case QualityHigh:
|
||||
return "fast"
|
||||
case QualityMax:
|
||||
return "medium"
|
||||
default:
|
||||
return "veryfast"
|
||||
}
|
||||
}
|
||||
176
seanime-2.9.10/internal/mediastream/playback.go
Normal file
176
seanime-2.9.10/internal/mediastream/playback.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package mediastream
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"seanime/internal/mediastream/videofile"
|
||||
"seanime/internal/util/result"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/samber/mo"
|
||||
)
|
||||
|
||||
const (
	StreamTypeTranscode StreamType = "transcode" // On-the-fly transcoding
	StreamTypeOptimized StreamType = "optimized" // Pre-transcoded
	StreamTypeDirect    StreamType = "direct"    // Direct streaming
)

type (
	// StreamType tells the frontend (and routing layer) how a media file will
	// be delivered.
	StreamType string

	// PlaybackManager tracks the media currently being streamed and caches
	// previously built media containers by file hash.
	PlaybackManager struct {
		logger                *zerolog.Logger
		currentMediaContainer mo.Option[*MediaContainer] // The current media being played.
		repository            *Repository
		mediaContainers       *result.Map[string, *MediaContainer] // Temporary cache for the media containers.
	}

	// PlaybackState is the serializable playback status exposed to clients.
	PlaybackState struct {
		MediaId int `json:"mediaId"` // The media ID
	}

	// MediaContainer bundles everything the frontend needs to start playing a
	// file: location, identity hash, delivery method, and probed media info.
	MediaContainer struct {
		Filepath   string               `json:"filePath"`
		Hash       string               `json:"hash"`
		StreamType StreamType           `json:"streamType"` // Tells the frontend how to play the media.
		StreamUrl  string               `json:"streamUrl"`  // The relative endpoint to stream the media.
		MediaInfo  *videofile.MediaInfo `json:"mediaInfo"`
		//Metadata *Metadata `json:"metadata"`
		// todo: add more fields (e.g. metadata)
	}
)
|
||||
|
||||
func NewPlaybackManager(repository *Repository) *PlaybackManager {
|
||||
return &PlaybackManager{
|
||||
logger: repository.logger,
|
||||
repository: repository,
|
||||
mediaContainers: result.NewResultMap[string, *MediaContainer](),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PlaybackManager) KillPlayback() {
|
||||
p.logger.Debug().Msg("mediastream: Killing playback")
|
||||
if p.currentMediaContainer.IsPresent() {
|
||||
p.currentMediaContainer = mo.None[*MediaContainer]()
|
||||
p.logger.Trace().Msg("mediastream: Removed current media container")
|
||||
}
|
||||
}
|
||||
|
||||
// RequestPlayback is called by the frontend to stream a media file
|
||||
func (p *PlaybackManager) RequestPlayback(filepath string, streamType StreamType) (ret *MediaContainer, err error) {
|
||||
|
||||
p.logger.Debug().Str("filepath", filepath).Any("type", streamType).Msg("mediastream: Requesting playback")
|
||||
|
||||
// Create a new media container
|
||||
ret, err = p.newMediaContainer(filepath, streamType)
|
||||
|
||||
if err != nil {
|
||||
p.logger.Error().Err(err).Msg("mediastream: Failed to create media container")
|
||||
return nil, fmt.Errorf("failed to create media container: %v", err)
|
||||
}
|
||||
|
||||
// Set the current media container.
|
||||
p.currentMediaContainer = mo.Some(ret)
|
||||
|
||||
p.logger.Info().Str("filepath", filepath).Msg("mediastream: Ready to play media")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// PreloadPlayback is called by the frontend to preload a media container so that the data is stored in advanced
|
||||
func (p *PlaybackManager) PreloadPlayback(filepath string, streamType StreamType) (ret *MediaContainer, err error) {
|
||||
|
||||
p.logger.Debug().Str("filepath", filepath).Any("type", streamType).Msg("mediastream: Preloading playback")
|
||||
|
||||
// Create a new media container
|
||||
ret, err = p.newMediaContainer(filepath, streamType)
|
||||
|
||||
if err != nil {
|
||||
p.logger.Error().Err(err).Msg("mediastream: Failed to create media container")
|
||||
return nil, fmt.Errorf("failed to create media container: %v", err)
|
||||
}
|
||||
|
||||
p.logger.Info().Str("filepath", filepath).Msg("mediastream: Ready to play media")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Optimize
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// newMediaContainer builds the MediaContainer for a file: it hashes the file,
// probes its media info, extracts embedded attachments (fonts/subtitles) into
// the cache directory, picks the stream URL matching the requested stream
// type, and caches the result by hash.
//
// The cache entry is only reused when it was created for the same stream type,
// since StreamUrl differs per type.
func (p *PlaybackManager) newMediaContainer(filepath string, streamType StreamType) (ret *MediaContainer, err error) {
	p.logger.Debug().Str("filepath", filepath).Any("type", streamType).Msg("mediastream: New media container requested")
	// Get the hash of the file.
	hash, err := videofile.GetHashFromPath(filepath)
	if err != nil {
		return nil, err
	}

	p.logger.Trace().Str("hash", hash).Msg("mediastream: Checking cache")

	// Check the cache ONLY if the stream type is the same.
	if mc, ok := p.mediaContainers.Get(hash); ok && mc.StreamType == streamType {
		p.logger.Debug().Str("hash", hash).Msg("mediastream: Media container cache HIT")
		return mc, nil
	}

	p.logger.Trace().Str("hash", hash).Msg("mediastream: Creating media container")

	// Get the media information of the file.
	ret = &MediaContainer{
		Filepath:   filepath,
		Hash:       hash,
		StreamType: streamType,
	}

	p.logger.Debug().Msg("mediastream: Extracting media info")

	// NOTE(review): MustGet panics when settings are absent — callers reach
	// this only after the repository's IsInitialized check.
	ret.MediaInfo, err = p.repository.mediaInfoExtractor.GetInfo(p.repository.settings.MustGet().FfprobePath, filepath)
	if err != nil {
		return nil, err
	}

	p.logger.Debug().Msg("mediastream: Extracted media info, extracting attachments")

	// Extract the attachments from the file.
	err = videofile.ExtractAttachment(p.repository.settings.MustGet().FfmpegPath, filepath, hash, ret.MediaInfo, p.repository.cacheDir, p.logger)
	if err != nil {
		p.logger.Error().Err(err).Msg("mediastream: Failed to extract attachments")
		return nil, err
	}

	p.logger.Debug().Msg("mediastream: Extracted attachments")

	// Pick the relative endpoint the frontend should hit for this stream type.
	streamUrl := ""
	switch streamType {
	case StreamTypeDirect:
		// Directly serve the file.
		streamUrl = "/api/v1/mediastream/direct"
	case StreamTypeTranscode:
		// Live transcode the file.
		streamUrl = "/api/v1/mediastream/transcode/master.m3u8"
	case StreamTypeOptimized:
		// TODO: Check if the file is already transcoded when the feature is implemented.
		// ...
		streamUrl = "/api/v1/mediastream/hls/master.m3u8"
	}

	// TODO: Add metadata to the media container.
	// ...

	if streamUrl == "" {
		return nil, errors.New("invalid stream type")
	}

	// Set the stream URL.
	ret.StreamUrl = streamUrl

	// Store the media container in the map.
	p.mediaContainers.Set(hash, ret)

	return
}
|
||||
284
seanime-2.9.10/internal/mediastream/repository.go
Normal file
284
seanime-2.9.10/internal/mediastream/repository.go
Normal file
@@ -0,0 +1,284 @@
|
||||
package mediastream
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/samber/mo"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"seanime/internal/database/models"
|
||||
"seanime/internal/events"
|
||||
"seanime/internal/mediastream/optimizer"
|
||||
"seanime/internal/mediastream/transcoder"
|
||||
"seanime/internal/mediastream/videofile"
|
||||
"seanime/internal/util/filecache"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type (
	// Repository is the entry point of the mediastream module. It owns the
	// transcoder and optimizer submodules, the playback manager, and the
	// directories used for cached attachments and transcoded segments.
	Repository struct {
		transcoder         mo.Option[*transcoder.Transcoder] // present only after a successful initializeTranscoder
		optimizer          *optimizer.Optimizer
		settings           mo.Option[*models.MediastreamSettings] // empty until InitializeModules runs
		playbackManager    *PlaybackManager
		mediaInfoExtractor *videofile.MediaInfoExtractor
		logger             *zerolog.Logger
		wsEventManager     events.WSEventManagerInterface
		fileCacher         *filecache.Cacher
		reqMu              sync.Mutex // serializes stream requests and transcode-dir clearing
		cacheDir           string     // where attachments are stored
		transcodeDir       string     // where stream segments are stored
	}

	// NewRepositoryOptions groups the dependencies needed by NewRepository.
	NewRepositoryOptions struct {
		Logger         *zerolog.Logger
		WSEventManager events.WSEventManagerInterface
		FileCacher     *filecache.Cacher
	}
)
|
||||
|
||||
func NewRepository(opts *NewRepositoryOptions) *Repository {
|
||||
ret := &Repository{
|
||||
logger: opts.Logger,
|
||||
optimizer: optimizer.NewOptimizer(&optimizer.NewOptimizerOptions{
|
||||
Logger: opts.Logger,
|
||||
WSEventManager: opts.WSEventManager,
|
||||
}),
|
||||
settings: mo.None[*models.MediastreamSettings](),
|
||||
transcoder: mo.None[*transcoder.Transcoder](),
|
||||
wsEventManager: opts.WSEventManager,
|
||||
fileCacher: opts.FileCacher,
|
||||
mediaInfoExtractor: videofile.NewMediaInfoExtractor(opts.FileCacher, opts.Logger),
|
||||
}
|
||||
ret.playbackManager = NewPlaybackManager(ret)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// IsInitialized reports whether InitializeModules has been called with valid
// settings; most endpoints refuse to serve until this is true.
func (r *Repository) IsInitialized() bool {
	return r.settings.IsPresent()
}

// OnCleanup is the module's shutdown hook; there is currently nothing to
// release.
func (r *Repository) OnCleanup() {

}
|
||||
|
||||
func (r *Repository) InitializeModules(settings *models.MediastreamSettings, cacheDir string, transcodeDir string) {
|
||||
if settings == nil {
|
||||
r.logger.Error().Msg("mediastream: Settings not present")
|
||||
return
|
||||
}
|
||||
// Create the temp directory
|
||||
err := os.MkdirAll(transcodeDir, 0755)
|
||||
if err != nil {
|
||||
r.logger.Error().Err(err).Msg("mediastream: Failed to create transcode directory")
|
||||
}
|
||||
|
||||
if settings.FfmpegPath == "" {
|
||||
settings.FfmpegPath = "ffmpeg"
|
||||
}
|
||||
|
||||
if settings.FfprobePath == "" {
|
||||
settings.FfprobePath = "ffprobe"
|
||||
}
|
||||
|
||||
// Set the settings
|
||||
r.settings = mo.Some[*models.MediastreamSettings](settings)
|
||||
|
||||
r.cacheDir = cacheDir
|
||||
r.transcodeDir = transcodeDir
|
||||
|
||||
// Set the optimizer settings
|
||||
r.optimizer.SetLibraryDir(settings.PreTranscodeLibraryDir)
|
||||
|
||||
// Initialize the transcoder
|
||||
if ok := r.initializeTranscoder(r.settings); ok {
|
||||
}
|
||||
|
||||
r.logger.Info().Msg("mediastream: Module initialized")
|
||||
}
|
||||
|
||||
// CacheWasCleared should be called when the cache directory is manually cleared.
// It drops the in-memory media container cache, whose entries reference
// extracted attachments that no longer exist on disk.
func (r *Repository) CacheWasCleared() {
	r.playbackManager.mediaContainers.Clear()
}
|
||||
|
||||
func (r *Repository) ClearTranscodeDir() {
|
||||
r.reqMu.Lock()
|
||||
defer r.reqMu.Unlock()
|
||||
|
||||
r.logger.Trace().Msg("mediastream: Clearing transcode directory")
|
||||
|
||||
// Empty the transcode directory
|
||||
if r.transcodeDir != "" {
|
||||
files, err := os.ReadDir(r.transcodeDir)
|
||||
if err != nil {
|
||||
r.logger.Error().Err(err).Msg("mediastream: Failed to read transcode directory")
|
||||
return
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
err = os.RemoveAll(filepath.Join(r.transcodeDir, file.Name()))
|
||||
if err != nil {
|
||||
r.logger.Error().Err(err).Msg("mediastream: Failed to remove file from transcode directory")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r.logger.Debug().Msg("mediastream: Transcode directory cleared")
|
||||
|
||||
r.playbackManager.mediaContainers.Clear()
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Optimize
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// StartMediaOptimizationOptions describes a request to pre-transcode a file.
type StartMediaOptimizationOptions struct {
	Filepath          string            // absolute path of the media file to optimize
	Quality           optimizer.Quality // target quality level
	AudioChannelIndex int               // index of the audio track to use
}
|
||||
|
||||
func (r *Repository) StartMediaOptimization(opts *StartMediaOptimizationOptions) (err error) {
|
||||
if !r.IsInitialized() {
|
||||
return errors.New("module not initialized")
|
||||
}
|
||||
|
||||
mediaInfo, err := r.mediaInfoExtractor.GetInfo(r.settings.MustGet().FfmpegPath, opts.Filepath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = r.optimizer.StartMediaOptimization(&optimizer.StartMediaOptimizationOptions{
|
||||
Filepath: opts.Filepath,
|
||||
Quality: opts.Quality,
|
||||
MediaInfo: mediaInfo,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Repository) RequestOptimizedStream(filepath string) (ret *MediaContainer, err error) {
|
||||
if !r.IsInitialized() {
|
||||
return nil, errors.New("module not initialized")
|
||||
}
|
||||
|
||||
ret, err = r.playbackManager.RequestPlayback(filepath, StreamTypeOptimized)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Transcode
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// TranscoderIsInitialized reports whether both the module settings and the
// real-time transcoder are ready.
func (r *Repository) TranscoderIsInitialized() bool {
	return r.IsInitialized() && r.transcoder.IsPresent()
}
|
||||
|
||||
// RequestTranscodeStream prepares an on-the-fly transcode of the given file
// and returns its media container. It serializes with other stream requests
// via reqMu and recreates the transcoder for every request so each playback
// starts from a clean transcoder state. clientId is currently unused here.
func (r *Repository) RequestTranscodeStream(filepath string, clientId string) (ret *MediaContainer, err error) {
	r.reqMu.Lock()
	defer r.reqMu.Unlock()

	r.logger.Debug().Str("filepath", filepath).Msg("mediastream: Transcode stream requested")

	if !r.IsInitialized() {
		return nil, errors.New("module not initialized")
	}

	// Reinitialize the transcoder for each new transcode request
	if ok := r.initializeTranscoder(r.settings); !ok {
		return nil, errors.New("real-time transcoder not initialized, check your settings")
	}

	ret, err = r.playbackManager.RequestPlayback(filepath, StreamTypeTranscode)

	return
}
|
||||
|
||||
func (r *Repository) RequestPreloadTranscodeStream(filepath string) (err error) {
|
||||
r.logger.Debug().Str("filepath", filepath).Msg("mediastream: Transcode stream preloading requested")
|
||||
|
||||
if !r.IsInitialized() {
|
||||
return errors.New("module not initialized")
|
||||
}
|
||||
|
||||
_, err = r.playbackManager.PreloadPlayback(filepath, StreamTypeTranscode)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Direct Play
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
func (r *Repository) RequestDirectPlay(filepath string, clientId string) (ret *MediaContainer, err error) {
|
||||
r.reqMu.Lock()
|
||||
defer r.reqMu.Unlock()
|
||||
|
||||
r.logger.Debug().Str("filepath", filepath).Msg("mediastream: Direct play requested")
|
||||
|
||||
if !r.IsInitialized() {
|
||||
return nil, errors.New("module not initialized")
|
||||
}
|
||||
|
||||
ret, err = r.playbackManager.RequestPlayback(filepath, StreamTypeDirect)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Repository) RequestPreloadDirectPlay(filepath string) (err error) {
|
||||
r.logger.Debug().Str("filepath", filepath).Msg("mediastream: Direct stream preloading requested")
|
||||
|
||||
if !r.IsInitialized() {
|
||||
return errors.New("module not initialized")
|
||||
}
|
||||
|
||||
_, err = r.playbackManager.PreloadPlayback(filepath, StreamTypeDirect)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// initializeTranscoder tears down any existing transcoder and builds a fresh
// one from the given settings. It returns true when a transcoder was created,
// false when transcoding is disabled, the transcode directory is unset, or
// construction failed (failures are logged here).
//
// NOTE(review): settings.MustGet() panics when the option is empty — callers
// must only invoke this after r.settings has been populated, as
// InitializeModules does.
func (r *Repository) initializeTranscoder(settings mo.Option[*models.MediastreamSettings]) bool {
	// Destroy the old transcoder if it exists
	if r.transcoder.IsPresent() {
		tc, _ := r.transcoder.Get()
		tc.Destroy()
	}

	r.transcoder = mo.None[*transcoder.Transcoder]()

	// If the transcoder is not enabled, don't initialize the transcoder
	if !settings.MustGet().TranscodeEnabled {
		return false
	}

	// If the temp directory is not set, don't initialize the transcoder
	if r.transcodeDir == "" {
		r.logger.Error().Msg("mediastream: Transcode directory not set, could not initialize transcoder")
		return false
	}

	opts := &transcoder.NewTranscoderOptions{
		Logger:                r.logger,
		HwAccelKind:           settings.MustGet().TranscodeHwAccel,
		Preset:                settings.MustGet().TranscodePreset,
		FfmpegPath:            settings.MustGet().FfmpegPath,
		FfprobePath:           settings.MustGet().FfprobePath,
		HwAccelCustomSettings: settings.MustGet().TranscodeHwAccelCustomSettings,
		TempOutDir:            r.transcodeDir,
	}

	tc, err := transcoder.NewTranscoder(opts)
	if err != nil {
		r.logger.Error().Err(err).Msg("mediastream: Failed to initialize transcoder")
		return false
	}

	r.logger.Info().Msg("mediastream: Transcoder module initialized")
	r.transcoder = mo.Some[*transcoder.Transcoder](tc)

	return true
}
|
||||
46
seanime-2.9.10/internal/mediastream/trans_test.go
Normal file
46
seanime-2.9.10/internal/mediastream/trans_test.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package mediastream
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/xfrr/goffmpeg/transcoder"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestTrans is a manual experiment with the goffmpeg transcoder, kept as a
// reference for the HLS flags used by the module. It relies on hard-coded
// local Windows paths and is therefore always skipped.
func TestTrans(t *testing.T) {
	t.Skip("Do not run")
	var dest = "E:\\TRANSCODING_TEMP\\id\\index.m3u8"
	var videopath = "E:\\ANIME\\Dungeon Meshi\\[EMBER] Dungeon Meshi - 15.mkv"
	_ = os.MkdirAll(filepath.Dir(dest), 0755)

	trans := new(transcoder.Transcoder)

	err := trans.Initialize(videopath, dest)
	if err != nil {
		panic(err)
	}

	// HLS VOD output: 4-second segments, H.264 video + AAC audio, mapping only
	// the first video and first audio track.
	trans.MediaFile().SetHardwareAcceleration("auto")
	//trans.MediaFile().SetSeekTime("00:10:00")
	trans.MediaFile().SetPreset("veryfast")
	trans.MediaFile().SetVideoCodec("libx264")
	trans.MediaFile().SetHlsPlaylistType("vod")
	trans.MediaFile().SetCRF(32)
	trans.MediaFile().SetHlsMasterPlaylistName("index.m3u8")
	trans.MediaFile().SetHlsSegmentDuration(4)
	trans.MediaFile().SetHlsSegmentFilename("segment-%03d.ts")
	//trans.MediaFile().SetHlsListSize(0)
	trans.MediaFile().SetPixFmt("yuv420p")
	trans.MediaFile().SetAudioCodec("aac")
	trans.MediaFile().SetTags(map[string]string{"-map": "0:v:0 0:a:0"})

	// Run asynchronously and drain progress until the channel closes, then
	// wait for the final result.
	done := trans.Run(true)
	progress := trans.Output()
	for p := range progress {
		fmt.Println(p)
	}

	fmt.Println(<-done)

}
|
||||
176
seanime-2.9.10/internal/mediastream/transcode.go
Normal file
176
seanime-2.9.10/internal/mediastream/transcode.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package mediastream
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"seanime/internal/events"
|
||||
"seanime/internal/mediastream/transcoder"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/samber/mo"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Transcode
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// ServeEchoTranscodeStream routes all HLS requests for the current transcode
// session. The wildcard path is one of:
//
//	master.m3u8                      — master playlist
//	:quality/index.m3u8              — video media playlist
//	audio/:audio/index.m3u8          — audio media playlist
//	:quality/segments-:chunk.ts      — video segment
//	audio/:audio/segments-:chunk.ts  — audio segment
//
// Playlists are returned as response strings; segments are served as files
// produced by the transcoder.
func (r *Repository) ServeEchoTranscodeStream(c echo.Context, clientId string) error {
	if !r.IsInitialized() {
		r.wsEventManager.SendEvent(events.MediastreamShutdownStream, "Module not initialized")
		return errors.New("module not initialized")
	}

	if !r.TranscoderIsInitialized() {
		r.wsEventManager.SendEvent(events.MediastreamShutdownStream, "Transcoder not initialized")
		return errors.New("transcoder not initialized")
	}

	path := c.Param("*")

	mediaContainer, found := r.playbackManager.currentMediaContainer.Get()
	if !found {
		return errors.New("no file has been loaded")
	}

	// Master playlist.
	if path == "master.m3u8" {
		ret, err := r.transcoder.MustGet().GetMaster(mediaContainer.Filepath, mediaContainer.Hash, mediaContainer.MediaInfo, clientId)
		if err != nil {
			return err
		}

		return c.String(200, ret)
	}

	// Video stream
	// /:quality/index.m3u8
	if strings.HasSuffix(path, "index.m3u8") && !strings.Contains(path, "audio") {
		split := strings.Split(path, "/")
		if len(split) != 2 {
			return errors.New("invalid index.m3u8 path")
		}

		quality, err := transcoder.QualityFromString(split[0])
		if err != nil {
			return err
		}

		ret, err := r.transcoder.MustGet().GetVideoIndex(mediaContainer.Filepath, mediaContainer.Hash, mediaContainer.MediaInfo, quality, clientId)
		if err != nil {
			return err
		}

		return c.String(200, ret)
	}

	// Audio stream
	// /audio/:audio/index.m3u8
	if strings.HasSuffix(path, "index.m3u8") && strings.Contains(path, "audio") {
		split := strings.Split(path, "/")
		if len(split) != 3 {
			return errors.New("invalid index.m3u8 path")
		}

		// The audio track index is the middle path component.
		audio, err := strconv.ParseInt(split[1], 10, 32)
		if err != nil {
			return err
		}

		ret, err := r.transcoder.MustGet().GetAudioIndex(mediaContainer.Filepath, mediaContainer.Hash, mediaContainer.MediaInfo, int32(audio), clientId)
		if err != nil {
			return err
		}

		return c.String(200, ret)
	}

	// Video segment
	// /:quality/segments-:chunk.ts
	if strings.HasSuffix(path, ".ts") && !strings.Contains(path, "audio") {
		split := strings.Split(path, "/")
		if len(split) != 2 {
			return errors.New("invalid segments-:chunk.ts path")
		}

		quality, err := transcoder.QualityFromString(split[0])
		if err != nil {
			return err
		}

		segment, err := transcoder.ParseSegment(split[1])
		if err != nil {
			return err
		}

		ret, err := r.transcoder.MustGet().GetVideoSegment(mediaContainer.Filepath, mediaContainer.Hash, mediaContainer.MediaInfo, quality, segment, clientId)
		if err != nil {
			return err
		}

		return c.File(ret)
	}

	// Audio segment
	// /audio/:audio/segments-:chunk.ts
	if strings.HasSuffix(path, ".ts") && strings.Contains(path, "audio") {
		split := strings.Split(path, "/")
		if len(split) != 3 {
			return errors.New("invalid segments-:chunk.ts path")
		}

		audio, err := strconv.ParseInt(split[1], 10, 32)
		if err != nil {
			return err
		}

		segment, err := transcoder.ParseSegment(split[2])
		if err != nil {
			return err
		}

		ret, err := r.transcoder.MustGet().GetAudioSegment(mediaContainer.Filepath, mediaContainer.Hash, mediaContainer.MediaInfo, int32(audio), segment, clientId)
		if err != nil {
			return err
		}

		return c.File(ret)
	}

	return errors.New("invalid path")
}
|
||||
|
||||
// ShutdownTranscodeStream It should be called when unmounting the player (playback is no longer needed).
|
||||
// This will also send an events.MediastreamShutdownStream event.
|
||||
func (r *Repository) ShutdownTranscodeStream(clientId string) {
|
||||
r.reqMu.Lock()
|
||||
defer r.reqMu.Unlock()
|
||||
|
||||
if !r.IsInitialized() {
|
||||
return
|
||||
}
|
||||
|
||||
if !r.TranscoderIsInitialized() {
|
||||
return
|
||||
}
|
||||
|
||||
r.logger.Warn().Str("client_id", clientId).Msg("mediastream: Received shutdown transcode stream request")
|
||||
|
||||
if !r.playbackManager.currentMediaContainer.IsPresent() {
|
||||
return
|
||||
}
|
||||
|
||||
// Kill playback
|
||||
r.playbackManager.KillPlayback()
|
||||
|
||||
// Destroy the current transcoder
|
||||
r.transcoder.MustGet().Destroy()
|
||||
|
||||
// Load a new transcoder
|
||||
r.transcoder = mo.None[*transcoder.Transcoder]()
|
||||
r.initializeTranscoder(r.settings)
|
||||
|
||||
// Send event
|
||||
r.wsEventManager.SendEvent(events.MediastreamShutdownStream, nil)
|
||||
}
|
||||
2
seanime-2.9.10/internal/mediastream/transcoder/README.md
Normal file
2
seanime-2.9.10/internal/mediastream/transcoder/README.md
Normal file
@@ -0,0 +1,2 @@
|
||||
The transcoder implementation was adapted from [zoriya/Kyoo](https://github.com/zoriya/Kyoo/tree/master/transcoder),
|
||||
licensed under GPL-3.0.
|
||||
@@ -0,0 +1,44 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// AudioStream transcodes a single audio track of a media file into HLS segments.
// It embeds Stream, which drives the shared segment/encoder machinery.
type AudioStream struct {
	Stream
	index    int32 // zero-based audio track index within the source file
	logger   *zerolog.Logger
	settings *Settings
}
|
||||
|
||||
// NewAudioStream creates a new AudioStream for a file, at a given audio index.
|
||||
func NewAudioStream(file *FileStream, idx int32, logger *zerolog.Logger, settings *Settings) *AudioStream {
|
||||
logger.Trace().Str("file", filepath.Base(file.Path)).Int32("idx", idx).Msgf("trancoder: Creating audio stream")
|
||||
ret := new(AudioStream)
|
||||
ret.index = idx
|
||||
ret.logger = logger
|
||||
ret.settings = settings
|
||||
NewStream(fmt.Sprintf("audio %d", idx), file, ret, &ret.Stream, settings, logger)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (as *AudioStream) getOutPath(encoderId int) string {
|
||||
return filepath.Join(as.file.Out, fmt.Sprintf("segment-a%d-%d-%%d.ts", as.index, encoderId))
|
||||
}
|
||||
|
||||
// getFlags reports that this stream handles audio only.
func (as *AudioStream) getFlags() Flags {
	return AudioF
}
|
||||
|
||||
func (as *AudioStream) getTranscodeArgs(segments string) []string {
|
||||
return []string{
|
||||
"-map", fmt.Sprintf("0:a:%d", as.index),
|
||||
"-c:a", "aac",
|
||||
// TODO: Support 5.1 audio streams.
|
||||
"-ac", "2",
|
||||
// TODO: Support multi audio qualities.
|
||||
"-b:a", "128k",
|
||||
}
|
||||
}
|
||||
261
seanime-2.9.10/internal/mediastream/transcoder/filestream.go
Normal file
261
seanime-2.9.10/internal/mediastream/transcoder/filestream.go
Normal file
@@ -0,0 +1,261 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"seanime/internal/mediastream/videofile"
|
||||
"seanime/internal/util/result"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// FileStream represents a stream of file data.
// It holds the keyframes, media information, video streams, and audio streams
// for a single media file, and lazily spawns per-quality/per-track streams.
type FileStream struct {
	ready     sync.WaitGroup                     // released once the first keyframes are available
	err       error                              // an error that might occur during processing
	Path      string                             // the path of the source file
	Out       string                             // the output directory (StreamDir/<hash>)
	Keyframes *Keyframe                          // the keyframes of the video; set asynchronously by NewFileStream
	Info      *videofile.MediaInfo               // the media information of the file
	videos    *result.Map[Quality, *VideoStream] // lazily-created video streams, one per quality
	audios    *result.Map[int32, *AudioStream]   // lazily-created audio streams, one per track index
	logger    *zerolog.Logger
	settings  *Settings
}
|
||||
|
||||
// NewFileStream creates a new FileStream.
|
||||
func NewFileStream(
|
||||
path string,
|
||||
sha string,
|
||||
mediaInfo *videofile.MediaInfo,
|
||||
settings *Settings,
|
||||
logger *zerolog.Logger,
|
||||
) *FileStream {
|
||||
ret := &FileStream{
|
||||
Path: path,
|
||||
Out: filepath.Join(settings.StreamDir, sha),
|
||||
videos: result.NewResultMap[Quality, *VideoStream](),
|
||||
audios: result.NewResultMap[int32, *AudioStream](),
|
||||
logger: logger,
|
||||
settings: settings,
|
||||
Info: mediaInfo,
|
||||
}
|
||||
|
||||
ret.ready.Add(1)
|
||||
go func() {
|
||||
defer ret.ready.Done()
|
||||
ret.Keyframes = GetKeyframes(path, sha, logger, settings)
|
||||
}()
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// Kill stops all streams.
|
||||
func (fs *FileStream) Kill() {
|
||||
fs.videos.Range(func(_ Quality, s *VideoStream) bool {
|
||||
s.Kill()
|
||||
return true
|
||||
})
|
||||
fs.audios.Range(func(_ int32, s *AudioStream) bool {
|
||||
s.Kill()
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// Destroy stops all streams and removes the output directory.
// Cleanup of the on-disk segment cache is best-effort; the error is ignored.
func (fs *FileStream) Destroy() {
	fs.logger.Debug().Msg("filestream: Destroying streams")
	fs.Kill()
	_ = os.RemoveAll(fs.Out)
}
|
||||
|
||||
// GetMaster generates the master playlist.
// It emits one #EXT-X-STREAM-INF entry for the transmuxed (original) quality,
// one entry per transcoded quality at or below the source resolution, and one
// #EXT-X-MEDIA entry per audio track (all in the "audio" group).
func (fs *FileStream) GetMaster() string {
	master := "#EXTM3U\n"
	if fs.Info.Video != nil {
		// Pick the smallest listed quality that covers the source's height or
		// bitrate; its bitrates cap the bandwidth advertised for the original.
		var transmuxQuality Quality
		for _, quality := range Qualities {
			if quality.Height() >= fs.Info.Video.Quality.Height() || quality.AverageBitrate() >= fs.Info.Video.Bitrate {
				transmuxQuality = quality
				break
			}
		}
		// Original (transmuxed) variant.
		{
			bitrate := float64(fs.Info.Video.Bitrate)
			master += "#EXT-X-STREAM-INF:"
			master += fmt.Sprintf("AVERAGE-BANDWIDTH=%d,", int(math.Min(bitrate*0.8, float64(transmuxQuality.AverageBitrate()))))
			master += fmt.Sprintf("BANDWIDTH=%d,", int(math.Min(bitrate, float64(transmuxQuality.MaxBitrate()))))
			master += fmt.Sprintf("RESOLUTION=%dx%d,", fs.Info.Video.Width, fs.Info.Video.Height)
			if fs.Info.Video.MimeCodec != nil {
				master += fmt.Sprintf("CODECS=\"%s\",", *fs.Info.Video.MimeCodec)
			}
			master += "AUDIO=\"audio\","
			master += "CLOSED-CAPTIONS=NONE\n"
			master += fmt.Sprintf("./%s/index.m3u8\n", Original)
		}
		aspectRatio := float32(fs.Info.Video.Width) / float32(fs.Info.Video.Height)
		// codec is the prefix + the level, the level is not part of the codec we want to compare for the same_codec check bellow
		transmuxPrefix := "avc1.6400"
		transmuxCodec := transmuxPrefix + "28"

		for _, quality := range Qualities {
			// Skip a transcoded variant that would duplicate the original
			// (same height and same codec family).
			sameCodec := fs.Info.Video.MimeCodec != nil && strings.HasPrefix(*fs.Info.Video.MimeCodec, transmuxPrefix)
			includeLvl := quality.Height() < fs.Info.Video.Quality.Height() || (quality.Height() == fs.Info.Video.Quality.Height() && !sameCodec)

			if includeLvl {
				master += "#EXT-X-STREAM-INF:"
				master += fmt.Sprintf("AVERAGE-BANDWIDTH=%d,", quality.AverageBitrate())
				master += fmt.Sprintf("BANDWIDTH=%d,", quality.MaxBitrate())
				// Width is derived from the source aspect ratio, rounded to nearest.
				master += fmt.Sprintf("RESOLUTION=%dx%d,", int(aspectRatio*float32(quality.Height())+0.5), quality.Height())
				master += fmt.Sprintf("CODECS=\"%s\",", transmuxCodec)
				master += "AUDIO=\"audio\","
				master += "CLOSED-CAPTIONS=NONE\n"
				master += fmt.Sprintf("./%s/index.m3u8\n", quality)
			}
		}

		//for _, quality := range Qualities {
		//	if quality.Height() < fs.Info.Video.Quality.Height() && quality.AverageBitrate() < fs.Info.Video.Bitrate {
		//		master += "#EXT-X-STREAM-INF:"
		//		master += fmt.Sprintf("AVERAGE-BANDWIDTH=%d,", quality.AverageBitrate())
		//		master += fmt.Sprintf("BANDWIDTH=%d,", quality.MaxBitrate())
		//		master += fmt.Sprintf("RESOLUTION=%dx%d,", int(aspectRatio*float32(quality.Height())+0.5), quality.Height())
		//		master += "CODECS=\"avc1.640028\","
		//		master += "AUDIO=\"audio\","
		//		master += "CLOSED-CAPTIONS=NONE\n"
		//		master += fmt.Sprintf("./%s/index.m3u8\n", quality)
		//	}
		//}
	}
	// One #EXT-X-MEDIA entry per audio track. NAME falls back from Title to
	// Language to a generic "Audio N" label.
	for _, audio := range fs.Info.Audios {
		master += "#EXT-X-MEDIA:TYPE=AUDIO,"
		master += "GROUP-ID=\"audio\","
		if audio.Language != nil {
			master += fmt.Sprintf("LANGUAGE=\"%s\",", *audio.Language)
		}
		if audio.Title != nil {
			master += fmt.Sprintf("NAME=\"%s\",", *audio.Title)
		} else if audio.Language != nil {
			master += fmt.Sprintf("NAME=\"%s\",", *audio.Language)
		} else {
			master += fmt.Sprintf("NAME=\"Audio %d\",", audio.Index)
		}
		if audio.IsDefault {
			master += "DEFAULT=YES,"
		}
		master += "CHANNELS=\"2\","
		master += fmt.Sprintf("URI=\"./audio/%d/index.m3u8\"\n", audio.Index)
	}
	return master
}
|
||||
|
||||
// GetVideoIndex gets the index of a video stream of a specific quality.
|
||||
func (fs *FileStream) GetVideoIndex(quality Quality) (string, error) {
|
||||
stream := fs.getVideoStream(quality)
|
||||
return stream.GetIndex()
|
||||
}
|
||||
|
||||
// getVideoStream gets a video stream of a specific quality.
|
||||
// It creates a new stream if it does not exist.
|
||||
func (fs *FileStream) getVideoStream(quality Quality) *VideoStream {
|
||||
stream, _ := fs.videos.GetOrSet(quality, func() (*VideoStream, error) {
|
||||
return NewVideoStream(fs, quality, fs.logger, fs.settings), nil
|
||||
})
|
||||
return stream
|
||||
}
|
||||
|
||||
// GetVideoSegment gets a segment of a video stream of a specific quality.
|
||||
//func (fs *FileStream) GetVideoSegment(quality Quality, segment int32) (string, error) {
|
||||
// stream := fs.getVideoStream(quality)
|
||||
// return stream.GetSegment(segment)
|
||||
//}
|
||||
|
||||
// GetVideoSegment gets a segment of a video stream of a specific quality.
// The retrieval runs in a goroutine so the call can be bounded by a 30-second
// timeout; on timeout an error is returned instead of a path.
// NOTE(review): the worker goroutine is not cancelled on timeout — it keeps
// running until GetSegment returns on its own; confirm this is acceptable.
func (fs *FileStream) GetVideoSegment(quality Quality, segment int32) (string, error) {
	streamLogger.Debug().Msgf("filestream: Retrieving video segment %d (%s)", segment, quality)
	// Debug
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	debugStreamRequest(fmt.Sprintf("video %s, segment %d", quality, segment), ctx)

	//stream := fs.getVideoStream(quality)
	//return stream.GetSegment(segment)

	// Channel to signal completion
	done := make(chan struct{})

	var ret string
	var err error

	// Execute the retrieval operation in a goroutine.
	// ret/err are safe to read after <-done (close happens-before receive).
	go func() {
		defer close(done)
		stream := fs.getVideoStream(quality)
		ret, err = stream.GetSegment(segment)
	}()

	// Wait for either the operation to complete or the timeout to occur
	select {
	case <-done:
		return ret, err
	case <-ctx.Done():
		return "", fmt.Errorf("filestream: timeout while retrieving video segment %d (%s)", segment, quality)
	}
}
|
||||
|
||||
// GetAudioIndex gets the index of an audio stream of a specific index.
|
||||
func (fs *FileStream) GetAudioIndex(audio int32) (string, error) {
|
||||
stream := fs.getAudioStream(audio)
|
||||
return stream.GetIndex()
|
||||
}
|
||||
|
||||
// GetAudioSegment gets a segment of an audio stream of a specific index.
|
||||
func (fs *FileStream) GetAudioSegment(audio int32, segment int32) (string, error) {
|
||||
streamLogger.Debug().Msgf("filestream: Retrieving audio %d segment %d", audio, segment)
|
||||
// Debug
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
debugStreamRequest(fmt.Sprintf("audio %d, segment %d", audio, segment), ctx)
|
||||
|
||||
stream := fs.getAudioStream(audio)
|
||||
return stream.GetSegment(segment)
|
||||
}
|
||||
|
||||
// getAudioStream gets an audio stream of a specific index.
|
||||
// It creates a new stream if it does not exist.
|
||||
func (fs *FileStream) getAudioStream(audio int32) *AudioStream {
|
||||
stream, _ := fs.audios.GetOrSet(audio, func() (*AudioStream, error) {
|
||||
return NewAudioStream(fs, audio, fs.logger, fs.settings), nil
|
||||
})
|
||||
return stream
|
||||
}
|
||||
|
||||
func debugStreamRequest(text string, ctx context.Context) {
|
||||
//ctx, cancel := context.WithCancel(context.Background())
|
||||
//defer cancel()
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
ticker := time.NewTicker(2 * time.Second)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
ticker.Stop()
|
||||
return
|
||||
case <-ticker.C:
|
||||
if debugStream {
|
||||
time.Sleep(2 * time.Second)
|
||||
streamLogger.Debug().Msgf("t: %s has been running for %.2f", text, time.Since(start).Seconds())
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
149
seanime-2.9.10/internal/mediastream/transcoder/hwaccel.go
Normal file
149
seanime-2.9.10/internal/mediastream/transcoder/hwaccel.go
Normal file
@@ -0,0 +1,149 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
)
|
||||
|
||||
type (
	// HwAccelOptions selects which hardware-acceleration backend the
	// transcoder should use and how it is configured.
	HwAccelOptions struct {
		Kind           string // backend name, e.g. "auto", "cpu", "vaapi", "qsv", "nvidia", "videotoolbox", "custom"
		Preset         string // encoder preset; unused by backends without presets (e.g. vaapi)
		CustomSettings string // JSON-encoded HwAccelSettings, only used when Kind == "custom"
	}
)
|
||||
|
||||
// GetHardwareAccelSettings resolves HwAccelOptions into the concrete ffmpeg
// decode/encode flags and scale filter for the chosen backend.
// "auto"/"cpu"/"none"/empty all mean software encoding ("disabled").
// An unrecognized backend name is fatal.
func GetHardwareAccelSettings(opts HwAccelOptions) HwAccelSettings {
	name := opts.Kind
	if name == "" || name == "auto" || name == "cpu" || name == "none" {
		name = "disabled"
	}
	streamLogger.Debug().Msgf("transcoder: Hardware acceleration: %s", name)

	// "custom" takes a user-supplied JSON HwAccelSettings; on parse failure
	// (or when no settings are provided) fall back to software encoding.
	var customHwAccelSettings HwAccelSettings
	if opts.CustomSettings != "" && name == "custom" {
		err := json.Unmarshal([]byte(opts.CustomSettings), &customHwAccelSettings)
		if err != nil {
			streamLogger.Error().Err(err).Msg("transcoder: Failed to parse custom hardware acceleration settings, falling back to CPU")
			name = "disabled"
		}
		customHwAccelSettings.Name = "custom"
	} else if opts.CustomSettings == "" && name == "custom" {
		name = "disabled"
	}

	// Default GPU render device; on Windows let ffmpeg pick one.
	defaultOSDevice := "/dev/dri/renderD128"
	switch runtime.GOOS {
	case "windows":
		defaultOSDevice = "auto"
	}

	// superfast or ultrafast would produce heavy files, so opt for "fast" by default.
	// vaapi does not have any presets so this flag is unused for vaapi hwaccel.
	preset := opts.Preset

	switch name {
	case "disabled":
		return HwAccelSettings{
			Name:        "disabled",
			DecodeFlags: []string{},
			EncodeFlags: []string{
				"-c:v", "libx264",
				"-preset", preset,
				// sc_threshold is a scene detection mechanism used to create a keyframe when the scene changes
				// this is on by default and inserts keyframes where we don't want to (it also breaks force_key_frames)
				// we disable it to prevents whole scenes from being removed due to the -f segment failing to find the corresponding keyframe
				"-sc_threshold", "0",
				// force 8bits output (by default it keeps the same as the source but 10bits is not playable on some devices)
				"-pix_fmt", "yuv420p",
			},
			// we could put :force_original_aspect_ratio=decrease:force_divisible_by=2 here but we already calculate a correct width and
			// aspect ratio in our code so there is no need.
			ScaleFilter:   "scale=%d:%d",
			WithForcedIdr: true,
		}
	case "vaapi":
		return HwAccelSettings{
			Name: name,
			DecodeFlags: []string{
				"-hwaccel", "vaapi",
				"-hwaccel_device", GetEnvOr("SEANIME_TRANSCODER_VAAPI_RENDERER", defaultOSDevice),
				"-hwaccel_output_format", "vaapi",
			},
			EncodeFlags: []string{
				// h264_vaapi does not have any preset or scenecut flags.
				"-c:v", "h264_vaapi",
			},
			// if the hardware decoder could not work and fallback to soft decode, we need to instruct ffmpeg to
			// upload back frames to gpu space (after converting them)
			// see https://trac.ffmpeg.org/wiki/Hardware/VAAPI#Encoding for more info
			// we also need to force the format to be nv12 since 10bits is not supported via hwaccel.
			// this filter is equivalent to this pseudocode:
			// if (vaapi) {
			//   hwupload, passthrough, keep vaapi as is
			//   convert whatever to nv12 on GPU
			// } else {
			//   convert whatever to nv12 on CPU
			//   hwupload to vaapi(nv12)
			//   convert whatever to nv12 on GPU // scale_vaapi doesn't support passthrough option, so it has to make a copy
			// }
			// See https://www.reddit.com/r/ffmpeg/comments/1bqn60w/hardware_accelerated_decoding_without_hwdownload/ for more info
			ScaleFilter:   "format=nv12|vaapi,hwupload,scale_vaapi=%d:%d:format=nv12",
			WithForcedIdr: true,
		}
	case "qsv", "intel":
		return HwAccelSettings{
			Name: name,
			DecodeFlags: []string{
				"-hwaccel", "qsv",
				"-qsv_device", GetEnvOr("SEANIME_TRANSCODER_QSV_RENDERER", defaultOSDevice),
				"-hwaccel_output_format", "qsv",
			},
			EncodeFlags: []string{
				"-c:v", "h264_qsv",
				"-preset", preset,
			},
			// see note on ScaleFilter of the vaapi HwAccel, this is the same filter but adapted to qsv
			ScaleFilter:   "format=nv12|qsv,hwupload,scale_qsv=%d:%d:format=nv12",
			WithForcedIdr: true,
		}
	case "nvidia":
		return HwAccelSettings{
			Name: "nvidia",
			DecodeFlags: []string{
				"-hwaccel", "cuda",
				// this flag prevents data to go from gpu space to cpu space
				// it forces the whole dec/enc to be on the gpu. We want that.
				"-hwaccel_output_format", "cuda",
			},
			EncodeFlags: []string{
				"-c:v", "h264_nvenc",
				"-preset", preset,
				// the exivalent of -sc_threshold on nvidia.
				"-no-scenecut", "1",
			},
			// see note on ScaleFilter of the vaapi HwAccel, this is the same filter but adapted to cuda
			ScaleFilter:   "format=nv12|cuda,hwupload,scale_cuda=%d:%d:format=nv12",
			WithForcedIdr: true,
		}
	case "videotoolbox":
		return HwAccelSettings{
			Name: "videotoolbox",
			DecodeFlags: []string{
				"-hwaccel", "videotoolbox",
			},
			EncodeFlags: []string{
				"-c:v", "h264_videotoolbox",
				"-profile:v", "main",
			},
			ScaleFilter:   "scale=%d:%d",
			WithForcedIdr: true,
		}
	case "custom":
		return customHwAccelSettings
	default:
		streamLogger.Fatal().Msgf("No hardware accelerator named: %s", name)
		panic("unreachable")
	}
}
|
||||
212
seanime-2.9.10/internal/mediastream/transcoder/keyframes.go
Normal file
212
seanime-2.9.10/internal/mediastream/transcoder/keyframes.go
Normal file
@@ -0,0 +1,212 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"path/filepath"
|
||||
"seanime/internal/mediastream/videofile"
|
||||
"seanime/internal/util"
|
||||
"seanime/internal/util/result"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// Keyframe holds the keyframe timestamps (in seconds) of one media file,
// identified by its hash. Keyframes is appended to incrementally while the
// ffprobe analysis is still running; IsDone flips to true once complete.
type Keyframe struct {
	Sha       string
	Keyframes []float64
	IsDone    bool
	info      *KeyframeInfo
}

// KeyframeInfo guards the mutable parts of a Keyframe.
type KeyframeInfo struct {
	mutex     sync.RWMutex                  // protects Keyframes and listeners
	ready     sync.WaitGroup                // released once the first batch of keyframes exists
	listeners []func(keyframes []float64)   // callbacks invoked on every append (see add)
}
|
||||
|
||||
func (kf *Keyframe) Get(idx int32) float64 {
|
||||
kf.info.mutex.RLock()
|
||||
defer kf.info.mutex.RUnlock()
|
||||
return kf.Keyframes[idx]
|
||||
}
|
||||
|
||||
func (kf *Keyframe) Slice(start int32, end int32) []float64 {
|
||||
if end <= start {
|
||||
return []float64{}
|
||||
}
|
||||
kf.info.mutex.RLock()
|
||||
defer kf.info.mutex.RUnlock()
|
||||
ref := kf.Keyframes[start:end]
|
||||
ret := make([]float64, end-start)
|
||||
copy(ret, ref)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (kf *Keyframe) Length() (int32, bool) {
|
||||
kf.info.mutex.RLock()
|
||||
defer kf.info.mutex.RUnlock()
|
||||
return int32(len(kf.Keyframes)), kf.IsDone
|
||||
}
|
||||
|
||||
// add appends new keyframe timestamps and notifies listeners.
// Listeners are invoked synchronously while the write lock is held, so they
// must not call back into Keyframe methods that take the same lock.
func (kf *Keyframe) add(values []float64) {
	kf.info.mutex.Lock()
	defer kf.info.mutex.Unlock()
	kf.Keyframes = append(kf.Keyframes, values...)
	for _, listener := range kf.info.listeners {
		listener(kf.Keyframes)
	}
}
|
||||
|
||||
// AddListener registers a callback invoked (under the write lock) every time
// keyframes are appended; the callback receives the full keyframe slice.
func (kf *Keyframe) AddListener(callback func(keyframes []float64)) {
	kf.info.mutex.Lock()
	defer kf.info.mutex.Unlock()
	kf.info.listeners = append(kf.info.listeners, callback)
}
|
||||
|
||||
// keyframes caches Keyframe analyses per file hash, deduplicating concurrent requests.
var keyframes = result.NewResultMap[string, *Keyframe]()
|
||||
|
||||
// GetKeyframes returns the (possibly still-filling) keyframe list for a file,
// using a per-hash cache so concurrent callers share one analysis.
// It blocks until at least the first batch of keyframes is available.
func GetKeyframes(
	path string,
	hash string,
	logger *zerolog.Logger,
	settings *Settings,
) *Keyframe {
	ret, _ := keyframes.GetOrSet(hash, func() (*Keyframe, error) {
		kf := &Keyframe{
			Sha:    hash,
			IsDone: false,
			info:   &KeyframeInfo{},
		}
		kf.info.ready.Add(1)
		go func() {
			// Fast path: reuse a previously saved analysis from disk.
			keyframesPath := filepath.Join(settings.StreamDir, hash, "keyframes.json")
			if err := getSavedInfo(keyframesPath, kf); err == nil {
				logger.Trace().Msgf("transcoder: Keyframes Cache HIT")
				kf.info.ready.Done()
				return
			}

			// Slow path: run ffprobe. getKeyframes releases ready itself once
			// the first keyframes arrive.
			// NOTE(review): if getKeyframes errors before producing any
			// keyframes, ready.Done may never be called and the Wait below
			// could block forever — confirm the error paths.
			err := getKeyframes(settings.FfprobePath, path, kf, hash, logger)
			if err == nil {
				saveInfo(keyframesPath, kf)
			}
		}()
		return kf, nil
	})
	ret.info.ready.Wait()
	return ret
}
|
||||
|
||||
// getKeyframes runs ffprobe over the file and streams keyframe timestamps into
// kf in batches (first batch of 100 releases kf.info.ready; later batches grow
// to 500). Falls back to synthetic 2-second keyframes when fewer than two real
// ones are found.
func getKeyframes(ffprobePath string, path string, kf *Keyframe, hash string, logger *zerolog.Logger) error {
	defer printExecTime(logger, "ffprobe analysis for %s", path)()
	// Execute ffprobe to retrieve all IFrames. IFrames are specific points in the video we can divide it into segments.
	// We instruct ffprobe to return the timestamp and flags of each frame.
	// Although it's possible to request ffprobe to return only i-frames (keyframes) using the -skip_frame nokey option, this approach is highly inefficient.
	// The inefficiency arises because when this option is used, ffmpeg processes every single frame, which significantly slows down the operation.
	cmd := util.NewCmd(
		"ffprobe",
		"-loglevel", "error",
		"-select_streams", "v:0",
		"-show_entries", "packet=pts_time,flags",
		"-of", "csv=print_section=0",
		path,
	)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	err = cmd.Start()
	if err != nil {
		return err
	}

	scanner := bufio.NewScanner(stdout)

	ret := make([]float64, 0, 1000)
	max := 100 // batch size; NOTE: shadows the builtin max (Go 1.21+)
	done := 0  // number of keyframes flushed to kf so far
	for scanner.Scan() {
		frame := scanner.Text()
		if frame == "" {
			continue
		}

		// Each CSV line is "<pts_time>,<flags>".
		x := strings.Split(frame, ",")
		pts, flags := x[0], x[1]

		// if no video track
		if pts == "N/A" {
			break
		}

		// Only take keyframes
		if flags[0] != 'K' {
			continue
		}

		fpts, err := strconv.ParseFloat(pts, 64)
		if err != nil {
			return err
		}

		// Previously, the aim was to save only those keyframes that had a minimum gap of 3 seconds between them.
		// This was to avoid creating segments as short as 0.2 seconds.
		// However, there were instances where the -f segment muxer would ignore the specified segment time and choose a random keyframe to cut at.
		// To counter this, treat every keyframe as a potential segment.
		//if done == 0 && len(ret) == 0 {
		//
		//	// There are instances where videos may not start exactly at 0:00. This needs to be considered,
		//	// and we should only include keyframes that occur after the video's start time. If not done so,
		//	// it can lead to a discrepancy in our segment count and potentially duplicate the same segment in the stream.
		//
		//	// For simplicity in code comprehension, we designate 0 as the initial keyframe, even though it's not genuine.
		//	// This value is never actually passed to ffmpeg.
		//	ret = append(ret, 0)
		//	continue
		//}
		ret = append(ret, fpts)

		if len(ret) == max {
			kf.add(ret)
			// First flush unblocks waiters in GetKeyframes.
			if done == 0 {
				kf.info.ready.Done()
			} else if done >= 500 {
				max = 500
			}
			done += max
			// clear the array without reallocing it
			ret = ret[:0]
		}
	}

	// If there is less than 2 (i.e. equals 0 or 1 (it happens for audio files with poster))
	if len(ret) < 2 {
		dummy, err := getDummyKeyframes(ffprobePath, path, hash)
		if err != nil {
			return err
		}
		ret = dummy
	}

	kf.add(ret)
	if done == 0 {
		kf.info.ready.Done()
	}
	kf.IsDone = true
	return nil
}
|
||||
|
||||
func getDummyKeyframes(ffprobePath string, path string, sha string) ([]float64, error) {
|
||||
dummyKeyframeDuration := float64(2)
|
||||
info, err := videofile.FfprobeGetInfo(ffprobePath, path, sha)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
segmentCount := int((float64(info.Duration) / dummyKeyframeDuration) + 1)
|
||||
ret := make([]float64, segmentCount)
|
||||
for segmentIndex := 0; segmentIndex < segmentCount; segmentIndex += 1 {
|
||||
ret[segmentIndex] = float64(segmentIndex) * dummyKeyframeDuration
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
121
seanime-2.9.10/internal/mediastream/transcoder/quality.go
Normal file
121
seanime-2.9.10/internal/mediastream/transcoder/quality.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Quality identifies a transcoding quality tier, named after its display height.
type Quality string

const (
	P240  Quality = "240p"
	P360  Quality = "360p"
	P480  Quality = "480p"
	P720  Quality = "720p"
	P1080 Quality = "1080p"
	P1440 Quality = "1440p"
	P4k   Quality = "4k"
	P8k   Quality = "8k"
	// Original is the source quality (transmuxed, not transcoded).
	Original Quality = "original"
)

// Qualities lists all transcodable tiers in ascending order.
// Original is not included in this list because it is a special case.
var Qualities = []Quality{P240, P360, P480, P720, P1080, P1440, P4k, P8k}
|
||||
|
||||
func QualityFromString(str string) (Quality, error) {
|
||||
if str == string(Original) {
|
||||
return Original, nil
|
||||
}
|
||||
|
||||
qualities := Qualities
|
||||
for _, quality := range qualities {
|
||||
if string(quality) == str {
|
||||
return quality, nil
|
||||
}
|
||||
}
|
||||
return Original, errors.New("invalid quality string")
|
||||
}
|
||||
|
||||
// AverageBitrate
|
||||
// Note: Not accurate
|
||||
func (q Quality) AverageBitrate() uint32 {
|
||||
switch q {
|
||||
case P240:
|
||||
return 400_000
|
||||
case P360:
|
||||
return 800_000
|
||||
case P480:
|
||||
return 1_200_000
|
||||
case P720:
|
||||
return 2_400_000
|
||||
case P1080:
|
||||
return 4_800_000
|
||||
case P1440:
|
||||
return 9_600_000
|
||||
case P4k:
|
||||
return 16_000_000
|
||||
case P8k:
|
||||
return 28_000_000
|
||||
case Original:
|
||||
panic("Original quality must be handled specially")
|
||||
}
|
||||
panic("Invalid quality value")
|
||||
}
|
||||
|
||||
func (q Quality) MaxBitrate() uint32 {
|
||||
switch q {
|
||||
case P240:
|
||||
return 700_000
|
||||
case P360:
|
||||
return 1_400_000
|
||||
case P480:
|
||||
return 2_100_000
|
||||
case P720:
|
||||
return 4_000_000
|
||||
case P1080:
|
||||
return 8_000_000
|
||||
case P1440:
|
||||
return 12_000_000
|
||||
case P4k:
|
||||
return 28_000_000
|
||||
case P8k:
|
||||
return 40_000_000
|
||||
case Original:
|
||||
panic("Original quality must be handled specially")
|
||||
}
|
||||
panic("Invalid quality value")
|
||||
}
|
||||
|
||||
func (q Quality) Height() uint32 {
|
||||
switch q {
|
||||
case P240:
|
||||
return 240
|
||||
case P360:
|
||||
return 360
|
||||
case P480:
|
||||
return 480
|
||||
case P720:
|
||||
return 720
|
||||
case P1080:
|
||||
return 1080
|
||||
case P1440:
|
||||
return 1440
|
||||
case P4k:
|
||||
return 2160
|
||||
case P8k:
|
||||
return 4320
|
||||
case Original:
|
||||
panic("Original quality must be handled specially")
|
||||
}
|
||||
panic("Invalid quality value")
|
||||
}
|
||||
|
||||
func QualityFromHeight(height uint32) Quality {
|
||||
qualities := Qualities
|
||||
for _, quality := range qualities {
|
||||
if quality.Height() >= height {
|
||||
return quality
|
||||
}
|
||||
}
|
||||
return P240
|
||||
}
|
||||
19
seanime-2.9.10/internal/mediastream/transcoder/settings.go
Normal file
19
seanime-2.9.10/internal/mediastream/transcoder/settings.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package transcoder
|
||||
|
||||
import "os"
|
||||
|
||||
// GetEnvOr returns the value of the environment variable env,
// or def when the variable is unset or empty.
func GetEnvOr(env string, def string) string {
	if value := os.Getenv(env); value != "" {
		return value
	}
	return def
}
|
||||
|
||||
// HwAccelSettings describes how ffmpeg should be invoked for a given
// hardware-acceleration backend.
type HwAccelSettings struct {
	Name        string   `json:"name"`
	DecodeFlags []string `json:"decodeFlags"` // flags placed before the input (-hwaccel, device selection, ...)
	EncodeFlags []string `json:"encodeFlags"` // encoder selection flags (-c:v, preset, ...)
	ScaleFilter string   `json:"scaleFilter"` // fmt template taking target width and height
	// NOTE(review): the json tag "removeForcedIdr" does not match the field
	// name — confirm which key custom settings are expected to use.
	WithForcedIdr bool `json:"removeForcedIdr"`
}
|
||||
667
seanime-2.9.10/internal/mediastream/transcoder/stream.go
Normal file
667
seanime-2.9.10/internal/mediastream/transcoder/stream.go
Normal file
@@ -0,0 +1,667 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"seanime/internal/util"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/samber/lo"
|
||||
lop "github.com/samber/lo/parallel"
|
||||
)
|
||||
|
||||
// Flags is a bit set describing what kind of media a stream handles.
type Flags int32

const (
	AudioF Flags = 1 << 0
	VideoF Flags = 1 << 1
	// NOTE(review): bit 2 is skipped — confirm whether it is reserved.
	Transmux Flags = 1 << 3
)

// StreamHandle abstracts the kind-specific parts of a stream (audio vs video):
// the ffmpeg arguments, the output path template, and the media flags.
type StreamHandle interface {
	getTranscodeArgs(segments string) []string
	getOutPath(encoderId int) string
	getFlags() Flags
}

// Stream drives segment production for one track (audio or video) of a file.
// Segments are produced by one or more encoder "heads"; segment and head
// state are guarded by their respective locks.
type Stream struct {
	kind     string
	handle   StreamHandle
	file     *FileStream
	segments []Segment
	heads    []Head
	// the lock used for the heads
	//lock sync.RWMutex

	segmentsLock sync.RWMutex // protects segments
	headsLock    sync.RWMutex // protects heads

	logger   *zerolog.Logger
	settings *Settings
	killCh   chan struct{}
	ctx      context.Context
	cancel   context.CancelFunc
}

// Segment tracks the readiness of one HLS segment.
type Segment struct {
	// channel open if the segment is not ready. closed if ready.
	// one can check if segment 1 is open by doing:
	//
	// ts.isSegmentReady(1).
	//
	// You can also wait for it to be ready (non-blocking if already ready) by doing:
	// <-ts.segments[i]
	channel chan struct{}
	encoder int // index of the encoder head that produced this segment
}

// Head tracks one running ffmpeg encoder: the segment it is currently
// producing and the segment it will stop at.
type Head struct {
	segment int32
	end     int32
	command *exec.Cmd
	stdin   io.WriteCloser
}

// DeletedHead is a sentinel marking a removed encoder slot.
var DeletedHead = Head{
	segment: -1,
	end:     -1,
	command: nil,
}

// streamLogger is the package-wide logger for stream internals.
var streamLogger = util.NewLogger()
|
||||
|
||||
func NewStream(
|
||||
kind string,
|
||||
file *FileStream,
|
||||
handle StreamHandle,
|
||||
ret *Stream,
|
||||
settings *Settings,
|
||||
logger *zerolog.Logger,
|
||||
) {
|
||||
ret.kind = kind
|
||||
ret.handle = handle
|
||||
ret.file = file
|
||||
ret.heads = make([]Head, 0)
|
||||
ret.settings = settings
|
||||
ret.logger = logger
|
||||
ret.killCh = make(chan struct{})
|
||||
ret.ctx, ret.cancel = context.WithCancel(context.Background())
|
||||
|
||||
length, isDone := file.Keyframes.Length()
|
||||
ret.segments = make([]Segment, length, max(length, 2000))
|
||||
for seg := range ret.segments {
|
||||
ret.segments[seg].channel = make(chan struct{})
|
||||
}
|
||||
|
||||
if !isDone {
|
||||
file.Keyframes.AddListener(func(keyframes []float64) {
|
||||
ret.segmentsLock.Lock()
|
||||
defer ret.segmentsLock.Unlock()
|
||||
oldLength := len(ret.segments)
|
||||
if cap(ret.segments) > len(keyframes) {
|
||||
ret.segments = ret.segments[:len(keyframes)]
|
||||
} else {
|
||||
ret.segments = append(ret.segments, make([]Segment, len(keyframes)-oldLength)...)
|
||||
}
|
||||
for seg := oldLength; seg < len(keyframes); seg++ {
|
||||
ret.segments[seg].channel = make(chan struct{})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *Stream) GetIndex() (string, error) {
|
||||
// playlist type is event since we can append to the list if Keyframe.IsDone is false.
|
||||
// start time offset makes the stream start at 0s instead of ~3segments from the end (requires version 6 of hls)
|
||||
index := `#EXTM3U
|
||||
#EXT-X-VERSION:6
|
||||
#EXT-X-PLAYLIST-TYPE:EVENT
|
||||
#EXT-X-START:TIME-OFFSET=0
|
||||
#EXT-X-TARGETDURATION:4
|
||||
#EXT-X-MEDIA-SEQUENCE:0
|
||||
#EXT-X-INDEPENDENT-SEGMENTS
|
||||
`
|
||||
length, isDone := ts.file.Keyframes.Length()
|
||||
|
||||
for segment := int32(0); segment < length-1; segment++ {
|
||||
index += fmt.Sprintf("#EXTINF:%.6f\n", ts.file.Keyframes.Get(segment+1)-ts.file.Keyframes.Get(segment))
|
||||
index += fmt.Sprintf("segment-%d.ts\n", segment)
|
||||
}
|
||||
// do not forget to add the last segment between the last keyframe and the end of the file
|
||||
// if the keyframes extraction is not done, do not bother to add it, it will be retrived on the next index retrival
|
||||
if isDone {
|
||||
index += fmt.Sprintf("#EXTINF:%.6f\n", float64(ts.file.Info.Duration)-ts.file.Keyframes.Get(length-1))
|
||||
index += fmt.Sprintf("segment-%d.ts\n", length-1)
|
||||
index += `#EXT-X-ENDLIST`
|
||||
}
|
||||
return index, nil
|
||||
}
|
||||
|
||||
// GetSegment returns the path to the segment and waits for it to be ready.
|
||||
func (ts *Stream) GetSegment(segment int32) (string, error) {
|
||||
// DEVNOTE: Reset the kill channel
|
||||
// This is needed because when the segment is needed again, this channel should be open
|
||||
ts.killCh = make(chan struct{})
|
||||
if debugStream {
|
||||
streamLogger.Trace().Msgf("transcoder: Getting segment %d [GetSegment]", segment)
|
||||
defer streamLogger.Trace().Msgf("transcoder: Retrieved segment %d [GetSegment]", segment)
|
||||
}
|
||||
|
||||
ts.segmentsLock.RLock()
|
||||
ts.headsLock.RLock()
|
||||
ready := ts.isSegmentReady(segment)
|
||||
// we want to calculate distance in the same lock else it can be funky
|
||||
distance := 0.
|
||||
isScheduled := false
|
||||
if !ready {
|
||||
distance = ts.getMinEncoderDistance(segment)
|
||||
for _, head := range ts.heads {
|
||||
if head.segment <= segment && segment < head.end {
|
||||
isScheduled = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
readyChan := ts.segments[segment].channel
|
||||
|
||||
ts.segmentsLock.RUnlock()
|
||||
ts.headsLock.RUnlock()
|
||||
|
||||
if !ready {
|
||||
// Only start a new encode if there is too big a distance between the current encoder and the segment.
|
||||
if distance > 60 || !isScheduled {
|
||||
streamLogger.Trace().Msgf("transcoder: New encoder for segment %d", segment)
|
||||
err := ts.run(segment)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
streamLogger.Trace().Msgf("transcoder: Awaiting segment %d - %.2fs gap", segment, distance)
|
||||
}
|
||||
|
||||
select {
|
||||
// DEVNOTE: This can cause issues if the segment is called again but was "killed" beforehand
|
||||
// It's used to interrupt the waiting process but might not be needed since there's a timeout
|
||||
case <-ts.killCh:
|
||||
return "", fmt.Errorf("transcoder: Stream killed while waiting for segment %d", segment)
|
||||
case <-readyChan:
|
||||
break
|
||||
case <-time.After(25 * time.Second):
|
||||
streamLogger.Error().Msgf("transcoder: Could not retrieve %s segment %d (timeout)", ts.kind, segment)
|
||||
return "", errors.New("could not retrieve segment (timeout)")
|
||||
}
|
||||
}
|
||||
//go ts.prepareNextSegments(segment)
|
||||
ts.prepareNextSegments(segment)
|
||||
return fmt.Sprintf(filepath.ToSlash(ts.handle.getOutPath(ts.segments[segment].encoder)), segment), nil
|
||||
}
|
||||
|
||||
// prepareNextSegments will start the next segments if they are not already started.
|
||||
func (ts *Stream) prepareNextSegments(segment int32) {
|
||||
//if ts.IsKilled() {
|
||||
// return
|
||||
//}
|
||||
// Audio is way cheaper to create than video, so we don't need to run them in advance
|
||||
// Running it in advance might actually slow down the video encode since less compute
|
||||
// power can be used, so we simply disable that.
|
||||
if ts.handle.getFlags()&VideoF == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
ts.segmentsLock.RLock()
|
||||
defer ts.segmentsLock.RUnlock()
|
||||
ts.headsLock.RLock()
|
||||
defer ts.headsLock.RUnlock()
|
||||
|
||||
for i := segment + 1; i <= min(segment+10, int32(len(ts.segments)-1)); i++ {
|
||||
// If the segment is already ready, we don't need to start a new encoder.
|
||||
if ts.isSegmentReady(i) {
|
||||
continue
|
||||
}
|
||||
// only start encode for segments not planned (getMinEncoderDistance returns Inf for them)
|
||||
// or if they are 60s away (assume 5s per segments)
|
||||
if ts.getMinEncoderDistance(i) < 60+(5*float64(i-segment)) {
|
||||
continue
|
||||
}
|
||||
streamLogger.Trace().Msgf("transcoder: Creating new encoder head for future segment %d", i)
|
||||
go func() {
|
||||
_ = ts.run(i)
|
||||
}()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *Stream) getMinEncoderDistance(segment int32) float64 {
|
||||
t := ts.file.Keyframes.Get(segment)
|
||||
distances := lop.Map(ts.heads, func(head Head, _ int) float64 {
|
||||
// ignore killed heads or heads after the current time
|
||||
if head.segment < 0 || ts.file.Keyframes.Get(head.segment) > t || segment >= head.end {
|
||||
return math.Inf(1)
|
||||
}
|
||||
return t - ts.file.Keyframes.Get(head.segment)
|
||||
})
|
||||
if len(distances) == 0 {
|
||||
return math.Inf(1)
|
||||
}
|
||||
return slices.Min(distances)
|
||||
}
|
||||
|
||||
func (ts *Stream) Kill() {
|
||||
streamLogger.Trace().Msgf("transcoder: Killing %s stream", ts.kind)
|
||||
defer streamLogger.Trace().Msg("transcoder: Stream killed")
|
||||
ts.lockHeads()
|
||||
defer ts.unlockHeads()
|
||||
|
||||
for id := range ts.heads {
|
||||
ts.KillHead(id)
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *Stream) IsKilled() bool {
|
||||
select {
|
||||
case <-ts.killCh:
|
||||
// if the channel returned, it means it was closed
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// KillHead
|
||||
// Stream is assumed to be locked
|
||||
func (ts *Stream) KillHead(encoderId int) {
|
||||
//streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: Killing %s encoder head", ts.kind)
|
||||
defer streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: Killed %s encoder head", ts.kind)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
}
|
||||
}()
|
||||
close(ts.killCh)
|
||||
ts.cancel()
|
||||
if ts.heads[encoderId] == DeletedHead || ts.heads[encoderId].command == nil {
|
||||
return
|
||||
}
|
||||
ts.heads[encoderId].command.Process.Signal(os.Interrupt)
|
||||
//_, _ = ts.heads[encoderId].stdin.Write([]byte("q"))
|
||||
//_ = ts.heads[encoderId].stdin.Close()
|
||||
|
||||
ts.heads[encoderId] = DeletedHead
|
||||
}
|
||||
|
||||
func (ts *Stream) SetIsKilled() {
|
||||
}
|
||||
|
||||
//////////////////////////////
|
||||
|
||||
// Remember to lock before calling this.
|
||||
func (ts *Stream) isSegmentReady(segment int32) bool {
|
||||
select {
|
||||
case <-ts.segments[segment].channel:
|
||||
// if the channel returned, it means it was closed
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *Stream) isSegmentTranscoding(segment int32) bool {
|
||||
for _, head := range ts.heads {
|
||||
if head.segment == segment {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func toSegmentStr(segments []float64) string {
|
||||
return strings.Join(lo.Map(segments, func(seg float64, _ int) string {
|
||||
return fmt.Sprintf("%.6f", seg)
|
||||
}), ",")
|
||||
}
|
||||
|
||||
func (ts *Stream) run(start int32) error {
|
||||
//if ts.IsKilled() {
|
||||
// return nil
|
||||
//}
|
||||
ts.logger.Trace().Msgf("transcoder: Running %s encoder head from %d", ts.kind, start)
|
||||
// Start the transcoder up to the 100th segment (or less)
|
||||
length, isDone := ts.file.Keyframes.Length()
|
||||
end := min(start+100, length)
|
||||
// if keyframes analysis is not finished, always have a 1-segment padding
|
||||
// for the extra segment needed for precise split (look comment before -to flag)
|
||||
if !isDone {
|
||||
end -= 2
|
||||
}
|
||||
// Stop at the first finished segment
|
||||
ts.lockSegments()
|
||||
for i := start; i < end; i++ {
|
||||
if ts.isSegmentReady(i) || ts.isSegmentTranscoding(i) {
|
||||
end = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if start >= end {
|
||||
// this can happen if the start segment was finished between the check
|
||||
// to call run() and the actual call.
|
||||
// since most checks are done in a RLock() instead of a Lock() this can
|
||||
// happens when two goroutines try to make the same segment ready
|
||||
ts.unlockSegments()
|
||||
return nil
|
||||
}
|
||||
ts.unlockSegments()
|
||||
|
||||
ts.lockHeads()
|
||||
encoderId := len(ts.heads)
|
||||
ts.heads = append(ts.heads, Head{segment: start, end: end, command: nil})
|
||||
ts.unlockHeads()
|
||||
|
||||
streamLogger.Trace().Any("eid", encoderId).Msgf(
|
||||
"transcoder: Transcoding %d-%d/%d segments for %s",
|
||||
start,
|
||||
end,
|
||||
length,
|
||||
ts.kind,
|
||||
)
|
||||
|
||||
// Include both the start and end delimiter because -ss and -to are not accurate
|
||||
// Having an extra segment allows us to cut precisely the segments we want with the
|
||||
// -f segment that does cut the beginning and the end at the keyframe like asked
|
||||
startRef := float64(0)
|
||||
startSeg := start
|
||||
if start != 0 {
|
||||
// we always take on segment before the current one, for different reasons for audio/video:
|
||||
// - Audio: we need context before the starting point, without that ffmpeg doesn't know what to do and leave ~100ms of silence
|
||||
// - Video: if a segment is really short (between 20 and 100ms), the padding given in the else block bellow is not enough and
|
||||
// the previous segment is played another time. the -segment_times is way more precise, so it does not do the same with this one
|
||||
startSeg = start - 1
|
||||
if ts.handle.getFlags()&AudioF != 0 {
|
||||
startRef = ts.file.Keyframes.Get(startSeg)
|
||||
} else {
|
||||
// the param for the -ss takes the keyframe before the specified time
|
||||
// (if the specified time is a keyframe, it either takes that keyframe or the one before)
|
||||
// to prevent this weird behavior, we specify a bit after the keyframe that interest us
|
||||
|
||||
// this can't be used with audio since we need to have context before the start-time
|
||||
// without this context, the cut loses a bit of audio (audio gap of ~100ms)
|
||||
if startSeg+1 == length {
|
||||
startRef = (ts.file.Keyframes.Get(startSeg) + float64(ts.file.Info.Duration)) / 2
|
||||
} else {
|
||||
startRef = (ts.file.Keyframes.Get(startSeg) + ts.file.Keyframes.Get(startSeg+1)) / 2
|
||||
}
|
||||
}
|
||||
}
|
||||
endPadding := int32(1)
|
||||
if end == length {
|
||||
endPadding = 0
|
||||
}
|
||||
segments := ts.file.Keyframes.Slice(start+1, end+endPadding)
|
||||
if len(segments) == 0 {
|
||||
// we can't leave that empty else ffmpeg errors out.
|
||||
segments = []float64{9999999}
|
||||
}
|
||||
|
||||
outpath := ts.handle.getOutPath(encoderId)
|
||||
err := os.MkdirAll(filepath.Dir(outpath), 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"-nostats", "-hide_banner", "-loglevel", "warning",
|
||||
}
|
||||
|
||||
args = append(args, ts.settings.HwAccel.DecodeFlags...)
|
||||
|
||||
if startRef != 0 {
|
||||
if ts.handle.getFlags()&VideoF != 0 {
|
||||
// This is the default behavior in transmux mode and needed to force pre/post segment to work
|
||||
// This must be disabled when processing only audio because it creates gaps in audio
|
||||
args = append(args, "-noaccurate_seek")
|
||||
}
|
||||
args = append(args,
|
||||
"-ss", fmt.Sprintf("%.6f", startRef),
|
||||
)
|
||||
}
|
||||
// do not include -to if we want the file to go to the end
|
||||
if end+1 < length {
|
||||
// sometimes, the duration is shorter than expected (only during transcode it seems)
|
||||
// always include more and use the -f segment to split the file where we want
|
||||
endRef := ts.file.Keyframes.Get(end + 1)
|
||||
// it seems that the -to is confused when -ss seek before the given time (because it searches for a keyframe)
|
||||
// add back the time that would be lost otherwise
|
||||
// this only happens when -to is before -i but having -to after -i gave a bug (not sure, don't remember)
|
||||
endRef += startRef - ts.file.Keyframes.Get(startSeg)
|
||||
args = append(args,
|
||||
"-to", fmt.Sprintf("%.6f", endRef),
|
||||
)
|
||||
}
|
||||
args = append(args,
|
||||
"-i", ts.file.Path,
|
||||
// this makes behaviors consistent between soft and hardware decodes.
|
||||
// this also means that after a -ss 50, the output video will start at 50s
|
||||
"-start_at_zero",
|
||||
// for hls streams, -copyts is mandatory
|
||||
"-copyts",
|
||||
// this makes output file start at 0s instead of a random delay + the -ss value
|
||||
// this also cancel -start_at_zero weird delay.
|
||||
// this is not always respected, but generally it gives better results.
|
||||
// even when this is not respected, it does not result in a bugged experience but this is something
|
||||
// to keep in mind when debugging
|
||||
"-muxdelay", "0",
|
||||
)
|
||||
args = append(args, ts.handle.getTranscodeArgs(toSegmentStr(segments))...)
|
||||
args = append(args,
|
||||
"-f", "segment",
|
||||
// needed for rounding issues when forcing keyframes
|
||||
// recommended value is 1/(2*frame_rate), which for a 24fps is ~0.021
|
||||
// we take a little bit more than that to be extra safe but too much can be harmful
|
||||
// when segments are short (can make the video repeat itself)
|
||||
"-segment_time_delta", "0.05",
|
||||
"-segment_format", "mpegts",
|
||||
"-segment_times", toSegmentStr(lop.Map(segments, func(seg float64, _ int) float64 {
|
||||
// segment_times want durations, not timestamps so we must substract the -ss param
|
||||
// since we give a greater value to -ss to prevent wrong seeks but -segment_times
|
||||
// needs precise segments, we use the keyframe we want to seek to as a reference.
|
||||
return seg - ts.file.Keyframes.Get(startSeg)
|
||||
})),
|
||||
"-segment_list_type", "flat",
|
||||
"-segment_list", "pipe:1",
|
||||
"-segment_start_number", fmt.Sprint(start),
|
||||
outpath,
|
||||
)
|
||||
|
||||
// Added logging for ffmpeg command and hardware transcoding state
|
||||
streamLogger.Trace().Msgf("transcoder: ffmpeg command: %s %s", ts.settings.FfmpegPath, strings.Join(args, " "))
|
||||
if len(ts.settings.HwAccel.DecodeFlags) > 0 {
|
||||
streamLogger.Trace().Msgf("transcoder: Hardware transcoding enabled with flags: %v", ts.settings.HwAccel.DecodeFlags)
|
||||
} else {
|
||||
streamLogger.Trace().Msg("transcoder: Hardware transcoding not enabled")
|
||||
}
|
||||
|
||||
cmd := util.NewCmdCtx(context.Background(), ts.settings.FfmpegPath, args...)
|
||||
streamLogger.Trace().Msgf("transcoder: Executing ffmpeg for segments %d-%d of %s", start, end, ts.kind)
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stdin, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var stderr strings.Builder
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ts.lockHeads()
|
||||
ts.heads[encoderId].command = cmd
|
||||
ts.heads[encoderId].stdin = stdin
|
||||
ts.unlockHeads()
|
||||
|
||||
go func(stdin io.WriteCloser) {
|
||||
scanner := bufio.NewScanner(stdout)
|
||||
format := filepath.Base(outpath)
|
||||
shouldStop := false
|
||||
|
||||
for scanner.Scan() {
|
||||
var segment int32
|
||||
_, _ = fmt.Sscanf(scanner.Text(), format, &segment)
|
||||
|
||||
// If the segment number is less than the starting segment (start), it means it's not relevant for the current processing, so we skip it
|
||||
if segment < start {
|
||||
// This happens because we use -f segments for accurate cutting (since -ss is not)
|
||||
// check comment at beginning of function for more info
|
||||
continue
|
||||
}
|
||||
ts.lockHeads()
|
||||
ts.heads[encoderId].segment = segment
|
||||
ts.unlockHeads()
|
||||
if debugFfmpegOutput {
|
||||
streamLogger.Debug().Int("eid", encoderId).Msgf("t: \t ffmpeg finished segment %d/%d (%d-%d) of %s", segment, end, start, end, ts.kind)
|
||||
}
|
||||
|
||||
ts.lockSegments()
|
||||
// If the segment is already marked as done, we can stop the ffmpeg process
|
||||
if ts.isSegmentReady(segment) {
|
||||
// the current segment is already marked as done so another process has already gone up to here.
|
||||
_, _ = stdin.Write([]byte("q"))
|
||||
_ = stdin.Close()
|
||||
//cmd.Process.Signal(os.Interrupt)
|
||||
if debugFfmpeg {
|
||||
streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: Terminated ffmpeg, segment %d is ready", segment)
|
||||
}
|
||||
shouldStop = true
|
||||
} else {
|
||||
// Mark the segment as ready
|
||||
ts.segments[segment].encoder = encoderId
|
||||
close(ts.segments[segment].channel)
|
||||
if segment == end-1 {
|
||||
// file finished, ffmpeg will finish soon on its own
|
||||
shouldStop = true
|
||||
} else if ts.isSegmentReady(segment + 1) {
|
||||
// If the next segment is already marked as done, we can stop the ffmpeg process
|
||||
_, _ = stdin.Write([]byte("q"))
|
||||
_ = stdin.Close()
|
||||
//cmd.Process.Signal(os.Interrupt)
|
||||
if debugFfmpeg {
|
||||
streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: Terminated ffmpeg, next segment %d is ready", segment)
|
||||
}
|
||||
shouldStop = true
|
||||
}
|
||||
}
|
||||
ts.unlockSegments()
|
||||
// we need this and not a return in the condition because we want to unlock
|
||||
// the lock (and can't defer since this is a loop)
|
||||
if shouldStop {
|
||||
if debugFfmpeg {
|
||||
streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: ffmpeg completed segments %d-%d/%d of %s", start, end, length, ts.kind)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
streamLogger.Error().Int("eid", encoderId).Err(err).Msg("transcoder: Error scanning ffmpeg output")
|
||||
return
|
||||
}
|
||||
}(stdin)
|
||||
|
||||
// Listen for kill signal
|
||||
go func(stdin io.WriteCloser) {
|
||||
select {
|
||||
case <-ts.ctx.Done():
|
||||
streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: Aborting ffmpeg process for %s", ts.kind)
|
||||
_, _ = stdin.Write([]byte("q"))
|
||||
_ = stdin.Close()
|
||||
return
|
||||
}
|
||||
}(stdin)
|
||||
|
||||
// Listen for process termination
|
||||
go func() {
|
||||
err := cmd.Wait()
|
||||
var exitErr *exec.ExitError
|
||||
// Check if hardware acceleration was attempted and if stderr indicates a failure to use it
|
||||
if len(ts.settings.HwAccel.DecodeFlags) > 0 {
|
||||
lowerOutput := strings.ToLower(stderr.String())
|
||||
if strings.Contains(lowerOutput, "failed") &&
|
||||
(strings.Contains(lowerOutput, "hwaccel") || strings.Contains(lowerOutput, "vaapi") || strings.Contains(lowerOutput, "cuvid") || strings.Contains(lowerOutput, "vdpau")) {
|
||||
streamLogger.Warn().Int("eid", encoderId).Msg("transcoder: ffmpeg failed to use hardware acceleration settings; falling back to CPU")
|
||||
}
|
||||
}
|
||||
|
||||
if errors.As(err, &exitErr) && exitErr.ExitCode() == 255 {
|
||||
streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: ffmpeg process was terminated")
|
||||
} else if err != nil {
|
||||
streamLogger.Error().Int("eid", encoderId).Err(fmt.Errorf("%s: %s", err, stderr.String())).Msgf("transcoder: ffmpeg process failed")
|
||||
} else {
|
||||
streamLogger.Trace().Int("eid", encoderId).Msgf("transcoder: ffmpeg process for %s exited", ts.kind)
|
||||
}
|
||||
|
||||
ts.lockHeads()
|
||||
defer ts.unlockHeads()
|
||||
// we can't delete the head directly because it would invalidate the others encoderId
|
||||
ts.heads[encoderId] = DeletedHead
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const debugLocks = false
|
||||
const debugFfmpeg = true
|
||||
const debugFfmpegOutput = false
|
||||
const debugStream = false
|
||||
|
||||
func (ts *Stream) lockHeads() {
|
||||
if debugLocks {
|
||||
streamLogger.Debug().Msg("t: Locking heads")
|
||||
}
|
||||
ts.headsLock.Lock()
|
||||
if debugLocks {
|
||||
streamLogger.Debug().Msg("t: \t\tLocked heads")
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *Stream) unlockHeads() {
|
||||
if debugLocks {
|
||||
streamLogger.Debug().Msg("t: Unlocking heads")
|
||||
}
|
||||
ts.headsLock.Unlock()
|
||||
if debugLocks {
|
||||
streamLogger.Debug().Msg("t: \t\tUnlocked heads")
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *Stream) lockSegments() {
|
||||
if debugLocks {
|
||||
streamLogger.Debug().Msg("t: Locking segments")
|
||||
}
|
||||
ts.segmentsLock.Lock()
|
||||
if debugLocks {
|
||||
streamLogger.Debug().Msg("t: \t\tLocked segments")
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *Stream) unlockSegments() {
|
||||
if debugLocks {
|
||||
streamLogger.Debug().Msg("t: Unlocking segments")
|
||||
}
|
||||
ts.segmentsLock.Unlock()
|
||||
if debugLocks {
|
||||
streamLogger.Debug().Msg("t: \t\tUnlocked segments")
|
||||
}
|
||||
}
|
||||
249
seanime-2.9.10/internal/mediastream/transcoder/tracker.go
Normal file
249
seanime-2.9.10/internal/mediastream/transcoder/tracker.go
Normal file
@@ -0,0 +1,249 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
type ClientInfo struct {
|
||||
client string
|
||||
path string
|
||||
quality *Quality
|
||||
audio int32
|
||||
head int32
|
||||
}
|
||||
|
||||
type Tracker struct {
|
||||
// key: client_id
|
||||
clients map[string]ClientInfo
|
||||
// key: client_id
|
||||
visitDate map[string]time.Time
|
||||
// key: path
|
||||
lastUsage map[string]time.Time
|
||||
transcoder *Transcoder
|
||||
deletedStream chan string
|
||||
logger *zerolog.Logger
|
||||
killCh chan struct{} // Close channel to stop tracker
|
||||
}
|
||||
|
||||
func NewTracker(t *Transcoder) *Tracker {
|
||||
ret := &Tracker{
|
||||
clients: make(map[string]ClientInfo),
|
||||
visitDate: make(map[string]time.Time),
|
||||
lastUsage: make(map[string]time.Time),
|
||||
transcoder: t,
|
||||
logger: t.logger,
|
||||
deletedStream: make(chan string, 1000),
|
||||
killCh: make(chan struct{}),
|
||||
}
|
||||
go ret.start()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (t *Tracker) Stop() {
|
||||
close(t.killCh)
|
||||
}
|
||||
|
||||
func Abs(x int32) int32 {
|
||||
if x < 0 {
|
||||
return -x
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
func (t *Tracker) start() {
|
||||
inactiveTime := 1 * time.Hour
|
||||
timer := time.NewTicker(inactiveTime)
|
||||
defer timer.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-t.killCh:
|
||||
return
|
||||
case info, ok := <-t.transcoder.clientChan:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
old, ok := t.clients[info.client]
|
||||
// First fixup the info. Most routes return partial infos
|
||||
if ok && old.path == info.path {
|
||||
if info.quality == nil {
|
||||
info.quality = old.quality
|
||||
}
|
||||
if info.audio == -1 {
|
||||
info.audio = old.audio
|
||||
}
|
||||
if info.head == -1 {
|
||||
info.head = old.head
|
||||
}
|
||||
}
|
||||
|
||||
t.clients[info.client] = info
|
||||
t.visitDate[info.client] = time.Now()
|
||||
t.lastUsage[info.path] = time.Now()
|
||||
|
||||
// now that the new info is stored and fixed, kill old streams
|
||||
if ok && old.path == info.path {
|
||||
if old.audio != info.audio && old.audio != -1 {
|
||||
t.KillAudioIfDead(old.path, old.audio)
|
||||
}
|
||||
if old.quality != info.quality && old.quality != nil {
|
||||
t.KillQualityIfDead(old.path, *old.quality)
|
||||
}
|
||||
if old.head != -1 && Abs(info.head-old.head) > 100 {
|
||||
t.KillOrphanedHeads(old.path, old.quality, old.audio)
|
||||
}
|
||||
} else if ok {
|
||||
t.KillStreamIfDead(old.path)
|
||||
}
|
||||
|
||||
case <-timer.C:
|
||||
// Purge old clients
|
||||
for client, date := range t.visitDate {
|
||||
if time.Since(date) < inactiveTime {
|
||||
continue
|
||||
}
|
||||
|
||||
info := t.clients[client]
|
||||
|
||||
if !t.KillStreamIfDead(info.path) {
|
||||
audioCleanup := info.audio != -1 && t.KillAudioIfDead(info.path, info.audio)
|
||||
videoCleanup := info.quality != nil && t.KillQualityIfDead(info.path, *info.quality)
|
||||
if !audioCleanup || !videoCleanup {
|
||||
t.KillOrphanedHeads(info.path, info.quality, info.audio)
|
||||
}
|
||||
}
|
||||
|
||||
delete(t.clients, client)
|
||||
delete(t.visitDate, client)
|
||||
}
|
||||
case path := <-t.deletedStream:
|
||||
t.DestroyStreamIfOld(path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tracker) KillStreamIfDead(path string) bool {
|
||||
for _, stream := range t.clients {
|
||||
if stream.path == path {
|
||||
return false
|
||||
}
|
||||
}
|
||||
t.logger.Trace().Msgf("Killing stream %s", path)
|
||||
|
||||
stream, ok := t.transcoder.streams.Get(path)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
stream.Kill()
|
||||
go func() {
|
||||
select {
|
||||
case <-t.killCh:
|
||||
return
|
||||
case <-time.After(4 * time.Hour):
|
||||
t.deletedStream <- path
|
||||
}
|
||||
//time.Sleep(4 * time.Hour)
|
||||
//t.deletedStream <- path
|
||||
}()
|
||||
return true
|
||||
}
|
||||
|
||||
func (t *Tracker) DestroyStreamIfOld(path string) {
|
||||
if time.Since(t.lastUsage[path]) < 4*time.Hour {
|
||||
return
|
||||
}
|
||||
stream, ok := t.transcoder.streams.Get(path)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
t.transcoder.streams.Delete(path)
|
||||
stream.Destroy()
|
||||
}
|
||||
|
||||
func (t *Tracker) KillAudioIfDead(path string, audio int32) bool {
|
||||
for _, stream := range t.clients {
|
||||
if stream.path == path && stream.audio == audio {
|
||||
return false
|
||||
}
|
||||
}
|
||||
t.logger.Trace().Msgf("Killing audio %d of %s", audio, path)
|
||||
|
||||
stream, ok := t.transcoder.streams.Get(path)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
astream, aok := stream.audios.Get(audio)
|
||||
if !aok {
|
||||
return false
|
||||
}
|
||||
astream.Kill()
|
||||
return true
|
||||
}
|
||||
|
||||
func (t *Tracker) KillQualityIfDead(path string, quality Quality) bool {
|
||||
for _, stream := range t.clients {
|
||||
if stream.path == path && stream.quality != nil && *stream.quality == quality {
|
||||
return false
|
||||
}
|
||||
}
|
||||
//start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Killing %s video stream ", quality)
|
||||
|
||||
stream, ok := t.transcoder.streams.Get(path)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
vstream, vok := stream.videos.Get(quality)
|
||||
if !vok {
|
||||
return false
|
||||
}
|
||||
vstream.Kill()
|
||||
|
||||
//t.logger.Trace().Msgf("transcoder: Killed %s video stream in %.2fs", quality, time.Since(start).Seconds())
|
||||
return true
|
||||
}
|
||||
|
||||
func (t *Tracker) KillOrphanedHeads(path string, quality *Quality, audio int32) {
|
||||
stream, ok := t.transcoder.streams.Get(path)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if quality != nil {
|
||||
vstream, vok := stream.videos.Get(*quality)
|
||||
if vok {
|
||||
t.killOrphanedHeads(&vstream.Stream)
|
||||
}
|
||||
}
|
||||
if audio != -1 {
|
||||
astream, aok := stream.audios.Get(audio)
|
||||
if aok {
|
||||
t.killOrphanedHeads(&astream.Stream)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tracker) killOrphanedHeads(stream *Stream) {
|
||||
stream.headsLock.RLock()
|
||||
defer stream.headsLock.RUnlock()
|
||||
|
||||
for encoderId, head := range stream.heads {
|
||||
if head == DeletedHead {
|
||||
continue
|
||||
}
|
||||
|
||||
distance := int32(99999)
|
||||
for _, info := range t.clients {
|
||||
if info.head == -1 {
|
||||
continue
|
||||
}
|
||||
distance = min(Abs(info.head-head.segment), distance)
|
||||
}
|
||||
if distance > 20 {
|
||||
t.logger.Trace().Msgf("transcoder: Killing orphaned head %d", encoderId)
|
||||
stream.KillHead(encoderId)
|
||||
}
|
||||
}
|
||||
}
|
||||
247
seanime-2.9.10/internal/mediastream/transcoder/transcoder.go
Normal file
247
seanime-2.9.10/internal/mediastream/transcoder/transcoder.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"seanime/internal/mediastream/videofile"
|
||||
"seanime/internal/util/result"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
type (
|
||||
Transcoder struct {
|
||||
// All file streams currently running, index is file path
|
||||
streams *result.Map[string, *FileStream]
|
||||
clientChan chan ClientInfo
|
||||
tracker *Tracker
|
||||
logger *zerolog.Logger
|
||||
settings Settings
|
||||
}
|
||||
|
||||
Settings struct {
|
||||
StreamDir string
|
||||
HwAccel HwAccelSettings
|
||||
FfmpegPath string
|
||||
FfprobePath string
|
||||
}
|
||||
|
||||
NewTranscoderOptions struct {
|
||||
Logger *zerolog.Logger
|
||||
HwAccelKind string
|
||||
Preset string
|
||||
TempOutDir string
|
||||
FfmpegPath string
|
||||
FfprobePath string
|
||||
HwAccelCustomSettings string
|
||||
}
|
||||
)
|
||||
|
||||
func NewTranscoder(opts *NewTranscoderOptions) (*Transcoder, error) {
|
||||
|
||||
// Create a directory that'll hold the stream segments if it doesn't exist
|
||||
streamDir := filepath.Join(opts.TempOutDir, "streams")
|
||||
_ = os.MkdirAll(streamDir, 0755)
|
||||
|
||||
// Clear the directory containing the streams
|
||||
dir, err := os.ReadDir(streamDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, d := range dir {
|
||||
_ = os.RemoveAll(path.Join(streamDir, d.Name()))
|
||||
}
|
||||
|
||||
ret := &Transcoder{
|
||||
streams: result.NewResultMap[string, *FileStream](),
|
||||
clientChan: make(chan ClientInfo, 1000),
|
||||
logger: opts.Logger,
|
||||
settings: Settings{
|
||||
StreamDir: streamDir,
|
||||
HwAccel: GetHardwareAccelSettings(HwAccelOptions{
|
||||
Kind: opts.HwAccelKind,
|
||||
Preset: opts.Preset,
|
||||
CustomSettings: opts.HwAccelCustomSettings,
|
||||
}),
|
||||
FfmpegPath: opts.FfmpegPath,
|
||||
FfprobePath: opts.FfprobePath,
|
||||
},
|
||||
}
|
||||
ret.tracker = NewTracker(ret)
|
||||
|
||||
ret.logger.Info().Msg("transcoder: Initialized")
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetSettings() *Settings {
|
||||
return &t.settings
|
||||
}
|
||||
|
||||
// Destroy stops all streams and removes the output directory.
|
||||
// A new transcoder should be created after calling this function.
|
||||
func (t *Transcoder) Destroy() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
}
|
||||
}()
|
||||
t.tracker.Stop()
|
||||
|
||||
t.logger.Debug().Msg("transcoder: Destroying transcoder")
|
||||
for _, s := range t.streams.Values() {
|
||||
s.Destroy()
|
||||
}
|
||||
t.streams.Clear()
|
||||
//close(t.clientChan)
|
||||
t.streams = result.NewResultMap[string, *FileStream]()
|
||||
t.clientChan = make(chan ClientInfo, 10)
|
||||
t.logger.Debug().Msg("transcoder: Transcoder destroyed")
|
||||
}
|
||||
|
||||
func (t *Transcoder) getFileStream(path string, hash string, mediaInfo *videofile.MediaInfo) (*FileStream, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Getting filestream")
|
||||
defer t.logger.Trace().Msgf("transcoder: Filestream retrieved in %.2fs", time.Since(start).Seconds())
|
||||
}
|
||||
ret, _ := t.streams.GetOrSet(path, func() (*FileStream, error) {
|
||||
return NewFileStream(path, hash, mediaInfo, &t.settings, t.logger), nil
|
||||
})
|
||||
if ret == nil {
|
||||
return nil, fmt.Errorf("could not get filestream, file may not exist")
|
||||
}
|
||||
ret.ready.Wait()
|
||||
if ret.err != nil {
|
||||
t.streams.Delete(path)
|
||||
return nil, ret.err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetMaster(path string, hash string, mediaInfo *videofile.MediaInfo, client string) (string, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Retrieving master file")
|
||||
defer t.logger.Trace().Msgf("transcoder: Master file retrieved in %.2fs", time.Since(start).Seconds())
|
||||
}
|
||||
stream, err := t.getFileStream(path, hash, mediaInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
t.clientChan <- ClientInfo{
|
||||
client: client,
|
||||
path: path,
|
||||
quality: nil,
|
||||
audio: -1,
|
||||
head: -1,
|
||||
}
|
||||
return stream.GetMaster(), nil
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetVideoIndex(
|
||||
path string,
|
||||
hash string,
|
||||
mediaInfo *videofile.MediaInfo,
|
||||
quality Quality,
|
||||
client string,
|
||||
) (string, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Retrieving video index file (%s)", quality)
|
||||
defer t.logger.Trace().Msgf("transcoder: Video index file retrieved in %.2fs", time.Since(start).Seconds())
|
||||
}
|
||||
stream, err := t.getFileStream(path, hash, mediaInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
t.clientChan <- ClientInfo{
|
||||
client: client,
|
||||
path: path,
|
||||
quality: &quality,
|
||||
audio: -1,
|
||||
head: -1,
|
||||
}
|
||||
return stream.GetVideoIndex(quality)
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetAudioIndex(
|
||||
path string,
|
||||
hash string,
|
||||
mediaInfo *videofile.MediaInfo,
|
||||
audio int32,
|
||||
client string,
|
||||
) (string, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Retrieving audio index file (%d)", audio)
|
||||
defer t.logger.Trace().Msgf("transcoder: Audio index file retrieved in %.2fs", time.Since(start).Seconds())
|
||||
}
|
||||
stream, err := t.getFileStream(path, hash, mediaInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
t.clientChan <- ClientInfo{
|
||||
client: client,
|
||||
path: path,
|
||||
audio: audio,
|
||||
head: -1,
|
||||
}
|
||||
return stream.GetAudioIndex(audio)
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetVideoSegment(
|
||||
path string,
|
||||
hash string,
|
||||
mediaInfo *videofile.MediaInfo,
|
||||
quality Quality,
|
||||
segment int32,
|
||||
client string,
|
||||
) (string, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Retrieving video segment %d (%s) [GetVideoSegment]", segment, quality)
|
||||
defer t.logger.Trace().Msgf("transcoder: Video segment retrieved in %.2fs", time.Since(start).Seconds())
|
||||
}
|
||||
stream, err := t.getFileStream(path, hash, mediaInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
//t.logger.Trace().Msgf("transcoder: Sending client info, segment %d (%s) [GetVideoSegment]", segment, quality)
|
||||
t.clientChan <- ClientInfo{
|
||||
client: client,
|
||||
path: path,
|
||||
quality: &quality,
|
||||
audio: -1,
|
||||
head: segment,
|
||||
}
|
||||
//t.logger.Trace().Msgf("transcoder: Getting video segment %d (%s) [GetVideoSegment]", segment, quality)
|
||||
return stream.GetVideoSegment(quality, segment)
|
||||
}
|
||||
|
||||
func (t *Transcoder) GetAudioSegment(
|
||||
path string,
|
||||
hash string,
|
||||
mediaInfo *videofile.MediaInfo,
|
||||
audio int32,
|
||||
segment int32,
|
||||
client string,
|
||||
) (string, error) {
|
||||
if debugStream {
|
||||
start := time.Now()
|
||||
t.logger.Trace().Msgf("transcoder: Retrieving audio segment %d (%d)", segment, audio)
|
||||
defer t.logger.Trace().Msgf("transcoder: Audio segment %d (%d) retrieved in %.2fs", segment, audio, time.Since(start).Seconds())
|
||||
}
|
||||
stream, err := t.getFileStream(path, hash, mediaInfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
t.clientChan <- ClientInfo{
|
||||
client: client,
|
||||
path: path,
|
||||
audio: audio,
|
||||
head: segment,
|
||||
}
|
||||
return stream.GetAudioSegment(audio, segment)
|
||||
}
|
||||
58
seanime-2.9.10/internal/mediastream/transcoder/utils.go
Normal file
58
seanime-2.9.10/internal/mediastream/transcoder/utils.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/goccy/go-json"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// ParseSegment extracts the numeric index from an HLS segment file name of
// the form "segment-<n>.ts". Any other shape yields an error.
func ParseSegment(segment string) (int32, error) {
	var index int32
	if _, err := fmt.Sscanf(segment, "segment-%d.ts", &index); err != nil {
		return 0, errors.New("could not parse segment")
	}
	return index, nil
}
|
||||
|
||||
func getSavedInfo[T any](savePath string, mi *T) error {
|
||||
savedFile, err := os.Open(savePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
saved, err := io.ReadAll(savedFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.Unmarshal(saved, mi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func saveInfo[T any](savePath string, mi *T) error {
|
||||
content, err := json.Marshal(*mi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// create directory if it doesn't exist
|
||||
_ = os.MkdirAll(filepath.Dir(savePath), 0755)
|
||||
return os.WriteFile(savePath, content, 0666)
|
||||
}
|
||||
|
||||
// printExecTime logs the start of an operation and returns a function that,
// when invoked (typically via defer), logs the elapsed time. Unlike a plain
// `defer logger...Msgf(..., time.Since(start))`, the returned closure captures
// start and measures at call time.
func printExecTime(logger *zerolog.Logger, message string, args ...any) func() {
	msg := fmt.Sprintf(message, args...)
	start := time.Now()
	logger.Trace().Msgf("transcoder: Running %s", msg)

	return func() {
		logger.Trace().Msgf("transcoder: %s finished in %s", msg, time.Since(start))
	}
}
|
||||
@@ -0,0 +1,91 @@
|
||||
package transcoder
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// VideoStream is a Stream specialized for a single video quality of a file.
type VideoStream struct {
	Stream
	// quality this stream transcodes (or transmuxes, for Original).
	quality  Quality
	logger   *zerolog.Logger
	settings *Settings
}
|
||||
|
||||
func NewVideoStream(file *FileStream, quality Quality, logger *zerolog.Logger, settings *Settings) *VideoStream {
|
||||
logger.Trace().Str("file", filepath.Base(file.Path)).Any("quality", quality).Msgf("transcoder: Creating video stream")
|
||||
ret := new(VideoStream)
|
||||
ret.quality = quality
|
||||
ret.logger = logger
|
||||
ret.settings = settings
|
||||
NewStream(fmt.Sprintf("video (%s)", quality), file, ret, &ret.Stream, settings, logger)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (vs *VideoStream) getFlags() Flags {
|
||||
if vs.quality == Original {
|
||||
return VideoF | Transmux
|
||||
}
|
||||
return VideoF
|
||||
}
|
||||
|
||||
// getOutPath returns the ffmpeg output pattern for this stream's segments;
// %%d is left as a literal %d for ffmpeg's segment muxer to expand.
func (vs *VideoStream) getOutPath(encoderId int) string {
	return filepath.Join(vs.file.Out, fmt.Sprintf("segment-%s-%d-%%d.ts", vs.quality, encoderId))
}
|
||||
|
||||
// closestMultiple rounds n to the nearest multiple of x (ties round up).
// When x exceeds n, x itself is returned so the result is never below x.
func closestMultiple(n int32, x int32) int32 {
	if x > n {
		return x
	}
	rounded := n + x/2
	return rounded - rounded%x
}
|
||||
|
||||
// getTranscodeArgs builds the ffmpeg argument list for this stream's quality.
// segments is a force_key_frames expression listing the segment boundaries.
// Original quality is stream-copied; every other quality is scaled and
// rate-limited according to the Quality's bitrate tables.
func (vs *VideoStream) getTranscodeArgs(segments string) []string {
	// Always select the first video stream of the input.
	args := []string{
		"-map", "0:V:0",
	}

	if vs.quality == Original {
		// No re-encode: copy the source video stream as-is.
		args = append(args,
			"-c:v", "copy",
		)
		vs.logger.Debug().Msg("videostream: Transcoding to original quality")
		return args
	}

	vs.logger.Debug().Interface("hwaccelArgs", vs.settings.HwAccel).Msg("videostream: Hardware Acceleration")

	args = append(args, vs.settings.HwAccel.EncodeFlags...)
	// Scale width proportionally to the target height, preserving aspect ratio.
	width := int32(float64(vs.quality.Height()) / float64(vs.file.Info.Video.Height) * float64(vs.file.Info.Video.Width))
	// force a width that is a multiple of two else some apps behave badly.
	width = closestMultiple(width, 2)
	args = append(args,
		"-vf", fmt.Sprintf(vs.settings.HwAccel.ScaleFilter, width, vs.quality.Height()),
		// Even less sure but buf size are 5x the average bitrate since the average bitrate is only
		// useful for hls segments.
		"-bufsize", fmt.Sprint(vs.quality.MaxBitrate()*5),
		"-b:v", fmt.Sprint(vs.quality.AverageBitrate()),
		"-maxrate", fmt.Sprint(vs.quality.MaxBitrate()),
	)
	if vs.settings.HwAccel.WithForcedIdr {
		// Force segments to be split exactly on keyframes (only works when transcoding)
		// forced-idr is needed to force keyframes to be an idr-frame (by default it can be any i frames)
		// without this option, some hardware encoders uses others i-frames and the -f segment can't cut at them.
		args = append(args, "-forced-idr", "1")
	}

	args = append(args,
		"-force_key_frames", segments,
		// make ffmpeg globally less buggy
		"-strict", "-2",
	)

	vs.logger.Debug().Interface("args", args).Msgf("videostream: Transcoding to %s quality", vs.quality)

	return args
}
|
||||
82
seanime-2.9.10/internal/mediastream/videofile/extract.go
Normal file
82
seanime-2.9.10/internal/mediastream/videofile/extract.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package videofile
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"seanime/internal/util"
|
||||
"seanime/internal/util/crashlog"
|
||||
)
|
||||
|
||||
func GetFileSubsCacheDir(outDir string, hash string) string {
|
||||
return filepath.Join(outDir, "videofiles", hash, "/subs")
|
||||
}
|
||||
|
||||
func GetFileAttCacheDir(outDir string, hash string) string {
|
||||
return filepath.Join(outDir, "videofiles", hash, "/att")
|
||||
}
|
||||
|
||||
func ExtractAttachment(ffmpegPath string, path string, hash string, mediaInfo *MediaInfo, cacheDir string, logger *zerolog.Logger) (err error) {
|
||||
logger.Debug().Str("hash", hash).Msgf("videofile: Starting media attachment extraction")
|
||||
|
||||
attachmentPath := GetFileAttCacheDir(cacheDir, hash)
|
||||
subsPath := GetFileSubsCacheDir(cacheDir, hash)
|
||||
_ = os.MkdirAll(attachmentPath, 0755)
|
||||
_ = os.MkdirAll(subsPath, 0755)
|
||||
|
||||
subsDir, err := os.ReadDir(subsPath)
|
||||
if err == nil {
|
||||
if len(subsDir) == len(mediaInfo.Subtitles) {
|
||||
logger.Debug().Str("hash", hash).Msgf("videofile: Attachments already extracted")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, sub := range mediaInfo.Subtitles {
|
||||
if sub.Extension == nil || *sub.Extension == "" {
|
||||
logger.Error().Msgf("videofile: Subtitle format is not supported")
|
||||
return fmt.Errorf("videofile: Unsupported subtitle format")
|
||||
}
|
||||
}
|
||||
|
||||
// Instantiate a new crash logger
|
||||
crashLogger := crashlog.GlobalCrashLogger.InitArea("ffmpeg")
|
||||
defer crashLogger.Close()
|
||||
|
||||
crashLogger.LogInfof("Extracting attachments from %s", path)
|
||||
|
||||
// DEVNOTE: All paths fed into this command should be absolute
|
||||
cmd := util.NewCmdCtx(
|
||||
context.Background(),
|
||||
ffmpegPath,
|
||||
"-dump_attachment:t", "",
|
||||
// override old attachments
|
||||
"-y",
|
||||
"-i", path,
|
||||
)
|
||||
// The working directory for the command is the attachment directory
|
||||
cmd.Dir = attachmentPath
|
||||
|
||||
for _, sub := range mediaInfo.Subtitles {
|
||||
if ext := sub.Extension; ext != nil {
|
||||
cmd.Args = append(
|
||||
cmd.Args,
|
||||
"-map", fmt.Sprintf("0:s:%d", sub.Index),
|
||||
"-c:s", "copy",
|
||||
fmt.Sprintf("%s/%d.%s", subsPath, sub.Index, *ext),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
cmd.Stdout = crashLogger.Stdout()
|
||||
cmd.Stderr = crashLogger.Stdout()
|
||||
err = cmd.Run()
|
||||
if err != nil {
|
||||
logger.Error().Err(err).Msgf("videofile: Error starting FFmpeg")
|
||||
crashlog.GlobalCrashLogger.WriteAreaLogToFile(crashLogger)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
436
seanime-2.9.10/internal/mediastream/videofile/info.go
Normal file
436
seanime-2.9.10/internal/mediastream/videofile/info.go
Normal file
@@ -0,0 +1,436 @@
|
||||
package videofile
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"fmt"
|
||||
"mime"
|
||||
"path/filepath"
|
||||
"seanime/internal/util/filecache"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/samber/lo"
|
||||
"golang.org/x/text/language"
|
||||
"gopkg.in/vansante/go-ffprobe.v2"
|
||||
)
|
||||
|
||||
// MediaInfo is the FFprobe-derived description of a media file, serialized to
// JSON for the client and cached by file hash.
type MediaInfo struct {
	// closed if the mediainfo is ready for read. open otherwise
	ready <-chan struct{}
	// The sha1 of the video file
	Sha string `json:"sha"`
	// The internal path of the video file
	Path string `json:"path"`
	// The extension currently used to store this video file
	Extension string `json:"extension"`
	// RFC 6381 mime string for the whole container, e.g. `video/mp4; codecs="avc1..., mp4a..."`
	MimeCodec *string `json:"mimeCodec"`
	// The file size of the video file
	Size uint64 `json:"size"`
	// The length of the media in seconds
	Duration float32 `json:"duration"`
	// The container of the video file of this episode
	Container *string `json:"container"`
	// The video codec and information (first entry of Videos, when any)
	Video *Video `json:"video"`
	// The list of videos if there are multiples
	Videos []Video `json:"videos"`
	// The list of audio tracks
	Audios []Audio `json:"audios"`
	// The list of subtitles tracks
	Subtitles []Subtitle `json:"subtitles"`
	// The list of fonts that can be used to display subtitles
	Fonts []string `json:"fonts"`
	// The list of chapters. See Chapter for more information
	Chapters []Chapter `json:"chapters"`
}
|
||||
|
||||
// Video describes a single video stream of a media file.
type Video struct {
	// The codec of this stream (defined as the RFC 6381)
	Codec string `json:"codec"`
	// RFC 6381 mime codec, e.g., "video/mp4, codecs=avc1.42E01E, mp4a.40.2"
	MimeCodec *string `json:"mimeCodec"`
	// The language of this stream (as a ISO-639-2 language code)
	Language *string `json:"language"`
	// The max quality of this video track
	Quality Quality `json:"quality"`
	// The width of the video stream
	Width uint32 `json:"width"`
	// The height of the video stream
	Height uint32 `json:"height"`
	// The average bitrate of the video in bytes/s
	Bitrate uint32 `json:"bitrate"`
}
|
||||
|
||||
// Audio describes a single audio stream of a media file.
type Audio struct {
	// The index of this track on the media
	Index uint32 `json:"index"`
	// The title of the stream
	Title *string `json:"title"`
	// The language of this stream (as a ISO-639-2 language code)
	Language *string `json:"language"`
	// The codec of this stream
	Codec string `json:"codec"`
	// RFC 6381 mime codec for the audio codec, when known
	MimeCodec *string `json:"mimeCodec"`
	// Is this stream the default one of its type?
	IsDefault bool `json:"isDefault"`
	// Is this stream tagged as forced? (useful only for subtitles)
	IsForced bool `json:"isForced"`
	// Channel count; NOTE(review): not populated by FfprobeGetInfo in this file — confirm the writer.
	Channels uint32 `json:"channels"`
}
|
||||
|
||||
// Subtitle describes a single subtitle track of a media file.
type Subtitle struct {
	// The index of this track on the media
	Index uint32 `json:"index"`
	// The title of the stream
	Title *string `json:"title"`
	// The language of this stream (as a ISO-639-2 language code)
	Language *string `json:"language"`
	// The codec of this stream
	Codec string `json:"codec"`
	// The extension for the codec
	Extension *string `json:"extension"`
	// Is this stream the default one of its type?
	IsDefault bool `json:"isDefault"`
	// Is this stream tagged as forced? (useful only for subtitles)
	IsForced bool `json:"isForced"`
	// Is this subtitle file external?
	IsExternal bool `json:"isExternal"`
	// The link to access this subtitle
	Link *string `json:"link"`
}
|
||||
|
||||
// Chapter is a named time range inside the media.
type Chapter struct {
	// The start time of the chapter (in second from the start of the episode)
	StartTime float32 `json:"startTime"`
	// The end time of the chapter (in second from the start of the episode)
	EndTime float32 `json:"endTime"`
	// The name of this chapter. This should be a human-readable name that could be presented to the user
	Name string `json:"name"`
	// TODO: add a type field for Opening, Credits...
}
|
||||
|
||||
// MediaInfoExtractor extracts MediaInfo from files via FFprobe, caching the
// result per file hash through a filecache.Cacher.
type MediaInfoExtractor struct {
	fileCacher *filecache.Cacher
	logger     *zerolog.Logger
}
|
||||
|
||||
func NewMediaInfoExtractor(fileCacher *filecache.Cacher, logger *zerolog.Logger) *MediaInfoExtractor {
|
||||
return &MediaInfoExtractor{
|
||||
fileCacher: fileCacher,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// GetInfo returns the media information of a file.
// If the information is not in the cache, it will be extracted and saved in the cache.
// The cache key is the file hash (path + mtime — see GetHashFromPath), so a
// modified file is re-probed automatically.
func (e *MediaInfoExtractor) GetInfo(ffprobePath, path string) (mi *MediaInfo, err error) {
	hash, err := GetHashFromPath(path)
	if err != nil {
		return nil, err
	}

	e.logger.Debug().Str("path", path).Str("hash", hash).Msg("mediastream: Getting media information [MediaInfoExtractor]")

	// One bucket per file hash, with a ~1-year TTL.
	bucketName := fmt.Sprintf("mediastream_mediainfo_%s", hash)
	bucket := filecache.NewBucket(bucketName, 24*7*52*time.Hour)
	e.logger.Trace().Str("bucketName", bucketName).Msg("mediastream: Using cache bucket [MediaInfoExtractor]")

	e.logger.Trace().Msg("mediastream: Getting media information from cache [MediaInfoExtractor]")

	// Look in the cache
	if found, _ := e.fileCacher.Get(bucket, hash, &mi); found {
		e.logger.Debug().Str("hash", hash).Msg("mediastream: Media information cache HIT [MediaInfoExtractor]")
		return mi, nil
	}

	e.logger.Debug().Str("hash", hash).Msg("mediastream: Extracting media information using FFprobe")

	// Get the media information of the file.
	mi, err = FfprobeGetInfo(ffprobePath, path, hash)
	if err != nil {
		e.logger.Error().Err(err).Str("path", path).Msg("mediastream: Failed to extract media information using FFprobe")
		return nil, err
	}

	// Save in the cache (best-effort; a cache write failure is not fatal).
	_ = e.fileCacher.Set(bucket, hash, mi)

	e.logger.Debug().Str("hash", hash).Msg("mediastream: Extracted media information using FFprobe")

	return mi, nil
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// FfprobeGetInfo probes the media file at path with FFprobe and converts the
// result into a MediaInfo: video/audio/subtitle streams, chapters, attached
// fonts, and a browser-compatible container mime-codec string. hash is stored
// as-is in the returned value.
func FfprobeGetInfo(ffprobePath, path, hash string) (*MediaInfo, error) {

	// An empty ffprobePath leaves go-ffprobe's default binary lookup in place.
	if ffprobePath != "" {
		ffprobe.SetFFProbeBinPath(ffprobePath)
	}

	// Hard cap so a stuck FFprobe process cannot hang the caller indefinitely.
	ffprobeCtx, cancel := context.WithTimeout(context.Background(), 40*time.Second)
	defer cancel()

	data, err := ffprobe.ProbeURL(ffprobeCtx, path)
	if err != nil {
		return nil, err
	}

	// NOTE(review): panics if path has no extension — assumes media files always carry one.
	ext := filepath.Ext(path)[1:]

	sizeUint64, _ := strconv.ParseUint(data.Format.Size, 10, 64)

	mi := &MediaInfo{
		Sha:       hash,
		Path:      path,
		Extension: ext,
		Size:      sizeUint64,
		Duration:  float32(data.Format.DurationSeconds),
		Container: cmp.Or(lo.ToPtr(data.Format.FormatName), nil),
	}

	// Get the video streams
	mi.Videos = streamToMap(data.Streams, ffprobe.StreamVideo, func(stream *ffprobe.Stream, i uint32) Video {
		lang, _ := language.Parse(stream.Tags.Language)
		bitrate, _ := strconv.ParseUint(cmp.Or(stream.BitRate, data.Format.BitRate), 10, 32)
		return Video{
			Codec:     stream.CodecName,
			MimeCodec: streamToMimeCodec(stream),
			Language:  nullIfZero(lang.String()),
			Quality:   heightToQuality(uint32(stream.Height)),
			Width:     uint32(stream.Width),
			Height:    uint32(stream.Height),
			// ffmpeg does not report bitrate in mkv files, fallback to bitrate of the whole container
			// (bigger than the result since it contains audio and other videos but better than nothing).
			Bitrate: uint32(bitrate),
		}
	})

	// Get the audio streams
	mi.Audios = streamToMap(data.Streams, ffprobe.StreamAudio, func(stream *ffprobe.Stream, i uint32) Audio {
		lang, _ := language.Parse(stream.Tags.Language)
		return Audio{
			Index:     i,
			Title:     nullIfZero(stream.Tags.Title),
			Language:  nullIfZero(lang.String()),
			Codec:     stream.CodecName,
			MimeCodec: streamToMimeCodec(stream),
			IsDefault: stream.Disposition.Default != 0,
			IsForced:  stream.Disposition.Forced != 0,
		}
	})

	// Get the subtitle streams
	mi.Subtitles = streamToMap(data.Streams, ffprobe.StreamSubtitle, func(stream *ffprobe.Stream, i uint32) Subtitle {
		// Codec name -> file extension used when extracting the track.
		subExtensions := map[string]string{
			"subrip": "srt",
			"ass":    "ass",
			"vtt":    "vtt",
			"ssa":    "ssa",
		}
		extension, ok := subExtensions[stream.CodecName]
		var link *string
		if ok {
			// Relative link under the subtitles cache dir for this file.
			x := fmt.Sprintf("/%d.%s", i, extension)
			link = &x
		}
		lang, _ := language.Parse(stream.Tags.Language)
		return Subtitle{
			Index:     i,
			Title:     nullIfZero(stream.Tags.Title),
			Language:  nullIfZero(lang.String()),
			Codec:     stream.CodecName,
			Extension: lo.ToPtr(extension),
			IsDefault: stream.Disposition.Default != 0,
			IsForced:  stream.Disposition.Forced != 0,
			Link:      link,
		}
	})

	// Remove subtitles without extensions (not supported)
	mi.Subtitles = lo.Filter(mi.Subtitles, func(item Subtitle, _ int) bool {
		if item.Extension == nil || *item.Extension == "" || item.Link == nil {
			return false
		}
		return true
	})

	// Get chapters
	mi.Chapters = lo.Map(data.Chapters, func(chapter *ffprobe.Chapter, _ int) Chapter {
		return Chapter{
			StartTime: float32(chapter.StartTimeSeconds),
			EndTime:   float32(chapter.EndTimeSeconds),
			Name:      chapter.Title(),
		}
	})

	// Get fonts (attachment streams' file names)
	mi.Fonts = streamToMap(data.Streams, ffprobe.StreamAttachment, func(stream *ffprobe.Stream, i uint32) string {
		filename, _ := stream.TagList.GetString("filename")
		return filename
	})

	// Build the container-level mime codec string from the first video and
	// first audio codec, when they are known.
	var codecs []string
	if len(mi.Videos) > 0 && mi.Videos[0].MimeCodec != nil {
		codecs = append(codecs, *mi.Videos[0].MimeCodec)
	}
	if len(mi.Audios) > 0 && mi.Audios[0].MimeCodec != nil {
		codecs = append(codecs, *mi.Audios[0].MimeCodec)
	}
	container := mime.TypeByExtension(fmt.Sprintf(".%s", mi.Extension))
	if container != "" {
		if len(codecs) > 0 {
			codecsStr := strings.Join(codecs, ", ")
			mi.MimeCodec = lo.ToPtr(fmt.Sprintf("%s; codecs=\"%s\"", container, codecsStr))
		} else {
			mi.MimeCodec = &container
		}
	}

	// Convenience pointer to the primary video stream.
	if len(mi.Videos) > 0 {
		mi.Video = &mi.Videos[0]
	}

	return mi, nil
}
|
||||
|
||||
// nullIfZero returns nil when v is the zero value of its type, otherwise a
// pointer to v. Used so empty ffprobe fields serialize as JSON null.
func nullIfZero[T comparable](v T) *T {
	var zero T
	if v == zero {
		return nil
	}
	return &v
}
|
||||
|
||||
func streamToMap[T any](streams []*ffprobe.Stream, kind ffprobe.StreamType, mapper func(*ffprobe.Stream, uint32) T) []T {
|
||||
count := 0
|
||||
for _, stream := range streams {
|
||||
if stream.CodecType == string(kind) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
ret := make([]T, count)
|
||||
|
||||
i := uint32(0)
|
||||
for _, stream := range streams {
|
||||
if stream.CodecType == string(kind) {
|
||||
ret[i] = mapper(stream, i)
|
||||
i++
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// streamToMimeCodec converts an ffprobe stream into its RFC 6381 codec string
// (the `codecs=` value browsers use for MediaSource/canPlayType checks).
// Returns nil for codecs this function does not know how to describe.
func streamToMimeCodec(stream *ffprobe.Stream) *string {
	switch stream.CodecName {
	case "h264":
		// FORMAT: avc1.[profile byte][constraint byte][level byte], hex
		ret := "avc1"

		switch strings.ToLower(stream.Profile) {
		case "high":
			ret += ".6400"
		case "main":
			ret += ".4D40"
		case "baseline":
			ret += ".42E0"
		default:
			// Default to constrained baseline if profile is invalid
			ret += ".4240"
		}

		ret += fmt.Sprintf("%02x", stream.Level)
		return &ret

	case "h265", "hevc":
		// The h265 syntax is a bit of a mystery at the time this comment was written.
		// This is what I've found through various sources:
		// FORMAT: [codecTag].[profile].[constraint?].L[level * 30].[UNKNOWN]
		ret := "hvc1"

		if stream.Profile == "main 10" {
			ret += ".2.4"
		} else {
			ret += ".1.4"
		}

		ret += fmt.Sprintf(".L%02X.BO", stream.Level)
		return &ret

	case "av1":
		// https://aomedia.org/av1/specification/annex-a/
		// FORMAT: [codecTag].[profile].[level][tier].[bitDepth]
		ret := "av01"

		switch strings.ToLower(stream.Profile) {
		case "main":
			ret += ".0"
		case "high":
			ret += ".1"
		case "professional":
			ret += ".2"
		default:
		}

		// not sure about this field, we want pixel bit depth
		bitdepth, _ := strconv.ParseUint(stream.BitsPerRawSample, 10, 32)
		if bitdepth != 8 && bitdepth != 10 && bitdepth != 12 {
			// Default to 8 bits
			bitdepth = 8
		}

		// 'M' = Main tier.
		tierflag := 'M'
		ret += fmt.Sprintf(".%02X%c.%02d", stream.Level, tierflag, bitdepth)

		return &ret

	case "aac":
		ret := "mp4a"

		switch strings.ToLower(stream.Profile) {
		case "he":
			ret += ".40.5"
		case "lc":
			ret += ".40.2"
		default:
			// Default to AAC-LC when the profile is unknown.
			ret += ".40.2"
		}

		return &ret

	case "opus":
		ret := "Opus"
		return &ret

	case "ac3":
		ret := "mp4a.a5"
		return &ret

	case "eac3":
		ret := "mp4a.a6"
		return &ret

	case "flac":
		ret := "fLaC"
		return &ret

	case "alac":
		ret := "alac"
		return &ret

	default:
		return nil
	}
}
|
||||
|
||||
func heightToQuality(height uint32) Quality {
|
||||
qualities := Qualities
|
||||
for _, quality := range qualities {
|
||||
if quality.Height() >= height {
|
||||
return quality
|
||||
}
|
||||
}
|
||||
return P240
|
||||
}
|
||||
51
seanime-2.9.10/internal/mediastream/videofile/info_test.go
Normal file
51
seanime-2.9.10/internal/mediastream/videofile/info_test.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package videofile
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"seanime/internal/util"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestFfprobeGetInfo_1 is a manual smoke test: set testFilePath to a local
// media file and remove the Skip to inspect the probed MediaInfo.
func TestFfprobeGetInfo_1(t *testing.T) {
	t.Skip()

	// Path to a local media file; intentionally empty in source control.
	testFilePath := ""

	mi, err := FfprobeGetInfo("", testFilePath, "1")
	if err != nil {
		t.Fatalf("Error getting media info: %v", err)
	}

	util.Spew(mi)
}
|
||||
|
||||
// TestExtractAttachment is a manual smoke test: set testFilePath to a local
// media file and remove the Skip to run FFprobe + FFmpeg extraction into a
// temp dir and list the extracted attachments.
func TestExtractAttachment(t *testing.T) {
	t.Skip()

	// Path to a local media file; intentionally empty in source control.
	testFilePath := ""

	testDir := t.TempDir()

	mi, err := FfprobeGetInfo("", testFilePath, "1")
	if err != nil {
		t.Fatalf("Error getting media info: %v", err)
	}

	util.Spew(mi)

	err = ExtractAttachment("", testFilePath, "1", mi, testDir, util.NewLogger())
	if err != nil {
		t.Fatalf("Error extracting attachment: %v", err)
	}

	// Attachments land under <cacheDir>/videofiles/<hash>/att.
	entries, err := os.ReadDir(filepath.Join(testDir, "videofiles", "1", "att"))
	if err != nil {
		t.Fatalf("Error reading directory: %v", err)
	}

	for _, entry := range entries {
		info, _ := entry.Info()
		t.Logf("Entry: %s, Size: %d\n", entry.Name(), info.Size())
	}
}
|
||||
19
seanime-2.9.10/internal/mediastream/videofile/info_utils.go
Normal file
19
seanime-2.9.10/internal/mediastream/videofile/info_utils.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package videofile
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"os"
|
||||
)
|
||||
|
||||
func GetHashFromPath(path string) (string, error) {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
h := sha1.New()
|
||||
h.Write([]byte(path))
|
||||
h.Write([]byte(info.ModTime().String()))
|
||||
sha := hex.EncodeToString(h.Sum(nil))
|
||||
return sha, nil
|
||||
}
|
||||
118
seanime-2.9.10/internal/mediastream/videofile/video_quality.go
Normal file
118
seanime-2.9.10/internal/mediastream/videofile/video_quality.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package videofile
|
||||
|
||||
import "errors"
|
||||
|
||||
// Quality is a named video quality tier ("240p" ... "8k", or "original" for
// stream-copy of the source).
type Quality string

const (
	P240     Quality = "240p"
	P360     Quality = "360p"
	P480     Quality = "480p"
	P720     Quality = "720p"
	P1080    Quality = "1080p"
	P1440    Quality = "1440p"
	P4k      Quality = "4k"
	P8k      Quality = "8k"
	Original Quality = "original"
)

// Qualities Purposefully removing Original from this list (since it require special treatments anyway)
// Ordered ascending by height — lookups that scan for the first tier covering
// a height rely on this ordering.
var Qualities = []Quality{P240, P360, P480, P720, P1080, P1440, P4k, P8k}
|
||||
|
||||
func QualityFromString(str string) (Quality, error) {
|
||||
if str == string(Original) {
|
||||
return Original, nil
|
||||
}
|
||||
|
||||
qualities := Qualities
|
||||
for _, quality := range qualities {
|
||||
if string(quality) == str {
|
||||
return quality, nil
|
||||
}
|
||||
}
|
||||
return Original, errors.New("invalid quality string")
|
||||
}
|
||||
|
||||
// AverageBitrate
|
||||
// I'm not entirely sure about the values for bit rates. Double-checking would be nice.
|
||||
func (v Quality) AverageBitrate() uint32 {
|
||||
switch v {
|
||||
case P240:
|
||||
return 400_000
|
||||
case P360:
|
||||
return 800_000
|
||||
case P480:
|
||||
return 1_200_000
|
||||
case P720:
|
||||
return 2_400_000
|
||||
case P1080:
|
||||
return 4_800_000
|
||||
case P1440:
|
||||
return 9_600_000
|
||||
case P4k:
|
||||
return 16_000_000
|
||||
case P8k:
|
||||
return 28_000_000
|
||||
case Original:
|
||||
panic("Original quality must be handled specially")
|
||||
}
|
||||
panic("Invalid quality value")
|
||||
}
|
||||
|
||||
func (v Quality) MaxBitrate() uint32 {
|
||||
switch v {
|
||||
case P240:
|
||||
return 700_000
|
||||
case P360:
|
||||
return 1_400_000
|
||||
case P480:
|
||||
return 2_100_000
|
||||
case P720:
|
||||
return 4_000_000
|
||||
case P1080:
|
||||
return 8_000_000
|
||||
case P1440:
|
||||
return 12_000_000
|
||||
case P4k:
|
||||
return 28_000_000
|
||||
case P8k:
|
||||
return 40_000_000
|
||||
case Original:
|
||||
panic("Original quality must be handled specially")
|
||||
}
|
||||
panic("Invalid quality value")
|
||||
}
|
||||
|
||||
func (v Quality) Height() uint32 {
|
||||
switch v {
|
||||
case P240:
|
||||
return 240
|
||||
case P360:
|
||||
return 360
|
||||
case P480:
|
||||
return 480
|
||||
case P720:
|
||||
return 720
|
||||
case P1080:
|
||||
return 1080
|
||||
case P1440:
|
||||
return 1440
|
||||
case P4k:
|
||||
return 2160
|
||||
case P8k:
|
||||
return 4320
|
||||
case Original:
|
||||
panic("Original quality must be handled specially")
|
||||
}
|
||||
panic("Invalid quality value")
|
||||
}
|
||||
|
||||
func GetQualityFromHeight(height uint32) Quality {
|
||||
qualities := Qualities
|
||||
for _, quality := range qualities {
|
||||
if quality.Height() >= height {
|
||||
return quality
|
||||
}
|
||||
}
|
||||
return P240
|
||||
}
|
||||
Reference in New Issue
Block a user