node build fixed

ra_ma
2025-09-20 14:08:38 +01:00
parent c6ebbe069d
commit 3d298fa434
1516 changed files with 535727 additions and 2 deletions


@@ -0,0 +1,214 @@
package nakama
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"fmt"
"io"
"github.com/huin/goupnp/dcps/internetgateway1"
"github.com/huin/goupnp/dcps/internetgateway2"
)
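// UPnPClient abstracts the WANIPConnection clients generated by goupnp for
// IGDv1 and IGDv2, both of which expose this method set.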
type UPnPClient interface {
GetExternalIPAddress() (string, error)
AddPortMapping(string, uint16, string, uint16, string, bool, string, uint32) error
DeletePortMapping(string, uint16, string) error
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Port forwarding
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func EnablePortForwarding(port int) (string, error) {
return enablePortForwarding(port)
}
// enablePortForwarding enables port forwarding for a given port and returns the address.
func enablePortForwarding(port int) (string, error) {
// Try IGDv2 first, then fall back to IGDv1
ip, err := addPortMappingIGD(func() ([]UPnPClient, error) {
clients, _, err := internetgateway2.NewWANIPConnection1Clients()
if err != nil {
return nil, err
}
upnpClients := make([]UPnPClient, len(clients))
for i, client := range clients {
upnpClients[i] = client
}
return upnpClients, nil
}, port)
if err != nil {
ip, err = addPortMappingIGD(func() ([]UPnPClient, error) {
clients, _, err := internetgateway1.NewWANIPConnection1Clients()
if err != nil {
return nil, err
}
upnpClients := make([]UPnPClient, len(clients))
for i, client := range clients {
upnpClients[i] = client
}
return upnpClients, nil
}, port)
if err != nil {
return "", fmt.Errorf("failed to add port mapping: %w", err)
}
}
return fmt.Sprintf("http://%s:%d", ip, port), nil
}
func disablePortForwarding(port int) error {
// Try to remove port mapping from both IGDv2 and IGDv1
err1 := removePortMappingIGD(func() ([]UPnPClient, error) {
clients, _, err := internetgateway2.NewWANIPConnection1Clients()
if err != nil {
return nil, err
}
upnpClients := make([]UPnPClient, len(clients))
for i, client := range clients {
upnpClients[i] = client
}
return upnpClients, nil
}, port)
err2 := removePortMappingIGD(func() ([]UPnPClient, error) {
clients, _, err := internetgateway1.NewWANIPConnection1Clients()
if err != nil {
return nil, err
}
upnpClients := make([]UPnPClient, len(clients))
for i, client := range clients {
upnpClients[i] = client
}
return upnpClients, nil
}, port)
// Return error only if both failed
if err1 != nil && err2 != nil {
return fmt.Errorf("failed to remove port mapping from IGDv2: %v, IGDv1: %v", err1, err2)
}
return nil
}
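// Typical lifecycle (illustrative sketch only, assuming a UPnP-capable
// gateway on the LAN):
//
//	addr, err := EnablePortForwarding(43211)
//	if err != nil {
//		// fall back to manual port forwarding
//	}
//	defer disablePortForwarding(43211)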
// addPortMappingIGD adds a port mapping using the provided client factory and returns the external IP
func addPortMappingIGD(clientFactory func() ([]UPnPClient, error), port int) (string, error) {
clients, err := clientFactory()
if err != nil {
return "", err
}
for _, client := range clients {
// Get external IP address
externalIP, err := client.GetExternalIPAddress()
if err != nil {
continue // Try next client
}
// Add port mapping
err = client.AddPortMapping(
"", // NewRemoteHost (empty for any)
uint16(port), // NewExternalPort
"TCP", // NewProtocol
uint16(port), // NewInternalPort
"127.0.0.1", // NewInternalClient (localhost)
true, // NewEnabled
"Seanime Nakama", // NewPortMappingDescription
uint32(3600), // NewLeaseDuration (1 hour)
)
if err != nil {
continue // Try next client
}
return externalIP, nil // Success
}
return "", fmt.Errorf("no working UPnP clients found")
}
// removePortMappingIGD removes a port mapping using the provided client factory
func removePortMappingIGD(clientFactory func() ([]UPnPClient, error), port int) error {
clients, err := clientFactory()
if err != nil {
return err
}
for _, client := range clients {
err = client.DeletePortMapping(
"", // NewRemoteHost (empty for any)
uint16(port), // NewExternalPort
"TCP", // NewProtocol
)
if err != nil {
continue // Try next client
}
return nil // Success
}
return fmt.Errorf("no working UPnP clients found")
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Join code (shelved)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
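// A join code packs "ip:port" into an AES-256-GCM ciphertext: the key is
// SHA-256(password), a random nonce is prepended to the sealed bytes, and the
// result is base64url-encoded without padding. DecryptJoinCode reverses this.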
func EncryptJoinCode(ip string, port int, password string) (string, error) {
plainText := fmt.Sprintf("%s:%d", ip, port)
// Derive 256-bit key from password
key := sha256.Sum256([]byte(password))
block, err := aes.NewCipher(key[:])
if err != nil {
return "", err
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return "", err
}
nonce := make([]byte, gcm.NonceSize())
if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
return "", err
}
ciphertext := gcm.Seal(nonce, nonce, []byte(plainText), nil)
return base64.RawURLEncoding.EncodeToString(ciphertext), nil
}
func DecryptJoinCode(code, password string) (string, error) {
data, err := base64.RawURLEncoding.DecodeString(code)
if err != nil {
return "", err
}
key := sha256.Sum256([]byte(password))
block, err := aes.NewCipher(key[:])
if err != nil {
return "", err
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return "", err
}
nonceSize := gcm.NonceSize()
if len(data) < nonceSize {
return "", fmt.Errorf("ciphertext too short")
}
nonce, ciphertext := data[:nonceSize], data[nonceSize:]
plaintext, err := gcm.Open(nil, nonce, ciphertext, nil)
if err != nil {
return "", err
}
return string(plaintext), nil
}


@@ -0,0 +1,41 @@
package nakama
import (
"testing"
)
func TestPortForwarding(t *testing.T) {
// Test port forwarding for port 43211
address, err := EnablePortForwarding(43211)
if err != nil {
t.Logf("Port forwarding failed (expected if no UPnP router available): %v", err)
t.Skip("No UPnP support available")
}
t.Logf("Port forwarding enabled successfully: %s", address)
// Clean up - disable the port forwarding
err = disablePortForwarding(43211)
if err != nil {
t.Logf("Warning: Failed to clean up port forwarding: %v", err)
}
}
func TestEncryptJoinCode(t *testing.T) {
code, err := EncryptJoinCode("127.0.0.1", 4000, "password")
if err != nil {
t.Fatal(err)
}
t.Logf("code: %s", code)
addr, err := DecryptJoinCode(code, "password")
if err != nil {
t.Fatal(err)
}
if addr != "127.0.0.1:4000" {
t.Fatal("invalid decrypted code")
}
}


@@ -0,0 +1,207 @@
package nakama
import (
"encoding/json"
"errors"
"seanime/internal/events"
"time"
)
// registerDefaultHandlers registers the default message handlers
func (m *Manager) registerDefaultHandlers() {
m.messageHandlers[MessageTypeAuth] = m.handleAuthMessage
m.messageHandlers[MessageTypeAuthReply] = m.handleAuthReplyMessage
m.messageHandlers[MessageTypePing] = m.handlePingMessage
m.messageHandlers[MessageTypePong] = m.handlePongMessage
m.messageHandlers[MessageTypeError] = m.handleErrorMessage
m.messageHandlers[MessageTypeCustom] = m.handleCustomMessage
// Watch party handlers
m.messageHandlers[MessageTypeWatchPartyCreated] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyStopped] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyJoin] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyLeave] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyStateChanged] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyPlaybackStatus] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyPlaybackStopped] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyPeerStatus] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyBufferUpdate] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyRelayModeOriginStreamStarted] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyRelayModeOriginPlaybackStatus] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyRelayModePeersReady] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyRelayModePeerBuffering] = m.handleWatchPartyMessage
m.messageHandlers[MessageTypeWatchPartyRelayModeOriginPlaybackStopped] = m.handleWatchPartyMessage
}
// handleMessage routes messages to the appropriate handler
func (m *Manager) handleMessage(message *Message, senderID string) error {
m.handlerMu.RLock()
handler, exists := m.messageHandlers[message.Type]
m.handlerMu.RUnlock()
if !exists {
return errors.New("unknown message type: " + string(message.Type))
}
return handler(message, senderID)
}
// handleAuthMessage handles authentication requests from peers
func (m *Manager) handleAuthMessage(message *Message, senderID string) error {
if !m.settings.IsHost {
return errors.New("not acting as host")
}
// Parse auth payload
authData, err := json.Marshal(message.Payload)
if err != nil {
return err
}
var authPayload AuthPayload
if err := json.Unmarshal(authData, &authPayload); err != nil {
return err
}
// Get peer connection
peerConn, exists := m.peerConnections.Get(senderID)
if !exists {
return errors.New("peer connection not found")
}
// Verify password
success := authPayload.Password == m.settings.HostPassword
var replyMessage string
if success {
// Update the peer connection with the PeerID from auth payload if not already set
if peerConn.PeerId == "" && authPayload.PeerId != "" {
peerConn.PeerId = authPayload.PeerId
}
peerConn.Authenticated = true
replyMessage = "Authentication successful"
m.logger.Info().Str("peerID", peerConn.PeerId).Str("senderID", senderID).Msg("nakama: Peer authenticated successfully")
// Send event to client about new peer connection
m.wsEventManager.SendEvent(events.NakamaPeerConnected, map[string]interface{}{
"peerId": peerConn.PeerId, // Use PeerID for events
"authenticated": true,
})
} else {
replyMessage = "Authentication failed"
m.logger.Warn().Str("peerId", peerConn.PeerId).Str("senderID", senderID).Msg("nakama: Peer authentication failed")
}
// Send auth reply
authReply := &Message{
Type: MessageTypeAuthReply,
Payload: AuthReplyPayload{
Success: success,
Message: replyMessage,
Username: m.username,
PeerId: peerConn.PeerId, // Echo back the peer's UUID
},
Timestamp: time.Now(),
}
return peerConn.SendMessage(authReply)
}
// handleAuthReplyMessage handles authentication replies from hosts
func (m *Manager) handleAuthReplyMessage(message *Message, senderID string) error {
// This should only be received by clients, and is handled in the client connection logic
// We can log it here for debugging purposes
m.logger.Debug().Str("senderID", senderID).Msg("nakama: Received auth reply")
return nil
}
// handlePingMessage handles ping messages
func (m *Manager) handlePingMessage(message *Message, senderID string) error {
// Send pong response
pongMessage := &Message{
Type: MessageTypePong,
Payload: nil,
Timestamp: time.Now(),
}
if m.settings.IsHost {
// We're the host, send pong to peer
peerConn, exists := m.peerConnections.Get(senderID)
if !exists {
return errors.New("peer connection not found")
}
return peerConn.SendMessage(pongMessage)
} else {
// We're a client, send pong to host
m.hostMu.RLock()
defer m.hostMu.RUnlock()
if m.hostConnection == nil {
return errors.New("not connected to host")
}
return m.hostConnection.SendMessage(pongMessage)
}
}
// handlePongMessage handles pong messages
func (m *Manager) handlePongMessage(message *Message, senderID string) error {
// Update last ping time
if m.settings.IsHost {
// Update peer's last ping time
peerConn, exists := m.peerConnections.Get(senderID)
if exists {
peerConn.LastPing = time.Now()
}
} else {
// Update host's last ping time
m.hostMu.Lock()
if m.hostConnection != nil {
m.hostConnection.LastPing = time.Now()
}
m.hostMu.Unlock()
}
return nil
}
// handleErrorMessage handles error messages
func (m *Manager) handleErrorMessage(message *Message, senderID string) error {
// Parse error payload
errorData, err := json.Marshal(message.Payload)
if err != nil {
return err
}
var errorPayload ErrorPayload
if err := json.Unmarshal(errorData, &errorPayload); err != nil {
return err
}
m.logger.Error().Str("senderID", senderID).Str("errorMessage", errorPayload.Message).Str("errorCode", errorPayload.Code).Msg("nakama: Received error message")
// Send event to client about the error
m.wsEventManager.SendEvent(events.NakamaError, map[string]interface{}{
"senderID": senderID,
"message": errorPayload.Message,
"code": errorPayload.Code,
})
return nil
}
// handleCustomMessage handles custom messages
func (m *Manager) handleCustomMessage(message *Message, senderID string) error {
m.logger.Debug().Str("senderID", senderID).Msg("nakama: Received custom message")
// Send event to client with the custom message
m.wsEventManager.SendEvent(events.NakamaCustomMessage, map[string]interface{}{
"senderID": senderID,
"payload": message.Payload,
"requestID": message.RequestID,
"timestamp": message.Timestamp,
})
return nil
}
func (m *Manager) handleWatchPartyMessage(message *Message, senderID string) error {
return m.watchPartyManager.handleMessage(message, senderID)
}


@@ -0,0 +1,238 @@
package nakama
import (
"net/http"
"seanime/internal/constants"
"seanime/internal/events"
"seanime/internal/util"
"time"
"github.com/gorilla/websocket"
)
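// Note: CheckOrigin below always returns true. Peers are other Seanime
// instances rather than browsers, and they authenticate with the Nakama
// password after the upgrade, so origin checks add nothing here.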
var upgrader = websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true // Allow connections from any origin
},
}
// startHostServices initializes the host services
func (m *Manager) startHostServices() {
if m.settings == nil || !m.settings.IsHost || !m.settings.Enabled {
return
}
m.logger.Info().Msg("nakama: Starting host services")
// Clean up any existing watch party session
m.watchPartyManager.Cleanup()
// Start ping routine for connected peers
go m.hostPingRoutine()
// Start stale connection cleanup routine
go m.staleConnectionCleanupRoutine()
// Send event to client about host mode being enabled
m.wsEventManager.SendEvent(events.NakamaHostStarted, map[string]interface{}{
"enabled": true,
})
}
// stopHostServices stops the host services
func (m *Manager) stopHostServices() {
m.logger.Info().Msg("nakama: Stopping host services")
// Disconnect all peers
m.peerConnections.Range(func(id string, conn *PeerConnection) bool {
conn.Close()
return true
})
m.peerConnections.Clear()
// Send event to client about host mode being disabled
m.wsEventManager.SendEvent(events.NakamaHostStopped, map[string]interface{}{
"enabled": false,
})
}
// HandlePeerConnection handles incoming WebSocket connections from peers
func (m *Manager) HandlePeerConnection(w http.ResponseWriter, r *http.Request) {
if m.settings == nil || !m.settings.IsHost || !m.settings.Enabled {
http.Error(w, "Host mode not enabled", http.StatusForbidden)
return
}
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
m.logger.Error().Err(err).Msg("nakama: Failed to upgrade WebSocket connection")
return
}
username := r.Header.Get("X-Seanime-Nakama-Username")
// Generate a random username if username is not set (this shouldn't be the case because the peer will generate its own username)
if username == "" {
username = "Peer_" + util.RandomStringWithAlphabet(8, "bcdefhijklmnopqrstuvwxyz0123456789")
}
peerID := r.Header.Get("X-Seanime-Nakama-Peer-Id")
if peerID == "" {
m.logger.Error().Msg("nakama: Peer connection missing PeerID header")
http.Error(w, "Missing PeerID header", http.StatusBadRequest)
return
}
serverVersion := r.Header.Get("X-Seanime-Nakama-Server-Version")
if serverVersion != constants.Version {
http.Error(w, "Server version mismatch", http.StatusBadRequest)
return
}
// Check for existing connection with the same PeerID (reconnection scenario)
var existingConnID string
m.peerConnections.Range(func(id string, existingConn *PeerConnection) bool {
if existingConn.PeerId == peerID {
existingConnID = id
return false // Stop iteration
}
return true
})
// Remove existing connection for this PeerID to handle reconnection
if existingConnID != "" {
if oldConn, exists := m.peerConnections.Get(existingConnID); exists {
m.logger.Info().Str("peerID", peerID).Str("oldConnID", existingConnID).Msg("nakama: Removing old connection for reconnecting peer")
m.peerConnections.Delete(existingConnID)
oldConn.Close()
}
}
// Generate new internal connection ID
internalConnID := generateConnectionID()
peerConn := &PeerConnection{
ID: internalConnID,
PeerId: peerID,
Username: username,
Conn: conn,
ConnectionType: ConnectionTypePeer,
Authenticated: false,
LastPing: time.Now(),
}
m.logger.Info().Str("internalConnID", internalConnID).Str("peerID", peerID).Str("username", username).Msg("nakama: New peer connection")
// Add to connections using internal connection ID as key
m.peerConnections.Set(internalConnID, peerConn)
// Handle the connection in a goroutine
go m.handlePeerConnection(peerConn)
}
// handlePeerConnection handles messages from a specific peer
func (m *Manager) handlePeerConnection(peerConn *PeerConnection) {
defer func() {
m.logger.Info().Str("peerId", peerConn.PeerId).Str("internalConnID", peerConn.ID).Msg("nakama: Peer disconnected")
// Remove from connections (safe to call multiple times)
if _, exists := m.peerConnections.Get(peerConn.ID); exists {
m.peerConnections.Delete(peerConn.ID)
// Remove peer from watch party if they were participating
m.watchPartyManager.HandlePeerDisconnected(peerConn.PeerId)
// Send event to client about peer disconnection (only if we actually removed it)
m.wsEventManager.SendEvent(events.NakamaPeerDisconnected, map[string]interface{}{
"peerId": peerConn.PeerId,
})
}
// Close connection (safe to call multiple times)
peerConn.Close()
}()
// Set up ping/pong handler
peerConn.Conn.SetPongHandler(func(appData string) error {
peerConn.LastPing = time.Now()
return nil
})
// Set read deadline
peerConn.Conn.SetReadDeadline(time.Now().Add(60 * time.Second))
for {
select {
case <-m.ctx.Done():
return
default:
var message Message
err := peerConn.Conn.ReadJSON(&message)
if err != nil {
if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
m.logger.Error().Err(err).Str("peerId", peerConn.PeerId).Msg("nakama: Unexpected close error")
}
return
}
// Handle the message using internal connection ID for message routing
if err := m.handleMessage(&message, peerConn.ID); err != nil {
m.logger.Error().Err(err).Str("peerId", peerConn.PeerId).Str("messageType", string(message.Type)).Msg("nakama: Failed to handle message")
// Send error response
errorMsg := &Message{
Type: MessageTypeError,
Payload: ErrorPayload{
Message: err.Error(),
},
Timestamp: time.Now(),
}
peerConn.SendMessage(errorMsg)
}
// Reset read deadline
peerConn.Conn.SetReadDeadline(time.Now().Add(60 * time.Second))
}
}
}
// hostPingRoutine sends ping messages to all connected peers
func (m *Manager) hostPingRoutine() {
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
for {
select {
case <-m.ctx.Done():
return
case <-ticker.C:
m.peerConnections.Range(func(id string, conn *PeerConnection) bool {
// Send ping
message := &Message{
Type: MessageTypePing,
Payload: nil,
Timestamp: time.Now(),
}
if err := conn.SendMessage(message); err != nil {
m.logger.Error().Err(err).Str("peerId", conn.PeerId).Msg("nakama: Failed to send ping")
// Don't close here, let the stale connection cleanup handle it
}
return true
})
}
}
}
// staleConnectionCleanupRoutine periodically removes stale connections
func (m *Manager) staleConnectionCleanupRoutine() {
ticker := time.NewTicker(120 * time.Second) // Run every 2 minutes
defer ticker.Stop()
for {
select {
case <-m.ctx.Done():
return
case <-ticker.C:
m.RemoveStaleConnections()
}
}
}


@@ -0,0 +1,646 @@
package nakama
import (
"cmp"
"context"
"encoding/json"
"errors"
"fmt"
"seanime/internal/database/models"
debrid_client "seanime/internal/debrid/client"
"seanime/internal/events"
"seanime/internal/library/playbackmanager"
"seanime/internal/platforms/platform"
"seanime/internal/torrentstream"
"seanime/internal/util"
"seanime/internal/util/result"
"strings"
"sync"
"time"
"github.com/gorilla/websocket"
"github.com/imroc/req/v3"
"github.com/rs/zerolog"
)
type Manager struct {
serverHost string
serverPort int
username string
logger *zerolog.Logger
settings *models.NakamaSettings
wsEventManager events.WSEventManagerInterface
platform platform.Platform
playbackManager *playbackmanager.PlaybackManager
torrentstreamRepository *torrentstream.Repository
debridClientRepository *debrid_client.Repository
peerId string
// Host connections (when acting as host)
peerConnections *result.Map[string, *PeerConnection]
// Host connection (when connecting to a host)
hostConnection *HostConnection
hostConnectionCtx context.Context
hostConnectionCancel context.CancelFunc
hostMu sync.RWMutex
reconnecting bool // Flag to prevent multiple concurrent reconnection attempts
// Connection management
cancel context.CancelFunc
ctx context.Context
// Message handlers
messageHandlers map[MessageType]func(*Message, string) error
handlerMu sync.RWMutex
// Cleanup functions
cleanups []func()
reqClient *req.Client
watchPartyManager *WatchPartyManager
previousPath string // latest file streamed by the peer - real path on the host
}
type NewManagerOptions struct {
Logger *zerolog.Logger
WSEventManager events.WSEventManagerInterface
PlaybackManager *playbackmanager.PlaybackManager
TorrentstreamRepository *torrentstream.Repository
DebridClientRepository *debrid_client.Repository
Platform platform.Platform
ServerHost string
ServerPort int
}
type ConnectionType string
const (
ConnectionTypeHost ConnectionType = "host"
ConnectionTypePeer ConnectionType = "peer"
)
// MessageType represents the type of message being sent
type MessageType string
const (
MessageTypeAuth MessageType = "auth"
MessageTypeAuthReply MessageType = "auth_reply"
MessageTypePing MessageType = "ping"
MessageTypePong MessageType = "pong"
MessageTypeError MessageType = "error"
MessageTypeCustom MessageType = "custom"
)
// Message represents a message sent between Nakama instances
type Message struct {
Type MessageType `json:"type"`
Payload interface{} `json:"payload"`
RequestID string `json:"requestId,omitempty"`
Timestamp time.Time `json:"timestamp"`
}
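// On the wire, a message is a small JSON object; RequestID is omitted when
// empty. Illustrative example:
//
//	{"type":"ping","payload":null,"timestamp":"2025-09-20T14:08:38+01:00"}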
// PeerConnection represents a connection from a peer to this host
type PeerConnection struct {
ID string // Internal connection ID (websocket)
PeerId string // UUID generated by the peer (primary identifier)
Username string // Display name (kept for UI purposes)
Conn *websocket.Conn
ConnectionType ConnectionType
Authenticated bool
LastPing time.Time
mu sync.RWMutex
}
// HostConnection represents this instance's connection to a host
type HostConnection struct {
URL string
PeerId string // UUID generated by this peer instance
Username string
Conn *websocket.Conn
Authenticated bool
LastPing time.Time
reconnectTimer *time.Timer
mu sync.RWMutex
}
// NakamaEvent represents events sent to the client
type NakamaEvent struct {
Type string `json:"type"`
Payload interface{} `json:"payload"`
}
// AuthPayload represents authentication data
type AuthPayload struct {
Password string `json:"password"`
PeerId string `json:"peerId"` // UUID generated by the peer
}
// AuthReplyPayload represents authentication response
type AuthReplyPayload struct {
Success bool `json:"success"`
Message string `json:"message"`
Username string `json:"username"`
PeerId string `json:"peerId"` // Echo back the peer's UUID
}
// ErrorPayload represents error messages
type ErrorPayload struct {
Message string `json:"message"`
Code string `json:"code,omitempty"`
}
// HostConnectionStatus represents the status of the host connection
type HostConnectionStatus struct {
Connected bool `json:"connected"`
Authenticated bool `json:"authenticated"`
URL string `json:"url"`
LastPing time.Time `json:"lastPing"`
PeerId string `json:"peerId"`
Username string `json:"username"`
}
// NakamaStatus represents the overall status of Nakama connections
type NakamaStatus struct {
IsHost bool `json:"isHost"`
ConnectedPeers []string `json:"connectedPeers"`
IsConnectedToHost bool `json:"isConnectedToHost"`
HostConnectionStatus *HostConnectionStatus `json:"hostConnectionStatus"`
CurrentWatchPartySession *WatchPartySession `json:"currentWatchPartySession"`
}
// MessageResponse represents a response to message sending requests
type MessageResponse struct {
Success bool `json:"success"`
Message string `json:"message"`
}
type ClientEvent struct {
Type string `json:"type"`
Payload interface{} `json:"payload"`
}
func NewManager(opts *NewManagerOptions) *Manager {
ctx, cancel := context.WithCancel(context.Background())
m := &Manager{
username: "",
logger: opts.Logger,
wsEventManager: opts.WSEventManager,
playbackManager: opts.PlaybackManager,
peerConnections: result.NewResultMap[string, *PeerConnection](),
platform: opts.Platform,
ctx: ctx,
cancel: cancel,
messageHandlers: make(map[MessageType]func(*Message, string) error),
cleanups: make([]func(), 0),
reqClient: req.C(),
serverHost: opts.ServerHost,
serverPort: opts.ServerPort,
settings: &models.NakamaSettings{},
torrentstreamRepository: opts.TorrentstreamRepository,
debridClientRepository: opts.DebridClientRepository,
previousPath: "",
}
m.watchPartyManager = NewWatchPartyManager(m)
// Register default message handlers
m.registerDefaultHandlers()
eventListener := m.wsEventManager.SubscribeToClientEvents("nakama")
go func() {
for event := range eventListener.Channel {
if event.Type == events.NakamaStatusRequested {
currSession, _ := m.GetWatchPartyManager().GetCurrentSession()
status := &NakamaStatus{
IsHost: m.IsHost(),
ConnectedPeers: m.GetConnectedPeers(),
IsConnectedToHost: m.IsConnectedToHost(),
HostConnectionStatus: m.GetHostConnectionStatus(),
CurrentWatchPartySession: currSession,
}
m.wsEventManager.SendEvent(events.NakamaStatus, status)
}
if event.Type == events.NakamaWatchPartyEnableRelayMode {
var payload WatchPartyEnableRelayModePayload
marshaledPayload, err := json.Marshal(event.Payload)
if err != nil {
m.logger.Error().Err(err).Msg("nakama: Failed to marshal watch party enable relay mode payload")
continue
}
err = json.Unmarshal(marshaledPayload, &payload)
if err != nil {
m.logger.Error().Err(err).Msg("nakama: Failed to unmarshal watch party enable relay mode payload")
continue
}
m.GetWatchPartyManager().EnableRelayMode(payload.PeerId)
}
}
}()
return m
}
func (m *Manager) SetSettings(settings *models.NakamaSettings) {
var previousSettings *models.NakamaSettings
if m.settings != nil {
settingsCopy := *m.settings
previousSettings = &settingsCopy
}
// If the host password has changed, stop host service
// This will cause a restart of the host service
disconnectAsHost := false
if m.settings != nil && m.settings.HostPassword != settings.HostPassword {
disconnectAsHost = true
m.stopHostServices()
}
m.settings = settings
m.username = cmp.Or(settings.Username, "Peer_"+util.RandomStringWithAlphabet(8, "bcdefhijklmnopqrstuvwxyz0123456789"))
m.logger.Debug().Bool("isHost", settings.IsHost).Str("username", m.username).Str("remoteURL", settings.RemoteServerURL).Msg("nakama: Settings updated")
if previousSettings == nil || previousSettings.IsHost != settings.IsHost || previousSettings.Enabled != settings.Enabled || disconnectAsHost {
// Determine if we should stop host services
shouldStopHost := m.IsHost() && (!settings.Enabled || // Nakama disabled
!settings.IsHost || // Switching to peer mode
disconnectAsHost) // Password changed (requires restart)
// Determine if we should start host services
shouldStartHost := settings.IsHost && settings.Enabled
// Always stop first if needed, then start
if shouldStopHost {
m.stopHostServices()
}
if shouldStartHost {
m.startHostServices()
}
}
if previousSettings == nil || previousSettings.RemoteServerURL != settings.RemoteServerURL || previousSettings.RemoteServerPassword != settings.RemoteServerPassword || previousSettings.Enabled != settings.Enabled {
// Determine if we should disconnect from current host
shouldDisconnect := m.IsConnectedToHost() && (!settings.Enabled || // Nakama disabled
settings.IsHost || // Switching to host mode
settings.RemoteServerURL == "" || // No remote URL
settings.RemoteServerPassword == "" || // No password
(previousSettings != nil && previousSettings.RemoteServerURL != settings.RemoteServerURL) || // URL changed
(previousSettings != nil && previousSettings.RemoteServerPassword != settings.RemoteServerPassword)) // Password changed
// Determine if we should connect to a host
shouldConnect := !settings.IsHost &&
settings.Enabled &&
settings.RemoteServerURL != "" &&
settings.RemoteServerPassword != ""
// Always disconnect first if needed, then connect
if shouldDisconnect {
m.disconnectFromHost()
}
if shouldConnect {
m.connectToHost()
}
}
// if previousSettings == nil || previousSettings.Username != settings.Username {
// m.SendMessage(MessageTypeCustom, map[string]interface{}{
// "type": "nakama_username_changed",
// "username": settings.Username,
// })
// }
}
func (m *Manager) GetHostBaseServerURL() string {
url := m.settings.RemoteServerURL
if strings.HasSuffix(url, "/") {
url = strings.TrimSuffix(url, "/")
}
return url
}
func (m *Manager) IsHost() bool {
return m.settings.IsHost
}
func (m *Manager) GetHostConnection() (*HostConnection, bool) {
m.hostMu.RLock()
defer m.hostMu.RUnlock()
return m.hostConnection, m.hostConnection != nil
}
// GetWatchPartyManager returns the watch party manager
func (m *Manager) GetWatchPartyManager() *WatchPartyManager {
return m.watchPartyManager
}
// Cleanup stops all connections and services
func (m *Manager) Cleanup() {
m.logger.Debug().Msg("nakama: Cleaning up")
if m.cancel != nil {
m.cancel()
}
// Cancel any ongoing host connection attempts
m.hostMu.Lock()
if m.hostConnectionCancel != nil {
m.hostConnectionCancel()
m.hostConnectionCancel = nil
}
m.hostMu.Unlock()
// Cleanup host connections
m.peerConnections.Range(func(id string, conn *PeerConnection) bool {
conn.Close()
return true
})
m.peerConnections.Clear()
// Cleanup client connection
m.hostMu.Lock()
if m.hostConnection != nil {
m.hostConnection.Close()
m.hostConnection = nil
}
m.hostMu.Unlock()
// Run cleanup functions
for _, cleanup := range m.cleanups {
cleanup()
}
}
// RegisterMessageHandler registers a custom message handler
func (m *Manager) RegisterMessageHandler(msgType MessageType, handler func(*Message, string) error) {
m.handlerMu.Lock()
defer m.handlerMu.Unlock()
m.messageHandlers[msgType] = handler
}
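// Example (sketch, with a hypothetical message type): handlers receive the
// decoded message plus a sender ID, which is the host-side internal
// connection ID or "host" when running as a peer.
//
//	m.RegisterMessageHandler("plugin_event", func(msg *Message, senderID string) error {
//		// inspect msg.Payload here
//		return nil
//	})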
// SendMessage sends a message to all connected peers (when acting as host)
func (m *Manager) SendMessage(msgType MessageType, payload interface{}) error {
if !m.settings.IsHost {
return errors.New("not acting as host")
}
message := &Message{
Type: msgType,
Payload: payload,
Timestamp: time.Now(),
}
var lastError error
m.peerConnections.Range(func(id string, conn *PeerConnection) bool {
if err := conn.SendMessage(message); err != nil {
m.logger.Error().Err(err).Str("peerId", conn.PeerId).Msg("nakama: Failed to send message to peer")
lastError = err
}
return true
})
return lastError
}
// SendMessageToPeer sends a message to a specific peer by their PeerID
func (m *Manager) SendMessageToPeer(peerID string, msgType MessageType, payload interface{}) error {
if !m.settings.IsHost {
return errors.New("only hosts can send messages to peers")
}
// Find peer by PeerID
var targetConn *PeerConnection
m.peerConnections.Range(func(id string, conn *PeerConnection) bool {
if conn.PeerId == peerID {
targetConn = conn
return false // Stop iteration
}
return true
})
if targetConn == nil {
return errors.New("peer not found: " + peerID)
}
message := &Message{
Type: msgType,
Payload: payload,
Timestamp: time.Now(),
}
return targetConn.SendMessage(message)
}
// SendMessageToHost sends a message to the host (when acting as peer)
func (m *Manager) SendMessageToHost(msgType MessageType, payload interface{}) error {
m.hostMu.RLock()
defer m.hostMu.RUnlock()
if m.hostConnection == nil || !m.hostConnection.Authenticated {
return errors.New("not connected to host")
}
message := &Message{
Type: msgType,
Payload: payload,
Timestamp: time.Now(),
}
return m.hostConnection.SendMessage(message)
}
// GetConnectedPeers returns a list of connected peer IDs
func (m *Manager) GetConnectedPeers() []string {
if !m.settings.IsHost {
return []string{}
}
peers := make([]string, 0)
m.peerConnections.Range(func(id string, conn *PeerConnection) bool {
if conn.Authenticated {
// Use PeerID as the primary identifier
peerDisplayName := conn.Username
if peerDisplayName == "" {
peerDisplayName = "Unknown"
}
// Format: "Username (PeerID_short)"
// peers = append(peers, fmt.Sprintf("%s (%s)", peerDisplayName, conn.PeerId[:8]))
peers = append(peers, peerDisplayName)
}
return true
})
return peers
}
// IsConnectedToHost returns whether this instance is connected to a host
func (m *Manager) IsConnectedToHost() bool {
m.hostMu.RLock()
defer m.hostMu.RUnlock()
return m.hostConnection != nil && m.hostConnection.Authenticated
}
// GetHostConnectionStatus returns the status of the host connection
func (m *Manager) GetHostConnectionStatus() *HostConnectionStatus {
m.hostMu.RLock()
defer m.hostMu.RUnlock()
if m.hostConnection == nil {
return nil
}
return &HostConnectionStatus{
Connected: m.hostConnection != nil,
Authenticated: m.hostConnection != nil && m.hostConnection.Authenticated,
URL: m.hostConnection.URL,
LastPing: m.hostConnection.LastPing,
PeerId: m.hostConnection.PeerId,
Username: m.hostConnection.Username,
}
}
func (pc *PeerConnection) SendMessage(message *Message) error {
pc.mu.Lock()
defer pc.mu.Unlock()
return pc.Conn.WriteJSON(message)
}
func (pc *PeerConnection) Close() {
pc.mu.Lock()
defer pc.mu.Unlock()
_ = pc.Conn.Close()
}
func (hc *HostConnection) SendMessage(message *Message) error {
hc.mu.Lock()
defer hc.mu.Unlock()
return hc.Conn.WriteJSON(message)
}
func (hc *HostConnection) Close() {
hc.mu.Lock()
defer hc.mu.Unlock()
if hc.reconnectTimer != nil {
hc.reconnectTimer.Stop()
}
_ = hc.Conn.Close()
}
// generateConnectionID returns a time-based internal connection ID. UnixNano granularity makes collisions unlikely, though not impossible on platforms with coarse clocks.
func generateConnectionID() string {
return fmt.Sprintf("conn_%d", time.Now().UnixNano())
}
// ReconnectToHost attempts to reconnect to the host
func (m *Manager) ReconnectToHost() error {
if m.settings == nil || m.settings.RemoteServerURL == "" || m.settings.RemoteServerPassword == "" {
return errors.New("no host connection configured")
}
// Check if already reconnecting
m.hostMu.Lock()
if m.reconnecting {
m.hostMu.Unlock()
return errors.New("reconnection already in progress")
}
m.hostMu.Unlock()
m.logger.Info().Msg("nakama: Manual reconnection to host requested")
// Disconnect current connection if exists
m.disconnectFromHost()
// Wait a moment before reconnecting
time.Sleep(1 * time.Second)
// Reconnect
m.connectToHost()
return nil
}
// RemoveStaleConnections removes connections that haven't responded to ping in a while
func (m *Manager) RemoveStaleConnections() {
if !m.settings.IsHost {
return
}
staleThreshold := 90 * time.Second // Consider connections stale after 90 seconds of no ping
now := time.Now()
var staleConnections []string
m.peerConnections.Range(func(id string, conn *PeerConnection) bool {
conn.mu.RLock()
lastPing := conn.LastPing
authenticated := conn.Authenticated
conn.mu.RUnlock()
// Only check authenticated connections
if !authenticated {
return true
}
// If LastPing is zero, use connection time as reference
if lastPing.IsZero() {
lastPing = now.Add(-staleThreshold - time.Minute)
}
if now.Sub(lastPing) > staleThreshold {
staleConnections = append(staleConnections, id)
}
return true
})
// Remove stale connections
for _, id := range staleConnections {
if conn, exists := m.peerConnections.Get(id); exists {
// Double-check to avoid race conditions
conn.mu.RLock()
lastPing := conn.LastPing
if lastPing.IsZero() {
lastPing = now.Add(-staleThreshold - time.Minute)
}
isStale := now.Sub(lastPing) > staleThreshold
conn.mu.RUnlock()
if isStale {
m.logger.Info().Str("peerId", conn.PeerId).Str("internalConnID", id).Msg("nakama: Removing stale peer connection")
// Remove from map first to prevent re-addition
m.peerConnections.Delete(id)
// Remove peer from watch party if they were participating
m.watchPartyManager.HandlePeerDisconnected(conn.PeerId)
// Then close the connection (this will trigger the defer cleanup in handlePeerConnection)
conn.Close()
// Send event about peer disconnection
m.wsEventManager.SendEvent(events.NakamaPeerDisconnected, map[string]interface{}{
"peerId": conn.PeerId,
"reason": "stale_connection",
})
}
}
}
if len(staleConnections) > 0 {
m.logger.Info().Int("count", len(staleConnections)).Msg("nakama: Removed stale peer connections")
}
}
// FindPeerByPeerID finds a peer connection by their PeerID
func (m *Manager) FindPeerByPeerID(peerID string) (*PeerConnection, bool) {
var found *PeerConnection
m.peerConnections.Range(func(id string, conn *PeerConnection) bool {
if conn.PeerId == peerID {
found = conn
return false // Stop iteration
}
return true
})
return found, found != nil
}


@@ -0,0 +1,386 @@
package nakama
import (
"context"
"encoding/json"
"errors"
"net/http"
"net/url"
"seanime/internal/constants"
"seanime/internal/events"
"seanime/internal/util"
"strings"
"time"
"github.com/google/uuid"
"github.com/gorilla/websocket"
)
// connectToHost establishes a connection to the Nakama host
func (m *Manager) connectToHost() {
if m.settings == nil || !m.settings.Enabled || m.settings.RemoteServerURL == "" || m.settings.RemoteServerPassword == "" {
return
}
m.logger.Info().Str("url", m.settings.RemoteServerURL).Msg("nakama: Connecting to host")
// Cancel any existing connection attempts
m.hostMu.Lock()
if m.hostConnectionCancel != nil {
m.hostConnectionCancel()
}
// Create new context for this connection attempt
m.hostConnectionCtx, m.hostConnectionCancel = context.WithCancel(m.ctx)
// Prevent multiple concurrent connection attempts
if m.reconnecting {
m.hostMu.Unlock()
return
}
m.reconnecting = true
m.hostMu.Unlock()
go m.connectToHostAsync()
}
// disconnectFromHost disconnects from the Nakama host
func (m *Manager) disconnectFromHost() {
m.hostMu.Lock()
defer m.hostMu.Unlock()
// Cancel any ongoing connection attempts
if m.hostConnectionCancel != nil {
m.hostConnectionCancel()
m.hostConnectionCancel = nil
}
if m.hostConnection != nil {
m.logger.Info().Msg("nakama: Disconnecting from host")
// Cancel any reconnection timer
if m.hostConnection.reconnectTimer != nil {
m.hostConnection.reconnectTimer.Stop()
}
m.hostConnection.Close()
m.hostConnection = nil
// Send event to client about disconnection
m.wsEventManager.SendEvent(events.NakamaHostDisconnected, map[string]interface{}{
"connected": false,
})
}
// Reset reconnecting flag
m.reconnecting = false
}
// connectToHostAsync handles the actual connection logic with retries
func (m *Manager) connectToHostAsync() {
defer func() {
m.hostMu.Lock()
m.reconnecting = false
m.hostMu.Unlock()
}()
if m.settings == nil || !m.settings.Enabled || m.settings.RemoteServerURL == "" || m.settings.RemoteServerPassword == "" {
return
}
// Get the connection context
m.hostMu.RLock()
connCtx := m.hostConnectionCtx
m.hostMu.RUnlock()
if connCtx == nil {
return
}
maxRetries := 5
retryDelay := 5 * time.Second
for attempt := 0; attempt < maxRetries; attempt++ {
select {
case <-connCtx.Done():
m.logger.Info().Msg("nakama: Connection attempt cancelled")
return
case <-m.ctx.Done():
return
default:
}
if err := m.attemptHostConnection(connCtx); err != nil {
m.logger.Error().Err(err).Int("attempt", attempt+1).Msg("nakama: Failed to connect to host")
if attempt < maxRetries-1 {
select {
case <-connCtx.Done():
m.logger.Info().Msg("nakama: Connection attempt cancelled")
return
case <-m.ctx.Done():
return
case <-time.After(retryDelay):
retryDelay *= 2 // Exponential backoff
continue
}
}
} else {
// Success
m.logger.Info().Msg("nakama: Successfully connected to host")
return
}
}
// Only log error if not cancelled
select {
case <-connCtx.Done():
m.logger.Info().Msg("nakama: Connection attempts cancelled")
default:
m.logger.Error().Msg("nakama: Failed to connect to host after all retries")
m.wsEventManager.SendEvent(events.ErrorToast, "Failed to connect to Nakama host after multiple attempts.")
}
}
// attemptHostConnection makes a single connection attempt to the host
func (m *Manager) attemptHostConnection(connCtx context.Context) error {
// Parse URL
u, err := url.Parse(m.settings.RemoteServerURL)
if err != nil {
return err
}
// Convert HTTP to WebSocket scheme
switch u.Scheme {
case "http":
u.Scheme = "ws"
case "https":
u.Scheme = "wss"
}
// Add Nakama WebSocket path
if !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
u.Path += "api/v1/nakama/ws"
// Generate UUID for this peer instance
peerID := uuid.New().String()
username := m.username
// Generate a random username if username is not set
if username == "" {
username = "Peer_" + util.RandomStringWithAlphabet(8, "bcdefhijklmnopqrstuvwxyz0123456789")
}
// Set up headers for authentication
headers := http.Header{}
headers.Set("X-Seanime-Nakama-Token", m.settings.RemoteServerPassword)
headers.Set("X-Seanime-Nakama-Username", username)
headers.Set("X-Seanime-Nakama-Server-Version", constants.Version)
headers.Set("X-Seanime-Nakama-Peer-Id", peerID)
// Create a dialer with the connection context
dialer := websocket.Dialer{
HandshakeTimeout: 10 * time.Second,
}
// Connect with context
conn, _, err := dialer.DialContext(connCtx, u.String(), headers)
if err != nil {
return err
}
hostConn := &HostConnection{
URL: u.String(),
Conn: conn,
Authenticated: false,
LastPing: time.Now(),
PeerId: peerID, // Store our generated PeerID
}
// Authenticate
authMessage := &Message{
Type: MessageTypeAuth,
Payload: AuthPayload{
Password: m.settings.RemoteServerPassword,
PeerId: peerID, // Include PeerID in auth payload
},
Timestamp: time.Now(),
}
if err := hostConn.SendMessage(authMessage); err != nil {
_ = conn.Close()
return err
}
// Wait for auth response with timeout
_ = conn.SetReadDeadline(time.Now().Add(10 * time.Second))
var authResponse Message
if err := conn.ReadJSON(&authResponse); err != nil {
_ = conn.Close()
return err
}
if authResponse.Type != MessageTypeAuthReply {
_ = conn.Close()
return errors.New("unexpected auth response type")
}
// Parse auth response
authReplyData, err := json.Marshal(authResponse.Payload)
if err != nil {
_ = conn.Close()
return err
}
var authReply AuthReplyPayload
if err := json.Unmarshal(authReplyData, &authReply); err != nil {
_ = conn.Close()
return err
}
if !authReply.Success {
_ = conn.Close()
return errors.New("authentication failed: " + authReply.Message)
}
// Verify that the host echoed back our PeerID
if authReply.PeerId != peerID {
m.logger.Warn().Str("expectedPeerID", peerID).Str("receivedPeerID", authReply.PeerId).Msg("nakama: Host returned different PeerID")
}
hostConn.Username = authReply.Username
if hostConn.Username == "" {
hostConn.Username = "Host_" + util.RandomStringWithAlphabet(8, "bcdefhijklmnopqrstuvwxyz0123456789")
}
hostConn.Authenticated = true
// Set the connection and cancel any existing reconnection timer
m.hostMu.Lock()
if m.hostConnection != nil && m.hostConnection.reconnectTimer != nil {
m.hostConnection.reconnectTimer.Stop()
}
m.hostConnection = hostConn
m.hostMu.Unlock()
// Send event to client about successful connection
m.wsEventManager.SendEvent(events.NakamaHostConnected, map[string]interface{}{
"connected": true,
"authenticated": true,
"url": hostConn.URL,
"peerID": peerID, // Include our PeerID in the event
})
// Start handling the connection
go m.handleHostConnection(hostConn)
// Start client ping routine
go m.clientPingRoutine()
return nil
}
// handleHostConnection handles messages from the host
func (m *Manager) handleHostConnection(hostConn *HostConnection) {
defer func() {
m.logger.Info().Msg("nakama: Host connection closed")
m.hostMu.Lock()
if m.hostConnection == hostConn {
m.hostConnection = nil
}
m.hostMu.Unlock()
// Send event to client about disconnection
m.wsEventManager.SendEvent(events.NakamaHostDisconnected, map[string]interface{}{
"connected": false,
})
// Attempt reconnection after a delay if settings are still valid and not already reconnecting
m.hostMu.Lock()
shouldReconnect := m.settings != nil && m.settings.RemoteServerURL != "" && m.settings.RemoteServerPassword != "" && !m.reconnecting
if shouldReconnect {
m.reconnecting = true
hostConn.reconnectTimer = time.AfterFunc(10*time.Second, func() {
m.connectToHostAsync()
})
}
m.hostMu.Unlock()
}()
// Set up ping/pong handler
hostConn.Conn.SetPongHandler(func(appData string) error {
hostConn.LastPing = time.Now()
return nil
})
// Set read deadline
_ = hostConn.Conn.SetReadDeadline(time.Now().Add(60 * time.Second))
for {
select {
case <-m.ctx.Done():
return
default:
var message Message
err := hostConn.Conn.ReadJSON(&message)
if err != nil {
if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
m.logger.Error().Err(err).Msg("nakama: Unexpected close error from host")
}
return
}
// Handle the message
if err := m.handleMessage(&message, "host"); err != nil {
m.logger.Error().Err(err).Str("messageType", string(message.Type)).Msg("nakama: Failed to handle message from host")
}
// Reset read deadline
_ = hostConn.Conn.SetReadDeadline(time.Now().Add(60 * time.Second))
}
}
}
// clientPingRoutine sends ping messages to the host
func (m *Manager) clientPingRoutine() {
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
for {
select {
case <-m.ctx.Done():
return
case <-ticker.C:
m.hostMu.RLock()
if m.hostConnection == nil || !m.hostConnection.Authenticated {
m.hostMu.RUnlock()
return
}
// Check if host is still alive
if time.Since(m.hostConnection.LastPing) > 90*time.Second {
m.logger.Warn().Msg("nakama: Host connection timeout")
m.hostConnection.Close()
m.hostMu.RUnlock()
return
}
// Send ping
message := &Message{
Type: MessageTypePing,
Payload: nil,
Timestamp: time.Now(),
}
if err := m.hostConnection.SendMessage(message); err != nil {
m.logger.Error().Err(err).Msg("nakama: Failed to send ping to host")
m.hostConnection.Close()
m.hostMu.RUnlock()
return
}
m.hostMu.RUnlock()
}
}
}


@@ -0,0 +1,245 @@
package nakama
import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"seanime/internal/api/anilist"
"seanime/internal/api/metadata"
"seanime/internal/events"
"seanime/internal/library/anime"
"seanime/internal/library/playbackmanager"
"seanime/internal/util"
"strconv"
"strings"
"time"
"github.com/imroc/req/v3"
)
type (
HydrateHostAnimeLibraryOptions struct {
AnimeCollection *anilist.AnimeCollection
LibraryCollection *anime.LibraryCollection
MetadataProvider metadata.Provider
}
NakamaAnimeLibrary struct {
LocalFiles []*anime.LocalFile `json:"localFiles"`
AnimeCollection *anilist.AnimeCollection `json:"animeCollection"`
}
)
// generateHMACToken generates an HMAC token for stream authentication
func (m *Manager) generateHMACToken(endpoint string) (string, error) {
// Use the Nakama password as the base secret - HostPassword for hosts, RemoteServerPassword for peers
var secret string
if m.settings.IsHost {
secret = m.settings.HostPassword
} else {
secret = m.settings.RemoteServerPassword
}
hmacAuth := util.NewHMACAuth(secret, 24*time.Hour)
return hmacAuth.GenerateToken(endpoint)
}
func (m *Manager) GetHostAnimeLibraryFiles(mId ...int) (lfs []*anime.LocalFile, hydrated bool) {
if !m.settings.Enabled || !m.settings.IncludeNakamaAnimeLibrary || !m.IsConnectedToHost() {
return nil, false
}
var response *req.Response
var err error
if len(mId) > 0 {
response, err = m.reqClient.R().
SetHeader("X-Seanime-Nakama-Token", m.settings.RemoteServerPassword).
Get(m.GetHostBaseServerURL() + "/api/v1/nakama/host/anime/library/files/" + strconv.Itoa(mId[0]))
if err != nil {
return nil, false
}
} else {
response, err = m.reqClient.R().
SetHeader("X-Seanime-Nakama-Token", m.settings.RemoteServerPassword).
Get(m.GetHostBaseServerURL() + "/api/v1/nakama/host/anime/library/files")
if err != nil {
return nil, false
}
}
if !response.IsSuccessState() {
return nil, false
}
body := response.Bytes()
var entryResponse struct {
Data []*anime.LocalFile `json:"data"`
}
err = json.Unmarshal(body, &entryResponse)
if err != nil {
return nil, false
}
return entryResponse.Data, true
}
func (m *Manager) GetHostAnimeLibrary() (ac *NakamaAnimeLibrary, hydrated bool) {
if !m.settings.Enabled || !m.settings.IncludeNakamaAnimeLibrary || !m.IsConnectedToHost() {
return nil, false
}
var response *req.Response
var err error
response, err = m.reqClient.R().
SetHeader("X-Seanime-Nakama-Token", m.settings.RemoteServerPassword).
Get(m.GetHostBaseServerURL() + "/api/v1/nakama/host/anime/library")
if err != nil {
return nil, false
}
if !response.IsSuccessState() {
return nil, false
}
body := response.Bytes()
var entryResponse struct {
Data *NakamaAnimeLibrary `json:"data"`
}
err = json.Unmarshal(body, &entryResponse)
if err != nil {
return nil, false
}
if entryResponse.Data == nil {
return nil, false
}
return entryResponse.Data, true
}
func (m *Manager) getBaseServerURL() string {
ret := ""
host := m.serverHost
if host == "0.0.0.0" {
host = "127.0.0.1"
}
ret = fmt.Sprintf("http://%s:%d", host, m.serverPort)
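// Guard against serverHost already carrying a scheme, which would otherwise
// yield a malformed "http://http..." prefix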
if strings.HasPrefix(ret, "http://http") {
ret = strings.Replace(ret, "http://http", "http", 1)
}
return ret
}
func (m *Manager) PlayHostAnimeLibraryFile(path string, userAgent string, media *anilist.BaseAnime, aniDBEpisode string) error {
if !m.settings.Enabled || !m.IsConnectedToHost() {
return errors.New("not connected to host")
}
m.previousPath = path
m.logger.Debug().Int("mediaId", media.ID).Msg("nakama: Playing host anime library file")
m.wsEventManager.SendEvent(events.ShowIndefiniteLoader, "nakama-file")
m.wsEventManager.SendEvent(events.InfoToast, "Sending stream to player...")
// Send an HTTP request to the host to get the anime library.
// If we can access it, the host is sharing its anime library.
response, err := m.reqClient.R().
SetHeader("X-Seanime-Nakama-Token", m.settings.RemoteServerPassword).
Get(m.GetHostBaseServerURL() + "/api/v1/nakama/host/anime/library/collection")
if err != nil {
return fmt.Errorf("cannot access host's anime library: %w", err)
}
if !response.IsSuccessState() {
body := response.Bytes()
code := response.StatusCode
return fmt.Errorf("cannot access host's anime library: %d, %s", code, string(body))
}
host := m.serverHost
if host == "0.0.0.0" {
host = "127.0.0.1"
}
address := fmt.Sprintf("%s:%d", host, m.serverPort)
ret := fmt.Sprintf("http://%s/api/v1/nakama/stream?type=file&path=%s", address, base64.StdEncoding.EncodeToString([]byte(path)))
if strings.HasPrefix(ret, "http://http") {
ret = strings.Replace(ret, "http://http", "http", 1)
}
windowTitle := media.GetPreferredTitle()
if !media.IsMovieOrSingleEpisode() {
windowTitle += " - Episode " + aniDBEpisode
}
err = m.playbackManager.StartStreamingUsingMediaPlayer(windowTitle, &playbackmanager.StartPlayingOptions{
Payload: ret,
UserAgent: userAgent,
ClientId: "",
}, media, aniDBEpisode)
if err != nil {
m.wsEventManager.SendEvent(events.HideIndefiniteLoader, "nakama-file")
go m.playbackManager.UnsubscribeFromPlaybackStatus("nakama-file")
return err
}
m.playbackManager.RegisterMediaPlayerCallback(func(event playbackmanager.PlaybackEvent, cancel func()) {
switch event.(type) {
case playbackmanager.StreamStartedEvent:
m.wsEventManager.SendEvent(events.HideIndefiniteLoader, "nakama-file")
cancel()
}
})
return nil
}
func (m *Manager) PlayHostAnimeStream(streamType string, userAgent string, media *anilist.BaseAnime, aniDBEpisode string) error {
if !m.settings.Enabled || !m.IsConnectedToHost() {
return errors.New("not connected to host")
}
m.logger.Debug().Int("mediaId", media.ID).Msg("nakama: Playing host anime stream")
m.wsEventManager.SendEvent(events.ShowIndefiniteLoader, "nakama-stream")
m.wsEventManager.SendEvent(events.InfoToast, "Sending stream to player...")
host := m.serverHost
if host == "0.0.0.0" {
host = "127.0.0.1"
}
address := fmt.Sprintf("%s:%d", host, m.serverPort)
ret := fmt.Sprintf("http://%s/api/v1/nakama/stream?type=%s", address, streamType)
if strings.HasPrefix(ret, "http://http") {
ret = strings.Replace(ret, "http://http", "http", 1)
}
windowTitle := media.GetPreferredTitle()
if !media.IsMovieOrSingleEpisode() {
windowTitle += " - Episode " + aniDBEpisode
}
err := m.playbackManager.StartStreamingUsingMediaPlayer(windowTitle, &playbackmanager.StartPlayingOptions{
Payload: ret,
UserAgent: userAgent,
ClientId: "",
}, media, aniDBEpisode)
if err != nil {
m.wsEventManager.SendEvent(events.HideIndefiniteLoader, "nakama-stream")
go m.playbackManager.UnsubscribeFromPlaybackStatus("nakama-stream")
return err
}
m.playbackManager.RegisterMediaPlayerCallback(func(event playbackmanager.PlaybackEvent, cancel func()) {
switch event.(type) {
case playbackmanager.StreamStartedEvent:
m.wsEventManager.SendEvent(events.HideIndefiniteLoader, "nakama-stream")
cancel()
}
})
return nil
}


@@ -0,0 +1,421 @@
package nakama
import (
"context"
"encoding/json"
debrid_client "seanime/internal/debrid/client"
"seanime/internal/library/playbackmanager"
"seanime/internal/mediaplayers/mediaplayer"
"seanime/internal/torrentstream"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/samber/mo"
)
const (
// Host -> Peer
MessageTypeWatchPartyCreated = "watch_party_created" // Host creates a watch party
MessageTypeWatchPartyStateChanged = "watch_party_state_changed" // Host or peer changes the state of the watch party
MessageTypeWatchPartyStopped = "watch_party_stopped" // Host stops a watch party
MessageTypeWatchPartyPlaybackStatus = "watch_party_playback_status" // Host or peer sends playback status to peers (seek, play, pause, etc)
MessageTypeWatchPartyPlaybackStopped = "watch_party_playback_stopped" // Peer sends playback stopped to host
// MessageTypeWatchPartyRelayModeStreamReady = "watch_party_relay_mode_stream_ready" // Relay server signals to origin that the stream is ready
MessageTypeWatchPartyRelayModePeersReady = "watch_party_relay_mode_peers_ready" // Relay server signals to origin that all peers are ready
MessageTypeWatchPartyRelayModePeerBuffering = "watch_party_relay_mode_peer_buffering" // Relay server signals to origin the buffering status (tells origin to pause/unpause)
// Peer -> Host
MessageTypeWatchPartyJoin = "watch_party_join" // Peer joins a watch party
MessageTypeWatchPartyLeave = "watch_party_leave" // Peer leaves a watch party
MessageTypeWatchPartyPeerStatus = "watch_party_peer_status" // Peer reports their current status to host
MessageTypeWatchPartyBufferUpdate = "watch_party_buffer_update" // Peer reports buffering state to host
MessageTypeWatchPartyRelayModeOriginStreamStarted = "watch_party_relay_mode_origin_stream_started" // Relay origin signals that it is starting a stream; the host will start it too
MessageTypeWatchPartyRelayModeOriginPlaybackStatus = "watch_party_relay_mode_origin_playback_status" // Relay origin sends playback status to relay server
MessageTypeWatchPartyRelayModeOriginPlaybackStopped = "watch_party_relay_mode_origin_playback_stopped" // Relay origin sends playback stopped to relay server
)
const (
// Drift detection and sync thresholds
MinSyncThreshold = 0.8 // Minimum sync threshold to prevent excessive seeking
MaxSyncThreshold = 5.0 // Maximum sync threshold for loose synchronization
AggressiveSyncMultiplier = 0.4 // Multiplier for large drift (>3s) to sync aggressively
ModerateSyncMultiplier = 0.6 // Multiplier for medium drift (>1.5s) to sync more frequently
// Sync timing and delays
MinSeekDelay = 200 * time.Millisecond // Minimum delay for seek operations
MaxSeekDelay = 600 * time.Millisecond // Maximum delay for seek operations
DefaultSeekCooldown = 1 * time.Second // Cooldown between consecutive seeks
// Message staleness and processing
MaxMessageAge = 1.5 // Seconds to ignore stale sync messages
PendingSeekWaitMultiplier = 1.0 // Multiplier for pending seek wait time
// Position and state detection
SignificantPositionJump = 3.0 // Seconds to detect seeking vs normal playback
ResumePositionDriftThreshold = 1.0 // Seconds of drift before syncing on resume
ResumeAheadTolerance = 2.0 // Seconds ahead tolerance to prevent jitter on resume
PausePositionSyncThreshold = 0.7 // Seconds of drift threshold for pause sync
// Catch-up and buffering
CatchUpBehindThreshold = 2.0 // Seconds behind before starting catch-up
CatchUpToleranceThreshold = 0.5 // Seconds within target to stop catch-up
MaxCatchUpDuration = 4 * time.Second // Maximum duration for catch-up operations
CatchUpTickInterval = 200 * time.Millisecond // Interval for catch-up progress checks
// Buffer detection (peer-side)
BufferDetectionMinInterval = 1.5 // Seconds between buffer health checks
BufferDetectionTolerance = 0.6 // Tolerance for playback progress detection
BufferDetectionStallThreshold = 2 // Consecutive stalls before buffering detection
BufferHealthDecrement = 0.15 // Buffer health decrease per stall
EndOfContentThreshold = 2.0 // Seconds from end to disable buffering detection
// Network and timing compensation
MinDynamicDelay = 200 * time.Millisecond // Minimum network delay compensation
MaxDynamicDelay = 500 * time.Millisecond // Maximum network delay compensation
)
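// adaptiveSyncThreshold is a hypothetical sketch (illustration only, not the shipped
// sync code) of how the constants above are meant to interact: the session's configured
// threshold is clamped to [MinSyncThreshold, MaxSyncThreshold], then tightened when the
// observed drift is already large so big desyncs are corrected more aggressively.
// Assumes the enclosing file imports "math".
func adaptiveSyncThreshold(configured, drift float64) float64 {
	threshold := math.Min(math.Max(configured, MinSyncThreshold), MaxSyncThreshold)
	switch {
	case drift > 3.0:
		threshold *= AggressiveSyncMultiplier // large drift (>3s): sync aggressively
	case drift > 1.5:
		threshold *= ModerateSyncMultiplier // medium drift (>1.5s): sync more frequently
	}
	return threshold
}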
type WatchPartyManager struct {
logger *zerolog.Logger
manager *Manager
currentSession mo.Option[*WatchPartySession] // Current watch party session
sessionCtx context.Context // Context for the current watch party session
sessionCtxCancel context.CancelFunc // Cancel function for the current watch party session
mu sync.RWMutex // Mutex for the watch party manager
// Seek management to prevent choppy playback
lastSeekTime time.Time // Time of last seek operation
seekCooldown time.Duration // Minimum time between seeks
// Catch-up management
catchUpCancel context.CancelFunc // Cancel function for catch-up operations
catchUpMu sync.Mutex // Mutex for catch-up operations
// Seek management
pendingSeekTime time.Time // When a seek was initiated
pendingSeekPosition float64 // Position we're seeking to
seekMu sync.Mutex // Mutex for seek state
// Buffering management (host only)
bufferWaitStart time.Time // When we started waiting for peers to buffer
isWaitingForBuffers bool // Whether we're currently waiting for peers to be ready
bufferMu sync.Mutex // Mutex for buffer state changes
statusReportTicker *time.Ticker // Ticker for peer status reporting
statusReportCancel context.CancelFunc // Cancel function for status reporting
waitForPeersCancel context.CancelFunc // Cancel function for waitForPeersReady goroutine
// Buffering detection (peer only)
bufferDetectionMu sync.Mutex // Mutex for buffering detection state
lastPosition float64 // Last known playback position
lastPositionTime time.Time // When we last updated the position
stallCount int // Number of consecutive stalls detected
lastPlayState bool // Last known play/pause state to detect rapid changes
lastPlayStateTime time.Time // When we last changed play state
// Sequence-based message ordering
sequenceMu sync.Mutex // Mutex for sequence number operations
sendSequence uint64 // Current sequence number for outgoing messages
lastRxSequence uint64 // Latest received sequence number
// Peer
peerPlaybackListener *playbackmanager.PlaybackStatusSubscriber // Listener for playback status changes (can be nil)
}
type WatchPartySession struct {
ID string `json:"id"`
Participants map[string]*WatchPartySessionParticipant `json:"participants"`
Settings *WatchPartySessionSettings `json:"settings"`
CreatedAt time.Time `json:"createdAt"`
CurrentMediaInfo *WatchPartySessionMediaInfo `json:"currentMediaInfo"` // can be nil if not set
IsRelayMode bool `json:"isRelayMode"` // Whether this session is in relay mode
mu sync.RWMutex `json:"-"`
}
type WatchPartySessionParticipant struct {
ID string `json:"id"` // PeerID (UUID) for unique identification
Username string `json:"username"` // Display name
IsHost bool `json:"isHost"`
CanControl bool `json:"canControl"`
IsReady bool `json:"isReady"`
LastSeen time.Time `json:"lastSeen"`
Latency int64 `json:"latency"` // in milliseconds
// Buffering state
IsBuffering bool `json:"isBuffering"`
BufferHealth float64 `json:"bufferHealth"` // 0.0 to 1.0, how much buffer is available
PlaybackStatus *mediaplayer.PlaybackStatus `json:"playbackStatus,omitempty"` // Current playback status
// Relay mode
IsRelayOrigin bool `json:"isRelayOrigin"` // Whether this peer is the origin for relay mode
}
type WatchPartySessionMediaInfo struct {
MediaId int `json:"mediaId"`
EpisodeNumber int `json:"episodeNumber"`
AniDBEpisode string `json:"aniDbEpisode"`
StreamType string `json:"streamType"` // "file", "torrent", "debrid", "online"
StreamPath string `json:"streamPath"` // URL for stream playback (e.g. /api/v1/nakama/stream?type=file&path=...)
OnlineStreamParams *OnlineStreamParams `json:"onlineStreamParams,omitempty"`
OptionalTorrentStreamStartOptions *torrentstream.StartStreamOptions `json:"optionalTorrentStreamStartOptions,omitempty"`
}
type OnlineStreamParams struct {
MediaId int `json:"mediaId"`
Provider string `json:"provider"`
Server string `json:"server"`
Dubbed bool `json:"dubbed"`
EpisodeNumber int `json:"episodeNumber"`
Quality string `json:"quality"`
}
type WatchPartySessionSettings struct {
SyncThreshold float64 `json:"syncThreshold"` // Seconds of desync before forcing sync
MaxBufferWaitTime int `json:"maxBufferWaitTime"` // Max time to wait for buffering peers (seconds)
}
// Events
type (
WatchPartyCreatedPayload struct {
Session *WatchPartySession `json:"session"`
}
WatchPartyJoinPayload struct {
PeerId string `json:"peerId"`
Username string `json:"username"`
}
WatchPartyLeavePayload struct {
PeerId string `json:"peerId"`
}
WatchPartyPlaybackStatusPayload struct {
PlaybackStatus mediaplayer.PlaybackStatus `json:"playbackStatus"`
Timestamp int64 `json:"timestamp"` // Unix nano timestamp
SequenceNumber uint64 `json:"sequenceNumber"`
EpisodeNumber int `json:"episodeNumber"` // For episode changes
}
WatchPartyStateChangedPayload struct {
Session *WatchPartySession `json:"session"`
}
WatchPartyPeerStatusPayload struct {
PeerId string `json:"peerId"`
PlaybackStatus mediaplayer.PlaybackStatus `json:"playbackStatus"`
IsBuffering bool `json:"isBuffering"`
BufferHealth float64 `json:"bufferHealth"` // 0.0 to 1.0
Timestamp time.Time `json:"timestamp"`
}
WatchPartyBufferUpdatePayload struct {
PeerId string `json:"peerId"`
IsBuffering bool `json:"isBuffering"`
BufferHealth float64 `json:"bufferHealth"`
Timestamp time.Time `json:"timestamp"`
}
WatchPartyEnableRelayModePayload struct {
PeerId string `json:"peerId"` // PeerID of the peer to promote to origin
}
WatchPartyRelayModeOriginStreamStartedPayload struct {
Filename string `json:"filename"`
Filepath string `json:"filepath"`
StreamType string `json:"streamType"`
OptionalLocalPath string `json:"optionalLocalPath,omitempty"`
OptionalTorrentStreamStartOptions *torrentstream.StartStreamOptions `json:"optionalTorrentStreamStartOptions,omitempty"`
OptionalDebridStreamStartOptions *debrid_client.StartStreamOptions `json:"optionalDebridStreamStartOptions,omitempty"`
Status mediaplayer.PlaybackStatus `json:"status"`
State playbackmanager.PlaybackState `json:"state"`
}
WatchPartyRelayModeOriginPlaybackStatusPayload struct {
Status mediaplayer.PlaybackStatus `json:"status"`
State playbackmanager.PlaybackState `json:"state"`
Timestamp int64 `json:"timestamp"`
}
)
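// NewWatchPartyManager creates a watch party manager bound to the given Nakama manager,
// with the default seek cooldown applied.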
func NewWatchPartyManager(manager *Manager) *WatchPartyManager {
return &WatchPartyManager{
logger: manager.logger,
manager: manager,
seekCooldown: DefaultSeekCooldown,
}
}
// Cleanup stops all goroutines and cleans up resources to prevent memory leaks
func (wpm *WatchPartyManager) Cleanup() {
wpm.mu.Lock()
defer wpm.mu.Unlock()
if wpm.currentSession.IsPresent() {
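// Both methods take wpm.mu (held here) and no-op for the wrong role, so they are
// fired asynchronously and proceed once Cleanup releases the lock.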
go wpm.LeaveWatchParty()
go wpm.StopWatchParty()
}
wpm.logger.Debug().Msg("nakama: Cleaning up watch party manager")
// Stop status reporting (peer side)
wpm.stopStatusReporting()
// Cancel any ongoing catch-up operations
wpm.cancelCatchUp()
// Clean up seek management state
wpm.seekMu.Lock()
wpm.pendingSeekTime = time.Time{}
wpm.pendingSeekPosition = 0
wpm.seekMu.Unlock()
// Cancel waitForPeersReady goroutine (host side)
wpm.bufferMu.Lock()
if wpm.waitForPeersCancel != nil {
wpm.waitForPeersCancel()
wpm.waitForPeersCancel = nil
}
wpm.isWaitingForBuffers = false
wpm.bufferMu.Unlock()
// Cancel session context (stops all session-related goroutines)
if wpm.sessionCtxCancel != nil {
wpm.sessionCtxCancel()
wpm.sessionCtx = nil
wpm.sessionCtxCancel = nil
}
// Clear session
wpm.currentSession = mo.None[*WatchPartySession]()
wpm.logger.Debug().Msg("nakama: Watch party manager cleanup completed")
}
// GetCurrentSession returns the current watch party session if it exists
func (wpm *WatchPartyManager) GetCurrentSession() (*WatchPartySession, bool) {
wpm.mu.RLock()
defer wpm.mu.RUnlock()
session, ok := wpm.currentSession.Get()
return session, ok
}
func (wpm *WatchPartyManager) handleMessage(message *Message, senderID string) error {
marshaledPayload, err := json.Marshal(message.Payload)
if err != nil {
return err
}
// wpm.logger.Debug().Str("type", string(message.Type)).Interface("payload", message.Payload).Msg("nakama: Received watch party message")
switch message.Type {
case MessageTypeWatchPartyStateChanged:
// wpm.logger.Debug().Msg("nakama: Received watch party state changed message")
var payload WatchPartyStateChangedPayload
err := json.Unmarshal(marshaledPayload, &payload)
if err != nil {
return err
}
wpm.handleWatchPartyStateChangedEvent(&payload)
case MessageTypeWatchPartyCreated:
wpm.logger.Debug().Msg("nakama: Received watch party created message")
var payload WatchPartyCreatedPayload
err := json.Unmarshal(marshaledPayload, &payload)
if err != nil {
return err
}
wpm.handleWatchPartyCreatedEvent(&payload)
case MessageTypeWatchPartyStopped:
wpm.logger.Debug().Msg("nakama: Received watch party stopped message")
wpm.handleWatchPartyStoppedEvent()
case MessageTypeWatchPartyJoin:
wpm.logger.Debug().Msg("nakama: Received watch party join message")
var payload WatchPartyJoinPayload
err := json.Unmarshal(marshaledPayload, &payload)
if err != nil {
return err
}
wpm.handleWatchPartyPeerJoinedEvent(&payload, message.Timestamp)
case MessageTypeWatchPartyLeave:
wpm.logger.Debug().Msg("nakama: Received watch party leave message")
var payload WatchPartyLeavePayload
err := json.Unmarshal(marshaledPayload, &payload)
if err != nil {
return err
}
wpm.handleWatchPartyPeerLeftEvent(&payload)
case MessageTypeWatchPartyPeerStatus:
//wpm.logger.Debug().Msg("nakama: Received watch party peer status message")
var payload WatchPartyPeerStatusPayload
err := json.Unmarshal(marshaledPayload, &payload)
if err != nil {
return err
}
wpm.handleWatchPartyPeerStatusEvent(&payload)
case MessageTypeWatchPartyBufferUpdate:
//wpm.logger.Debug().Msg("nakama: Received watch party buffer update message")
var payload WatchPartyBufferUpdatePayload
err := json.Unmarshal(marshaledPayload, &payload)
if err != nil {
return err
}
wpm.handleWatchPartyBufferUpdateEvent(&payload)
case MessageTypeWatchPartyPlaybackStatus:
// wpm.logger.Debug().Msg("nakama: Received watch party playback status message")
var payload WatchPartyPlaybackStatusPayload
err := json.Unmarshal(marshaledPayload, &payload)
if err != nil {
return err
}
wpm.handleWatchPartyPlaybackStatusEvent(&payload)
case MessageTypeWatchPartyRelayModeOriginStreamStarted:
wpm.logger.Debug().Msg("nakama: Received relay mode stream from origin message")
var payload WatchPartyRelayModeOriginStreamStartedPayload
err := json.Unmarshal(marshaledPayload, &payload)
if err != nil {
return err
}
wpm.handleWatchPartyRelayModeOriginStreamStartedEvent(&payload)
case MessageTypeWatchPartyRelayModePeerBuffering:
// TODO: Implement
case MessageTypeWatchPartyRelayModePeersReady:
wpm.logger.Debug().Msg("nakama: Received relay mode peers ready message")
wpm.handleWatchPartyRelayModePeersReadyEvent()
case MessageTypeWatchPartyRelayModeOriginPlaybackStatus:
// wpm.logger.Debug().Msg("nakama: Received relay mode origin playback status message")
var payload WatchPartyRelayModeOriginPlaybackStatusPayload
err := json.Unmarshal(marshaledPayload, &payload)
if err != nil {
return err
}
wpm.handleWatchPartyRelayModeOriginPlaybackStatusEvent(&payload)
case MessageTypeWatchPartyRelayModeOriginPlaybackStopped:
wpm.logger.Debug().Msg("nakama: Received relay mode origin playback stopped message")
wpm.handleWatchPartyRelayModeOriginPlaybackStoppedEvent()
}
return nil
}
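// Equals reports whether two media infos describe the same stream.
// A nil receiver or argument never compares equal, including nil against nil.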
func (mi *WatchPartySessionMediaInfo) Equals(other *WatchPartySessionMediaInfo) bool {
if mi == nil || other == nil {
return false
}
return mi.MediaId == other.MediaId &&
mi.EpisodeNumber == other.EpisodeNumber &&
mi.AniDBEpisode == other.AniDBEpisode &&
mi.StreamType == other.StreamType &&
mi.StreamPath == other.StreamPath
}
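// decodeWatchPartyPayload is a hypothetical generic helper (a sketch, not part of the
// shipped code) capturing the pattern repeated in handleMessage above: the loosely
// typed message payload is round-tripped through JSON into a concrete payload struct.
// Requires Go 1.18+ for generics.
func decodeWatchPartyPayload[T any](payload interface{}) (T, error) {
	var out T
	marshaled, err := json.Marshal(payload)
	if err != nil {
		return out, err
	}
	err = json.Unmarshal(marshaled, &out)
	return out, err
}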

View File

@@ -0,0 +1,858 @@
package nakama
import (
"context"
"errors"
debrid_client "seanime/internal/debrid/client"
"seanime/internal/events"
"seanime/internal/library/playbackmanager"
"seanime/internal/mediaplayers/mediaplayer"
"seanime/internal/torrentstream"
"seanime/internal/util"
"strings"
"time"
"github.com/google/uuid"
"github.com/samber/mo"
)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Host
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
type CreateWatchOptions struct {
Settings *WatchPartySessionSettings `json:"settings"`
}
// CreateWatchParty creates a new watch party (host only)
func (wpm *WatchPartyManager) CreateWatchParty(options *CreateWatchOptions) (*WatchPartySession, error) {
wpm.mu.Lock()
defer wpm.mu.Unlock()
if !wpm.manager.IsHost() {
return nil, errors.New("only hosts can create watch parties")
}
if wpm.sessionCtxCancel != nil {
wpm.sessionCtxCancel()
wpm.sessionCtx = nil
wpm.sessionCtxCancel = nil
wpm.currentSession = mo.None[*WatchPartySession]()
}
wpm.logger.Debug().Msg("nakama: Creating watch party")
wpm.sessionCtx, wpm.sessionCtxCancel = context.WithCancel(context.Background())
// Generate unique ID
sessionID := uuid.New().String()
session := &WatchPartySession{
ID: sessionID,
Participants: make(map[string]*WatchPartySessionParticipant),
CurrentMediaInfo: nil,
Settings: options.Settings,
CreatedAt: time.Now(),
}
// Add host as participant
session.Participants["host"] = &WatchPartySessionParticipant{
ID: "host",
Username: wpm.manager.username,
IsHost: true,
CanControl: true,
IsReady: true,
LastSeen: time.Now(),
Latency: 0,
}
wpm.currentSession = mo.Some(session)
// Reset sequence numbers for new session
wpm.sequenceMu.Lock()
wpm.sendSequence = 0
wpm.lastRxSequence = 0
wpm.sequenceMu.Unlock()
// Notify all peers about the new watch party
_ = wpm.manager.SendMessage(MessageTypeWatchPartyCreated, WatchPartyCreatedPayload{
Session: session,
})
wpm.logger.Debug().Str("sessionId", sessionID).Msg("nakama: Watch party created")
// Send websocket event to update the UI
wpm.manager.wsEventManager.SendEvent(events.NakamaWatchPartyState, session)
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-wpm.sessionCtx.Done():
wpm.logger.Debug().Msg("nakama: Watch party periodic broadcast stopped")
return
case <-ticker.C:
// Broadcast the session state to all peers every 5 seconds
// This is useful for peers that will join later
wpm.broadcastSessionStateToPeers()
}
}
}()
go wpm.listenToPlaybackManager()
// go wpm.listenToOnlineStreaming() // TODO
return session, nil
}
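// A hypothetical host-side caller (illustration only): create a party that forces a
// sync once peers drift more than two seconds and waits at most ten seconds for
// buffering peers before resuming.
//
//	session, err := wpm.CreateWatchParty(&CreateWatchOptions{
//		Settings: &WatchPartySessionSettings{
//			SyncThreshold:     2.0,
//			MaxBufferWaitTime: 10,
//		},
//	})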
// PromotePeerToRelayModeOrigin promotes a peer to be the origin for relay mode
func (wpm *WatchPartyManager) PromotePeerToRelayModeOrigin(peerId string) {
wpm.mu.Lock()
defer wpm.mu.Unlock()
if !wpm.manager.IsHost() {
return
}
wpm.logger.Debug().Str("peerId", peerId).Msg("nakama: Promoting peer to relay mode origin")
session, ok := wpm.currentSession.Get()
if !ok {
wpm.logger.Warn().Msg("nakama: Cannot promote peer to relay mode origin, no active watch party session")
return
}
// Check if the peer exists in the session
participant, exists := session.Participants[peerId]
if !exists {
wpm.logger.Warn().Str("peerId", peerId).Msg("nakama: Cannot promote peer to relay mode origin, peer not found in session")
return
}
// Set the IsRelayOrigin flag to true
participant.IsRelayOrigin = true
// Mark the session as being in relay mode
session.mu.Lock()
session.IsRelayMode = true
session.mu.Unlock()
wpm.logger.Debug().Str("peerId", peerId).Msg("nakama: Peer promoted to relay mode origin")
wpm.broadcastSessionStateToPeers()
wpm.sendSessionStateToClient()
}
func (wpm *WatchPartyManager) StopWatchParty() {
wpm.mu.Lock()
defer wpm.mu.Unlock()
if !wpm.manager.IsHost() {
return
}
wpm.logger.Debug().Msg("nakama: Stopping watch party")
// Cancel any ongoing catch-up operations
wpm.cancelCatchUp()
// Reset buffering state and cancel any waitForPeersReady goroutine
wpm.bufferMu.Lock()
wpm.isWaitingForBuffers = false
if wpm.waitForPeersCancel != nil {
wpm.waitForPeersCancel()
wpm.waitForPeersCancel = nil
}
wpm.bufferMu.Unlock()
// Broadcast the stop event to all peers
_ = wpm.manager.SendMessage(MessageTypeWatchPartyStopped, nil)
if wpm.sessionCtxCancel != nil {
wpm.sessionCtxCancel()
wpm.sessionCtx = nil
wpm.sessionCtxCancel = nil
wpm.currentSession = mo.None[*WatchPartySession]()
}
wpm.broadcastSessionStateToPeers()
wpm.sendSessionStateToClient()
}
// listenToPlaybackManager mirrors the host's playback manager events to the watch party session (host only)
func (wpm *WatchPartyManager) listenToPlaybackManager() {
playbackSubscriber := wpm.manager.playbackManager.SubscribeToPlaybackStatus("nakama_watch_party")
go func() {
defer util.HandlePanicInModuleThen("nakama/listenToPlaybackManager", func() {})
defer func() {
wpm.logger.Debug().Msg("nakama: Stopping playback manager listener")
go wpm.manager.playbackManager.UnsubscribeFromPlaybackStatus("nakama_watch_party")
}()
for {
select {
case <-wpm.sessionCtx.Done():
wpm.logger.Debug().Msg("nakama: Stopping playback manager listener")
return
case event := <-playbackSubscriber.EventCh:
_, ok := wpm.currentSession.Get()
if !ok {
continue
}
switch event := event.(type) {
case playbackmanager.VideoStoppedEvent, playbackmanager.StreamStoppedEvent:
// Reset
wpm.logger.Debug().Msg("nakama: Playback stopped event received")
wpm.bufferMu.Lock()
wpm.isWaitingForBuffers = true
wpm.bufferWaitStart = time.Now()
// Cancel existing waitForPeersReady goroutine
if wpm.waitForPeersCancel != nil {
wpm.waitForPeersCancel()
wpm.waitForPeersCancel = nil
}
wpm.bufferMu.Unlock()
// Reset the current session media info
wpm.mu.Lock()
session, ok := wpm.currentSession.Get()
if !ok {
wpm.mu.Unlock()
return
}
session.CurrentMediaInfo = nil
wpm.mu.Unlock()
// Broadcast the session state to all peers
go wpm.broadcastSessionStateToPeers()
case playbackmanager.PlaybackStatusChangedEvent:
if event.State.MediaId == 0 {
continue
}
go func(event playbackmanager.PlaybackStatusChangedEvent) {
wpm.manager.playbackManager.PullStatus()
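// Infer the stream type from the playback URL: torrent streams are served under
// /api/v1/torrentstream; any other stream URL is assumed to be debrid.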
streamType := "file"
if event.Status.PlaybackType == mediaplayer.PlaybackTypeStream {
if strings.Contains(event.Status.Filepath, "/api/v1/torrentstream") {
streamType = "torrent"
} else {
streamType = "debrid"
}
}
optionalTorrentStreamStartOptions, _ := wpm.manager.torrentstreamRepository.GetPreviousStreamOptions()
streamPath := event.Status.Filepath
newCurrentMediaInfo := &WatchPartySessionMediaInfo{
MediaId: event.State.MediaId,
EpisodeNumber: event.State.EpisodeNumber,
AniDBEpisode: event.State.AniDbEpisode,
StreamType: streamType,
StreamPath: streamPath,
OptionalTorrentStreamStartOptions: optionalTorrentStreamStartOptions,
}
wpm.mu.Lock()
session, ok := wpm.currentSession.Get()
if !ok {
wpm.mu.Unlock()
return
}
// If this is the same media, just send the playback status
if session.CurrentMediaInfo.Equals(newCurrentMediaInfo) && event.State.MediaId != 0 {
wpm.mu.Unlock()
// Get next sequence number for message ordering
wpm.sequenceMu.Lock()
wpm.sendSequence++
sequenceNum := wpm.sendSequence
wpm.sequenceMu.Unlock()
// Send message
_ = wpm.manager.SendMessage(MessageTypeWatchPartyPlaybackStatus, WatchPartyPlaybackStatusPayload{
PlaybackStatus: event.Status,
Timestamp: time.Now().UnixNano(),
SequenceNumber: sequenceNum,
EpisodeNumber: event.State.EpisodeNumber,
})
} else {
// For new playback, update the session
wpm.logger.Debug().Msgf("nakama: Playback changed or started: %s", streamPath)
session.CurrentMediaInfo = newCurrentMediaInfo
wpm.mu.Unlock()
// Pause immediately and wait for peers to be ready
_ = wpm.manager.playbackManager.Pause()
// Reset buffering state for new playback
wpm.bufferMu.Lock()
wpm.isWaitingForBuffers = true
wpm.bufferWaitStart = time.Now()
// Cancel existing waitForPeersReady goroutine
if wpm.waitForPeersCancel != nil {
wpm.waitForPeersCancel()
wpm.waitForPeersCancel = nil
}
wpm.bufferMu.Unlock()
go wpm.broadcastSessionStateToPeers()
// Start checking peer readiness
go wpm.waitForPeersReady(func() {
if !session.IsRelayMode {
// resume playback
_ = wpm.manager.playbackManager.Resume()
} else {
// in relay mode, just signal to the origin
_ = wpm.manager.SendMessage(MessageTypeWatchPartyRelayModePeersReady, nil)
}
})
}
}(event)
}
}
}
}()
}
// broadcastSessionStateToPeers broadcasts the session state to all peers.
// A nil session signals to peers that no watch party is active.
func (wpm *WatchPartyManager) broadcastSessionStateToPeers() {
session, _ := wpm.currentSession.Get() // nil when no session is active
_ = wpm.manager.SendMessage(MessageTypeWatchPartyStateChanged, WatchPartyStateChangedPayload{
Session: session,
})
}
func (wpm *WatchPartyManager) sendSessionStateToClient() {
session, ok := wpm.currentSession.Get()
if !ok {
wpm.manager.wsEventManager.SendEvent(events.NakamaWatchPartyState, nil)
return
}
wpm.manager.wsEventManager.SendEvent(events.NakamaWatchPartyState, session)
}
// handleWatchPartyPeerJoinedEvent is called when a peer joins a watch party
func (wpm *WatchPartyManager) handleWatchPartyPeerJoinedEvent(payload *WatchPartyJoinPayload, timestamp time.Time) {
if !wpm.manager.IsHost() {
return
}
wpm.mu.Lock()
defer wpm.mu.Unlock()
wpm.logger.Debug().Str("peerId", payload.PeerId).Msg("nakama: Peer joined watch party")
session, ok := wpm.currentSession.Get()
if !ok {
return
}
session.mu.Lock()
// Add the peer to the session
session.Participants[payload.PeerId] = &WatchPartySessionParticipant{
ID: payload.PeerId,
Username: payload.Username,
IsHost: false,
CanControl: false,
IsReady: false,
LastSeen: timestamp,
Latency: 0,
// Initialize buffering state
IsBuffering: false,
BufferHealth: 1.0,
PlaybackStatus: nil,
}
session.mu.Unlock()
// Send session state
go wpm.broadcastSessionStateToPeers()
wpm.logger.Debug().Str("peerId", payload.PeerId).Msg("nakama: Updated watch party state after peer joined")
wpm.sendSessionStateToClient()
}
// handleWatchPartyPeerLeftEvent is called when a peer leaves a watch party
func (wpm *WatchPartyManager) handleWatchPartyPeerLeftEvent(payload *WatchPartyLeavePayload) {
if !wpm.manager.IsHost() {
return
}
wpm.mu.Lock()
defer wpm.mu.Unlock()
wpm.logger.Debug().Str("peerId", payload.PeerId).Msg("nakama: Peer left watch party")
session, ok := wpm.currentSession.Get()
if !ok {
return
}
// Remove the peer from the session
delete(session.Participants, payload.PeerId)
// Send session state
go wpm.broadcastSessionStateToPeers()
wpm.logger.Debug().Str("peerId", payload.PeerId).Msg("nakama: Updated watch party state after peer left")
wpm.sendSessionStateToClient()
}
// HandlePeerDisconnected handles peer disconnections and removes them from the watch party
func (wpm *WatchPartyManager) HandlePeerDisconnected(peerID string) {
if !wpm.manager.IsHost() {
return
}
wpm.mu.Lock()
defer wpm.mu.Unlock()
session, ok := wpm.currentSession.Get()
if !ok {
return
}
// Check if the peer is in the watch party
if _, exists := session.Participants[peerID]; !exists {
return
}
wpm.logger.Debug().Str("peerId", peerID).Msg("nakama: Peer disconnected, removing from watch party")
// Remove the peer from the session
delete(session.Participants, peerID)
// Send session state to remaining peers
go wpm.broadcastSessionStateToPeers()
wpm.logger.Debug().Str("peerId", peerID).Msg("nakama: Updated watch party state after peer disconnected")
// Send websocket event to update the UI
wpm.sendSessionStateToClient()
}
// handleWatchPartyPeerStatusEvent handles regular status reports from peers
func (wpm *WatchPartyManager) handleWatchPartyPeerStatusEvent(payload *WatchPartyPeerStatusPayload) {
if !wpm.manager.IsHost() {
return
}
wpm.mu.Lock()
session, ok := wpm.currentSession.Get()
if !ok {
wpm.mu.Unlock()
return
}
// Update peer status
if participant, exists := session.Participants[payload.PeerId]; exists {
participant.PlaybackStatus = &payload.PlaybackStatus
participant.IsBuffering = payload.IsBuffering
participant.BufferHealth = payload.BufferHealth
participant.LastSeen = payload.Timestamp
participant.IsReady = !payload.IsBuffering && payload.BufferHealth > 0.1 // Consider ready if not buffering and has some buffer
wpm.logger.Debug().
Str("peerId", payload.PeerId).
Bool("isBuffering", payload.IsBuffering).
Float64("bufferHealth", payload.BufferHealth).
Bool("isReady", participant.IsReady).
Msg("nakama: Updated peer status")
}
wpm.mu.Unlock()
// Check if we should start/resume playback based on peer states (call after releasing mutex)
// Run this asynchronously to avoid blocking the event processing
go wpm.checkAndManageBuffering()
// Send session state to client to update the UI
wpm.sendSessionStateToClient()
}
// handleWatchPartyBufferUpdateEvent handles buffer state changes from peers
func (wpm *WatchPartyManager) handleWatchPartyBufferUpdateEvent(payload *WatchPartyBufferUpdatePayload) {
if !wpm.manager.IsHost() {
return
}
wpm.mu.Lock()
session, ok := wpm.currentSession.Get()
if !ok {
wpm.mu.Unlock()
return
}
// Update peer buffer status
if participant, exists := session.Participants[payload.PeerId]; exists {
participant.IsBuffering = payload.IsBuffering
participant.BufferHealth = payload.BufferHealth
participant.LastSeen = payload.Timestamp
participant.IsReady = !payload.IsBuffering && payload.BufferHealth > 0.1
wpm.logger.Debug().
Str("peerId", payload.PeerId).
Bool("isBuffering", payload.IsBuffering).
Float64("bufferHealth", payload.BufferHealth).
Bool("isReady", participant.IsReady).
Msg("nakama: Updated peer buffer status")
}
wpm.mu.Unlock()
// Immediately check if we need to pause/resume based on buffer state (call after releasing mutex)
// Run this asynchronously to avoid blocking the event processing
go wpm.checkAndManageBuffering()
// Broadcast updated session state
go wpm.broadcastSessionStateToPeers()
// Send session state to client to update the UI
wpm.sendSessionStateToClient()
}
// checkAndManageBuffering manages playback based on peer buffering states
// NOTE: This function should NOT be called while holding wpm.mu as it may need to acquire bufferMu
func (wpm *WatchPartyManager) checkAndManageBuffering() {
session, ok := wpm.currentSession.Get()
if !ok {
return
}
// Get current playback status
playbackStatus, hasPlayback := wpm.manager.playbackManager.PullStatus()
if !hasPlayback {
return
}
// Count peer states
var totalPeers, readyPeers, bufferingPeers int
for _, participant := range session.Participants {
if !participant.IsHost {
totalPeers++
if participant.IsReady {
readyPeers++
}
if participant.IsBuffering {
bufferingPeers++
}
}
}
// No peers means no buffering management needed
if totalPeers == 0 {
return
}
wpm.bufferMu.Lock()
defer wpm.bufferMu.Unlock()
maxWaitTime := time.Duration(session.Settings.MaxBufferWaitTime) * time.Second
// If any peer is buffering and we're playing, pause and wait
if bufferingPeers > 0 && playbackStatus.Playing {
if !wpm.isWaitingForBuffers {
wpm.logger.Debug().
Int("bufferingPeers", bufferingPeers).
Int("totalPeers", totalPeers).
Msg("nakama: Pausing playback due to peer buffering")
_ = wpm.manager.playbackManager.Pause()
wpm.isWaitingForBuffers = true
wpm.bufferWaitStart = time.Now()
}
return
}
// If we're waiting for buffers
if wpm.isWaitingForBuffers {
waitTime := time.Since(wpm.bufferWaitStart)
// Resume if all peers are ready or max wait time exceeded
if bufferingPeers == 0 || waitTime > maxWaitTime {
wpm.logger.Debug().
Int("readyPeers", readyPeers).
Int("totalPeers", totalPeers).
Int("bufferingPeers", bufferingPeers).
Float64("waitTimeSeconds", waitTime.Seconds()).
Bool("maxWaitExceeded", waitTime > maxWaitTime).
Msg("nakama: Resuming playback after buffer wait")
_ = wpm.manager.playbackManager.Resume()
wpm.isWaitingForBuffers = false
}
}
}
// waitForPeersReady waits for peers to be ready before resuming playback
func (wpm *WatchPartyManager) waitForPeersReady(onReady func()) {
session, ok := wpm.currentSession.Get()
if !ok {
return
}
// Create cancellable context for this goroutine
ctx, cancel := context.WithCancel(context.Background())
wpm.bufferMu.Lock()
wpm.waitForPeersCancel = cancel
wpm.bufferMu.Unlock()
defer func() {
wpm.bufferMu.Lock()
wpm.waitForPeersCancel = nil
wpm.bufferMu.Unlock()
}()
maxWaitTime := time.Duration(session.Settings.MaxBufferWaitTime) * time.Second
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
wpm.logger.Debug().Msg("nakama: Waiting for peers to be ready")
for {
select {
case <-ctx.Done():
wpm.logger.Debug().Msg("nakama: waitForPeersReady cancelled")
return
case <-wpm.sessionCtx.Done():
wpm.logger.Debug().Msg("nakama: Session ended while waiting for peers")
return
case <-ticker.C:
wpm.bufferMu.Lock()
// Check if we've been waiting too long
waitTime := time.Since(wpm.bufferWaitStart)
if waitTime > maxWaitTime {
wpm.logger.Debug().Float64("waitTimeSeconds", waitTime.Seconds()).Msg("nakama: Max wait time exceeded, resuming playback")
onReady()
wpm.isWaitingForBuffers = false
wpm.bufferMu.Unlock()
return
}
// Count ready peers
session, ok := wpm.currentSession.Get()
if !ok {
wpm.bufferMu.Unlock()
return
}
var totalPeers, readyPeers int
for _, participant := range session.Participants {
if !participant.IsHost && !participant.IsRelayOrigin {
totalPeers++
if participant.IsReady {
readyPeers++
}
}
}
// If no peers or all peers are ready, resume playback
if totalPeers == 0 || readyPeers == totalPeers {
wpm.logger.Debug().
Int("readyPeers", readyPeers).
Int("totalPeers", totalPeers).
Msg("nakama: All peers are ready, resuming playback")
onReady()
wpm.isWaitingForBuffers = false
wpm.bufferMu.Unlock()
return
}
wpm.logger.Debug().
Int("readyPeers", readyPeers).
Int("totalPeers", totalPeers).
Float64("waitTimeSeconds", waitTime.Seconds()).
Msg("nakama: Still waiting for peers to be ready")
wpm.bufferMu.Unlock()
}
}
}
func (wpm *WatchPartyManager) EnableRelayMode(peerId string) {
wpm.mu.Lock()
defer wpm.mu.Unlock()
wpm.logger.Debug().Str("peerId", peerId).Msg("nakama: Enabling relay mode")
session, ok := wpm.currentSession.Get()
if !ok {
return
}
session.mu.Lock()
participant, exists := session.Participants[peerId]
if !exists {
session.mu.Unlock() // release the session lock before the early return
wpm.logger.Warn().Str("peerId", peerId).Msg("nakama: Cannot enable relay mode, peer not found in session")
wpm.manager.wsEventManager.SendEvent(events.ErrorToast, "Peer not found in session")
return
}
session.IsRelayMode = true
participant.IsRelayOrigin = true
session.mu.Unlock()
wpm.logger.Debug().Str("peerId", peerId).Msg("nakama: Relay mode enabled")
wpm.broadcastSessionStateToPeers()
wpm.sendSessionStateToClient()
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Relay mode
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// handleWatchPartyRelayModeOriginStreamStartedEvent is called when the relay origin sends us (the host) a new stream.
// It starts the same stream as the origin on the host by using the same options as the origin.
func (wpm *WatchPartyManager) handleWatchPartyRelayModeOriginStreamStartedEvent(payload *WatchPartyRelayModeOriginStreamStartedPayload) {
wpm.mu.Lock()
defer wpm.mu.Unlock()
wpm.logger.Debug().Str("filepath", payload.Filepath).Msg("nakama: Relay mode origin stream started")
session, ok := wpm.currentSession.Get()
if !ok {
return
}
session.Settings.MaxBufferWaitTime = 60 // higher buffer wait time for relay mode
event := payload
// Load the stream on the host
// Playback won't actually be started
switch event.StreamType {
case "file":
// Do nothing, the file is already available
case "torrent":
// Start the torrent stream and wait for it to be ready
options := *event.OptionalTorrentStreamStartOptions
options.PlaybackType = torrentstream.PlaybackTypeNoneAndAwait
err := wpm.manager.torrentstreamRepository.StartStream(context.Background(), &options)
if err != nil {
wpm.logger.Error().Err(err).Msg("nakama: Failed to start torrent stream")
}
case "debrid":
// Start the debrid stream and wait for it to be ready
options := *event.OptionalDebridStreamStartOptions
options.PlaybackType = debrid_client.PlaybackTypeNoneAndAwait
err := wpm.manager.debridClientRepository.StartStream(context.Background(), &options)
if err != nil {
wpm.logger.Error().Err(err).Msg("nakama: Failed to start debrid stream")
}
}
// Update the current media info
streamPath := event.Status.Filepath
if event.StreamType == "file" {
// For file streams, we should use the file path directly
streamPath = event.OptionalLocalPath
}
newCurrentMediaInfo := &WatchPartySessionMediaInfo{
MediaId: event.State.MediaId,
EpisodeNumber: event.State.EpisodeNumber,
AniDBEpisode: event.State.AniDbEpisode,
StreamType: event.StreamType,
StreamPath: streamPath,
OptionalTorrentStreamStartOptions: event.OptionalTorrentStreamStartOptions,
}
// Video playback has started, send the media info to the peers
session.CurrentMediaInfo = newCurrentMediaInfo
// Pause immediately and wait for peers to be ready
_ = wpm.manager.playbackManager.Pause()
// Reset buffering state for new playback
wpm.bufferMu.Lock()
wpm.isWaitingForBuffers = true
wpm.bufferWaitStart = time.Now()
// Cancel existing waitForPeersReady goroutine
if wpm.waitForPeersCancel != nil {
wpm.waitForPeersCancel()
wpm.waitForPeersCancel = nil
}
wpm.bufferMu.Unlock()
// broadcast the session state to the peers
// this will not include the relay origin
wpm.broadcastSessionStateToPeers()
// Start checking peer readiness
go wpm.waitForPeersReady(func() {
if !session.IsRelayMode {
// not in relay mode, resume playback
_ = wpm.manager.playbackManager.Resume()
} else {
// in relay mode, just signal to the origin
_ = wpm.manager.SendMessage(MessageTypeWatchPartyRelayModePeersReady, nil)
}
})
}
// handleWatchPartyRelayModeOriginPlaybackStatusEvent is called when the relay origin sends us (the host) a playback status update
func (wpm *WatchPartyManager) handleWatchPartyRelayModeOriginPlaybackStatusEvent(payload *WatchPartyRelayModeOriginPlaybackStatusPayload) {
wpm.mu.Lock()
defer wpm.mu.Unlock()
// wpm.logger.Debug().Msg("nakama: Relay mode origin playback status")
// Send the playback status immediately to the peers
// Get next sequence number for relayed message
wpm.sequenceMu.Lock()
wpm.sendSequence++
sequenceNum := wpm.sendSequence
wpm.sequenceMu.Unlock()
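// The relayed status is re-stamped with the host's own sequence number so peers
// observe a single monotonic ordering regardless of the original sender.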
_ = wpm.manager.SendMessage(MessageTypeWatchPartyPlaybackStatus, WatchPartyPlaybackStatusPayload{
PlaybackStatus: payload.Status,
Timestamp: payload.Timestamp, // timestamp of the origin
SequenceNumber: sequenceNum,
EpisodeNumber: payload.State.EpisodeNumber,
})
}
// handleWatchPartyRelayModeOriginPlaybackStoppedEvent is called when the relay origin sends us (the host) a playback stopped event
func (wpm *WatchPartyManager) handleWatchPartyRelayModeOriginPlaybackStoppedEvent() {
wpm.mu.Lock()
defer wpm.mu.Unlock()
wpm.logger.Debug().Msg("nakama: Relay mode origin playback stopped")
session, ok := wpm.currentSession.Get()
if !ok {
return
}
session.mu.Lock()
session.CurrentMediaInfo = nil
session.mu.Unlock()
wpm.broadcastSessionStateToPeers()
wpm.sendSessionStateToClient()
}

View File

@@ -0,0 +1,148 @@
package nakama
import (
"seanime/internal/events"
"time"
"github.com/goccy/go-json"
)
const (
OnlineStreamStartedEvent = "online-stream-started" // reported by host when onCanPlay is called
OnlineStreamPlaybackStatusEvent = "online-stream-playback-status"
)
type OnlineStreamStartedEventPayload struct {
MediaId int `json:"mediaId"`
EpisodeNumber int `json:"episodeNumber"`
Provider string `json:"provider"`
Server string `json:"server"`
Dubbed bool `json:"dubbed"`
Quality string `json:"quality"`
}
func (wpm *WatchPartyManager) listenToOnlineStreaming() {
go func() {
listener := wpm.manager.wsEventManager.SubscribeToClientNakamaEvents("watch_party")
for {
select {
case <-wpm.sessionCtx.Done():
wpm.logger.Debug().Msg("nakama: Stopping online stream listener")
return
case clientEvent := <-listener.Channel:
marshaled, _ := json.Marshal(clientEvent.Payload)
var event NakamaEvent
err := json.Unmarshal(marshaled, &event)
if err != nil {
continue // skip the malformed event instead of killing the listener
}
marshaledPayload, _ := json.Marshal(event.Payload)
session, ok := wpm.currentSession.Get()
if !ok {
continue
}
switch event.Type {
case OnlineStreamStartedEvent:
wpm.logger.Debug().Msg("nakama: Received online stream started event")
var payload OnlineStreamStartedEventPayload
if err := json.Unmarshal(marshaledPayload, &payload); err != nil {
wpm.logger.Error().Err(err).Msg("nakama: Failed to unmarshal online stream started event")
continue // keep listening for subsequent events
}
wpm.logger.Debug().Interface("payload", payload).Msg("nakama: Received online stream started event")
newCurrentMediaInfo := &WatchPartySessionMediaInfo{
MediaId: payload.MediaId,
EpisodeNumber: payload.EpisodeNumber,
AniDBEpisode: "",
StreamType: "online",
StreamPath: "",
OnlineStreamParams: &OnlineStreamParams{
MediaId: payload.MediaId,
Provider: payload.Provider,
EpisodeNumber: payload.EpisodeNumber,
Server: payload.Server,
Dubbed: payload.Dubbed,
Quality: payload.Quality,
},
}
session.CurrentMediaInfo = newCurrentMediaInfo
// Pause immediately and wait for peers to be ready
//_ = wpm.manager.playbackManager.Pause()
wpm.sendCommandToOnlineStream(OnlineStreamCommandPause)
// Reset buffering state for new playback
wpm.bufferMu.Lock()
wpm.isWaitingForBuffers = true
wpm.bufferWaitStart = time.Now()
// Cancel existing waitForPeersReady goroutine
if wpm.waitForPeersCancel != nil {
wpm.waitForPeersCancel()
wpm.waitForPeersCancel = nil
}
wpm.bufferMu.Unlock()
wpm.broadcastSessionStateToPeers()
// Start checking peer readiness
go wpm.waitForPeersReady(func() {
wpm.sendCommandToOnlineStream(OnlineStreamCommandPlay)
})
}
}
}
}()
}
type OnlineStreamCommand string
type OnlineStreamCommandPayload struct {
Type OnlineStreamCommand `json:"type"` // The command type
Payload interface{} `json:"payload,omitempty"` // Optional payload for the command
}
const (
OnlineStreamCommandStart OnlineStreamCommand = "start" // Start the online stream
OnlineStreamCommandPlay OnlineStreamCommand = "play"
OnlineStreamCommandPause OnlineStreamCommand = "pause"
OnlineStreamCommandSeek OnlineStreamCommand = "seek"
OnlineStreamCommandSeekTo OnlineStreamCommand = "seekTo" // Seek to a specific time in seconds
)
func (wpm *WatchPartyManager) sendCommandToOnlineStream(cmd OnlineStreamCommand, payload ...interface{}) {
session, ok := wpm.currentSession.Get()
if !ok {
return
}
if session.CurrentMediaInfo == nil || session.CurrentMediaInfo.OnlineStreamParams == nil {
wpm.logger.Warn().Msg("nakama: No online stream params available for sending command")
return
}
commandPayload := OnlineStreamCommandPayload{
Type: cmd,
Payload: nil,
}
if len(payload) > 0 {
commandPayload.Payload = payload[0]
}
event := NakamaEvent{
Type: OnlineStreamPlaybackStatusEvent,
Payload: commandPayload,
}
wpm.manager.wsEventManager.SendEvent(events.NakamaOnlineStreamEvent, event)
}
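// Hypothetical usage (illustration only): pause the shared online stream, or seek it
// to the 120-second mark via the variadic payload parameter.
//
//	wpm.sendCommandToOnlineStream(OnlineStreamCommandPause)
//	wpm.sendCommandToOnlineStream(OnlineStreamCommandSeekTo, 120.0)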

View File

@@ -0,0 +1,676 @@
package nakama
import (
"context"
"errors"
"fmt"
"math"
"seanime/internal/events"
"seanime/internal/library/playbackmanager"
"seanime/internal/mediaplayers/mediaplayer"
"seanime/internal/util"
"strings"
"time"
"github.com/samber/mo"
)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Peer
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
func (wpm *WatchPartyManager) JoinWatchParty() error {
if wpm.manager.IsHost() {
return errors.New("only peers can join watch parties")
}
wpm.logger.Debug().Msg("nakama: Joining watch party")
hostConn, ok := wpm.manager.GetHostConnection()
if !ok {
return errors.New("no host connection found")
}
_, ok = wpm.currentSession.Get() // session should exist
if !ok {
return errors.New("no watch party found")
}
wpm.sessionCtx, wpm.sessionCtxCancel = context.WithCancel(context.Background())
// Reset sequence numbers for new session participation
wpm.sequenceMu.Lock()
wpm.sendSequence = 0
wpm.lastRxSequence = 0
wpm.sequenceMu.Unlock()
// Send join message to host
_ = wpm.manager.SendMessageToHost(MessageTypeWatchPartyJoin, WatchPartyJoinPayload{
PeerId: hostConn.PeerId,
Username: wpm.manager.username,
})
// Start status reporting to host
wpm.startStatusReporting()
// Send websocket event to update the UI
wpm.sendSessionStateToClient()
// Start listening to playback manager
wpm.relayModeListenToPlaybackManager()
return nil
}
// startStatusReporting starts sending status updates to the host every 2 seconds
func (wpm *WatchPartyManager) startStatusReporting() {
if wpm.manager.IsHost() {
return
}
// Stop any existing status reporting
wpm.stopStatusReporting()
wpm.mu.Lock()
defer wpm.mu.Unlock()
// Reset buffering detection state
wpm.bufferDetectionMu.Lock()
wpm.lastPosition = 0
wpm.lastPositionTime = time.Time{}
wpm.stallCount = 0
wpm.bufferDetectionMu.Unlock()
// Create context for status reporting
ctx, cancel := context.WithCancel(context.Background())
wpm.statusReportCancel = cancel
// Start ticker for regular status reports
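// The 2-second cadence stays above BufferDetectionMinInterval (1.5s), so each report
// can carry a fresh buffer-state sample rather than the early-return "good" state.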
wpm.statusReportTicker = time.NewTicker(2 * time.Second)
go func() {
defer util.HandlePanicInModuleThen("nakama/startStatusReporting", func() {})
defer wpm.statusReportTicker.Stop()
hostConn, ok := wpm.manager.GetHostConnection()
if !ok {
return
}
wpm.logger.Debug().Msg("nakama: Started status reporting to host")
for {
select {
case <-ctx.Done():
wpm.logger.Debug().Msg("nakama: Stopped status reporting")
return
case <-wpm.statusReportTicker.C:
wpm.sendStatusToHost(hostConn.PeerId)
}
}
}()
}
// stopStatusReporting stops sending status updates to the host
func (wpm *WatchPartyManager) stopStatusReporting() {
if wpm.statusReportCancel != nil {
wpm.statusReportCancel()
wpm.statusReportCancel = nil
}
if wpm.statusReportTicker != nil {
wpm.statusReportTicker.Stop()
wpm.statusReportTicker = nil
}
}
// sendStatusToHost sends current playback status and buffer state to the host
func (wpm *WatchPartyManager) sendStatusToHost(peerId string) {
playbackStatus, hasPlayback := wpm.manager.playbackManager.PullStatus()
if !hasPlayback {
return
}
// Calculate buffer health and buffering state
isBuffering, bufferHealth := wpm.calculateBufferState(playbackStatus)
// Send peer status update
_ = wpm.manager.SendMessageToHost(MessageTypeWatchPartyPeerStatus, WatchPartyPeerStatusPayload{
PeerId: peerId,
PlaybackStatus: *playbackStatus,
IsBuffering: isBuffering,
BufferHealth: bufferHealth,
Timestamp: time.Now(),
})
}
// calculateBufferState calculates buffering state and buffer health from playback status
func (wpm *WatchPartyManager) calculateBufferState(status *mediaplayer.PlaybackStatus) (bool, float64) {
if status == nil {
return true, 0.0 // No status means we're probably buffering
}
wpm.bufferDetectionMu.Lock()
defer wpm.bufferDetectionMu.Unlock()
now := time.Now()
currentPosition := status.CurrentTimeInSeconds
// Initialize tracking on first call
if wpm.lastPositionTime.IsZero() {
wpm.lastPosition = currentPosition
wpm.lastPositionTime = now
wpm.stallCount = 0
return false, 1.0 // Assume good state initially
}
// Time since last position check
timeDelta := now.Sub(wpm.lastPositionTime).Seconds()
positionDelta := currentPosition - wpm.lastPosition
// Update tracking
wpm.lastPosition = currentPosition
wpm.lastPositionTime = now
// Don't check too frequently to avoid false positives
if timeDelta < BufferDetectionMinInterval {
return false, 1.0 // Return good state if checking too soon
}
// Check if we're at the end of the content
isAtEnd := currentPosition >= (status.DurationInSeconds - EndOfContentThreshold)
if isAtEnd {
// Reset stall count when at end
wpm.stallCount = 0
return false, 1.0 // Not buffering if we're at the end
}
// Handle seeking, if position jumped significantly, reset tracking
if math.Abs(positionDelta) > SignificantPositionJump { // Detect seeking vs normal playback
wpm.logger.Debug().
Float64("positionDelta", positionDelta).
Float64("currentPosition", currentPosition).
Msg("nakama: Position change detected, likely seeking, resetting stall tracking")
wpm.stallCount = 0
return false, 1.0 // Reset state after seeking
}
// If the player is playing but position hasn't advanced significantly
if status.Playing {
// Expected minimum position change
expectedMinChange := timeDelta * BufferDetectionTolerance
if positionDelta < expectedMinChange {
// Position hasn't advanced as expected while playing, likely buffering
wpm.stallCount++
// Consider buffering after threshold consecutive stalls to avoid false positives
isBuffering := wpm.stallCount >= BufferDetectionStallThreshold
// Buffer health decreases with consecutive stalls
bufferHealth := math.Max(0.0, 1.0-(float64(wpm.stallCount)*BufferHealthDecrement))
if isBuffering {
wpm.logger.Debug().
Int("stallCount", wpm.stallCount).
Float64("positionDelta", positionDelta).
Float64("expectedMinChange", expectedMinChange).
Float64("bufferHealth", bufferHealth).
Msg("nakama: Buffering detected, position not advancing while playing")
}
return isBuffering, bufferHealth
} else {
// Position is advancing normally, reset stall count
if wpm.stallCount > 0 {
wpm.logger.Debug().
Int("previousStallCount", wpm.stallCount).
Float64("positionDelta", positionDelta).
Msg("nakama: Playback resumed normally, resetting stall count")
}
wpm.stallCount = 0
return false, 0.95 // good buffer health when playing normally
}
} else {
// Player is paused, reset stall count and return good buffer state
if wpm.stallCount > 0 {
wpm.logger.Debug().Msg("nakama: Player paused, resetting stall count")
}
wpm.stallCount = 0
return false, 1.0
}
}
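// Worked example of the heuristic above: with the 2s status ticker, timeDelta is about
// 2.0s, so the expected minimum advance is 2.0 * BufferDetectionTolerance = 1.2s. If the
// position only advanced 0.3s while playing, that counts as a stall; after
// BufferDetectionStallThreshold (2) consecutive stalls the peer reports buffering with
// bufferHealth = 1.0 - 2*BufferHealthDecrement = 0.7.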
// resetBufferingState resets the buffering detection state (useful when playback changes)
func (wpm *WatchPartyManager) resetBufferingState() {
wpm.bufferDetectionMu.Lock()
defer wpm.bufferDetectionMu.Unlock()
wpm.lastPosition = 0
wpm.lastPositionTime = time.Time{}
wpm.stallCount = 0
wpm.logger.Debug().Msg("nakama: Reset buffering detection state")
}
// LeaveWatchParty signals to the host that the peer is leaving the watch party.
// The host will remove the peer from the session and the peer will receive a new session state.
// DEVNOTE: We don't remove the session from the manager, it should still exist.
func (wpm *WatchPartyManager) LeaveWatchParty() error {
if wpm.manager.IsHost() {
return errors.New("only peers can leave watch parties")
}
wpm.mu.Lock()
defer wpm.mu.Unlock()
wpm.logger.Debug().Msg("nakama: Leaving watch party")
// Stop status reporting
wpm.stopStatusReporting()
// Cancel the session context
if wpm.sessionCtxCancel != nil {
wpm.sessionCtxCancel()
wpm.sessionCtx = nil
wpm.sessionCtxCancel = nil
}
hostConn, ok := wpm.manager.GetHostConnection()
if !ok {
return errors.New("no host connection found")
}
_, ok = wpm.currentSession.Get() // session should exist
if !ok {
return errors.New("no watch party found")
}
_ = wpm.manager.SendMessageToHost(MessageTypeWatchPartyLeave, WatchPartyLeavePayload{
PeerId: hostConn.PeerId,
})
// Send websocket event to update the UI (nil indicates session left)
wpm.manager.wsEventManager.SendEvent(events.NakamaWatchPartyState, nil)
return nil
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Events
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// handleWatchPartyStateChangedEvent is called when the host updates the session state.
// It starts a stream on the peer if there's a new media info.
func (wpm *WatchPartyManager) handleWatchPartyStateChangedEvent(payload *WatchPartyStateChangedPayload) {
if wpm.manager.IsHost() {
return
}
wpm.mu.Lock()
defer wpm.mu.Unlock()
hostConn, ok := wpm.manager.GetHostConnection() // should always be ok
if !ok {
return
}
//
// Session didn't exist
//
// Immediately update the session if it doesn't exist
if _, exists := wpm.currentSession.Get(); !exists && payload.Session != nil {
wpm.currentSession = mo.Some(&WatchPartySession{}) // Add a placeholder session
}
currentSession, exists := wpm.currentSession.Get()
if !exists {
return
}
//
// Session destroyed
//
if payload.Session == nil {
wpm.logger.Debug().Msg("nakama: Session destroyed")
if wpm.sessionCtxCancel != nil {
wpm.sessionCtxCancel()
wpm.sessionCtx = nil
wpm.sessionCtxCancel = nil
}
// Stop playback if it's playing
if _, ok := currentSession.Participants[hostConn.PeerId]; ok {
wpm.logger.Debug().Msg("nakama: Stopping playback due to session destroyed")
_ = wpm.manager.playbackManager.Cancel()
}
wpm.currentSession = mo.None[*WatchPartySession]()
wpm.sendSessionStateToClient()
return
}
// Below this point, the session is guaranteed to exist
participant, isParticipant := payload.Session.Participants[hostConn.PeerId]
//
// Starting playback / Peer joined / Video changed
//
// If the payload session has a media info but the current session doesn't,
// and the peer is a participant, we need to start playback
newPlayback := payload.Session.CurrentMediaInfo != nil && currentSession.CurrentMediaInfo == nil
playbackChanged := payload.Session.CurrentMediaInfo != nil && !payload.Session.CurrentMediaInfo.Equals(currentSession.CurrentMediaInfo)
// Check if peer is newly a participant - they should start playback even if media info hasn't changed
wasParticipant := currentSession.Participants != nil && currentSession.Participants[hostConn.PeerId] != nil
peerJoined := isParticipant && !wasParticipant && payload.Session.CurrentMediaInfo != nil
if (newPlayback || playbackChanged || peerJoined) &&
isParticipant &&
!participant.IsRelayOrigin {
wpm.logger.Debug().Bool("newPlayback", newPlayback).Bool("playbackChanged", playbackChanged).Bool("peerJoined", peerJoined).Msg("nakama: Starting playback due to new media info")
// Reset buffering detection state for new media
wpm.resetBufferingState()
// Fetch the media info
media, err := wpm.manager.platform.GetAnime(context.Background(), payload.Session.CurrentMediaInfo.MediaId)
if err != nil {
wpm.logger.Error().Err(err).Msg("nakama: Failed to fetch media info for watch party")
return
}
// Play the media
wpm.logger.Debug().Int("mediaId", payload.Session.CurrentMediaInfo.MediaId).Msg("nakama: Playing watch party media")
switch payload.Session.CurrentMediaInfo.StreamType {
case "torrent":
if payload.Session.CurrentMediaInfo.OptionalTorrentStreamStartOptions == nil {
wpm.logger.Error().Msg("nakama: No torrent stream start options found")
wpm.manager.wsEventManager.SendEvent(events.ErrorToast, "Watch party: Failed to play media: Host did not return torrent stream start options")
return
}
if !wpm.manager.torrentstreamRepository.IsEnabled() {
wpm.logger.Error().Msg("nakama: Torrent streaming is not enabled")
wpm.manager.wsEventManager.SendEvent(events.ErrorToast, "Watch party: Failed to play media: Torrent streaming is not enabled")
return
}
// Start the torrent
err = wpm.manager.torrentstreamRepository.StartStream(wpm.sessionCtx, payload.Session.CurrentMediaInfo.OptionalTorrentStreamStartOptions)
case "debrid":
err = wpm.manager.PlayHostAnimeStream(payload.Session.CurrentMediaInfo.StreamType, "seanime/nakama", media, payload.Session.CurrentMediaInfo.AniDBEpisode)
case "file":
err = wpm.manager.PlayHostAnimeLibraryFile(payload.Session.CurrentMediaInfo.StreamPath, "seanime/nakama", media, payload.Session.CurrentMediaInfo.AniDBEpisode)
case "online":
wpm.sendCommandToOnlineStream(OnlineStreamCommandStart, payload.Session.CurrentMediaInfo.OnlineStreamParams)
}
if err != nil {
wpm.logger.Error().Err(err).Msg("nakama: Failed to play watch party media")
wpm.manager.wsEventManager.SendEvent(events.ErrorToast, fmt.Sprintf("Watch party: Failed to play media: %s", err.Error()))
}
// Auto-leave the watch party when playback stops
// The user will have to re-join to start the stream again
if payload.Session.CurrentMediaInfo.StreamType != "online" && !participant.IsRelayOrigin {
wpm.peerPlaybackListener = wpm.manager.playbackManager.SubscribeToPlaybackStatus("nakama_peer_playback_listener")
go func() {
defer util.HandlePanicInModuleThen("nakama/handleWatchPartyStateChangedEvent/autoLeaveWatchParty", func() {})
for {
select {
case <-wpm.sessionCtx.Done():
wpm.manager.playbackManager.UnsubscribeFromPlaybackStatus("nakama_peer_playback_listener")
return
case event, ok := <-wpm.peerPlaybackListener.EventCh:
if !ok {
return
}
switch event.(type) {
case playbackmanager.StreamStoppedEvent:
_ = wpm.LeaveWatchParty()
return
}
}
}
}()
}
}
//
// Peer left
//
canceledPlayback := false
// If the peer is a participant in the current session but the new session doesn't have them,
// we need to stop playback and status reporting
if _, ok := currentSession.Participants[hostConn.PeerId]; ok && payload.Session.Participants[hostConn.PeerId] == nil {
wpm.logger.Debug().Msg("nakama: Removing peer from session due to new session state")
// Stop status reporting when removed from session
wpm.stopStatusReporting()
// Before stopping playback, unsubscribe from the playback listener
// This is to prevent the peer from auto-leaving the watch party when host stops playback
if wpm.peerPlaybackListener != nil {
wpm.manager.playbackManager.UnsubscribeFromPlaybackStatus("nakama_peer_playback_listener")
wpm.peerPlaybackListener = nil
}
_ = wpm.manager.playbackManager.Cancel()
canceledPlayback = true
}
//
// Session stopped
//
// If the host stopped the session, we need to cancel playback
if payload.Session.CurrentMediaInfo == nil && currentSession.CurrentMediaInfo != nil && !canceledPlayback {
wpm.logger.Debug().Msg("nakama: Canceling playback due to host stopping session")
// Before stopping playback, unsubscribe from the playback listener
// This is to prevent the peer from auto-leaving the watch party when host stops playback
if wpm.peerPlaybackListener != nil {
wpm.manager.playbackManager.UnsubscribeFromPlaybackStatus("nakama_peer_playback_listener")
wpm.peerPlaybackListener = nil
}
_ = wpm.manager.playbackManager.Cancel()
canceledPlayback = true
}
// Update the session
wpm.currentSession = mo.Some(payload.Session)
wpm.sendSessionStateToClient()
}
// handleWatchPartyCreatedEvent is called when a host creates a watch party.
// Any existing session is canceled; the new session is stored in the manager and
// the peer decides whether or not to join.
func (wpm *WatchPartyManager) handleWatchPartyCreatedEvent(payload *WatchPartyCreatedPayload) {
if wpm.manager.IsHost() {
return
}
wpm.logger.Debug().Msg("nakama: Host created watch party")
// Cancel any existing session
if wpm.sessionCtxCancel != nil {
wpm.sessionCtxCancel()
wpm.sessionCtx = nil
wpm.sessionCtxCancel = nil
wpm.currentSession = mo.None[*WatchPartySession]()
}
// Load the session into the manager
// even if the peer isn't a participant
wpm.currentSession = mo.Some(payload.Session)
wpm.sendSessionStateToClient()
}
// handleWatchPartyStoppedEvent is called when the host stops a watch party.
//
// We check if the user was a participant in an active watch party session.
// If yes, we will cancel playback.
func (wpm *WatchPartyManager) handleWatchPartyStoppedEvent() {
if wpm.manager.IsHost() {
return
}
wpm.logger.Debug().Msg("nakama: Host stopped watch party")
// Stop status reporting
wpm.stopStatusReporting()
// Cancel any ongoing catch-up operations
wpm.cancelCatchUp()
hostConn, ok := wpm.manager.GetHostConnection() // should always be ok
if !ok {
return
}
// Cancel playback if the user was a participant in any previous session
currentSession, ok := wpm.currentSession.Get()
if ok {
if _, ok := currentSession.Participants[hostConn.PeerId]; ok {
_ = wpm.manager.playbackManager.Cancel()
}
}
// Cancel any existing session
if wpm.sessionCtxCancel != nil {
wpm.sessionCtxCancel()
wpm.sessionCtx = nil
wpm.sessionCtxCancel = nil
wpm.currentSession = mo.None[*WatchPartySession]()
}
wpm.manager.wsEventManager.SendEvent(events.NakamaWatchPartyState, nil)
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Relay mode
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// relayModeListenToPlaybackManager starts listening to the playback manager when in relay mode
func (wpm *WatchPartyManager) relayModeListenToPlaybackManager() {
go func() {
defer util.HandlePanicInModuleThen("nakama/relayModeListenToPlaybackManager", func() {})
wpm.logger.Debug().Msg("nakama: Started listening to playback manager for relay mode")
playbackSubscriber := wpm.manager.playbackManager.SubscribeToPlaybackStatus("nakama_peer_relay_mode")
defer wpm.manager.playbackManager.UnsubscribeFromPlaybackStatus("nakama_peer_relay_mode")
newStream := false
streamStartedPayload := WatchPartyRelayModeOriginStreamStartedPayload{}
for {
select {
case <-sessionCtx.Done():
wpm.logger.Debug().Msg("nakama: Stopped listening to playback manager")
return
case event := <-playbackSubscriber.EventCh:
currentSession, ok := wpm.currentSession.Get() // should always be ok
if !ok {
return
}
hostConn, ok := wpm.manager.GetHostConnection() // should always be ok
if !ok {
return
}
currentSession.mu.Lock()
if !currentSession.IsRelayMode {
currentSession.mu.Unlock()
continue
}
participant, ok := currentSession.Participants[hostConn.PeerId]
if !ok {
currentSession.mu.Unlock()
continue
}
if !participant.IsRelayOrigin {
currentSession.mu.Unlock()
continue
}
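// Only the peer designated as the relay origin forwards its playback events to the
// host; every other peer in a relay-mode session stays silent on this channel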
switch event := event.(type) {
// 1. Stream started
case playbackmanager.StreamStartedEvent:
wpm.logger.Debug().Msg("nakama: Relay mode origin stream started")
newStream = true
streamStartedPayload = WatchPartyRelayModeOriginStreamStartedPayload{}
// immediately pause the playback
_ = wpm.manager.playbackManager.Pause()
streamStartedPayload.Filename = event.Filename
streamStartedPayload.Filepath = event.Filepath
if strings.Contains(streamStartedPayload.Filepath, "type=file") {
streamStartedPayload.OptionalLocalPath = wpm.manager.previousPath
streamStartedPayload.StreamType = "file"
} else if strings.Contains(streamStartedPayload.Filepath, "/api/v1/torrentstream") {
streamStartedPayload.StreamType = "torrent"
streamStartedPayload.OptionalTorrentStreamStartOptions, _ = wpm.manager.torrentstreamRepository.GetPreviousStreamOptions()
} else {
streamStartedPayload.StreamType = "debrid"
streamStartedPayload.OptionalDebridStreamStartOptions, _ = wpm.manager.debridClientRepository.GetPreviousStreamOptions()
}
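// Illustrative examples of the detection above (paths are hypothetical): a filepath
// containing "type=file" (e.g. ".../stream?type=file") is a local file the host can
// map back to its own library, one containing "/api/v1/torrentstream" is an active
// torrent stream, and anything else is assumed to be a debrid stream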
// 2. Stream status changed
case playbackmanager.PlaybackStatusChangedEvent:
wpm.logger.Debug().Msg("nakama: Relay mode origin stream status changed")
if newStream {
newStream = false
// this is a new stream, send the stream started payload
_ = wpm.manager.SendMessageToHost(MessageTypeWatchPartyRelayModeOriginStreamStarted, WatchPartyRelayModeOriginStreamStartedPayload{
Filename: streamStartedPayload.Filename,
Filepath: streamStartedPayload.Filepath,
StreamType: streamStartedPayload.StreamType,
OptionalLocalPath: streamStartedPayload.OptionalLocalPath,
OptionalTorrentStreamStartOptions: streamStartedPayload.OptionalTorrentStreamStartOptions,
OptionalDebridStreamStartOptions: streamStartedPayload.OptionalDebridStreamStartOptions,
Status: event.Status,
State: event.State,
})
currentSession.mu.Unlock()
continue
}
// send the playback status to the relay host
_ = wpm.manager.SendMessageToHost(MessageTypeWatchPartyRelayModeOriginPlaybackStatus, WatchPartyRelayModeOriginPlaybackStatusPayload{
Status: event.Status,
State: event.State,
Timestamp: time.Now().UnixNano(),
})
// 3. Stream stopped
case playbackmanager.StreamStoppedEvent:
wpm.logger.Debug().Msg("nakama: Relay mode origin stream stopped")
_ = wpm.manager.SendMessageToHost(MessageTypeWatchPartyRelayModeOriginPlaybackStopped, nil)
}
currentSession.mu.Unlock()
}
}
}()
}
// handleWatchPartyRelayModePeersReadyEvent is called when the host signals that the peers are ready in relay mode
func (wpm *WatchPartyManager) handleWatchPartyRelayModePeersReadyEvent() {
if wpm.manager.IsHost() {
return
}
wpm.logger.Debug().Msg("nakama: Relay mode peers ready")
// resume playback
_ = wpm.manager.playbackManager.Resume()
}

View File

@@ -0,0 +1,413 @@
package nakama
import (
"context"
"math"
"seanime/internal/mediaplayers/mediaplayer"
"time"
)
// handleWatchPartyPlaybackStatusEvent is called when the host sends a playback status.
//
// We check if the peer is a participant in the session.
// If yes, we will update the playback status and sync the playback position.
func (wpm *WatchPartyManager) handleWatchPartyPlaybackStatusEvent(payload *WatchPartyPlaybackStatusPayload) {
if wpm.manager.IsHost() {
return
}
// wpm.logger.Debug().Msg("nakama: Received playback status from watch party")
wpm.mu.Lock()
defer wpm.mu.Unlock()
session, ok := wpm.currentSession.Get()
if !ok {
return
}
hostConn, ok := wpm.manager.GetHostConnection()
if !ok {
return
}
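// Ignore the host's playback status if this peer isn't a participant, or if it is the
// relay-mode origin (the origin drives playback itself and reports upstream instead)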
if participant, isParticipant := session.Participants[hostConn.PeerId]; !isParticipant || participant.IsRelayOrigin {
return
}
payloadStatus := payload.PlaybackStatus
// If the peer's session doesn't have a media info, do nothing
if session.CurrentMediaInfo == nil {
return
}
// If the playback manager doesn't have a status, do nothing
playbackStatus, ok := wpm.manager.playbackManager.PullStatus()
if !ok {
return
}
// Check if the message is too old to prevent acting on stale data
wpm.sequenceMu.Lock()
lastSeq := wpm.lastRxSequence
isStale := payload.SequenceNumber != 0 && payload.SequenceNumber <= lastSeq
if payload.SequenceNumber > lastSeq {
wpm.lastRxSequence = payload.SequenceNumber
}
wpm.sequenceMu.Unlock()
if isStale {
wpm.logger.Debug().Uint64("messageSeq", payload.SequenceNumber).Uint64("lastSeq", lastSeq).Msg("nakama: Ignoring stale playback status message (old sequence)")
return
}
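// Worked example: if statuses with sequence numbers 41, 42, 40 arrive in that order,
// 40 is dropped above (40 <= 42), so an outdated position can never trigger a seek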
now := time.Now().UnixNano()
driftNs := now - payload.Timestamp
timeSinceMessage := float64(driftNs) / 1e9 // elapsed seconds since the host captured this status
// Guard against clock skew (negative values) and unreasonably old messages:
// treat anything outside the 0-5s window as having no transit delay
if timeSinceMessage < 0 || timeSinceMessage > 5 {
timeSinceMessage = 0
}
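// Worked example: the host captured its status at position 100.0s and the message
// took 0.3s to arrive → timeSinceMessage = 0.3, so while playing the host is
// estimated to be at 100.3s by the time we act on the message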
// Handle play/pause state changes
if payloadStatus.Playing != playbackStatus.Playing {
if payloadStatus.Playing {
// Cancel any ongoing catch-up operation
wpm.cancelCatchUp()
// When host resumes, sync position before resuming if there's significant drift
// Calculate where the host should be NOW, not when they resumed
hostCurrentPosition := payloadStatus.CurrentTimeInSeconds + timeSinceMessage
positionDrift := hostCurrentPosition - playbackStatus.CurrentTimeInSeconds
// Check if we need to seek
shouldSeek := false
if positionDrift < 0 {
// Peer is behind, always seek if beyond threshold
shouldSeek = math.Abs(positionDrift) > ResumePositionDriftThreshold
} else {
// Peer is ahead, only seek backward if significantly ahead to prevent jitter
// This prevents backward seeks when peer is slightly ahead due to pause message delay
shouldSeek = positionDrift > ResumeAheadTolerance
}
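// Example with hypothetical threshold values (ResumePositionDriftThreshold = 1.0s,
// ResumeAheadTolerance = 2.0s): a peer 1.5s behind seeks forward, while a peer
// 1.5s ahead keeps its position, since a small lead usually just reflects the
// delay of the host's earlier pause message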
if shouldSeek {
// Calculate dynamic seek delay based on message timing
dynamicDelay := time.Duration(timeSinceMessage*1000) * time.Millisecond
if dynamicDelay < MinSeekDelay {
dynamicDelay = MinSeekDelay
}
if dynamicDelay > MaxDynamicDelay {
dynamicDelay = MaxDynamicDelay
}
// Predict where host will be when our seek takes effect
seekPosition := hostCurrentPosition + dynamicDelay.Seconds()
wpm.logger.Debug().
Float64("positionDrift", positionDrift).
Float64("hostCurrentPosition", hostCurrentPosition).
Float64("seekPosition", seekPosition).
Float64("peerPosition", playbackStatus.CurrentTimeInSeconds).
Float64("dynamicDelay", dynamicDelay.Seconds()).
Bool("peerAhead", positionDrift > 0).
Msg("nakama: Host resumed, syncing position before resume")
// Track pending seek
now := time.Now()
wpm.seekMu.Lock()
wpm.pendingSeekTime = now
wpm.pendingSeekPosition = seekPosition
wpm.seekMu.Unlock()
_ = wpm.manager.playbackManager.Seek(seekPosition)
} else if positionDrift > 0 && positionDrift <= ResumeAheadTolerance {
wpm.logger.Debug().
Float64("positionDrift", positionDrift).
Float64("hostCurrentPosition", hostCurrentPosition).
Float64("peerPosition", playbackStatus.CurrentTimeInSeconds).
Msg("nakama: Host resumed, peer slightly ahead, not seeking yet")
}
wpm.logger.Debug().Msg("nakama: Host resumed, resuming peer playback")
_ = wpm.manager.playbackManager.Resume()
} else {
wpm.logger.Debug().Msg("nakama: Host paused, handling peer pause")
wpm.handleHostPause(payloadStatus, *playbackStatus, timeSinceMessage)
}
}
// Position sync when both sides are in the same play/pause state.
// A play/pause transition was already fully handled above (including any
// accompanying seek), so re-handling the mismatched states here would issue
// redundant seek/resume/pause commands and fight the anti-jitter logic.
if payloadStatus.Playing == playbackStatus.Playing {
wpm.syncPlaybackPosition(payloadStatus, *playbackStatus, timeSinceMessage, session)
}
}
// handleHostPause handles when the host pauses playback
func (wpm *WatchPartyManager) handleHostPause(hostStatus mediaplayer.PlaybackStatus, peerStatus mediaplayer.PlaybackStatus, timeSinceMessage float64) {
// Cancel any ongoing catch-up operation
wpm.cancelCatchUp()
now := time.Now()
// The host's reported position is where it actually paused; no transit-time
// compensation is needed because its position stopped advancing at the pause
hostActualPausePosition := hostStatus.CurrentTimeInSeconds
// Calculate time difference considering message delay
timeDifference := hostActualPausePosition - peerStatus.CurrentTimeInSeconds
// If peer is significantly behind the host, let it catch up before pausing
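// Example with a hypothetical CatchUpBehindThreshold of 2.0s: the host paused at
// 120.0s while the peer is at 117.0s → timeDifference = 3.0s, so the peer keeps
// playing until it reaches 120.0s rather than freezing 3 seconds short of the host's frame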
if timeDifference > CatchUpBehindThreshold {
wpm.logger.Debug().Msgf("nakama: Host paused, peer behind by %.2f seconds, catching up", timeDifference)
wpm.startCatchUp(hostActualPausePosition, timeSinceMessage)
} else {
// Peer is close enough or ahead, pause immediately with position correction
// Use more aggressive sync threshold for pause operations
if math.Abs(timeDifference) > PausePositionSyncThreshold {
wpm.logger.Debug().
Float64("hostPausePosition", hostActualPausePosition).
Float64("peerPosition", peerStatus.CurrentTimeInSeconds).
Float64("timeDifference", timeDifference).
Float64("timeSinceMessage", timeSinceMessage).
Msg("nakama: Host paused, syncing position before pause")
// Track pending seek
wpm.seekMu.Lock()
wpm.pendingSeekTime = now
wpm.pendingSeekPosition = hostActualPausePosition
wpm.seekMu.Unlock()
_ = wpm.manager.playbackManager.Seek(hostActualPausePosition)
}
_ = wpm.manager.playbackManager.Pause()
wpm.logger.Debug().Msgf("nakama: Host paused, peer paused immediately (diff: %.2f)", timeDifference)
}
}
// startCatchUp starts a catch-up operation to sync with the host's pause position
func (wpm *WatchPartyManager) startCatchUp(hostPausePosition float64, timeSinceMessage float64) {
wpm.catchUpMu.Lock()
defer wpm.catchUpMu.Unlock()
// Cancel any existing catch-up
if wpm.catchUpCancel != nil {
wpm.catchUpCancel()
}
// Create a new context for this catch-up operation
ctx, cancel := context.WithCancel(context.Background())
wpm.catchUpCancel = cancel
go func() {
defer cancel()
ticker := time.NewTicker(CatchUpTickInterval)
defer ticker.Stop()
startTime := time.Now()
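// The loop below exits in one of three ways: the catch-up context is canceled
// (e.g. the host resumed), the timeout forces a hard seek+pause, or the peer
// reaches the host's pause position and pauses there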
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
// If catch-up is taking too long, force sync to host position
if time.Since(startTime) > MaxCatchUpDuration {
wpm.logger.Debug().Msg("nakama: Catch-up timeout, seeking to host position and pausing")
// Seek to host position and pause
now := time.Now()
wpm.seekMu.Lock()
wpm.pendingSeekTime = now
wpm.pendingSeekPosition = hostPausePosition
wpm.seekMu.Unlock()
_ = wpm.manager.playbackManager.Seek(hostPausePosition)
_ = wpm.manager.playbackManager.Pause()
return
}
// Get current playback status
currentStatus, ok := wpm.manager.playbackManager.PullStatus()
if !ok {
continue
}
// Check if we've reached or passed the host's pause position (with tighter tolerance)
positionDiff := hostPausePosition - currentStatus.CurrentTimeInSeconds
if positionDiff <= CatchUpToleranceThreshold {
wpm.logger.Debug().Msgf("nakama: Caught up to host position %.2f (current: %.2f), pausing", hostPausePosition, currentStatus.CurrentTimeInSeconds)
// Track pending seek
now := time.Now()
wpm.seekMu.Lock()
wpm.pendingSeekTime = now
wpm.pendingSeekPosition = hostPausePosition
wpm.seekMu.Unlock()
_ = wpm.manager.playbackManager.Seek(hostPausePosition)
_ = wpm.manager.playbackManager.Pause()
return
}
// Continue trying to catch up to host position
wpm.logger.Debug().
Float64("positionDiff", positionDiff).
Float64("currentPosition", currentStatus.CurrentTimeInSeconds).
Float64("hostPausePosition", hostPausePosition).
Msg("nakama: Still catching up to host pause position")
}
}
}()
}
// cancelCatchUp cancels any ongoing catch-up operation
func (wpm *WatchPartyManager) cancelCatchUp() {
wpm.catchUpMu.Lock()
defer wpm.catchUpMu.Unlock()
if wpm.catchUpCancel != nil {
wpm.catchUpCancel()
wpm.catchUpCancel = nil
}
}
// syncPlaybackPosition synchronizes playback position when both host and peer are in the same play/pause state
func (wpm *WatchPartyManager) syncPlaybackPosition(hostStatus mediaplayer.PlaybackStatus, peerStatus mediaplayer.PlaybackStatus, timeSinceMessage float64, session *WatchPartySession) {
now := time.Now()
// Ignore very old messages to prevent stale syncing
if timeSinceMessage > MaxMessageAge {
return
}
// Check if we have a pending seek operation, use dynamic compensation
wpm.seekMu.Lock()
hasPendingSeek := !wpm.pendingSeekTime.IsZero()
timeSincePendingSeek := now.Sub(wpm.pendingSeekTime)
pendingSeekPosition := wpm.pendingSeekPosition
wpm.seekMu.Unlock()
// Dynamic compensation: when a seek is pending, wait at least the message delay time before trusting the player's reported position again
dynamicSeekDelay := time.Duration(timeSinceMessage*1000) * time.Millisecond
if dynamicSeekDelay < MinSeekDelay {
dynamicSeekDelay = MinSeekDelay // Minimum delay
}
if dynamicSeekDelay > MaxSeekDelay {
dynamicSeekDelay = MaxSeekDelay // Maximum delay
}
// If we have a pending seek that's still in progress, don't sync
if hasPendingSeek && timeSincePendingSeek < dynamicSeekDelay {
wpm.logger.Debug().
Float64("timeSincePendingSeek", timeSincePendingSeek.Seconds()).
Float64("dynamicSeekDelay", dynamicSeekDelay.Seconds()).
Float64("pendingSeekPosition", pendingSeekPosition).
Msg("nakama: Ignoring sync, pending seek in progress")
return
}
// Clear pending seek if it's been long enough
if hasPendingSeek && timeSincePendingSeek >= dynamicSeekDelay {
wpm.seekMu.Lock()
wpm.pendingSeekTime = time.Time{}
wpm.pendingSeekPosition = 0
wpm.seekMu.Unlock()
}
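// Worked example: a seek to 100.0s was issued 0.2s ago with a dynamicSeekDelay of
// 0.5s → status messages are ignored for another 0.3s, since the player has likely
// not finished the seek yet and its reported position would look like huge drift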
// Dynamic compensation: Calculate where the host should be NOW based on their timestamp
hostCurrentPosition := hostStatus.CurrentTimeInSeconds
if hostStatus.Playing {
// Add the exact time that has passed since the host's status was captured
hostCurrentPosition += timeSinceMessage
}
// Calculate drift between peer and host's current position
drift := hostCurrentPosition - peerStatus.CurrentTimeInSeconds
driftAbs := math.Abs(drift)
// Get sync threshold from session settings
syncThreshold := session.Settings.SyncThreshold
// Clamp
if syncThreshold < MinSyncThreshold {
syncThreshold = MinSyncThreshold
} else if syncThreshold > MaxSyncThreshold {
syncThreshold = MaxSyncThreshold
}
// Check if we're in seek cooldown period
timeSinceLastSeek := now.Sub(wpm.lastSeekTime)
inCooldown := timeSinceLastSeek < wpm.seekCooldown
// Use more aggressive thresholds for different drift ranges
effectiveThreshold := syncThreshold
if driftAbs > 3.0 { // Large drift - be very aggressive
effectiveThreshold = syncThreshold * AggressiveSyncMultiplier
} else if driftAbs > 1.5 { // Medium drift - be more aggressive
effectiveThreshold = syncThreshold * ModerateSyncMultiplier
}
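// Example with hypothetical values (syncThreshold = 1.0s, AggressiveSyncMultiplier = 0.5,
// ModerateSyncMultiplier = 0.75): a 3.5s drift only needs to exceed 0.5s to trigger a
// seek, a 2.0s drift needs to exceed 0.75s, and small drifts must exceed the full 1.0s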
// Only sync if drift exceeds threshold and we're not in cooldown
if driftAbs > effectiveThreshold && !inCooldown {
// For the seek position, predict where the host will be when our seek takes effect
// Use the dynamic delay we calculated based on actual network conditions
seekPosition := hostCurrentPosition
if hostStatus.Playing {
// Add compensation for the time it will take for our seek to take effect
seekPosition += dynamicSeekDelay.Seconds()
}
wpm.logger.Debug().
Float64("drift", drift).
Float64("hostOriginalPosition", hostStatus.CurrentTimeInSeconds).
Float64("hostCurrentPosition", hostCurrentPosition).
Float64("seekPosition", seekPosition).
Float64("peerPosition", peerStatus.CurrentTimeInSeconds).
Float64("timeSinceMessage", timeSinceMessage).
Float64("dynamicSeekDelay", dynamicSeekDelay.Seconds()).
Float64("effectiveThreshold", effectiveThreshold).
Msg("nakama: Syncing playback position with dynamic compensation")
// Track pending seek
wpm.seekMu.Lock()
wpm.pendingSeekTime = now
wpm.pendingSeekPosition = seekPosition
wpm.seekMu.Unlock()
_ = wpm.manager.playbackManager.Seek(seekPosition)
wpm.lastSeekTime = now
}
}