// MediaEngine — WebCodecs-based encode/decode pipeline (camera, screen share, audio).
// import { EventEmitter } from 'events'; // Node.js module dependency might be missing

// Custom lightweight Event Emitter for browser compatibility if needed,
// but 'events' module is usually polyfilled by Vite/Webpack.
// If not, we can use a simple class.
class SimpleEventEmitter {
|
|
private listeners: { [key: string]: Function[] } = {};
|
|
|
|
on(event: string, listener: Function) {
|
|
if (!this.listeners[event]) this.listeners[event] = [];
|
|
this.listeners[event].push(listener);
|
|
return () => this.off(event, listener);
|
|
}
|
|
|
|
off(event: string, listener: Function) {
|
|
if (!this.listeners[event]) return;
|
|
this.listeners[event] = this.listeners[event].filter(l => l !== listener);
|
|
}
|
|
|
|
emit(event: string, ...args: any[]) {
|
|
if (!this.listeners[event]) return;
|
|
this.listeners[event].forEach(l => {
|
|
try { l(...args); }
|
|
catch (e) { console.error(`Error in event listener for ${event}:`, e); }
|
|
});
|
|
}
|
|
}
// Wire format for one compressed media frame emitted by / fed into MediaEngine.
export interface EncodedFrame {
  // Media kind. Screen-share frames are also 'video'; see streamType below.
  type: 'video' | 'audio';
  // Compressed payload (H.264 Annex B for video/screen, Opus for audio,
  // per the encoder configs in MediaEngine).
  data: Uint8Array;
  // True for H.264 key frames; audio chunks are marked key by the encoder.
  isKeyFrame: boolean;
  // Presentation timestamp from the WebCodecs chunk — microseconds
  // (MediaEngine's keyframe cadence math uses 2_000_000 per 2 s).
  timestamp: number;
  // Frame duration in the same time base, when the codec reports one.
  duration?: number;
  streamType?: 'video' | 'screen'; // Added for dual stream support
}
export class MediaEngine extends SimpleEventEmitter {
|
|
private videoEncoder: VideoEncoder | null = null;
|
|
private screenEncoder: VideoEncoder | null = null; // Separate encoder for screen
|
|
private audioEncoder: AudioEncoder | null = null;
|
|
|
|
// Decoders: Map<userId, Decoder> -> Now needs to distinguish stream types
|
|
// We can use keys like "userId-video" and "userId-screen"
|
|
private videoDecoders: Map<string, VideoDecoder> = new Map();
|
|
// Decoders: Map<userId, Decoder> -> Now needs to distinguish stream types
|
|
// We can use keys like "userId-video" and "userId-screen"
|
|
// private videoDecoders: Map<string, VideoDecoder> = new Map(); // Already declared above
|
|
private audioDecoders: Map<string, AudioDecoder> = new Map();
|
|
private videoConfig: VideoEncoderConfig = {
|
|
codec: 'avc1.42001f', // H.264 Baseline Profile Level 3.1 (720p safe)
|
|
width: 1280,
|
|
height: 720,
|
|
bitrate: 2_000_000,
|
|
framerate: 30,
|
|
latencyMode: 'realtime',
|
|
avc: { format: 'annexb' }
|
|
};
|
|
|
|
private screenConfig: VideoEncoderConfig = {
|
|
// High Profile Level 4.2
|
|
codec: 'avc1.64002a',
|
|
width: 1920,
|
|
height: 1080,
|
|
bitrate: 2_000_000, // Reduced to 2 Mbps for better stability/FPS
|
|
framerate: 30,
|
|
latencyMode: 'realtime', // Changed from 'quality' to 'realtime' for lower latency
|
|
avc: { format: 'annexb' }
|
|
};
|
|
|
|
// Audio Config
|
|
private audioConfig: AudioEncoderConfig = {
|
|
codec: 'opus',
|
|
sampleRate: 48000,
|
|
numberOfChannels: 1,
|
|
bitrate: 32000
|
|
};
|
|
|
|
constructor() {
|
|
super();
|
|
this.initializeVideoEncoder();
|
|
this.initializeScreenEncoder();
|
|
this.initializeAudioEncoder();
|
|
}
|
|
|
|
private initializeVideoEncoder() {
|
|
try {
|
|
this.videoEncoder = new VideoEncoder({
|
|
output: (chunk, _metadata) => {
|
|
const buffer = new Uint8Array(chunk.byteLength);
|
|
chunk.copyTo(buffer);
|
|
|
|
// With 'annexb', SPS/PPS should be in the keyframe chunk data.
|
|
|
|
this.emit('encoded-video', {
|
|
type: 'video',
|
|
streamType: 'video',
|
|
data: buffer,
|
|
isKeyFrame: chunk.type === 'key',
|
|
timestamp: chunk.timestamp,
|
|
duration: chunk.duration
|
|
} as EncodedFrame);
|
|
},
|
|
error: (e) => console.error('VideoEncoder error:', e),
|
|
});
|
|
|
|
this.videoEncoder.configure(this.videoConfig);
|
|
console.log('[MediaEngine] VideoEncoder configured:', this.videoConfig);
|
|
} catch (e) {
|
|
console.error('[MediaEngine] Failed to init VideoEncoder:', e);
|
|
}
|
|
}
|
|
|
|
private initializeScreenEncoder() {
|
|
try {
|
|
this.screenEncoder = new VideoEncoder({
|
|
output: (chunk, _metadata) => {
|
|
const buffer = new Uint8Array(chunk.byteLength);
|
|
chunk.copyTo(buffer);
|
|
this.emit('encoded-video', {
|
|
type: 'video',
|
|
streamType: 'screen',
|
|
data: buffer,
|
|
isKeyFrame: chunk.type === 'key',
|
|
timestamp: chunk.timestamp,
|
|
duration: chunk.duration
|
|
} as EncodedFrame);
|
|
},
|
|
error: (e) => console.error('ScreenEncoder error:', e),
|
|
});
|
|
this.screenEncoder.configure(this.screenConfig);
|
|
console.log('[MediaEngine] ScreenEncoder configured:', this.screenConfig);
|
|
} catch (e) {
|
|
console.error('[MediaEngine] Failed to init ScreenEncoder:', e);
|
|
}
|
|
}
|
|
|
|
private initializeAudioEncoder() {
|
|
try {
|
|
this.audioEncoder = new AudioEncoder({
|
|
output: (chunk, _metadata) => {
|
|
const buffer = new Uint8Array(chunk.byteLength);
|
|
chunk.copyTo(buffer);
|
|
this.emit('encoded-audio', {
|
|
type: 'audio',
|
|
data: buffer,
|
|
isKeyFrame: chunk.type === 'key',
|
|
timestamp: chunk.timestamp,
|
|
duration: chunk.duration
|
|
} as EncodedFrame);
|
|
},
|
|
error: (e) => console.error('[MediaEngine] AudioEncoder error:', e),
|
|
});
|
|
this.audioEncoder.configure(this.audioConfig);
|
|
console.log('[MediaEngine] AudioEncoder configured:', this.audioConfig);
|
|
} catch (e) {
|
|
console.error('[MediaEngine] Failed to init AudioEncoder:', e);
|
|
}
|
|
}
|
|
|
|
|
|
// --- Video Encoding ---
|
|
|
|
encodeVideoFrame(frame: VideoFrame, streamType: 'video' | 'screen' = 'video') {
|
|
const encoder = streamType === 'screen' ? this.screenEncoder : this.videoEncoder;
|
|
|
|
if (encoder && encoder.state === 'configured') {
|
|
// Force keyframe every 2 seconds (60 frames)
|
|
const keyFrame = frame.timestamp % 2000000 < 33000;
|
|
encoder.encode(frame, { keyFrame });
|
|
frame.close();
|
|
} else {
|
|
frame.close();
|
|
console.warn(`[MediaEngine] ${streamType === 'screen' ? 'ScreenEncoder' : 'VideoEncoder'} not ready`);
|
|
}
|
|
}
|
|
|
|
// --- Video Decoding ---
|
|
|
|
decodeVideoChunk(chunkData: Uint8Array, userId: number, isKeyFrame: boolean, timestamp: number, streamType: 'video' | 'screen' = 'video') {
|
|
const decoderKey = `${userId}-${streamType}`;
|
|
let decoder = this.videoDecoders.get(decoderKey);
|
|
|
|
if (!decoder) {
|
|
decoder = new VideoDecoder({
|
|
output: (frame) => {
|
|
this.emit('decoded-video', { userId, frame, streamType });
|
|
},
|
|
error: (e) => console.error(`VideoDecoder error (${decoderKey}):`, e),
|
|
});
|
|
|
|
// Configure based on stream type
|
|
// Note: Decoders are usually more flexible, but giving a hint helps.
|
|
// Screen share uses High Profile, Video uses Baseline.
|
|
const config: VideoDecoderConfig = streamType === 'screen'
|
|
? { codec: 'avc1.64002a', optimizeForLatency: false }
|
|
: { codec: 'avc1.42001f', optimizeForLatency: true };
|
|
|
|
decoder.configure(config);
|
|
this.videoDecoders.set(decoderKey, decoder);
|
|
console.log(`[MediaEngine] Created decoder for ${decoderKey} with codec ${config.codec}`);
|
|
}
|
|
|
|
if (decoder.state === 'configured') {
|
|
const chunk = new EncodedVideoChunk({
|
|
type: isKeyFrame ? 'key' : 'delta',
|
|
timestamp: timestamp,
|
|
data: chunkData,
|
|
});
|
|
try {
|
|
decoder.decode(chunk);
|
|
} catch (e) {
|
|
console.error(`[MediaEngine] Decode error ${decoderKey}:`, e);
|
|
}
|
|
}
|
|
}
|
|
|
|
// --- Audio ---
|
|
|
|
// --- Audio (PCM Fallback) ---
|
|
|
|
encodeAudioData(data: AudioData) {
|
|
if (this.audioEncoder && this.audioEncoder.state === 'configured') {
|
|
this.audioEncoder.encode(data);
|
|
data.close();
|
|
} else {
|
|
data.close();
|
|
// console.warn('[MediaEngine] AudioEncoder not ready');
|
|
}
|
|
}
|
|
|
|
decodeAudioChunk(chunkData: Uint8Array, userId: number, timestamp: number) {
|
|
console.log(`[MediaEngine] decodeAudioChunk called: userId=${userId}, dataLen=${chunkData.length}, ts=${timestamp}`);
|
|
const decoderKey = `${userId}-audio`;
|
|
let decoder = this.audioDecoders.get(decoderKey);
|
|
|
|
if (!decoder) {
|
|
decoder = new AudioDecoder({
|
|
output: (data) => {
|
|
this.emit('decoded-audio', { userId, data });
|
|
},
|
|
error: (e) => console.error(`[MediaEngine] AudioDecoder error (${userId}):`, e)
|
|
});
|
|
decoder.configure({
|
|
codec: 'opus',
|
|
sampleRate: 48000,
|
|
numberOfChannels: 1
|
|
});
|
|
this.audioDecoders.set(decoderKey, decoder);
|
|
console.log(`[MediaEngine] Created AudioDecoder for ${userId}`);
|
|
}
|
|
|
|
if (decoder.state === 'configured') {
|
|
const chunk = new EncodedAudioChunk({
|
|
type: 'key', // Opus is usually self-contained
|
|
timestamp: timestamp,
|
|
data: chunkData,
|
|
});
|
|
try {
|
|
decoder.decode(chunk);
|
|
} catch (e) {
|
|
console.error(`[MediaEngine] Audio Decode error ${decoderKey}:`, e);
|
|
}
|
|
} else {
|
|
console.warn(`[MediaEngine] AudioDecoder not configured, state=${decoder.state}`);
|
|
}
|
|
}
|
|
|
|
cleanup() {
|
|
if (this.videoEncoder && this.videoEncoder.state !== 'closed') this.videoEncoder.close();
|
|
if (this.screenEncoder && this.screenEncoder.state !== 'closed') this.screenEncoder.close();
|
|
if (this.audioEncoder && this.audioEncoder.state !== 'closed') this.audioEncoder.close();
|
|
|
|
this.videoDecoders.forEach(d => {
|
|
if (d.state !== 'closed') d.close();
|
|
});
|
|
this.audioDecoders.forEach(d => {
|
|
if (d.state !== 'closed') d.close();
|
|
});
|
|
|
|
this.videoDecoders.clear();
|
|
this.audioDecoders.clear();
|
|
}
|
|
}