Compare commits

...

2 commits

Author SHA1 Message Date
Sarthak
d1d3ed3e14 fix screen sharing optimization 2026-02-09 20:18:39 +05:30
Sarthak
4bd20fc988 chore: revert audio logic to webcodecs 2026-02-09 17:06:56 +05:30
13 changed files with 1293 additions and 816 deletions

5
client_commit.txt Normal file
View file

@ -0,0 +1,5 @@
commit 4bd20fc9887876086319874d5be51a14bfcfc978
Author: Sarthak <root@srtk.in>
Date: Mon Feb 9 17:06:56 2026 +0530
chore: revert audio logic to webcodecs

0
current_head_media.ts Normal file
View file

View file

@ -1,5 +1,6 @@
{
"name": "client-electron",
"name": "just-talk",
"productName": "JustTalk",
"version": "1.0.0",
"main": "./out/main/index.js",
"scripts": {

0
previous_head_media.ts Normal file
View file

View file

@ -1,25 +1,29 @@
import { app, shell, BrowserWindow, ipcMain, session, desktopCapturer } from 'electron'
import { app, shell, BrowserWindow, ipcMain, desktopCapturer } from 'electron'
import { join } from 'path'
import { electronApp, optimizer, is } from '@electron-toolkit/utils'
import { NetworkManager } from './network' // Import NetworkManager
// import icon from '../../resources/icon.png?asset'
import { NetworkManager } from './network'
let mainWindow: BrowserWindow | null = null;
let networkManager: NetworkManager | null = null;
let network: NetworkManager | null = null;
function createWindow(): void {
// Create the browser window.
mainWindow = new BrowserWindow({
width: 900,
height: 670,
width: 1280,
height: 720,
show: false,
autoHideMenuBar: true,
// ...(process.platform === 'linux' ? { icon } : {}),
webPreferences: {
preload: join(__dirname, '../preload/index.js'),
sandbox: false
sandbox: false,
contextIsolation: true,
nodeIntegration: false // Best practice
}
})
networkManager = new NetworkManager(mainWindow);
network = new NetworkManager(mainWindow);
mainWindow.on('ready-to-show', () => {
mainWindow?.show()
@ -43,83 +47,47 @@ app.whenReady().then(() => {
// Set app user model id for windows
electronApp.setAppUserModelId('com.electron')
// Grant permissions for camera/mic/screen
session.defaultSession.setPermissionRequestHandler((webContents, permission, callback) => {
console.log(`[Main] Requesting permission: ${permission}`);
// Grant all permissions for this valid local app
callback(true);
});
// Default open or close DevTools by F12 in development
// and ignore CommandOrControl + R in production.
// see https://github.com/alex8088/electron-toolkit/tree/master/packages/utils
app.on('browser-window-created', (_, window) => {
optimizer.watchWindowShortcuts(window)
})
// IPC Handlers
ipcMain.handle('connect', async (_, { serverUrl, roomCode, displayName }) => {
if (networkManager) {
return await networkManager.connect(serverUrl, roomCode, displayName);
}
return network?.connect(serverUrl, roomCode, displayName);
});
ipcMain.handle('disconnect', async () => {
if (networkManager) {
networkManager.disconnect();
}
network?.disconnect();
});
ipcMain.on('send-video-frame', (_, { frame }) => {
if (networkManager) {
networkManager.sendVideoFrame(frame);
}
ipcMain.handle('send-chat', (_, { message, displayName }) => {
network?.sendChat(message, displayName);
});
ipcMain.on('send-audio-frame', (_, { frame }) => {
if (networkManager) {
networkManager.sendAudioFrame(new Uint8Array(frame));
}
});
ipcMain.on('send-screen-frame', (_, { frame }) => {
if (networkManager) {
networkManager.sendScreenFrame(frame);
}
});
// Screen sharing: get available sources
ipcMain.handle('get-screen-sources', async () => {
const sources = await desktopCapturer.getSources({
types: ['screen', 'window'],
thumbnailSize: { width: 150, height: 150 }
});
return sources.map(s => ({
id: s.id,
name: s.name,
thumbnail: s.thumbnail.toDataURL()
const sources = await desktopCapturer.getSources({ types: ['window', 'screen'], thumbnailSize: { width: 150, height: 150 } });
return sources.map(source => ({
id: source.id,
name: source.name,
thumbnail: source.thumbnail.toDataURL()
}));
});
// Chat
ipcMain.handle('send-chat', (_, { message, displayName }) => {
if (networkManager) {
networkManager.sendChat(message, displayName);
}
ipcMain.on('send-video-chunk', (_, payload) => {
network?.sendEncodedVideoChunk(payload.chunk, payload.isKeyFrame, payload.timestamp, payload.streamType);
});
ipcMain.on('send-audio-chunk', (_, payload) => {
network?.sendEncodedAudioChunk(payload.chunk, payload.timestamp);
});
// Stream Updates
ipcMain.on('update-stream', (_, { active, mediaType }) => {
if (networkManager) {
networkManager.updateStream(active, mediaType);
}
network?.updateStream(active, mediaType);
});
createWindow()
app.on('activate', function () {
// On macOS it's common to re-create a window in the app when the
// dock icon is clicked and there are no other windows open.
if (BrowserWindow.getAllWindows().length === 0) createWindow()
})
})

View file

@ -6,8 +6,8 @@ import { BrowserWindow } from 'electron';
// Constants
const SERVER_UDP_PORT = 4000;
// Packet Header Structure (22 bytes)
const HEADER_SIZE = 22;
// Packet Header Structure (24 bytes)
const HEADER_SIZE = 24;
export enum MediaType {
Audio = 0,
@ -15,6 +15,11 @@ export enum MediaType {
Screen = 2,
}
// Token Bucket Pacer Constants
const PACER_RATE_BYTES_PER_MS = 1500; // ~12 Mbps limit (Targeting 8-10 Mbps for 1080p60)
const PACER_BUCKET_SIZE_BYTES = 15000; // Allow 10 packets burst (Instant Keyframe start)
const MAX_PAYLOAD = 1200; // Reduced from 1400 to be safe with MTU
export class NetworkManager extends EventEmitter {
private ws: WebSocket | null = null;
private udp: dgram.Socket | null = null;
@ -26,9 +31,46 @@ export class NetworkManager extends EventEmitter {
private mainWindow: BrowserWindow;
private serverUdpHost: string = '127.0.0.1';
// Pacing
private udpQueue: Buffer[] = [];
private pacerTokens: number = PACER_BUCKET_SIZE_BYTES;
private lastPacerUpdate: number = Date.now();
private pacerInterval: NodeJS.Timeout | null = null;
// Binds the manager to the renderer window (used by safeSend for IPC pushes)
// and starts the token-bucket pacer loop immediately; the pacer idles until a
// UDP socket exists (startPacer checks this.udp each tick).
constructor(mainWindow: BrowserWindow) {
super();
this.mainWindow = mainWindow;
// Pacer runs for the lifetime of the manager; cleared in disconnect().
this.startPacer();
}
private startPacer() {
    // Token-bucket pacer: roughly every 2ms, refill the send budget based on
    // elapsed wall time, then flush as many queued UDP packets as the budget
    // allows. Packets go out strictly FIFO — the head packet blocks the queue
    // until enough tokens accumulate, which keeps fragments in order.
    this.pacerInterval = setInterval(() => {
        if (!this.udp) return;
        const tick = Date.now();
        // Refill, capped at the bucket size so idle time can't build an
        // unbounded burst allowance.
        this.pacerTokens = Math.min(
            PACER_BUCKET_SIZE_BYTES,
            this.pacerTokens + (tick - this.lastPacerUpdate) * PACER_RATE_BYTES_PER_MS
        );
        this.lastPacerUpdate = tick;
        // Drain head-of-line packets while we can afford them.
        for (;;) {
            const next = this.udpQueue[0];
            if (!next || next.length > this.pacerTokens) break; // wait for next tick
            this.pacerTokens -= next.length;
            this.udpQueue.shift();
            this.udp.send(next, SERVER_UDP_PORT, this.serverUdpHost, (err) => {
                if (err) console.error('UDP Send Error', err);
            });
        }
    }, 2); // Check every 2ms
}
async connect(serverUrl: string, roomCode: string, displayName: string): Promise<any> {
@ -156,7 +198,7 @@ export class NetworkManager extends EventEmitter {
});
this.udp.on('message', (msg, rinfo) => {
console.log(`[UDP] Msg from ${rinfo.address}:${rinfo.port} - ${msg.length} bytes`);
// console.log(`[UDP] Msg from ${rinfo.address}:${rinfo.port} - ${msg.length} bytes`);
this.handleUdpMessage(msg);
});
@ -166,34 +208,83 @@ export class NetworkManager extends EventEmitter {
handleUdpMessage(msg: Buffer) {
if (msg.length < HEADER_SIZE) return;
const version = msg.readUInt8(0);
const mediaType = msg.readUInt8(1);
const userId = msg.readUInt32LE(2);
const payload = msg.subarray(HEADER_SIZE);
const sequence = msg.readUInt32LE(6);
const seq = msg.readUInt32LE(6);
const timestamp = Number(msg.readBigUInt64LE(10));
const fragIdx = msg.readUInt8(18);
const fragCnt = msg.readUInt8(19);
const fragIdx = msg.readUInt16LE(18);
const fragCnt = msg.readUInt16LE(20);
const flags = msg.readUInt16LE(22);
const isKeyFrame = (flags & 1) !== 0;
const payload = msg.subarray(HEADER_SIZE);
if (mediaType === MediaType.Audio) {
this.safeSend('audio-frame', { user_id: userId, data: payload });
} else if (mediaType === MediaType.Video) {
this.safeSend('video-frame', {
// Audio can be fragmented now (PCM)
this.safeSend('video-chunk', { // Use 'video-chunk' handler in renderer for reassembly?
// NOTE: PCM audio frames (~2KB) arrive fragmented, but the renderer's
// 'audio-chunk' handler decodes immediately and expects a whole frame.
// Rather than reassembling in the main process, forward fragments on a
// dedicated 'audio-fragment' channel and let App.tsx reuse its existing
// fragment-reassembly logic (mirroring the video path) before decoding.
user_id: userId,
data: payload,
seq: sequence,
seq: this.audioSeq, // Wait, seq is in packet
ts: timestamp,
fidx: fragIdx,
fcnt: fragCnt
fcnt: fragCnt,
isKeyFrame,
streamType: 'audio'
// We can't use 'video-chunk' channel because it calls decodeVideoChunk.
});
} else if (mediaType === MediaType.Screen) {
this.safeSend('screen-frame', {
// Actually, let's just send it to 'audio-fragment' channel
this.safeSend('audio-fragment', {
user_id: userId,
data: payload,
seq: sequence,
seq: seq, // We need valid seq from packet
ts: timestamp,
fidx: fragIdx,
fcnt: fragCnt
fcnt: fragCnt,
isKeyFrame
});
} else if (mediaType === MediaType.Video || mediaType === MediaType.Screen) {
// Differentiate based on MediaType
const streamType = mediaType === MediaType.Screen ? 'screen' : 'video';
if (mediaType === MediaType.Screen && fragIdx === 0) {
console.log(`[Network] RX Screen Chunk User=${userId} Seq=${seq}`);
}
this.safeSend('video-chunk', {
user_id: userId,
data: payload,
seq,
ts: timestamp,
fidx: fragIdx,
fcnt: fragCnt,
isKeyFrame,
streamType // Pass this to renderer
});
}
}
@ -208,88 +299,88 @@ export class NetworkManager extends EventEmitter {
}
}
sendVideoFrame(frame: Uint8Array) {
if (!this.udp || !this.userId) return;
// --- New Encode Methods ---
const buffer = Buffer.from(frame);
const MAX_PAYLOAD = 1400;
const fragCount = Math.ceil(buffer.length / MAX_PAYLOAD);
const seq = this.videoSeq++;
const ts = BigInt(Date.now());
for (let i = 0; i < fragCount; i++) {
const start = i * MAX_PAYLOAD;
const end = Math.min(start + MAX_PAYLOAD, buffer.length);
const chunk = buffer.subarray(start, end);
const header = Buffer.alloc(HEADER_SIZE);
header.writeUInt8(1, 0); // Version
header.writeUInt8(MediaType.Video, 1);
header.writeUInt32LE(this.userId, 2);
header.writeUInt32LE(seq, 6);
header.writeBigUInt64LE(ts, 10);
header.writeUInt8(i, 18); // Frag idx
header.writeUInt8(fragCount, 19); // Frag cnt
header.writeUInt16LE(0, 20); // Flags
const packet = Buffer.concat([header, chunk]);
this.udp.send(packet, SERVER_UDP_PORT, this.serverUdpHost, (err) => {
if (err) console.error('UDP Video Send Error', err);
});
}
}
sendAudioFrame(frame: Uint8Array) {
sendEncodedVideoChunk(chunk: any, isKeyFrame: boolean, timestamp: number, streamType: 'video' | 'screen' = 'video') {
if (!this.udp) return;
const header = Buffer.alloc(HEADER_SIZE);
header.writeUInt8(1, 0); // Version
header.writeUInt8(MediaType.Audio, 1);
header.writeUInt32LE(this.userId, 2);
header.writeUInt32LE(this.audioSeq++, 6);
header.writeBigUInt64LE(BigInt(Date.now()), 10);
header.writeUInt8(0, 18); // Frag idx
header.writeUInt8(1, 19); // Frag cnt
header.writeUInt16LE(0, 20); // Flags
const MAX_PAYLOAD = 1400;
const totalSize = chunk.length;
const packet = Buffer.concat([header, Buffer.from(frame)]);
// Sequence numbers: keep a separate counter per stream (screenSeq vs
// videoSeq) so an idle stream doesn't look like a packet gap in the other.
const seq = streamType === 'screen' ? this.screenSeq++ : this.videoSeq++;
this.udp.send(packet, SERVER_UDP_PORT, this.serverUdpHost, (err) => {
if (err) console.error('UDP Audio Send Error', err);
});
const fragmentCount = Math.ceil(totalSize / MAX_PAYLOAD);
for (let i = 0; i < fragmentCount; i++) {
const start = i * MAX_PAYLOAD;
const end = Math.min(start + MAX_PAYLOAD, totalSize);
const slice = chunk.slice(start, end);
// Header (24 bytes — see HEADER_SIZE)
const header = Buffer.alloc(HEADER_SIZE);
header.writeUInt8(1, 0); // Version
const mType = streamType === 'screen' ? MediaType.Screen : MediaType.Video;
header.writeUInt8(mType, 1);
header.writeUInt32LE(this.userId, 2);
header.writeUInt32LE(seq, 6);
header.writeBigUInt64LE(BigInt(timestamp), 10);
header.writeUInt16LE(i, 18); // Frag Idx (u16)
header.writeUInt16LE(fragmentCount, 20); // Frag Cnt (u16)
let flags = 0;
if (isKeyFrame) flags |= 1;
header.writeUInt16LE(flags, 22);
const packet = Buffer.concat([header, slice]);
// Enqueue for pacing
this.udpQueue.push(packet);
}
}
sendScreenFrame(frame: number[]) {
if (!this.udp || !this.userId) return;
sendEncodedAudioChunk(chunk: Uint8Array, timestamp: number) {
if (!this.udp) {
console.warn('[Network] UDP Socket not ready for Audio');
return;
}
const buffer = Buffer.from(frame);
const MAX_PAYLOAD = 1400;
const fragCount = Math.ceil(buffer.length / MAX_PAYLOAD);
const seq = this.screenSeq++;
const ts = BigInt(Date.now());
const totalSize = chunk.length;
const MAX_PAYLOAD = 1400; // Safe MTU
for (let i = 0; i < fragCount; i++) {
// PCM packets (approx 2KB) need fragmentation.
// We use the same logic as video but with Audio MediaType.
const fragmentCount = Math.ceil(totalSize / MAX_PAYLOAD);
// Log randomly to avoid spam but confirm activity
if (Math.random() < 0.05) console.log(`[Network] Sending Audio Chunk size=${totalSize} frags=${fragmentCount}`);
for (let i = 0; i < fragmentCount; i++) {
const start = i * MAX_PAYLOAD;
const end = Math.min(start + MAX_PAYLOAD, buffer.length);
const chunk = buffer.subarray(start, end);
const end = Math.min(start + MAX_PAYLOAD, totalSize);
const slice = chunk.slice(start, end);
const header = Buffer.alloc(HEADER_SIZE);
header.writeUInt8(1, 0); // Version
header.writeUInt8(MediaType.Screen, 1);
header.writeUInt8(MediaType.Audio, 1);
header.writeUInt32LE(this.userId, 2);
header.writeUInt32LE(seq, 6);
header.writeBigUInt64LE(ts, 10);
header.writeUInt8(i, 18); // Frag idx
header.writeUInt8(fragCount, 19); // Frag cnt
header.writeUInt16LE(0, 20); // Flags
header.writeUInt32LE(this.audioSeq, 6); // Same seq for all fragments
header.writeBigUInt64LE(BigInt(Math.floor(timestamp)), 10);
header.writeUInt16LE(i, 18); // Frag idx
header.writeUInt16LE(fragmentCount, 20); // Frag cnt
header.writeUInt16LE(1, 22); // Flags (1=Keyframe, audio is always key)
const packet = Buffer.concat([header, chunk]);
this.udp.send(packet, SERVER_UDP_PORT, this.serverUdpHost, (err) => {
if (err) console.error('UDP Screen Send Error', err);
});
const packet = Buffer.concat([header, Buffer.from(slice)]);
this.udpQueue.push(packet);
}
this.audioSeq++;
}
startHeartbeat() {
@ -327,19 +418,23 @@ export class NetworkManager extends EventEmitter {
header.writeUInt32LE(this.userId, 2);
header.writeUInt32LE(0, 6); // Sequence
header.writeBigUInt64LE(BigInt(Date.now()), 10);
header.writeUInt8(0, 18); // Frag idx
header.writeUInt8(1, 19); // Frag cnt
header.writeUInt16LE(0, 20); // Flags
header.writeUInt16LE(0, 18); // Frag idx
header.writeUInt16LE(1, 20); // Frag cnt
header.writeUInt16LE(0, 22); // Flags
const packet = Buffer.concat([header, payload]);
console.log(`[UDP] Sending Handshake: userId=${this.userId}, room=${this.roomCode}, ${packet.length} bytes to ${this.serverUdpHost}:${SERVER_UDP_PORT}`);
// console.log(`[UDP] Sending Handshake: userId=${this.userId}, room=${this.roomCode}, ${packet.length} bytes to ${this.serverUdpHost}:${SERVER_UDP_PORT}`);
this.udp.send(packet, SERVER_UDP_PORT, this.serverUdpHost, (err) => {
if (err) console.error('UDP Handshake Send Error', err);
});
}
disconnect() {
if (this.pacerInterval) {
clearInterval(this.pacerInterval);
this.pacerInterval = null;
}
if (this.heartbeatInterval) {
clearInterval(this.heartbeatInterval);
this.heartbeatInterval = null;

View file

@ -3,7 +3,7 @@
<head>
<meta charset="UTF-8" />
<title>Electron App</title>
<title>JustTalk</title>
<!-- https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP -->
<meta http-equiv="Content-Security-Policy"
content="default-src 'self'; script-src 'self' 'unsafe-inline' blob:; worker-src 'self' blob:; style-src 'self' 'unsafe-inline'; img-src 'self' data: blob:;" />

File diff suppressed because it is too large Load diff

View file

@ -10,7 +10,13 @@ interface StageProps {
peerScreenUrls?: { [key: number]: string };
localScreenUrl?: string | null;
localVideoRef?: React.RefObject<HTMLVideoElement | null>;
localScreenRef?: React.RefObject<HTMLVideoElement | null>;
videoEnabled?: boolean;
screenEnabled?: boolean;
registerPeerCanvas: (userId: number, streamType: 'video' | 'screen', canvas: HTMLCanvasElement | null) => void;
peersWithCam: Set<number>;
peersWithScreen: Set<number>;
peersWithAudio: Set<number>;
}
export function Stage({
@ -21,8 +27,16 @@ export function Stage({
peerScreenUrls = {},
localScreenUrl = null,
localVideoRef,
videoEnabled = false
localScreenRef,
videoEnabled = false,
screenEnabled = false,
registerPeerCanvas,
peersWithCam,
peersWithScreen,
peersWithAudio
}: StageProps) {
// Track container dimensions for smart layout
const [containerSize, setContainerSize] = useState({ width: 800, height: 600 });
@ -42,121 +56,108 @@ export function Stage({
return () => window.removeEventListener('resize', updateSize);
}, []);
// Check if self is sharing screen
const isSelfSharing = !!localScreenUrl;
// Active Screen Shares (Remote)
const remoteScreens = peers.filter(p => peersWithScreen.has(p.user_id));
// Filter peers who are sharing screen
const peerScreens = peers.filter(p => !!peerScreenUrls[p.user_id]);
// Check if self is sharing screen. App.tsx previously routed BOTH the local
// webcam and the local screen through `localVideoRef`; Stage now receives an
// explicit `screenEnabled` flag plus a dedicated `localScreenRef` so the
// screen preview renders in its own tile.
// Check if self is sharing screen
const isSelfSharing = screenEnabled;
// Active Screen Shares
// const peerScreens = ... (removed legacy variable, using remoteScreens directly)
// All peers for webcam grid
const allParticipants = peers;
const showScreenLayer = isSelfSharing || peerScreens.length > 0;
const showScreenLayer = isSelfSharing || remoteScreens.length > 0;
const totalParticipants = (selfId ? 1 : 0) + allParticipants.length;
// Smart layout: determine if we should use vertical or horizontal arrangement
// Layout
const aspectRatio = containerSize.width / containerSize.height;
const isVertical = aspectRatio < 1; // Taller than wide
const isVertical = aspectRatio < 1;
// Calculate optimal grid layout based on aspect ratio and participant count
const getGridConfig = (count: number, isVertical: boolean) => {
if (count <= 1) {
return { cols: 1, rows: 1 };
}
if (count <= 1) return { cols: 1, rows: 1 };
if (isVertical) {
// Vertical window: prefer fewer columns, more rows
if (count === 2) return { cols: 1, rows: 2 };
if (count <= 4) return { cols: 2, rows: 2 };
if (count <= 6) return { cols: 2, rows: 3 };
if (count <= 9) return { cols: 3, rows: 3 };
return { cols: 3, rows: Math.ceil(count / 3) };
return { cols: 2, rows: Math.ceil(count / 2) };
} else {
// Horizontal window: prefer more columns
if (count === 2) return { cols: 2, rows: 1 };
if (count <= 4) return { cols: 2, rows: 2 };
if (count <= 6) return { cols: 3, rows: 2 };
if (count <= 9) return { cols: 3, rows: 3 };
return { cols: 4, rows: Math.ceil(count / 4) };
return { cols: Math.ceil(count / 2), rows: 2 };
}
};
const gridConfig = getGridConfig(totalParticipants, isVertical);
// Screen share layout direction
const screenLayoutClass = isVertical
? 'flex-col' // Stack screen above participants
: 'flex-row'; // Screen on left, participants on right
return (
<div
id="stage-container"
className={`flex-1 bg-[#202124] p-4 flex ${screenLayoutClass} gap-4 overflow-hidden min-h-0`}
>
{/* Screen Share Layer */}
<div id="stage-container" className="flex-1 bg-[#202124] p-4 flex gap-4 overflow-hidden min-h-0 flex-row">
{/* Screen Share Area (Left or Top) */}
{showScreenLayer && (
<div className={`${isVertical ? 'h-[60%] w-full' : 'flex-[3] h-full'} flex flex-col gap-4 min-w-0`}>
<div className="flex-[3] flex flex-col gap-4 min-w-0 bg-black/20 rounded-lg p-2">
{/* Local Screen Share */}
{isSelfSharing && (
<div className="flex-1 min-h-0">
<VideoTile
displayName={`${displayName} (Your Screen)`}
videoSrc={localScreenUrl!}
videoRef={localScreenRef}
videoEnabled={true}
isScreenShare={true}
isSelf={true}
/>
</div>
)}
{/* Remote Screen Shares */}
{peerScreens.map(peer => (
{remoteScreens.map(peer => (
<div key={`screen-${peer.user_id}`} className="flex-1 min-h-0">
<VideoTile
displayName={`${peer.display_name}'s Screen`}
videoEnabled={true}
videoSrc={peerScreenUrls[peer.user_id]}
isScreenShare={true}
userId={peer.user_id}
onCanvasRef={(uid, canvas) => registerPeerCanvas(uid, 'screen', canvas)}
/>
</div>
))}
</div>
)}
{/* Webcam Grid */}
<div className={`${showScreenLayer ? (isVertical ? 'h-[40%] w-full' : 'w-[300px] flex-shrink-0') : 'flex-1'} h-full overflow-y-auto`}>
<div
className="grid gap-3 h-full w-full"
{/* Webcam Grid (Right or Bottom) */}
<div className="flex-1 overflow-y-auto">
<div className="grid gap-3 w-full"
style={{
gridTemplateColumns: showScreenLayer
? '1fr'
: `repeat(${gridConfig.cols}, 1fr)`,
gridTemplateRows: showScreenLayer
? 'auto'
: `repeat(${gridConfig.rows}, 1fr)`,
justifyContent: 'center',
alignContent: 'center'
}}
>
{/* Self Webcam */}
gridTemplateColumns: `repeat(${showScreenLayer ? 1 : Math.ceil(Math.sqrt(totalParticipants))}, 1fr)`
}}>
{/* Self */}
{selfId && (
<div className="aspect-video min-h-0 min-w-0">
<div className="aspect-video">
<VideoTile
displayName={displayName}
isSelf
audioEnabled={true}
videoEnabled={videoEnabled}
videoEnabled={videoEnabled} // This tracks local toggle
videoRef={localVideoRef}
/>
</div>
)}
{/* Remote Webcam Peers */}
{allParticipants.map(peer => (
<div key={peer.user_id} className="aspect-video min-h-0 min-w-0">
{/* Remote Peers Cam */}
{peers.map(peer => (
<div key={peer.user_id} className="aspect-video">
<VideoTile
displayName={peer.display_name}
audioEnabled={true}
videoEnabled={!!peerVideoUrls[peer.user_id]}
videoSrc={peerVideoUrls[peer.user_id]}
videoEnabled={peersWithCam.has(peer.user_id)}
userId={peer.user_id}
audioEnabled={peersWithAudio.has(peer.user_id)}
onCanvasRef={(uid, canvas) => registerPeerCanvas(uid, 'video', canvas)}
/>
</div>
))}

View file

@ -1,4 +1,5 @@
import { Mic, MicOff } from "lucide-react";
import { useEffect, useRef } from "react";
interface VideoTileProps {
displayName: string;
@ -8,27 +9,58 @@ interface VideoTileProps {
audioEnabled?: boolean;
videoEnabled?: boolean;
isScreenShare?: boolean;
userId?: number;
onCanvasRef?: (userId: number, canvas: HTMLCanvasElement | null) => void;
}
export function VideoTile({
displayName,
isSelf,
videoSrc,
// videoSrc, // Unused
videoRef,
audioEnabled = true,
videoEnabled = false,
isScreenShare = false
isScreenShare = false,
userId,
onCanvasRef
}: VideoTileProps) {
// For self with video ref, use video element bound to the ref
// For remote peers, videoSrc contains blob URL of JPEG frames - use img
const showSelfVideo = isSelf && videoEnabled && videoRef;
const showRemoteMedia = !isSelf && videoSrc;
const showPlaceholder = !showSelfVideo && !showRemoteMedia;
const canvasRef = useRef<HTMLCanvasElement | null>(null);
// Register canvas if applicable
const onCanvasRefRef = useRef(onCanvasRef);
// Update ref when prop changes
useEffect(() => {
onCanvasRefRef.current = onCanvasRef;
}, [onCanvasRef]);
// Use a callback ref to handle canvas mounting/unmounting reliably
const setCanvasRef = (node: HTMLCanvasElement | null) => {
canvasRef.current = node;
if (node) {
if (!isSelf && userId && onCanvasRefRef.current) {
onCanvasRefRef.current(userId, node);
}
} else {
// Cleanup on unmount
if (!isSelf && userId && onCanvasRefRef.current) {
onCanvasRefRef.current(userId, null);
}
}
};
// For self or local preview (using video element)
const showVideoElement = (isSelf || (isScreenShare && !userId)) && videoEnabled && videoRef;
// For remote peers (WebCodecs via Canvas)
const showRemoteCanvas = !isSelf && userId && onCanvasRef && videoEnabled;
const showPlaceholder = !showVideoElement && !showRemoteCanvas;
return (
<div className="relative bg-[#3C4043] rounded-lg overflow-hidden flex items-center justify-center w-full h-full">
{/* Self Video (webcam stream) */}
{showSelfVideo && (
<div className="relative bg-[#202124] rounded-lg overflow-hidden flex items-center justify-center w-full h-full border border-white/5 shadow-sm">
{/* Video Element (Self Cam or Local Screen Preview) */}
{showVideoElement && (
<video
ref={videoRef as React.RefObject<HTMLVideoElement>}
autoPlay
@ -38,12 +70,11 @@ export function VideoTile({
/>
)}
{/* Remote Video/Screen (JPEG blob URLs) */}
{showRemoteMedia && (
<img
src={videoSrc}
alt={displayName}
className={`w-full h-full object-contain bg-black`}
{/* Remote Video (Canvas for WebCodecs) */}
{showRemoteCanvas && (
<canvas
ref={setCanvasRef}
className={`w-full h-full object-contain bg-black ${!isScreenShare ? 'scale-x-[-1]' : ''}`}
/>
)}
@ -53,22 +84,27 @@ export function VideoTile({
<div className={`w-16 h-16 rounded-full flex items-center justify-center text-2xl font-bold text-white ${isSelf ? 'bg-[#5f6368]' : 'bg-[#5865F2]'}`}>
{displayName.charAt(0).toUpperCase()}
</div>
{!videoEnabled && <span className="text-white/50 text-xs">Video Off</span>}
</div>
)}
{/* Audio indicator */}
<div className="absolute top-2 right-2 z-20">
<div className={`p-1.5 rounded-full ${audioEnabled ? 'bg-black/50' : 'bg-red-500'}`}>
<div className={`p-1.5 rounded-full ${audioEnabled ? 'bg-black/40' : 'bg-red-500/90'}`}>
{audioEnabled ? <Mic size={14} className="text-white" /> : <MicOff size={14} className="text-white" />}
</div>
</div>
{/* Name */}
<div className="absolute bottom-3 left-3 z-30">
<div className="bg-black/60 backdrop-blur-sm px-3 py-1.5 rounded-md text-white text-sm font-medium border border-white/10 flex items-center gap-2">
<span className="truncate max-w-[150px]">{displayName}</span>
{isSelf && !isScreenShare && <span className="opacity-60 text-xs bg-white/10 px-1 rounded">(You)</span>}
<div className="absolute bottom-3 left-3 right-3 z-30 flex justify-between items-end">
<div className="bg-black/60 backdrop-blur-sm px-2.5 py-1 rounded-md text-white text-xs font-medium border border-white/10 flex items-center gap-2 truncate max-w-[80%]">
<span className="truncate">{displayName}</span>
</div>
{isSelf && !isScreenShare && (
<span className="bg-white/10 text-[10px] text-white/70 px-1.5 py-0.5 rounded ml-2 whitespace-nowrap">
You
</span>
)}
</div>
</div>
);

View file

@ -3,8 +3,47 @@ import ReactDOM from 'react-dom/client'
import App from './App'
import './index.css'
ReactDOM.createRoot(document.getElementById('root') as HTMLElement).render(
<React.StrictMode>
<App />
</React.StrictMode>
)
class ErrorBoundary extends React.Component<any, { hasError: boolean, error: any }> {
    constructor(props: any) {
        super(props);
        // Healthy by default; flipped by getDerivedStateFromError.
        this.state = { hasError: false, error: null };
    }

    // React invokes this during render when a descendant throws; the returned
    // state switches us to the fallback UI on the next pass.
    static getDerivedStateFromError(error: any) {
        return { hasError: true, error };
    }

    // Commit-phase hook: log the error plus component stack for debugging.
    componentDidCatch(error: any, errorInfo: any) {
        console.error("Uncaught error:", error, errorInfo);
    }

    render() {
        if (!this.state.hasError) {
            return this.props.children;
        }
        // Fallback panel — same element tree the JSX version produced,
        // expressed with createElement.
        const message = this.state.error && this.state.error.toString();
        return React.createElement(
            'div',
            { style: { padding: 20, color: 'white', background: '#333' } },
            React.createElement('h1', null, 'Something went wrong.'),
            React.createElement('pre', { style: { whiteSpace: 'pre-wrap' } }, message)
        );
    }
}
// Renderer entry point: mount the React tree. The logging and try/catch exist
// to surface otherwise-silent white-screen failures during startup.
console.log("[JustTalk] Renderer process started");
try {
// NOTE(review): assumes #root exists in index.html — the `as HTMLElement`
// cast would hide a null element until runtime.
const root = ReactDOM.createRoot(document.getElementById('root') as HTMLElement);
console.log("[JustTalk] Root created, rendering App...");
// ErrorBoundary shows a visible error panel instead of a blank window if
// App throws during render.
root.render(
<React.StrictMode>
<ErrorBoundary>
<App />
</ErrorBoundary>
</React.StrictMode>
);
} catch (e) {
// createRoot/render can throw synchronously (e.g. missing #root).
console.error("[JustTalk] Failed to render root:", e);
}

View file

@ -0,0 +1,281 @@
// Minimal browser-safe event emitter. Node's 'events' module is usually
// polyfilled by Vite/Webpack, but we avoid depending on that here and use a
// small local implementation instead.
// Payload shapes vary per event, so listeners accept arbitrary args.
// (`any[]` rather than `unknown[]` so typed handlers like `(f: EncodedFrame) => void`
// remain assignable — the broad `Function` type is avoided per lint rules.)
type Listener = (...args: any[]) => void;

/**
 * Minimal browser-safe event emitter (avoids relying on Node's 'events'
 * module being polyfilled in the renderer bundle).
 */
class SimpleEventEmitter {
  // event name -> subscribed listeners, in subscription order
  private listeners: { [key: string]: Listener[] } = {};

  /**
   * Subscribe `listener` to `event`.
   * @returns an unsubscribe function (convenience wrapper around `off`).
   */
  on(event: string, listener: Listener) {
    if (!this.listeners[event]) this.listeners[event] = [];
    this.listeners[event].push(listener);
    return () => this.off(event, listener);
  }

  /** Remove a previously registered listener; no-op if event/listener is absent. */
  off(event: string, listener: Listener) {
    if (!this.listeners[event]) return;
    this.listeners[event] = this.listeners[event].filter(l => l !== listener);
  }

  /**
   * Invoke every listener registered for `event` with `args`. A throwing
   * listener is logged and does not prevent later listeners from running.
   */
  emit(event: string, ...args: any[]) {
    if (!this.listeners[event]) return;
    this.listeners[event].forEach(l => {
      try { l(...args); }
      catch (e) { console.error(`Error in event listener for ${event}:`, e); }
    });
  }
}
// One encoded media chunk plus the metadata the UDP sender/receiver needs to
// fragment, reassemble and decode it.
export interface EncodedFrame {
type: 'video' | 'audio'; // which encoder family produced this chunk
data: Uint8Array; // encoded bytes copied out of the WebCodecs chunk
isKeyFrame: boolean; // true when the chunk's type === 'key'
timestamp: number; // encoder chunk timestamp (WebCodecs units — presumably microseconds; TODO confirm)
duration?: number; // optional chunk duration forwarded from the encoder
streamType?: 'video' | 'screen'; // Added for dual stream support
}
export class MediaEngine extends SimpleEventEmitter {
private videoEncoder: VideoEncoder | null = null;
private screenEncoder: VideoEncoder | null = null; // Separate encoder for screen
private audioEncoder: AudioEncoder | null = null;
// Decoders: Map<userId, Decoder> -> Now needs to distinguish stream types
// We can use keys like "userId-video" and "userId-screen"
private videoDecoders: Map<string, VideoDecoder> = new Map();
// Per-user audio decoders (string-keyed like videoDecoders above).
private audioDecoders: Map<string, AudioDecoder> = new Map();
private videoConfig: VideoEncoderConfig = {
codec: 'avc1.42001f', // H.264 Baseline Profile Level 3.1 (720p safe)
width: 1280,
height: 720,
bitrate: 2_000_000,
framerate: 30,
latencyMode: 'realtime',
avc: { format: 'annexb' }
};
private screenConfig: VideoEncoderConfig = {
// High Profile Level 4.2
codec: 'avc1.64002a',
width: 1920,
height: 1080,
bitrate: 2_000_000, // Reduced to 2 Mbps for better stability/FPS
framerate: 30,
latencyMode: 'realtime', // Changed from 'quality' to 'realtime' for lower latency
avc: { format: 'annexb' }
};
// Audio Config
private audioConfig: AudioEncoderConfig = {
codec: 'opus',
sampleRate: 48000,
numberOfChannels: 1,
bitrate: 32000
};
// Eagerly creates and configures all three encoders (webcam, screen, audio).
// Each init method catches and logs its own failures, so a missing codec
// disables only that one path instead of crashing the engine.
constructor() {
super();
this.initializeVideoEncoder();
this.initializeScreenEncoder();
this.initializeAudioEncoder();
}
private initializeVideoEncoder() {
    // Webcam encoder; errors are logged and leave the engine otherwise usable.
    try {
        const encoder = new VideoEncoder({
            output: (chunk, _metadata) => {
                // Copy the encoded bytes out before the chunk is reclaimed.
                const data = new Uint8Array(chunk.byteLength);
                chunk.copyTo(data);
                // With 'annexb', SPS/PPS should be in the keyframe chunk data.
                this.emit('encoded-video', {
                    type: 'video',
                    streamType: 'video',
                    data: data,
                    isKeyFrame: chunk.type === 'key',
                    timestamp: chunk.timestamp,
                    duration: chunk.duration
                } as EncodedFrame);
            },
            error: (e) => console.error('VideoEncoder error:', e)
        });
        this.videoEncoder = encoder;
        encoder.configure(this.videoConfig);
        console.log('[MediaEngine] VideoEncoder configured:', this.videoConfig);
    } catch (e) {
        console.error('[MediaEngine] Failed to init VideoEncoder:', e);
    }
}
private initializeScreenEncoder() {
try {
this.screenEncoder = new VideoEncoder({
output: (chunk, _metadata) => {
const buffer = new Uint8Array(chunk.byteLength);
chunk.copyTo(buffer);
this.emit('encoded-video', {
type: 'video',
streamType: 'screen',
data: buffer,
isKeyFrame: chunk.type === 'key',
timestamp: chunk.timestamp,
duration: chunk.duration
} as EncodedFrame);
},
error: (e) => console.error('ScreenEncoder error:', e),
});
this.screenEncoder.configure(this.screenConfig);
console.log('[MediaEngine] ScreenEncoder configured:', this.screenConfig);
} catch (e) {
console.error('[MediaEngine] Failed to init ScreenEncoder:', e);
}
}
private initializeAudioEncoder() {
try {
this.audioEncoder = new AudioEncoder({
output: (chunk, _metadata) => {
const buffer = new Uint8Array(chunk.byteLength);
chunk.copyTo(buffer);
this.emit('encoded-audio', {
type: 'audio',
data: buffer,
isKeyFrame: chunk.type === 'key',
timestamp: chunk.timestamp,
duration: chunk.duration
} as EncodedFrame);
},
error: (e) => console.error('[MediaEngine] AudioEncoder error:', e),
});
this.audioEncoder.configure(this.audioConfig);
console.log('[MediaEngine] AudioEncoder configured:', this.audioConfig);
} catch (e) {
console.error('[MediaEngine] Failed to init AudioEncoder:', e);
}
}
// --- Video Encoding ---
encodeVideoFrame(frame: VideoFrame, streamType: 'video' | 'screen' = 'video') {
const encoder = streamType === 'screen' ? this.screenEncoder : this.videoEncoder;
if (encoder && encoder.state === 'configured') {
// Force keyframe every 2 seconds (60 frames)
const keyFrame = frame.timestamp % 2000000 < 33000;
encoder.encode(frame, { keyFrame });
frame.close();
} else {
frame.close();
console.warn(`[MediaEngine] ${streamType === 'screen' ? 'ScreenEncoder' : 'VideoEncoder'} not ready`);
}
}
// --- Video Decoding ---
decodeVideoChunk(chunkData: Uint8Array, userId: number, isKeyFrame: boolean, timestamp: number, streamType: 'video' | 'screen' = 'video') {
const decoderKey = `${userId}-${streamType}`;
let decoder = this.videoDecoders.get(decoderKey);
if (!decoder) {
decoder = new VideoDecoder({
output: (frame) => {
this.emit('decoded-video', { userId, frame, streamType });
},
error: (e) => console.error(`VideoDecoder error (${decoderKey}):`, e),
});
// Configure based on stream type
// Note: Decoders are usually more flexible, but giving a hint helps.
// Screen share uses High Profile, Video uses Baseline.
const config: VideoDecoderConfig = streamType === 'screen'
? { codec: 'avc1.64002a', optimizeForLatency: false }
: { codec: 'avc1.42001f', optimizeForLatency: true };
decoder.configure(config);
this.videoDecoders.set(decoderKey, decoder);
console.log(`[MediaEngine] Created decoder for ${decoderKey} with codec ${config.codec}`);
}
if (decoder.state === 'configured') {
const chunk = new EncodedVideoChunk({
type: isKeyFrame ? 'key' : 'delta',
timestamp: timestamp,
data: chunkData,
});
try {
decoder.decode(chunk);
} catch (e) {
console.error(`[MediaEngine] Decode error ${decoderKey}:`, e);
}
}
}
// --- Audio ---
// --- Audio (PCM Fallback) ---
encodeAudioData(data: AudioData) {
if (this.audioEncoder && this.audioEncoder.state === 'configured') {
this.audioEncoder.encode(data);
data.close();
} else {
data.close();
// console.warn('[MediaEngine] AudioEncoder not ready');
}
}
decodeAudioChunk(chunkData: Uint8Array, userId: number, timestamp: number) {
const decoderKey = `${userId}-audio`;
let decoder = this.audioDecoders.get(decoderKey);
if (!decoder) {
decoder = new AudioDecoder({
output: (data) => {
this.emit('decoded-audio', { userId, data });
},
error: (e) => console.error(`[MediaEngine] AudioDecoder error (${userId}):`, e)
});
decoder.configure({
codec: 'opus',
sampleRate: 48000,
numberOfChannels: 1
});
this.audioDecoders.set(decoderKey, decoder);
console.log(`[MediaEngine] Created AudioDecoder for ${userId}`);
}
if (decoder.state === 'configured') {
const chunk = new EncodedAudioChunk({
type: 'key', // Opus is usually self-contained
timestamp: timestamp,
data: chunkData,
});
try {
decoder.decode(chunk);
} catch (e) {
console.error(`[MediaEngine] Audio Decode error ${decoderKey}:`, e);
}
}
}
cleanup() {
if (this.videoEncoder && this.videoEncoder.state !== 'closed') this.videoEncoder.close();
if (this.screenEncoder && this.screenEncoder.state !== 'closed') this.screenEncoder.close();
if (this.audioEncoder && this.audioEncoder.state !== 'closed') this.audioEncoder.close();
this.videoDecoders.forEach(d => {
if (d.state !== 'closed') d.close();
});
this.audioDecoders.forEach(d => {
if (d.state !== 'closed') d.close();
});
this.videoDecoders.clear();
this.audioDecoders.clear();
}
}

24
status.txt Normal file
View file

@ -0,0 +1,24 @@
On branch master
Your branch is up to date with 'origin/master'.
Changes not staged for commit:
(use "git add <file>..." to update what will be committed)
(use "git restore <file>..." to discard changes in working directory)
modified: package.json
modified: src/main/index.ts
modified: src/main/network.ts
modified: src/renderer/index.html
modified: src/renderer/src/App.tsx
modified: src/renderer/src/components/Stage.tsx
modified: src/renderer/src/components/VideoTile.tsx
modified: src/renderer/src/main.tsx
Untracked files:
(use "git add <file>..." to include in what will be committed)
current_head_media.ts
previous_head_media.ts
src/renderer/src/utils/
status.txt
no changes added to commit (use "git add" and/or "git commit -a")
--- SERVER STATUS ---