const toggleCameraButton = document.getElementById('toggleCamera');
const captureFrameButton = document.getElementById('captureFrame');
const captureVideoButton = document.getElementById('captureVideo');
const video = document.getElementById('webcam');
const canvas = document.getElementById('ditheredOutput');
const context = canvas.getContext('2d');
let stream = null;
let isCameraOn = false;
const lowResWidth = 160; // Game Boy Camera resolution: 160×144
const lowResHeight = 144;
const videoLength = 3000; // 3 seconds
video.style.width = lowResWidth + 'px';
video.style.height = lowResHeight + 'px';
// This is a utility function I'm using to try to debug why Safari is claiming "NotSupportedError: mimeType is not supported"
const getSupportedMimeType = possibleTypes => {
for (let i = 0; i < possibleTypes.length; i++) {
if (MediaRecorder.isTypeSupported(possibleTypes[i])) {
return possibleTypes[i];
}
}
return false;
}
// console.log(getSupportedMimeType(['video/webm;codecs=vp8', 'video/webm', 'video/mp4']));
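// Toggle the webcam on and off. Turning it on requests a camera stream, enables the capture buttons, and starts the dithered render loop.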
toggleCameraButton.addEventListener('click', async () => {
if (!isCameraOn) {
try {
let constraints = { video: true };
if (/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent)) {
constraints = {
video: {
width: { ideal: 1280 },
height: { ideal: 720 },
facingMode: 'environment'
}
};
}
stream = await navigator.mediaDevices.getUserMedia(constraints);
video.srcObject = stream;
video.style.display = 'block';
isCameraOn = true;
toggleCameraButton.textContent = 'Turn Camera Off';
captureFrameButton.disabled = false;
captureVideoButton.disabled = false;
captureFrameButton.classList.add('active');
captureVideoButton.classList.add('active');
// Size the canvas and start the render loop once video data is available.
// { once: true } keeps repeated toggles from stacking listeners and extra loops.
video.addEventListener('loadeddata', () => {
canvas.width = lowResWidth;
canvas.height = lowResHeight;
requestAnimationFrame(processFrame);
}, { once: true });
} catch (err) {
console.error('Failed to access the camera: ', err);
}
} else {
stream.getTracks().forEach(track => track.stop());
video.style.display = 'none';
isCameraOn = false;
toggleCameraButton.textContent = 'Turn Camera On';
captureFrameButton.disabled = true;
captureVideoButton.disabled = true;
captureFrameButton.classList.remove('active');
captureVideoButton.classList.remove('active');
}
});
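// Download the current dithered canvas frame as a PNG.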
captureFrameButton.addEventListener('click', () => {
const link = document.createElement('a');
link.href = canvas.toDataURL('image/png');
link.download = 'dithered-frame.png';
link.click();
});
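// Record the dithered canvas for a few seconds and download the result as a video file.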
captureVideoButton.addEventListener('click', () => {
const startRecording = (canvas, duration, frameRate) => {
return new Promise((resolve, reject) => {
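// Capture the canvas as a MediaStream at the requested frame rate and record it with MediaRecorder.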
const canvasStream = canvas.captureStream(frameRate);
if (!canvasStream) {
const errtxt = 'Stream capture from canvas failed.';
console.error(errtxt);
return reject(errtxt);
}
const mimeType = getSupportedMimeType(['video/webm;codecs=vp8', 'video/webm', 'video/mp4']);
if (!mimeType) {
return reject(new Error('MediaRecorder supports none of the candidate MIME types.'));
}
const mediaRecorder = new MediaRecorder(canvasStream, { mimeType });
const chunks = [];
mediaRecorder.ondataavailable = (event) => {
if (event.data.size > 0) {
chunks.push(event.data);
}
};
mediaRecorder.onstop = () => {
const blob = new Blob(chunks, { type: mimeType });
resolve(blob);
};
mediaRecorder.onerror = (event) => {
console.error('MediaRecorder error:', event.error);
reject(event.error);
};
// The capture frame rate is already fixed by canvas.captureStream(frameRate) above,
// and MediaRecorder.videoBitsPerSecond is read-only, so there is nothing to adjust on start.
mediaRecorder.start();
captureVideoButton.disabled = true;
captureVideoButton.textContent = 'Recording...';
setTimeout(() => {
mediaRecorder.stop();
captureVideoButton.disabled = false;
captureVideoButton.textContent = 'Capture Video';
}, duration);
});
};
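// Kick off a recording of the dithered canvas and download the result with an extension that matches the recorded MIME type.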
const handleCaptureVideo = async () => {
if (!isCameraOn) {
console.error('Camera is not active.');
return;
}
try {
captureVideoButton.disabled = true;
const frameRate = 10; // Frames per second for canvas.captureStream
const videoBlob = await startRecording(canvas, videoLength, frameRate);
const videoURL = URL.createObjectURL(videoBlob);
const videoLink = document.createElement('a');
videoLink.href = videoURL;
if (videoBlob.type === 'video/mp4') {
videoLink.download = 'dithered-video.mp4';
} else {
videoLink.download = 'dithered-video.webm';
}
videoLink.click();
// Enable the button again after recording
captureVideoButton.disabled = false;
} catch (error) {
console.error('Error recording video:', error);
captureVideoButton.disabled = false;
}
};
handleCaptureVideo();
});
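// Render loop: crop the camera frame to the canvas aspect ratio, draw it at 160×144, dither it, and schedule the next frame.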
function processFrame() {
// Stop the render loop quietly once the camera has been turned off.
if (!isCameraOn) {
return;
}
// Crop and draw the middle part of the video frame
const videoAspect = video.videoWidth / video.videoHeight;
const canvasAspect = lowResWidth / lowResHeight;
let sx, sy, sw, sh;
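// Choose the largest centered source rectangle that matches the canvas aspect ratio.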
if (videoAspect > canvasAspect) {
sw = video.videoHeight * canvasAspect;
sh = video.videoHeight;
sx = (video.videoWidth - sw) / 2;
sy = 0;
} else {
sw = video.videoWidth;
sh = video.videoWidth / canvasAspect;
sx = 0;
sy = (video.videoHeight - sh) / 2;
}
context.drawImage(video, sx, sy, sw, sh, 0, 0, lowResWidth, lowResHeight);
const frame = context.getImageData(0, 0, lowResWidth, lowResHeight);
applyFloydSteinbergDithering(frame);
context.putImageData(frame, 0, 0);
requestAnimationFrame(processFrame);
}
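// Floyd–Steinberg dithering: quantize each pixel to pure black or white, then
// diffuse the quantization error onto not-yet-processed neighbours
// (7/16 right, 3/16 down-left, 5/16 below, 1/16 down-right).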
function applyFloydSteinbergDithering(imageData) {
const { data, width, height } = imageData;
// Helpers to read and write pixel values. The red channel is used as the
// brightness value; the quantized result is written back to R, G, and B.
const getPixelIndex = (x, y) => (y * width + x) * 4;
const getPixelValue = (x, y) => data[getPixelIndex(x, y)];
const setPixelValue = (x, y, value) => {
const index = getPixelIndex(x, y);
data[index] = value;
data[index + 1] = value;
data[index + 2] = value;
};
// Helper that clamps a value between 0 and 255
const clamp = (value) => Math.max(0, Math.min(255, value));
// Process a single pixel
const processPixel = (x, y) => {
const oldPixel = getPixelValue(x, y);
const newPixel = oldPixel < 128 ? 0 : 255; // Make it 1-bit black and white
setPixelValue(x, y, newPixel);
const quantError = oldPixel - newPixel;
// Spread the error to neighboring pixels
if (x + 1 < width) {
const idx = getPixelIndex(x + 1, y);
data[idx] = clamp(data[idx] + quantError * 7 / 16);
}
if (x - 1 >= 0 && y + 1 < height) {
const idx = getPixelIndex(x - 1, y + 1);
data[idx] = clamp(data[idx] + quantError * 3 / 16);
}
if (y + 1 < height) {
const idx = getPixelIndex(x, y + 1);
data[idx] = clamp(data[idx] + quantError * 5 / 16);
}
if (x + 1 < width && y + 1 < height) {
const idx = getPixelIndex(x + 1, y + 1);
data[idx] = clamp(data[idx] + quantError * 1 / 16);
}
};
// Process each pixel in the image one by one
// FIXME: There has to be a better way to do this, right?
for (let y = 0; y < height; y++) {
for (let x = 0; x < width; x++) {
processPixel(x, y);
}
}
}