diff --git a/src/rageshake/rageshake.js b/src/rageshake/rageshake.js
index b886f369df..9512f62e42 100644
--- a/src/rageshake/rageshake.js
+++ b/src/rageshake/rageshake.js
@@ -73,7 +73,11 @@ class ConsoleLogger {
 
         // Convert objects and errors to helpful things
         args = args.map((arg) => {
-            if (arg instanceof Error) {
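+            // DOMException messages are often terse on their own; surfacing the
+            // exception's name and (legacy) numeric code makes rageshakes more useful.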
+            if (arg instanceof DOMException) {
+                return `${arg.message} (${arg.name} | ${arg.code})` + (arg.stack ? `\n${arg.stack}` : '');
+            } else if (arg instanceof Error) {
                 return arg.message + (arg.stack ? `\n${arg.stack}` : '');
             } else if (typeof (arg) === 'object') {
                 try {
diff --git a/src/voice/VoiceRecording.ts b/src/voice/VoiceRecording.ts
index c4a0a78ce5..402bd8beca 100644
--- a/src/voice/VoiceRecording.ts
+++ b/src/voice/VoiceRecording.ts
@@ -90,78 +90,105 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
     }
 
     private async makeRecorder() {
-        this.recorderStream = await navigator.mediaDevices.getUserMedia({
-            audio: {
-                channelCount: CHANNELS,
-                noiseSuppression: true, // browsers ignore constraints they can't honour
-                deviceId: CallMediaHandler.getAudioInput(),
-            },
-        });
-        this.recorderContext = new AudioContext({
-            // latencyHint: "interactive", // we don't want a latency hint (this causes data smoothing)
-        });
-        this.recorderSource = this.recorderContext.createMediaStreamSource(this.recorderStream);
-        this.recorderFFT = this.recorderContext.createAnalyser();
+        try {
+            this.recorderStream = await navigator.mediaDevices.getUserMedia({
+                audio: {
+                    channelCount: CHANNELS,
+                    noiseSuppression: true, // browsers ignore constraints they can't honour
+                    deviceId: CallMediaHandler.getAudioInput(),
+                },
+            });
+            this.recorderContext = new AudioContext({
+                // latencyHint: "interactive", // we don't want a latency hint (this causes data smoothing)
+            });
+            this.recorderSource = this.recorderContext.createMediaStreamSource(this.recorderStream);
+            this.recorderFFT = this.recorderContext.createAnalyser();
 
-        // Bring the FFT time domain down a bit. The default is 2048, and this must be a power
-        // of two. We use 64 points because we happen to know down the line we need less than
-        // that, but 32 would be too few. Large numbers are not helpful here and do not add
-        // precision: they introduce higher precision outputs of the FFT (frequency data), but
-        // it makes the time domain less than helpful.
-        this.recorderFFT.fftSize = 64;
+            // Bring the FFT size down a bit. The default is 2048, and it must be a power
+            // of two. We use 64 points because we happen to know down the line we need
+            // fewer than that, but 32 would be too few. Larger values are not helpful
+            // here: they raise the frequency resolution of the FFT, but make the time
+            // domain data we actually consume less useful.
+            this.recorderFFT.fftSize = 64;
 
-        // Set up our worklet. We use this for timing information and waveform analysis: the
-        // web audio API prefers this be done async to avoid holding the main thread with math.
-        const mxRecorderWorkletPath = document.body.dataset.vectorRecorderWorkletScript;
-        if (!mxRecorderWorkletPath) {
-            throw new Error("Unable to create recorder: no worklet script registered");
-        }
-        await this.recorderContext.audioWorklet.addModule(mxRecorderWorkletPath);
-        this.recorderWorklet = new AudioWorkletNode(this.recorderContext, WORKLET_NAME);
-
-        // Connect our inputs and outputs
-        this.recorderSource.connect(this.recorderFFT);
-        this.recorderSource.connect(this.recorderWorklet);
-        this.recorderWorklet.connect(this.recorderContext.destination);
-
-        // Dev note: we can't use `addEventListener` for some reason. It just doesn't work.
-        this.recorderWorklet.port.onmessage = (ev) => {
-            switch (ev.data['ev']) {
-                case PayloadEvent.Timekeep:
-                    this.processAudioUpdate(ev.data['timeSeconds']);
-                    break;
-                case PayloadEvent.AmplitudeMark:
-                    // Sanity check to make sure we're adding about one sample per second
-                    if (ev.data['forSecond'] === this.amplitudes.length) {
-                        this.amplitudes.push(ev.data['amplitude']);
-                    }
-                    break;
+            // Set up our worklet. We use this for timing information and waveform analysis: the
+            // web audio API prefers this be done async to avoid holding the main thread with math.
+            const mxRecorderWorkletPath = document.body.dataset.vectorRecorderWorkletScript;
+            if (!mxRecorderWorkletPath) {
+                // noinspection ExceptionCaughtLocallyJS
+                throw new Error("Unable to create recorder: no worklet script registered");
             }
-        };
+            await this.recorderContext.audioWorklet.addModule(mxRecorderWorkletPath);
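+            // the name passed here must match a processor registered with
+            // registerProcessor() inside the worklet script we just loaded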
+            this.recorderWorklet = new AudioWorkletNode(this.recorderContext, WORKLET_NAME);
 
-        this.recorder = new Recorder({
-            encoderPath, // magic from webpack
-            encoderSampleRate: SAMPLE_RATE,
-            encoderApplication: 2048, // voice (default is "audio")
-            streamPages: true, // this speeds up the encoding process by using CPU over time
-            encoderFrameSize: 20, // ms, arbitrary frame size we send to the encoder
-            numberOfChannels: CHANNELS,
-            sourceNode: this.recorderSource,
-            encoderBitRate: BITRATE,
+            // Connect our inputs and outputs
+            this.recorderSource.connect(this.recorderFFT);
+            this.recorderSource.connect(this.recorderWorklet);
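+            // route the worklet through to the destination so the graph stays
+            // "live" - detached nodes may never have their process() called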
+            this.recorderWorklet.connect(this.recorderContext.destination);
 
-            // We use low values for the following to ease CPU usage - the resulting waveform
-            // is indistinguishable for a voice message. Note that the underlying library will
-            // pick defaults which prefer the highest possible quality, CPU be damned.
-            encoderComplexity: 3, // 0-10, 10 is slow and high quality.
-            resampleQuality: 3, // 0-10, 10 is slow and high quality
-        });
-        this.recorder.ondataavailable = (a: ArrayBuffer) => {
-            const buf = new Uint8Array(a);
-            const newBuf = new Uint8Array(this.buffer.length + buf.length);
-            newBuf.set(this.buffer, 0);
-            newBuf.set(buf, this.buffer.length);
-            this.buffer = newBuf;
-        };
+            // Dev note: `addEventListener` doesn't fire here unless the port is explicitly start()ed; assigning `onmessage` starts the port implicitly.
+            this.recorderWorklet.port.onmessage = (ev) => {
+                switch (ev.data['ev']) {
+                    case PayloadEvent.Timekeep:
+                        this.processAudioUpdate(ev.data['timeSeconds']);
+                        break;
+                    case PayloadEvent.AmplitudeMark:
+                        // Sanity check to make sure we're adding about one sample per second
+                        if (ev.data['forSecond'] === this.amplitudes.length) {
+                            this.amplitudes.push(ev.data['amplitude']);
+                        }
+                        break;
+                }
+            };
+
+            this.recorder = new Recorder({
+                encoderPath, // magic from webpack
+                encoderSampleRate: SAMPLE_RATE,
+                encoderApplication: 2048, // voice (default is "audio")
+                streamPages: true, // this speeds up the encoding process by using CPU over time
+                encoderFrameSize: 20, // ms, arbitrary frame size we send to the encoder
+                numberOfChannels: CHANNELS,
+                sourceNode: this.recorderSource,
+                encoderBitRate: BITRATE,
+
+                // We use low values for the following to ease CPU usage - the resulting waveform
+                // is indistinguishable for a voice message. Note that the underlying library will
+                // pick defaults which prefer the highest possible quality, CPU be damned.
+                encoderComplexity: 3, // 0-10, 10 is slow and high quality.
+                resampleQuality: 3, // 0-10, 10 is slow and high quality
+            });
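+            // with streamPages enabled, opus-recorder hands us encoded Ogg pages
+            // as they are produced; accumulate them into one contiguous buffer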
+            this.recorder.ondataavailable = (a: ArrayBuffer) => {
+                const buf = new Uint8Array(a);
+                const newBuf = new Uint8Array(this.buffer.length + buf.length);
+                newBuf.set(this.buffer, 0);
+                newBuf.set(buf, this.buffer.length);
+                this.buffer = newBuf;
+            };
+        } catch (e) {
+            console.error("Error starting recording: ", e);
+            if (e instanceof DOMException) { // Unhelpful DOMExceptions are common - parse them sanely
+                console.error(`${e.name} (${e.code}): ${e.message}`);
+            }
+
+            // Clean up as best as possible
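+            // Stopping the tracks first releases the user's microphone (and the
+            // browser's recording indicator); the context close can happen async.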
+            if (this.recorderStream) this.recorderStream.getTracks().forEach(t => t.stop());
+            if (this.recorderSource) this.recorderSource.disconnect();
+            if (this.recorder) this.recorder.close();
+            if (this.recorderContext) {
+                // noinspection ES6MissingAwait - not important that we wait
+                this.recorderContext.close();
+            }
+
+            throw e; // rethrow so upstream can handle it
+        }
     }
 
     private get audioBuffer(): Uint8Array {